test_seg_scan_dsb.py
import sys
import lasagne as nn
import numpy as np
import theano
import pathfinder
import utils
from configuration import config, set_configuration
import theano.tensor as T
import blobs_detection
import logger
import time
import multiprocessing as mp
import buffering
def extract_candidates(predictions_scan, tf_matrix, pid, outputs_path):
print 'computing blobs'
start_time = time.time()
blobs = blobs_detection.blob_dog(predictions_scan[0, 0], min_sigma=1, max_sigma=15, threshold=0.1)
print 'blobs computation time:', (time.time() - start_time) / 60.
print 'n blobs detected:', blobs.shape[0]
blobs_original_voxel_coords = []
for j in xrange(blobs.shape[0]):
blob_j = np.append(blobs[j, :3], [1])
blob_j_original = tf_matrix.dot(blob_j)
blobs_original_voxel_coords.append(blob_j_original)
blobs = np.asarray(blobs_original_voxel_coords)
print blobs.shape
utils.save_pkl(blobs, outputs_path + '/%s.pkl' % pid)
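# Note on extract_candidates above: each detected blob centre (z, y, x) is
# extended to a homogeneous 4-vector (z, y, x, 1) so that tf_matrix (assumed to
# be the 4x4 affine transform supplied by the data iterator) maps it back to the
# patient's original voxel coordinates before the blob list is pickled.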
jobs = []
theano.config.warn_float64 = 'raise'
if len(sys.argv) < 2:
sys.exit("Usage: test_luna_scan.py <configuration_name>")
config_name = sys.argv[1]
set_configuration('configs_seg_scan', config_name)
# predictions path
predictions_dir = utils.get_dir_path('model-predictions', pathfinder.METADATA_PATH)
outputs_path = predictions_dir + '/%s' % config_name
utils.auto_make_dir(outputs_path)
# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s.log' % config_name)
sys.stderr = sys.stdout
# builds model and sets its parameters
model = config().build_model()
x_shared = nn.utils.shared_empty(dim=len(model.l_in.shape))
idx_z = T.lscalar('idx_z')
idx_y = T.lscalar('idx_y')
idx_x = T.lscalar('idx_x')
window_size = config().window_size
stride = config().stride
n_windows = config().n_windows
givens = {}
givens[model.l_in.input_var] = x_shared
get_predictions_patch = theano.function([],
nn.layers.get_output(model.l_out, deterministic=True),
givens=givens,
on_unused_input='ignore')
data_iterator = config().data_iterator
print
print 'Data'
print 'n samples: %d' % data_iterator.nsamples
start_time = time.time()
for n, (x, lung_mask, tf_matrix, pid) in enumerate(
buffering.buffered_gen_threaded(data_iterator.generate(), buffer_size=2)):
print '-------------------------------------'
print n, pid
predictions_scan = np.zeros((1, 1, n_windows * stride, n_windows * stride, n_windows * stride))
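    # The scan is predicted in a sliding-window fashion: the network is assumed to
    # produce a stride**3 output block from each window_size**3 input patch (valid
    # convolutions shrink the patch), so n_windows blocks per axis tile an output
    # volume of (n_windows * stride)**3. Hypothetical numbers: window_size=64,
    # stride=32, n_windows=9 gives a 288**3 prediction volume.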
for iz in xrange(n_windows):
for iy in xrange(n_windows):
for ix in xrange(n_windows):
start_time_patch = time.time()
x_shared.set_value(x[:, :, iz * stride:(iz * stride) + window_size,
iy * stride:(iy * stride) + window_size,
ix * stride:(ix * stride) + window_size])
predictions_patch = get_predictions_patch()
predictions_scan[0, 0,
iz * stride:(iz + 1) * stride,
iy * stride:(iy + 1) * stride,
ix * stride:(ix + 1) * stride] = predictions_patch
if predictions_scan.shape != x.shape:
pad_width = (np.asarray(x.shape) - np.asarray(predictions_scan.shape)) / 2
pad_width = [(p, p) for p in pad_width]
predictions_scan = np.pad(predictions_scan, pad_width=pad_width, mode='constant')
if lung_mask is not None:
predictions_scan *= lung_mask
print 'saved plot'
print 'time since start:', (time.time() - start_time) / 60.
    # keep only candidate-extraction workers that are still running
    jobs = [job for job in jobs if job.is_alive()]
if len(jobs) >= 3:
jobs[0].join()
del jobs[0]
jobs.append(
mp.Process(target=extract_candidates, args=(predictions_scan, tf_matrix, pid, outputs_path)))
jobs[-1].daemon = True
jobs[-1].start()
for job in jobs: job.join()
emails.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from threading import Thread
from watchlist import app, mail
from flask import render_template
from flask_mail import Message
def _send_async_mail(message):
with app.app_context():
mail.send(message)
def send_message(to, subject, sender, template, **kwargs):
message = Message(subject, sender=sender, recipients=[to])
with app.app_context():
message.html = render_template('{0}.html'.format(template), **kwargs)
thr = Thread(target=_send_async_mail, args=[message])
thr.start()
return thr
def send_papers(to, **kwargs):
# send_papers(form_arg, to=current_user.email)
send_message(to=to,
subject='paper tracker {0}'.format(datetime.date.today()),
sender=("yuri", app.config['MAIL_USERNAME']),
template='email',
**kwargs)
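# Example use of send_papers (hypothetical values): the keyword arguments are
# passed straight through to templates/email.html, so e.g.
#
#   send_papers(to='user@example.com', papers=new_papers)
#
# queues the mail on a background thread via _send_async_mail and returns
# immediately; `papers`/`new_papers` is whatever context the template expects.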
extract.py
#!/usr/bin/env python
import pandas as pd
import numpy as np
import multiprocessing
import argparse
import operator
import os
import random
import sys
import time
import subprocess
import pysam
import collections
import warnings
import math
import re
from Bio import SeqIO
base_path = os.path.split(__file__)[0]
def parseargs():
parser = argparse.ArgumentParser(description="Calculate read features")
    parser.add_argument('--bam', help='indexed BAM file of read alignments')
parser.add_argument(
"--pileup",
default='pileup.out',
help="path to pileup file [pileup file generated by samtools mpileup]")
parser.add_argument(
'--output',
help='path to working directory')
parser.add_argument(
"--samtools",
default='samtools',
help='path to samtools')
parser.add_argument(
"--contig",
default='assemblies',
help='path to fasta file of assembled contigs')
parser.add_argument(
"--thread",
type=int,
default=8,
help='Maximum number of threads [default: 8]')
parser.add_argument(
'--jellyfish',
default='jellyfish',
help='path to jellyfish')
parser.add_argument(
"--mlen",
type=int,
default=5000,
help='minimum contig length [default: 5000bp]')
args = parser.parse_args()
return args
def fragment_distribution(samfile):
all_reads = samfile.fetch()
size_freq = collections.defaultdict(int)
for read in all_reads:
if read.rnext == read.tid and read.is_paired:
size = abs(read.isize)
size_freq[size] += 1
return size_freq
def FragMAD(freq):
"""
    calculate the median and median absolute deviation (MAD) of the fragment size distribution
"""
all_size = []
for key, value in freq.items():
all_size.extend([key] * int(value))
median_size = np.median(all_size)
residuals = abs(np.array(all_size) - median_size)
mad_size = 1.4826 * np.median(residuals)
return median_size, mad_size
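# Quick illustration of FragMAD (hypothetical sizes): for fragment lengths
# [200, 210, 220, 230, 1000] the median is 220 and the absolute residuals are
# [20, 10, 0, 10, 780] with median 10, so mad_size = 1.4826 * 10 ~= 14.8;
# the 1.4826 factor makes the MAD a consistent estimator of the standard
# deviation for normally distributed fragment sizes.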
def split_sam(args):
split_command = ' '.join(['sh',
os.path.join(base_path, "split_sam.sh"),
args.contig,
args.bam,
args.output,
args.samtools])
os.system(split_command)
def seq_parse(args):
input = SeqIO.parse(args.contig, "fasta")
contig_seqs = {}
for record in input:
if len(record.seq) >= args.mlen:
contig_seqs[record.id] = str(record.seq)
return contig_seqs
def kmer_parse(seq, pool):
seq_kmer = {"position": [], "KAD": []}
for i in range(len(seq)):
if seq[i:(i + 25)] in pool:
seq_kmer["KAD"].append(pool[seq[i:(i + 25)]])
seq_kmer["position"].append(i + 1)
if (i + 25) >= len(seq):
break
return seq_kmer
def KAD_window_cal(seq_kmer):
KAD_window_dict = {"start_pos": [],
"mean_KAD": [],
"abnormal_KAD_ratio": [],
"dev_KAD": []}
for i in range(300, len(seq_kmer['position']), 100):
KAD_window_dict["start_pos"].append(i)
mean_KAD = np.mean(np.abs(seq_kmer['KAD'][i:i + 100]))
KAD_window_dict["mean_KAD"].append(mean_KAD)
KAD_window_dict["abnormal_KAD_ratio"].append(
np.sum(np.abs(seq_kmer['KAD'][i:i + 100]) > 0.5) / 100)
KAD_window_dict["dev_KAD"].append(
np.sqrt(np.var(np.abs(seq_kmer['KAD'][i:i + 100]))))
return KAD_window_dict
def KAD_feature(args):
seq_data = seq_parse(args)
KAD_dict = {"contig": [],
'start_pos': [],
'mean_KAD': [],
'abnormal_KAD_ratio': [],
'dev_KAD': []}
for contig, seq in seq_data.items():
if len(seq) < args.mlen:
continue
if os.path.exists(os.path.join(args.output, "temp/KAD/KAD_data/",
"{}.KAD".format(str(contig)))):
try:
KAD_data = pd.read_csv(os.path.join(args.output, "temp/KAD/KAD_data/",
"{}.KAD".format(str(contig))), index_col=0, sep="\t")
KAD_data = KAD_data.drop_duplicates(['k-mer'])
except BaseException:
continue
KAD_data.index = KAD_data['k-mer']
KAD_pool = KAD_data.loc[:, 'KAD'].to_dict()
seq_kmer = kmer_parse(seq, KAD_pool)
KAD_window = KAD_window_cal(seq_kmer)
KAD_dict["contig"].extend([contig] * len(KAD_window['start_pos']))
KAD_dict["start_pos"].extend(KAD_window['start_pos'])
KAD_dict["mean_KAD"].extend(KAD_window["mean_KAD"])
KAD_dict["abnormal_KAD_ratio"].extend(
KAD_window["abnormal_KAD_ratio"])
KAD_dict["dev_KAD"].extend(KAD_window["dev_KAD"])
return KAD_dict
def KAD(args, contig, file):
    if os.path.exists(os.path.join(args.output, "temp/KAD/KAD_data/",
                                   "{}.KAD".format(str(contig)))):
        return 0
contig_file = os.path.join(args.output, "temp/split/contigs/", "{}.fa".format(file))
read_file = os.path.join(args.output,
"temp/split/reads/{}.read.fa".format(str(contig)))
# kmer count
outputdir = os.path.join(args.output, "temp/KAD/temp")
contig_command1 = ' '.join([args.jellyfish,
"count -m 25 -o",
os.path.join(outputdir, '{}.jf'.format(str(contig))),
"-s 100M -t 8",
contig_file])
contig_command2 = ' '.join([args.jellyfish,
"dump -c -t -o",
os.path.join(outputdir, '{}_count.txt'.format(str(contig))),
os.path.join(outputdir, '{}.jf'.format(str(contig)))])
os.system(contig_command1)
os.system(contig_command2)
read_command1 = ' '.join([args.jellyfish,
"count -m 25 -o",
os.path.join(outputdir, '{}.read.jf'.format(str(contig))),
"-s 100M -t 8",
read_file])
read_command2 = ' '.join([args.jellyfish,
"dump -c -t -o",
os.path.join(outputdir, '{}_count.read.txt'.format(str(contig))),
os.path.join(outputdir, '{}.read.jf'.format(str(contig)))])
os.system(read_command1)
os.system(read_command2)
assembly_kmer = pd.read_csv(os.path.join(args.output, "temp/KAD/temp/",
"{}_count.txt".format(str(contig))), sep="\t", header=None)
assembly_kmer.index = assembly_kmer[0]
try:
read_kmer = pd.read_csv(os.path.join(args.output, "temp/KAD/temp/",
"{}_count.read.txt".format(str(contig))),
sep="\t", header=None)
read_kmer.index = read_kmer[0]
except BaseException:
# zero reads mapped to contig
return 0
shared_kmer = set(assembly_kmer.loc[assembly_kmer[1] == 1, 0]).intersection(read_kmer.index)
if len(shared_kmer) == 0:
kmer_depth = pd.value_counts(read_kmer.loc[read_kmer[1] > 5, 1]).index[0]
else:
kmer_depth = pd.value_counts(read_kmer.loc[shared_kmer, ][1]).index[0]
assembly_kmer.columns = ['k-mer', 'assembly_count']
read_kmer.columns = ['k-mer', 'read_count']
assembly_kmer.index = range(assembly_kmer.shape[0])
read_kmer.index = range(read_kmer.shape[0])
kmer_result = pd.merge(assembly_kmer, read_kmer, how='outer')
kmer_result = kmer_result.fillna(0)
kmer_result['KAD'] = np.log2((kmer_result['read_count'] + kmer_depth)
/ (kmer_depth * (kmer_result['assembly_count'] + 1)))
kmer_result.loc[(kmer_result['read_count'] == 1) *
(kmer_result['assembly_count'] == 0), 'KAD'] = np.nan
kmer_result = kmer_result.loc[kmer_result['KAD'] == kmer_result['KAD'], ]
kmer_result.loc[:, ['k-mer', 'KAD']].to_csv(
os.path.join(args.output, "temp/KAD/KAD_data/", "{}.KAD".format(str(contig))), sep="\t")
def fragment_coverage_cal(reads, mu, dev, length):
"""
calculate fragment coverage per contig
"""
frag_coverage = np.array([0] * length)
for read in reads:
if read.rnext == read.tid and read.is_proper_pair:
size = abs(read.isize)
if (mu - 3 * dev <= size <= mu + 3 * dev):
if read.next_reference_start < read.reference_start:
start = min(read.next_reference_start,
read.reference_start,
read.reference_end)
end = start + size
frag_coverage[start:end] += 1
return frag_coverage
def window_read_cal(reads, mu, dev):
read_dict = {"start_pos": [], "read_count": [], "proper_read_count": [], "inversion_read_count": [], "clipped_read_count": [],
"supplementary_read_count": [], "discordant_size_count": [], "discordant_loc_count": []}
read_temp = {"num_read": 0, "num_proper": 0, "num_inversion": 0, "num_clipped": 0, "num_supplementary": 0, "num_discordant_size": 0,
"num_discordant_loc": 0}
pos = 0
for read in reads:
new_pos = math.floor((read.reference_start - 300) / 100) * 100 + 300
if read.reference_start < 300:
continue
if pos == 0:
pos = new_pos
elif new_pos != pos:
read_dict["start_pos"].append(pos)
read_dict["read_count"].append(read_temp["num_read"])
read_dict["proper_read_count"].append(read_temp["num_proper"])
read_dict["inversion_read_count"].append(
read_temp["num_inversion"])
read_dict["clipped_read_count"].append(read_temp["num_clipped"])
read_dict["supplementary_read_count"].append(
read_temp["num_supplementary"])
read_dict["discordant_size_count"].append(
read_temp["num_discordant_size"])
read_dict["discordant_loc_count"].append(
read_temp["num_discordant_loc"])
read_temp = {"num_read": 0,
"num_proper": 0,
"num_inversion": 0,
"num_clipped": 0,
"num_supplementary": 0,
"num_discordant_size": 0,
"num_discordant_loc": 0}
pos = new_pos
read_temp["num_read"] += 1
if read.is_paired:
if read.rnext == read.tid:
if read.is_proper_pair:
read_temp["num_proper"] += 1
if (read.is_reverse + read.mate_is_reverse) != 1:
read_temp["num_inversion"] += 1
if not mu - 3 * dev <= abs(read.isize) <= mu + 3 * dev:
read_temp["num_discordant_size"] += 1
else:
read_temp["num_discordant_loc"] += 1
if read.get_cigar_stats()[0][4] > 20:
read_temp["num_clipped"] += 1
if (read.is_supplementary and read.get_cigar_stats()[0][5] > 20):
read_temp["num_supplementary"] += 1
return read_dict
def window_frag_cal(coverage):
"""
    Use a sliding-window approach to smooth out coverage features
"""
coverage = np.array(coverage)
cov = {"pos": [], "coverage": [], "deviation": []}
for i in range(300, len(coverage), 100):
start = i
end = i + 100
cov["coverage"].append(np.mean(coverage[start:end]))
cov["deviation"].append(
np.sqrt(np.var(coverage[start:end])) / np.mean(coverage[start:end]))
cov["pos"].append(start)
if len(coverage) - end <= 300:
break
return cov
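# For orientation (hypothetical contig): window_frag_cal starts windows at
# positions 300, 400, 500, ... and records, for each 100 bp window, the mean
# fragment coverage and its coefficient of variation; the loop stops once fewer
# than ~300 bp of the contig remain, so both contig ends are ignored.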
def contig_pool(samfile):
contig_len = {}
for (ref, lens) in zip(samfile.references, samfile.lengths):
contig_len[ref] = lens
return contig_len
def pileup_window_cal(pileup_dict):
window_dict = {"contig": [], "start_pos": [], "correct_portion": [], "ambiguous_portion": [], "disagree_portion": [],
"deletion_portion": [], "insert_portion": [], "coverage": [], "deviation": []}
for i in range(300, len(pileup_dict['correct']), 100):
start = i
end = i + 100
total = np.sum(pileup_dict['depth'][start:end])
window_dict["contig"].append(pileup_dict["contig"][0])
window_dict["start_pos"].append(start)
window_dict["correct_portion"].append(
np.sum(pileup_dict['correct'][start:end]) / total)
window_dict["ambiguous_portion"].append(
np.sum(pileup_dict["ambiguous"][start:end]) / total)
window_dict["insert_portion"].append(
np.sum(pileup_dict['insert'][start:end]) / total)
window_dict["deletion_portion"].append(
np.sum(pileup_dict['deletion'][start:end]) / total)
window_dict["disagree_portion"].append(
np.sum(pileup_dict['disagree'][start:end]) / total)
window_dict["coverage"].append(
np.mean(pileup_dict["depth"][start:end]))
window_dict["deviation"].append(np.sqrt(np.var(
pileup_dict["depth"][start:end])) / np.mean(pileup_dict["depth"][start:end]))
if len(pileup_dict['correct']) - (i + 100) <= 300:
break
return window_dict
def read_breakpoint_per_contig(samfile, ref, lens):
reads = samfile.fetch(contig=ref)
break_count = {"breakcount" : np.array([0] * lens),
"readcount" : np.array( [0] * lens)}
for read in reads:
ref_end = read.reference_end
ref_start = read.reference_start
read_start = read.query_alignment_start
read_end = read.query_alignment_end
break_count["readcount"][ref_start:ref_end] += 1
if read.is_supplementary:
if re.match('^([0-9]+H)', read.cigarstring):
break_count["breakcount"][read.get_blocks()[0][0]] += 1
else:
if len(read.get_blocks()) == 1:
break_count["breakcount"][read.get_blocks()[0][1] - 1] += 1
else:
break_count["breakcount"][read.get_blocks()[-1][1] - 1] += 1
if read.get_cigar_stats()[0][4] > 0:
if re.match('^([0-9]+S)', read.cigarstring):
break_count["breakcount"][read.get_blocks()[0][0]] += 1
if (read.cigarstring).endswith('S'):
if len(read.get_blocks()) == 1:
break_count["breakcount"][read.get_blocks()[0][1] - 1] += 1
else:
break_count["breakcount"][read.get_blocks()[-1][1] - 1] += 1
data = pd.DataFrame(break_count)
data['position'] = data.index + 1
data['contig'] = ref
data = data.loc[data['breakcount'] > 0, ]
return data
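# CIGAR bookkeeping used above: read.get_cigar_stats()[0][4] is the number of
# soft-clipped bases ('S') and [0][5] the number of hard-clipped bases ('H'), so
# a clipped or supplementary alignment contributes a breakpoint at the first or
# last reference position of its aligned blocks, depending on which end is clipped.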
def window_break_cal(data):
if data.shape[0] == 0:
# return pd.DataFrame(data = {'read_breakpoint_ratio' : None,
# 'contig' : 'contigX',
# 'start_pos' : [1]})
return pd.DataFrame(columns = ['read_breakpoint_ratio', 'contig', 'start_pos'])
data['start_pos'] = [math.floor(x) * 100 + 300 for x in (data['position'] - 300) / 100]
data = data.loc[data['start_pos'] >= 300, ]
data['read_breakpoint_ratio'] = data['read_breakpoint_count'] / data['read_count']
data['index'] = data['contig'] + '_' + [str(int(x)) for x in data['start_pos']]
grouped = data.groupby(['index'])
read_break_ratio = pd.DataFrame(grouped['read_breakpoint_ratio'].max())
read_break_ratio['contig'] = ['_'.join(x.split("_")[:-1]) for x in read_break_ratio.index]
read_break_ratio['start_pos'] = [int(x.split("_")[-1]) for x in read_break_ratio.index]
read_break_ratio.index = range(read_break_ratio.shape[0])
return read_break_ratio
def read_breakpoint_cal(args):
if os.path.exists(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_window.txt")):
return 0
if os.path.exists(os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_base.txt")):
read_breakpoint_data = pd.read_csv(
os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_base.txt"),
sep="\t", index_col=0)
window_read_breakpoint_data = window_break_cal(read_breakpoint_data)
window_read_breakpoint_data.to_csv(
os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_window.txt"),sep="\t")
return 0
samfile = pysam.AlignmentFile(args.bam, "rb")
references = samfile.references
lengths = samfile.lengths
read_breakpoint_pool = {"contig": [],
"position": [],
"read_breakpoint_count": [],
"read_count": []}
for ref, lens in zip(references, lengths):
if lens < args.mlen:
continue
contig_break_data = read_breakpoint_per_contig(samfile, ref, lens)
if contig_break_data.shape[0] > 0:
read_breakpoint_pool["read_breakpoint_count"].extend(
list(contig_break_data['breakcount']))
read_breakpoint_pool["read_count"].extend(
list(contig_break_data['readcount']))
read_breakpoint_pool["contig"].extend(
[ref] * contig_break_data.shape[0])
read_breakpoint_pool["position"].extend(
list(contig_break_data['position']))
read_breakpoint_data = pd.DataFrame(read_breakpoint_pool)
read_breakpoint_data.to_csv(
os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_base.txt"), sep="\t")
window_read_breakpoint_data = window_break_cal(read_breakpoint_data)
window_read_breakpoint_data.to_csv(
os.path.join(args.output,
"temp/read_breakpoint/read_breakpoint_per_window.txt"), sep="\t")
def pileupfile_parse(args):
"""
process pileup file
"""
if os.path.exists(os.path.join(args.output,
"temp/pileup/pileup_feature.txt")):
return 0
if not os.path.exists(args.pileup):
if os.path.exists(os.path.join(args.output,
"temp/pileup/contigs_pipelup.out")):
args.pileup = os.path.join(args.output, "temp/pileup/contigs_pipelup.out")
else:
if not os.path.exists(args.contig):
if os.path.exists(os.path.join(args.output,
"temp/contig/filtered_contigs.fa")):
args.contig = os.path.join(args.output, "temp/contig/filtered_contigs.fa")
else:
sys.stderr.write(f"Error: Can not find assemblies:{args.contig}!\n")
sys.exit(1)
os.makedirs(os.path.join(args.output, "temp/pileup"), exist_ok=True)
pileup_command = ' '.join([args.samtools,
'mpileup -C 50 -A -f',
args.contig,
args.bam,
" | awk", "'", "$3 !=", "\"N\"", "'", ">",
os.path.join(args.output, "temp/pileup/contigs_pipelup.out")])
args.pileup = os.path.join(args.output, "temp/pileup/contigs_pipelup.out")
os.system(pileup_command)
samfile = pysam.AlignmentFile(args.bam, "rb")
contig_len = contig_pool(samfile)
prev_contig = None
pileup_dict = {"contig": [], "correct": [], "ambiguous": [], "insert": [],
"deletion": [], "disagree": [], "depth": []}
window_pileup_dict = {"contig": [], "start_pos": [], "correct_portion": [], "ambiguous_portion": [], "disagree_portion": [],
"deletion_portion": [], "insert_portion": [], "normalized_coverage": [], "normalized_deviation": [], "mean_coverage": []}
for line in open(args.pileup, "r"):
record = line.strip().split('\t')
if contig_len[record[0]] < args.mlen:
continue
if prev_contig is None:
prev_contig = record[0]
if record[0] != prev_contig:
window_data = pileup_window_cal(pileup_dict)
mean_cov = np.mean(window_data["coverage"])
window_pileup_dict["contig"].extend(window_data["contig"])
window_pileup_dict["start_pos"].extend(window_data["start_pos"])
window_pileup_dict["correct_portion"].extend(
window_data["correct_portion"])
window_pileup_dict["ambiguous_portion"].extend(
window_data["ambiguous_portion"])
window_pileup_dict["disagree_portion"].extend(
window_data["disagree_portion"])
window_pileup_dict["deletion_portion"].extend(
window_data["deletion_portion"])
window_pileup_dict["insert_portion"].extend(
window_data["insert_portion"])
window_pileup_dict["normalized_coverage"].extend(
window_data["coverage"] / mean_cov)
window_pileup_dict["normalized_deviation"].extend(
window_data["deviation"])
window_pileup_dict["mean_coverage"].extend(
[mean_cov] * len(window_data["start_pos"]))
pileup_dict = {"contig": [],
"correct": [],
"ambiguous": [],
"insert": [],
"deletion": [],
"disagree": [],
"depth": []}
prev_contig = record[0]
pileup_dict['contig'].append(record[0])
match_detail = record[4]
pileup_dict['correct'].append(match_detail.count('.') + match_detail.count(','))
pileup_dict['ambiguous'].append(match_detail.count('*'))
pileup_dict['insert'].append(match_detail.count("+"))
pileup_dict['deletion'].append(match_detail.count("-"))
pileup_dict['depth'].append(int(record[3]))
        # strip insertion/deletion sequences (e.g. '+2AT', '-1a') before counting mismatches
        st = ''.join(re.split(r'[+-][0-9]+[ATCGatcg]+', match_detail))
numd = st.count('a') + st.count('A') + st.count('t') + st.count('T') + \
st.count('c') + st.count('C') + st.count('g') + st.count('G')
pileup_dict['disagree'].append(numd)
if not os.path.exists(os.path.join(args.output, "temp/pileup")):
os.makedirs(os.path.join(args.output, "temp/pileup"), exist_ok=True)
data = pd.DataFrame(window_pileup_dict)
data.to_csv(os.path.join(args.output, "temp/pileup/pileup_feature.txt"), sep="\t")
return data
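# mpileup symbols counted in pileupfile_parse above (standard samtools mpileup
# output): '.' and ',' are reference matches on the forward/reverse strand, '*'
# is a deleted-base placeholder (counted as "ambiguous" here), '+<n><seq>' and
# '-<n><seq>' encode insertions and deletions, and the A/C/G/T letters left after
# stripping indel sequences are mismatches, counted as "disagree".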
def read_cal(args, mu, dev):
if os.path.exists(os.path.join(args.output,
"temp/read_feature/read_feature.txt")):
return 0
samfile = pysam.AlignmentFile(args.bam, "rb")
references = samfile.references
lengths = samfile.lengths
read_dicts = {"contig": [], "start_pos": [], "read_count": [], "proper_read_count": [], "inversion_read_count": [],
"clipped_read_count": [], "supplementary_read_count": [], "discordant_size_count": [], "discordant_loc_count": [], "length": []}
for ref, lens in zip(references, lengths):
if lens < args.mlen:
continue
contig_reads = samfile.fetch(ref)
read_dict = window_read_cal(contig_reads, mu, dev)
read_dicts["start_pos"].extend(read_dict["start_pos"])
read_dicts["contig"].extend([ref] * len(read_dict["start_pos"]))
read_dicts["read_count"].extend(read_dict["read_count"])
read_dicts["proper_read_count"].extend(read_dict["proper_read_count"])
read_dicts["inversion_read_count"].extend(
read_dict["inversion_read_count"])
read_dicts["clipped_read_count"].extend(
read_dict["clipped_read_count"])
read_dicts["supplementary_read_count"].extend(
read_dict["supplementary_read_count"])
read_dicts["discordant_size_count"].extend(
read_dict["discordant_size_count"])
read_dicts["discordant_loc_count"].extend(
read_dict["discordant_loc_count"])
read_dicts["length"].extend([lens] * len(read_dict["start_pos"]))
data = pd.DataFrame(read_dicts)
data.to_csv(os.path.join(args.output,
"temp/read_feature/read_feature.txt"), sep="\t")
def fragment_cal(args, mu, dev):
if os.path.exists(os.path.join(args.output,
"temp/coverage/fragment_coverage.txt")):
return 0
samfile = pysam.AlignmentFile(args.bam, "rb")
references = samfile.references
lengths = samfile.lengths
frag_dict = {
"contig": [],
"start_pos": [],
"normalized_fragment_coverage": [],
"normalized_fragment_deviation": []}
for ref, lens in zip(references, lengths):
if lens < args.mlen:
continue
reads = samfile.fetch(ref)
frag_coverage = fragment_coverage_cal(reads, mu, dev, lens)
fragcov = window_frag_cal(frag_coverage)
frag_dict["contig"].extend([ref] * len(fragcov['pos']))
frag_dict["start_pos"].extend(fragcov["pos"])
frag_dict["normalized_fragment_coverage"].extend(
fragcov["coverage"] / np.mean(fragcov["coverage"]))
frag_dict["normalized_fragment_deviation"].extend(fragcov["deviation"])
data = pd.DataFrame(frag_dict)
data.to_csv(os.path.join(args.output,
"temp/coverage/fragment_coverage.txt"), sep="\t")
def KAD_cal(args):
if os.path.exists(os.path.join(args.output,
"temp/KAD/KAD_window_data.txt")):
return 0
contig_data = pd.read_csv(os.path.join(args.output,
"temp/split/contig_name.txt"), header=None)
split_data = pd.read_csv(os.path.join(args.output,
"temp/split/split_file_name.txt"), header=None)
data = pd.concat([contig_data, split_data], axis=1)
data.columns = ['contig', 'file']
data.index = data['contig']
contig_file = data.loc[:, 'file'].to_dict()
os.makedirs(os.path.join(args.output, 'temp/KAD/temp'), exist_ok=True)
os.makedirs(os.path.join(args.output, 'temp/KAD/KAD_data'), exist_ok=True)
pool = multiprocessing.Pool(processes=args.thread)
samfile = pysam.AlignmentFile(args.bam, "rb")
contig_len = contig_pool(samfile)
for contig, file in contig_file.items():
if contig_len[contig] < args.mlen:
continue
try:
t = pool.apply_async(func=KAD, args=(args, contig, file,))
except BaseException:
continue
pool.close()
pool.join()
KAD_dict = KAD_feature(args)
KAD_window_data = pd.DataFrame(KAD_dict)
KAD_window_data.to_csv(os.path.join(args.output,
"temp/KAD/KAD_window_data.txt"), sep="\t")
def main(args = None):
if args is None:
args = parseargs()
warnings.filterwarnings("ignore")
os.makedirs(os.path.join(args.output, 'temp', 'read_feature'), exist_ok=True)
os.makedirs(os.path.join(args.output, 'temp', 'coverage'), exist_ok=True)
os.makedirs(os.path.join(args.output, 'temp', 'pileup'), exist_ok=True)
os.makedirs(os.path.join(args.output, 'temp', 'read_breakpoint'), exist_ok=True)
samfile = pysam.AlignmentFile(args.bam, "rb")
size_freq = fragment_distribution(samfile)
mu, dev = FragMAD(size_freq)
pool = [multiprocessing.Process(target=read_cal, args=(args, mu, dev,)),
multiprocessing.Process(target=fragment_cal, args=(args, mu, dev,)),
multiprocessing.Process(target=pileupfile_parse, args=(args,)),
multiprocessing.Process(target=read_breakpoint_cal, args=(args,)),
multiprocessing.Process(target=split_sam, args=(args,))]
for t in pool:
t.start()
for t in pool:
t.join()
KAD_cal(args)
if __name__ == "__main__":
main()
leonbot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# leonbot.py: LeonBot trial
#
# Copyright 2015 Tennessee Carmel-Veilleux <veilleux@tentech.ca>
#
from threading import Thread
import atexit
import Queue
import sys
import pygame
import time
import smbus
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
from Adafruit_PWM_Servo_Driver import PWM
SERVO_HAT_I2C_ADDR = 0x41
#MODE = "joystick"
#MODE = "autonomous"
class VL6180XSuperBasicDriver(object):
SYSRANGE__START = 0x18
SYSRANGE__INTERMEASUREMENT_PERIOD = 0x1B
SYSALS__INTERMEASUREMENT_PERIOD = 0x3E
SYSALS__START = 0x38
SYSTEM__INTERRUPT_CLEAR = 0x15
RESULT__INTERRUPT_STATUS_GPIO = 0x4F
RESULT__RANGE_VAL = 0x62
RESULT__RANGE_STATUS = 0x4D
RESULT__ALS_STATUS = 0x4E
RESULT__ALS_VAL = 0x50
def __init__(self, busnum=1, i2c_addr=0x29):
self.i2c_addr = i2c_addr
self.bus = smbus.SMBus(busnum)
def get_register(self, reg_address):
a1 = (reg_address >> 8) & 0xFF
a0 = reg_address & 0xFF
self.bus.write_i2c_block_data(self.i2c_addr, a1, [a0])
data = self.bus.read_byte(self.i2c_addr)
return data
def get_register_16bit(self, reg_address):
a1 = (reg_address >> 8) & 0xFF
a0 = reg_address & 0xFF
self.bus.write_i2c_block_data(self.i2c_addr, a1, [a0])
data0 = self.bus.read_byte(self.i2c_addr)
data1 = self.bus.read_byte(self.i2c_addr)
return (data0 << 8) | (data1 & 0xFF)
def set_register(self, reg_address, data):
a1 = (reg_address >> 8) & 0xFF
a0 = reg_address & 0xFF
self.bus.write_i2c_block_data(self.i2c_addr, a1, [a0, (data & 0xFF)])
def set_register_16bit(self, reg_address, data):
a1 = (reg_address >> 8) & 0xFF
a0 = reg_address & 0xFF
d1 = (data >> 8) & 0xFF
d0 = data & 0xFF
self.bus.write_i2c_block_data(self.i2c_addr, a1, [a0, d1, d0])
def start_ranging(self, meas_period_ms=100, continuous=False):
self.set_register(self.SYSRANGE__INTERMEASUREMENT_PERIOD, (meas_period_ms/10))
mode = ((1 if continuous else 0) * 2) | 1
self.set_register(self.SYSRANGE__START, 0)
time.sleep(0.1)
self.set_register(self.SYSTEM__INTERRUPT_CLEAR, 0xFF)
self.set_register(self.SYSRANGE__START, mode)
def read_range_mm(self):
return self.get_register(self.RESULT__RANGE_VAL)
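# Minimal usage sketch for the driver above (assumes a VL6180X on I2C bus 1 at
# the default address 0x29, matching __init__):
#
#   ranger = VL6180XSuperBasicDriver()
#   ranger.start_ranging(meas_period_ms=100, continuous=True)
#   time.sleep(0.2)
#   print ranger.read_range_mm()  # range in millimetres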
class ControlThread(object):
def __init__(self, main_motor_hat, params=None):
# Motor hat for locomotion
self.main_motor_hat = main_motor_hat
self.motors = {}
self.motors["b_left"] = {"motor" : self.main_motor_hat.getMotor(1), "target" : 0.0, "scaler" : 1.0}
self.motors["f_left"] = {"motor" : self.main_motor_hat.getMotor(2), "target" : 0.0, "scaler" : -1.0}
self.motors["b_right"] = {"motor" : self.main_motor_hat.getMotor(3), "target" : 0.0, "scaler" : -1.0}
self.motors["f_right"] = {"motor" : self.main_motor_hat.getMotor(4), "target" : 0.0, "scaler" : 1.0}
self.params = params if params is not None else {}
self.servo_controller = PWM(SERVO_HAT_I2C_ADDR)
self.servo_pwm_freq_hz = self.params.get("servo_pwm_freq_hz", 48)
self.servo_max_angle_deg = self.params.get("servo_max_angle_deg", 45.0)
self.servo_max_angle_us = self.params.get("servo_max_angle_us", 400)
self.servo_neutral_us = self.params.get("servo_neutral_us", 1520)
# Correction factor to apply to each duration to match the
# internal oscillator of the PCA9685 on the Servo HAT. The internal
# RC clock is supposed to be 25MHz, but it can be off
self.servo_clock_k = self.params.get("servo_clock_k", 1.073446)
self.servo_controller.setPWMFreq(self.servo_pwm_freq_hz)
# Queue for input events
self.input_queue = Queue.Queue(1)
self.thread = Thread(target=self.process)
def get_input_queue(self):
return self.input_queue
def set_motor(self, motor, target):
true_target = target * motor["scaler"]
if motor["target"] == 0.0:
if true_target > 0.0:
motor["motor"].run(Adafruit_MotorHAT.FORWARD)
motor["motor"].setSpeed(int(abs(true_target) * 255))
elif true_target < 0.0:
motor["motor"].run(Adafruit_MotorHAT.BACKWARD)
motor["motor"].setSpeed(int(abs(true_target) * 255))
elif motor["target"] >= 0.0:
if true_target > 0.0:
motor["motor"].setSpeed(int(abs(true_target) * 255))
elif true_target < 0.0:
motor["motor"].run(Adafruit_MotorHAT.BACKWARD)
motor["motor"].setSpeed(int(abs(true_target) * 255))
else:
motor["motor"].run(Adafruit_MotorHAT.RELEASE)
motor["motor"].setSpeed(0)
elif motor["target"] < 0:
if true_target > 0.0:
motor["motor"].run(Adafruit_MotorHAT.FORWARD)
motor["motor"].setSpeed(int(abs(true_target) * 255))
elif true_target < 0.0:
motor["motor"].setSpeed(int(abs(true_target) * 255))
else:
motor["motor"].run(Adafruit_MotorHAT.RELEASE)
motor["motor"].setSpeed(0)
motor["target"] = true_target
def set_servo_pulse(self, channel, angle_deg):
pulse_len_us = float(1e6) # 1,000,000 us per second at 1Hz
pulse_len_us /= float(self.servo_pwm_freq_hz) # us per pulse
duration_us = self.servo_clock_k * (self.servo_neutral_us + ((float(angle_deg) / float(self.servo_max_angle_deg)) * float(self.servo_max_angle_us)))
duration_counts = (duration_us / pulse_len_us) * 4095
#print "pulse_len_us: %.3f, duration_us=%.3f" % (pulse_len_us, duration_us)
self.servo_controller.setPWM(channel, 0, int(duration_counts))
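    # Example with the defaults above (servo_pwm_freq_hz=48, neutral 1520 us,
    # +/-45 deg mapped to +/-400 us, servo_clock_k ~= 1.073): at 0 deg the pulse is
    # about 1.073 * 1520 ~= 1632 us within a 1e6/48 ~= 20833 us period, i.e.
    # roughly (1632 / 20833) * 4095 ~= 321 counts written to the PCA9685 channel.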
def process(self):
running = True
while running:
event = self.input_queue.get()
if "quit" in event:
running = False
for key, motor in self.motors.items():
motor["motor"].run(Adafruit_MotorHAT.RELEASE)
motor["motor"].setSpeed(0)
elif "left_y" in event:
self.set_motor(self.motors["f_left"], event["left_y"])
self.set_motor(self.motors["b_left"], event["left_y"])
self.set_motor(self.motors["f_right"], event["right_y"])
self.set_motor(self.motors["b_right"], event["right_y"])
elif "servo_chan" in event:
servo_chan = event["servo_chan"]
angle_deg = event["angle_deg"]
if angle_deg > self.servo_max_angle_deg:
angle_deg = self.servo_max_angle_deg
elif angle_deg < -self.servo_max_angle_deg:
angle_deg = -self.servo_max_angle_deg
self.set_servo_pulse(servo_chan, angle_deg)
def start(self):
self.thread.start()
class InputThread(object):
def __init__(self):
self.thread = Thread(target=self.process)
self.thread.daemon = False
self.joystick = pygame.joystick.Joystick(0)
self.joystick.init()
self.keep_alive = True
self.listeners = []
self.left_y_axis_idx = 1
#self.right_y_axis_idx = 2
self.right_y_axis_idx = 3
self.quit_button_idx = 8
self.range_less_button_idx = 0
self.elev_less_button_idx = 1
self.range_more_button_idx = 2
self.elev_more_button_idx = 3
self.left_y_axis_multiplier = -1.0
self.right_y_axis_multiplier = -1.0
self.elev_servo_angle = 0.0
self.range_servo_angle = 0.0
self.current_range_axis = 0.0
self.current_elev_axis = 0.0
self.current_left_y = 0.0
self.current_right_y = 0.0
def process(self):
axes = [ 0.0 ] * self.joystick.get_numaxes()
buttons = [ False ] * self.joystick.get_numbuttons()
last_time = 0
interval = 0.1
# Set an interval to kick the event loop to get latest value of axes
pygame.time.set_timer(pygame.USEREVENT + 1, int((interval / 2.0) * 1000))
old_buttons = []
while self.keep_alive:
event = pygame.event.wait()
if event.type == pygame.QUIT:
self.keep_alive = False
self.dispatch(None, None)
elif event.type == pygame.JOYAXISMOTION:
e = event.dict
axes[e['axis']] = e['value']
elif event.type in [pygame.JOYBUTTONUP, pygame.JOYBUTTONDOWN ]:
e = event.dict
buttons[e['button']] ^= True
if buttons[self.quit_button_idx]:
self.dispatch(None, None)
self.keep_alive = False
            # Publish on a timer (and on button-state changes): purely event-driven
            # publishing can fill the queue faster than the motor thread drains it.
if ((time.time() - last_time) > interval) or sum(buttons) != sum(old_buttons):
old_buttons = buttons[:]
self.dispatch(axes, buttons)
last_time = time.time()
def send_dispath_update(self, dispatch_update):
print dispatch_update
for listener in self.listeners:
listener.put(dispatch_update)
def dispatch(self, axes, buttons):
if axes is not None:
left_y = axes[self.left_y_axis_idx] * self.left_y_axis_multiplier
right_y = axes[self.right_y_axis_idx] * self.right_y_axis_multiplier
if buttons[self.range_less_button_idx]:
range_axis = 1
elif buttons[self.range_more_button_idx]:
range_axis = -1
else:
range_axis = 0
if buttons[self.elev_less_button_idx]:
elev_axis = 1
elif buttons[self.elev_more_button_idx]:
elev_axis = -1
else:
elev_axis = 0
if left_y != self.current_left_y or right_y != self.current_right_y:
self.current_left_y = left_y
self.current_right_y = right_y
dispatch_update = {"left_y": left_y, "right_y": right_y}
self.send_dispath_update(dispatch_update)
elif range_axis != self.current_range_axis or elev_axis != self.current_elev_axis:
if range_axis != 0:
self.range_servo_angle += 5.0 if range_axis > 0 else -5.0
dispatch_update = {"servo_chan": 0, "angle_deg": self.range_servo_angle}
self.send_dispath_update(dispatch_update)
if elev_axis != 0:
self.elev_servo_angle += 5.0 if elev_axis > 0 else -5.0
dispatch_update = {"servo_chan": 1, "angle_deg": self.elev_servo_angle}
self.send_dispath_update(dispatch_update)
else:
dispatch_update = {"quit": True}
self.send_dispath_update(dispatch_update)
def add_listener(self, listener):
self.listeners.append(listener)
def start(self):
self.thread.start()
class AutonomousModeController(object):
def __init__(self, motor_controller, params):
# Range sensor interface
self.vl6180 = VL6180XSuperBasicDriver()
self.vl6180.start_ranging(100, True)
self.servo_controller = PWM(SERVO_HAT_I2C_ADDR)
self.motor_controller = motor_controller
self.obstacle_thresh_mm = params.get("obstacle_thresh_mm", 150.0)
self.forward_speed_percent = params.get("forward_speed_percent", 50.0)
self.reverse_speed_percent = params.get("reverse_speed_percent", 40.0)
self.rotation_speed_percent = params.get("rotation_speed_percent", 60.0)
self.rotation_duration_sec = params.get("rotation_duration_sec", 2.0)
self.reverse_duration_sec = params.get("reverse_duration_sec", 1.0)
self.start_time = time.time()
self.servo_start_time = time.time()
self.servo_pos_idx = 0
self.servo_pos_deg = [-10.0, 0.0, 10.0, 0.0]
#self.servo_pos_deg = [0.0]
self.servo_interval_sec = 0.4
self.state = "judge_obstacle"
self.thread = Thread(target=self.process, name="AutonomousModeController")
self.running = True
def set_state(self, new_state):
print "%s->%s" % (self.state, new_state)
self.state = new_state
def _handle_servo(self):
# Early return if not ready to change servo position.
if (time.time() - self.servo_start_time) < self.servo_interval_sec:
return
# Get next servo position
self.servo_pos_idx += 1
if self.servo_pos_idx >= len(self.servo_pos_deg):
self.servo_pos_idx = 0
servo_pos_deg = self.servo_pos_deg[self.servo_pos_idx]
dispatch_update = {"servo_chan": 0, "angle_deg": servo_pos_deg}
self.motor_controller.put(dispatch_update, block=True)
self.servo_start_time = time.time()
def process(self):
while self.running:
# Update scanning servo position
self._handle_servo()
if self.state == "judge_obstacle":
range_mm = self.vl6180.read_range_mm()
#print "judge_obstacle, range=%d" % range_mm
if range_mm < 20.0:
self.motor_controller.put({"quit"})
self.running = False
if range_mm < self.obstacle_thresh_mm:
# Saw obstacle, move to reverse
self.set_state("evade_reverse")
self.start_time = time.time()
else:
# Forward if no obstacle
forward_speed = self.forward_speed_percent / 100.0
dispatch_update = {"left_y": forward_speed, "right_y": forward_speed}
self.motor_controller.put(dispatch_update, block=True)
elif self.state == "evade_reverse":
if (time.time() - self.start_time) >= self.reverse_duration_sec:
# If we have finished backing away, go to rotate
self.set_state("evade_rotate")
self.start_time = time.time()
else:
# Reverse while evading
reverse_speed = -self.reverse_speed_percent / 100.0
dispatch_update = {"left_y": reverse_speed, "right_y": reverse_speed}
self.motor_controller.put(dispatch_update, block=True)
elif self.state == "evade_rotate":
# Check for being done
if (time.time() - self.start_time) >= self.rotation_duration_sec:
# If we have finished backing away, go to rotate
self.set_state("judge_obstacle")
self.start_time = time.time()
else:
rotate_speed = self.rotation_speed_percent / 100.0
# FIXME: Always rotating right
dispatch_update = {"left_y": rotate_speed, "right_y": -rotate_speed}
self.motor_controller.put(dispatch_update, block=True)
else:
print "INVALID STATE: %s" % self.state
self.state = "judge_obstacle"
def start(self):
self.thread.start()
def join(self):
self.thread.join()
def turnOffMotors():
global mh
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
def main():
print "Starting..."
# create a default object, no changes to I2C address or frequency
global mh
mh = Adafruit_MotorHAT(addr=0x62)
# recommended for auto-disabling motors on shutdown!
atexit.register(turnOffMotors)
# Get mode from command line
if len(sys.argv) >= 2 and sys.argv[1].lower() == "joystick":
MODE = "joystick"
else:
MODE = "autonomous"
if MODE == "joystick":
pygame.init()
pygame.joystick.init()
#pygame.display.set_mode((1,1))
print "Initialized"
control_thread = ControlThread(mh)
control_thread.start()
if MODE == "joystick":
input_thread = InputThread()
input_thread.add_listener(control_thread.get_input_queue())
input_thread.start()
print "Threads started"
elif MODE == "autonomous":
autonomous_controller = AutonomousModeController(control_thread.get_input_queue(), {})
autonomous_controller.start()
print "Threads started"
autonomous_controller.join()
return 0
if __name__ == '__main__':
try:
print "Sleeping"
time.sleep(1)
print "Waking"
main()
print "Done"
except KeyboardInterrupt:
turnOffMotors()
sys.exit(1)
environmental_kernel.py
import numpy as np
from libmatch.chemical_kernel import Atoms2ChemicalKernelmat
from libmatch.soap import get_Soaps
from libmatch.utils import chunk_list, chunks1d_2_chuncks2d,is_notebook,dummy_queue
import multiprocessing as mp
import signal, psutil, os
import threading
import quippy as qp
if is_notebook():
from tqdm import tqdm_notebook as tqdm_cs
else:
from tqdm import tqdm as tqdm_cs
try:
import numba as nb
nonumba = False
signatureEnv = 'void(double[:, :], uint32[:, :], double[:, :, :], ' \
'uint32[:, :], double[:, :, :], double[:, :])'
@nb.njit(signatureEnv,parallel=True)
def nb_frameprod_upper(result, keys1, vals1, keys2, vals2, chemicalKernelmat):
'''
Computes the environmental matrix between two AlchemyFrame. Only the upper
chemical channels are actually computed. To be compiled with numba.
:param result: np.array. output
:param keys1: np.array 2D. list of keys->(species1,species2), i.e. chemical channels, of AlchemyFrame1.
:param vals1: np.array 3D. [environment center, chemical channel, soap vector].
:param keys2: np.array 2D. list of keys->(species1,species2), i.e. chemical channels, of AlchemyFrame2.
:param vals2: np.array 3D. [environment center, chemical channel, soap vector].
:param chemicalKernelmat: np.array 2D.
:return: None. result is changed by 'reference'.
'''
Nenv1, nA, nL = vals1.shape
Nenv2, nB, nL = vals2.shape
for it in nb.prange(Nenv1):
for jt in nb.prange(Nenv2):
EnvironmentalSimilarity = 0.
for nt in range(nA):
spA = keys1[nt, :]
for mt in range(nB):
spB = keys2[mt, :]
theta1 = chemicalKernelmat[spA[0], spB[0]] * chemicalKernelmat[spA[1], spB[1]]
theta2 = chemicalKernelmat[spA[1], spB[0]] * chemicalKernelmat[spA[0], spB[1]]
if theta1 == 0. and theta2 == 0.:
continue
pp = 0.
for kt in range(nL):
pp += vals1[it, nt, kt] * vals2[jt, mt, kt]
# the symmetry of the chemicalKernel and chemical soap vector is a bit messy
if spA[0] != spA[1] and spB[0] != spB[1]:
EnvironmentalSimilarity += theta1 * pp * 2 + theta2 * pp * 2
elif (spA[0] == spA[1] and spB[0] != spB[1]) or (spA[0] != spA[1] and spB[0] == spB[1]):
EnvironmentalSimilarity += theta1 * pp + theta2 * pp
elif spA[0] == spA[1] and spB[0] == spB[1]:
EnvironmentalSimilarity += theta1 * pp
result[it, jt] = EnvironmentalSimilarity
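    # Bookkeeping behind the weighting above (as far as the upper-triangular
    # channel storage goes): an off-diagonal SOAP channel (a, b), a != b, is stored
    # only once, so its contribution is doubled, and theta2 accounts for matching
    # (a, b) against the swapped channel (b, a); purely diagonal pairs only pick up
    # theta1.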
@nb.njit(signatureEnv,parallel=True)
def nb_frameprod_upper_delta(result, keys1, vals1, keys2, vals2, chemicalKernelmat):
Nenv1, nA, nL = vals1.shape
Nenv2, nB, nL = vals2.shape
mm = np.zeros((nA,), np.int32)
union = np.zeros((nA, 2), np.int32)
for it in nb.prange(nA):
isUnion = False
for jt in range(nB):
if keys1[it][0] == keys2[jt][0] and keys1[it][1] == keys2[jt][1]:
mm[it] = jt
isUnion = True
continue
if isUnion is True:
union[it][0] = keys1[it][0]
union[it][1] = keys1[it][1]
for it in nb.prange(Nenv1):
for jt in nb.prange(Nenv2):
EnvironmentalSimilarity = 0.
for nt in range(nA):
if union[nt, 0] == 0 and union[nt, 1] == 0:
continue
pp = 0.
for kt in range(nL):
pp += vals1[it, nt, kt] * vals2[jt, mm[nt], kt]
if union[nt, 0] == union[nt, 1]:
EnvironmentalSimilarity += pp
else:
EnvironmentalSimilarity += pp * 2
result[it, jt] = EnvironmentalSimilarity
except:
nonumba = True
def framesprod(frames1, frames2=None, chemicalKernelmat=None, frameprodFunc=None, queue=None,dispbar=False):
'''
Computes the environmental matrices between two list of AlchemyFrame.
:param frames1: list of AlchemyFrame.
:param frames2: list of AlchemyFrame.
:param chemicalKernelmat:
    :param frameprodFunc: function to use to compute an environmental kernel matrix
:return: dictionary of environmental kernel matrices -> (i,j):environmentalMatrix(frames1[i],frames2[j])
'''
if queue is None:
if frames2 is None:
Niter = len(frames1)*(len(frames1)+1)/2
else:
Niter = len(frames1)*len(frames2)
queue = dummy_queue(Niter,'Env kernels',dispbar=dispbar)
envkernels = {}
if frames2 is None:
        # when comparing a frame list with itself, only the upper triangle of the
        # global matrix is computed
frames2 = frames1
for it, frame1 in enumerate(frames1):
keys1, vals1 = frame1.get_arrays()
# ii = 0
for jt, frame2 in enumerate(frames2):
if it > jt:
continue
keys2, vals2 = frame2.get_arrays()
kargs = {'keys1': keys1, 'keys2': keys2, 'vals1': vals1, 'vals2': vals2,
'chemicalKernelmat': chemicalKernelmat}
envkernels[(it, jt)] = frameprodFunc(**kargs)
# ii += 1
queue.put(1)
else:
for it, frame1 in enumerate(frames1):
keys1, vals1 = frame1.get_arrays()
# ii = 0
for jt, frame2 in enumerate(frames2):
keys2, vals2 = frame2.get_arrays()
kargs = {'keys1': keys1, 'keys2': keys2, 'vals1': vals1, 'vals2': vals2,
'chemicalKernelmat': chemicalKernelmat}
envkernels[(it, jt)] = frameprodFunc(**kargs)
# ii += 1
queue.put(1)
return envkernels
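# Shape of the result, for orientation: with N frames and frames2=None the dict
# has N * (N + 1) / 2 entries and envkernels[(i, j)] (i <= j) is the
# (n_env_i x n_env_j) environmental kernel matrix between frames i and j.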
def nb_frameprod_upper_multithread(**kargs):
Nenv1, nA, nL = kargs['vals1'].shape
Nenv2, nB, nL = kargs['vals2'].shape
result = np.zeros((Nenv1, Nenv2), dtype=np.float64)
keys1, keys2, vals1, vals2, chemicalKernelmat = [kargs['keys1'], kargs['keys2'], kargs['vals1'], kargs['vals2'], \
kargs['chemicalKernelmat']]
numthreadsTot2nthreads = {2: (2, 1), 4: (2, 2), 6: (3, 2), 9: (3, 3),
12: (4, 3), 16: (4, 4), 25: (5, 5), 36: (6, 6),
48: (7, 7), 64: (8,8), 81: (9,9), 100: (10,10)}
numthreads1, numthreads2 = numthreadsTot2nthreads[4]
chunks1, slices1 = chunk_list(vals1, numthreads1)
chunks2, slices2 = chunk_list(vals2, numthreads2)
chunks = []
for it in range(numthreads1):
for jt in range(numthreads2):
chunks3 = result[slices1[it][0]:slices1[it][-1] + 1, slices2[jt][0]:slices2[jt][-1] + 1]
a = {'result': chunks3, 'chemicalKernelmat': chemicalKernelmat.copy(), 'keys1': keys1.copy(), 'keys2': keys2.copy()}
a.update(**{'vals1': chunks1[it]})
a.update(**{'vals2': chunks2[jt]})
chunks.append(a)
threads = [threading.Thread(target=nb_frameprod_upper, kwargs=chunk) for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
def nb_frameprod_upper_singlethread(**kargs):
Nenv1, nA, nL = kargs['vals1'].shape
Nenv2, nB, nL = kargs['vals2'].shape
result = np.empty((Nenv1, Nenv2), dtype=np.float64)
nb_frameprod_upper(result, **kargs)
return result
def nb_frameprod_upper_delta_singlethread(**kargs):
Nenv1, nA, nL = kargs['vals1'].shape
Nenv2, nB, nL = kargs['vals2'].shape
result = np.empty((Nenv1, Nenv2), dtype=np.float64)
nb_frameprod_upper_delta(result, **kargs)
return result
def nb_frameprod_upper_delta_multithread(**kargs):
Nenv1, nA, nL = kargs['vals1'].shape
Nenv2, nB, nL = kargs['vals2'].shape
result = np.zeros((Nenv1, Nenv2), dtype=np.float64)
keys1, keys2, vals1, vals2, chemicalKernelmat = [kargs['keys1'], kargs['keys2'], kargs['vals1'], kargs['vals2'], \
kargs['chemicalKernelmat']]
numthreadsTot2nthreads = {2: (2, 1), 4: (2, 2), 6: (3, 2), 9: (3, 3),
12: (4, 3), 16: (4, 4), 25: (5, 5), 36: (6, 6),
48: (7, 7), 64: (8,8), 81: (9,9), 100: (10,10)}
numthreads1, numthreads2 = numthreadsTot2nthreads[4]
chunks1, slices1 = chunk_list(vals1, numthreads1)
chunks2, slices2 = chunk_list(vals2, numthreads2)
chunks = []
for it in range(numthreads1):
for jt in range(numthreads2):
chunks3 = result[slices1[it][0]:slices1[it][-1] + 1, slices2[jt][0]:slices2[jt][-1] + 1]
a = {'result': chunks3, 'chemicalKernelmat': chemicalKernelmat.copy(), 'keys1': keys1.copy(), 'keys2': keys2.copy()}
a.update(**{'vals1': chunks1[it]})
a.update(**{'vals2': chunks2[jt]})
chunks.append(a)
threads = [threading.Thread(target=nb_frameprod_upper_delta, kwargs=chunk) for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
def np_frameprod_upper(keys1, vals1, keys2, vals2, chemicalKernelmat):
'''
Computes the environmental matrix between two AlchemyFrame. Simplest implementation, very slow.
    Only the upper chemical channels are actually computed.
:param keys1: np.array 2D. list of keys->(species1,species2), i.e. chemical channels, of AlchemyFrame1.
:param vals1: np.array 3D. [environment center, chemical channel, soap vector].
:param keys2: np.array 2D. list of keys->(species1,species2), i.e. chemical channels, of AlchemyFrame2.
:param vals2: np.array 3D. [environment center, chemical channel, soap vector].
:param chemicalKernelmat: np.array 2D.
:return: np.array 2D. Environmental matrix.
'''
nenv1, Na, Nsoap = vals1.shape
nenv2, Nb, Nsoap = vals2.shape
k = np.zeros((nenv1, nenv2))
for it in range(nenv1):
for jt in range(nenv2):
similarity = 0.
for nt, spA in enumerate(keys1):
for mt, spB in enumerate(keys2):
theta1 = chemicalKernelmat[spA[0], spB[0]] * chemicalKernelmat[spA[1], spB[1]]
theta2 = chemicalKernelmat[spA[1], spB[0]] * chemicalKernelmat[spA[0], spB[1]]
if theta1 == 0. and theta2 == 0.:
continue
pp = np.dot(vals1[it, nt, :], vals2[jt, mt, :])
# the symmetry of the chemicalKernel and chemical soap vector is a bit messy
if spA[0] != spA[1] and spB[0] != spB[1]:
similarity += (theta1 + theta2) * pp * 2
elif (spA[0] == spA[1] and spB[0] != spB[1]) or (spA[0] != spA[1] and spB[0] == spB[1]):
similarity += (theta1 + theta2) * pp
elif spA[0] == spA[1] and spB[0] == spB[1]:
similarity += theta1 * pp
k[it, jt] = similarity
return k
def np_frameprod3_upper(keys1, vals1, keys2, vals2, chemicalKernelmat):
'''
    Computes the environmental matrix between two AlchemyFrame. einsum implementation, ~slow.
    Only the upper chemical channels are actually computed.
:param keys1: np.array 2D. list of keys->(species1,species2), i.e. chemical channels, of AlchemyFrame1.
:param vals1: np.array 3D. [environment center, chemical channel, soap vector].
:param keys2: np.array 2D. list of keys->(species1,species2), i.e. chemical channels, of AlchemyFrame2.
:param vals2: np.array 3D. [environment center, chemical channel, soap vector].
:param chemicalKernelmat: np.array 2D.
:return: np.array 2D. Environmental matrix.
'''
Nenv1, Na, Nsoap = vals1.shape
Nenv2, Nb, Nsoap = vals2.shape
theta = np.zeros((Na, Nb))
for nt, spA in enumerate(keys1):
for mt, spB in enumerate(keys2):
theta1 = chemicalKernelmat[spA[0], spB[0]] * chemicalKernelmat[spA[1], spB[1]]
theta2 = chemicalKernelmat[spA[1], spB[0]] * chemicalKernelmat[spA[0], spB[1]]
if theta1 == 0. and theta2 == 0.:
continue
# the symmetry of the chemicalKernel and chemical soap vector is a bit messy
if spA[0] != spA[1] and spB[0] != spB[1]:
theta[nt, mt] = theta1 * 2 + theta2 * 2
elif (spA[0] == spA[1] and spB[0] != spB[1]) or (spA[0] != spA[1] and spB[0] == spB[1]):
theta[nt, mt] = theta1 + theta2
elif spA[0] == spA[1] and spB[0] == spB[1]:
theta[nt, mt] = theta1
k = np.einsum('kl,iko,jlo->ij', theta, vals1, vals2, optimize=True)
return k
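# The einsum above evaluates k[i, j] = sum_{k, l} theta[k, l] * <vals1[i, k, :],
# vals2[j, l, :]>, i.e. a theta-weighted sum of chemical-channel dot products,
# and is equivalent to the explicit loops in np_frameprod_upper.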
def choose_envKernel_func(nthreads=4, isDeltaKernel=False,verbose=False):
'''
Compile with numba the nb_frameprod_upper function.
:param isDeltaKernel:
:param nthreads: int. Number of threads each of which computes a block of the environmental matrix
:return: Compiled inner_func_nbupper function with threads
'''
if nonumba:
print('Using numpy version of envKernel function')
get_envKernel = np_frameprod3_upper
else:
if verbose:
print('Using compiled and threaded version of envKernel function')
# if nthreads == 1:
if verbose:
print('1 threaded calc')
if isDeltaKernel:
if verbose:
print('with implicit delta kernel function')
get_envKernel = nb_frameprod_upper_delta_singlethread
else:
if verbose:
print('with explicit delta kernel function')
get_envKernel = nb_frameprod_upper_singlethread
# TODO understand why it does not work well now
# elif nthreads in [2,4,6,9,12,16,25,36,48,64,81,100]:
# if verbose:
# print('{:.0f} threaded calc'.format(nthreads))
# if isDeltaKernel:
# if verbose:
# print 'with implicit delta kernel function'
# get_envKernel = nb_frameprod_upper_delta_multithread
# else:
# get_envKernel = nb_frameprod_upper_multithread
# else:
# print('Unsuported nthreads number\n 1 threaded calc')
# get_envKernel = nb_frameprod_upper_singlethread
return get_envKernel
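# Typical use (hypothetical inputs): pick the kernel function once and reuse it,
# e.g.
#   get_envKernel = choose_envKernel_func(nthreads=1, isDeltaKernel=True)
#   envkernels = framesprod(frames, chemicalKernelmat=ckmat,
#                           frameprodFunc=get_envKernel)
# where `frames` comes from get_Soaps and `ckmat` from Atoms2ChemicalKernelmat.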
def framesprod_wrapper(kargs):
keys = kargs.keys()
get_envKernel = kargs.pop('frameprodFunc')
queue = kargs.pop('queue')
# to disable the progressbar
dispbar = kargs.pop('dispbar')
if 'fpointers1' in keys:
fpointers1 = kargs.pop('fpointers1')
fpointers2 = kargs.pop('fpointers2')
atoms1 = [qp.Atoms(fpointer=fpointer1) for fpointer1 in fpointers1]
chemicalKernelmat = kargs.pop('chemicalKernelmat')
frames1 = get_Soaps(atoms1,dispbar=dispbar, **kargs)
if fpointers2 is not None:
atoms2 = [qp.Atoms(fpointer=fpointer2) for fpointer2 in fpointers2]
frames2 = get_Soaps(atoms2,dispbar=dispbar, **kargs)
else:
frames2 = None
kargs = {'frames1': frames1, 'frames2': frames2,
'chemicalKernelmat': chemicalKernelmat}
elif 'atoms1' in keys:
atoms1 = kargs.pop('atoms1')
atoms2 = kargs.pop('atoms2')
chemicalKernelmat = kargs.pop('chemicalKernelmat')
frames1 = get_Soaps(atoms1,dispbar=dispbar, **kargs)
if atoms2 is not None:
frames2 = get_Soaps(atoms2,dispbar=dispbar, **kargs)
else:
frames2 = None
kargs = {'frames1': frames1, 'frames2': frames2, 'chemicalKernelmat': chemicalKernelmat}
return framesprod(queue=queue,frameprodFunc=get_envKernel,**kargs)
# class mp_framesprod(object):
# def __init__(self, chunks, nprocess, nthreads):
#
# self.nprocess = nprocess
# self.nthreads = nthreads
# self.func = framesprod_wrapper
# # get the function to compute an environmental kernel
# self.get_envKernel = choose_envKernel_func(nthreads)
# # add the frameprodFunc to the input chunks
# for chunk in chunks:
# chunk.update(**{'frameprodFunc': self.get_envKernel})
# self.chunks = chunks
#
# def run(self):
# pool = mp.Pool(self.nprocess)
# results = pool.map(self.func, self.chunks)
#
# pool.close()
# pool.join()
#
# return results
class mp_framesprod(object):
def __init__(self, chunks, nprocess, nthreads, Niter,isDeltaKernel,dispbar=False):
super(mp_framesprod, self).__init__()
self.func = framesprod_wrapper
self.parent_id = os.getpid()
self.get_envKernel = choose_envKernel_func(nthreads,isDeltaKernel)
self.nprocess = nprocess
self.nthreads = nthreads
self.dispbar = dispbar
manager = mp.Manager()
self.queue = manager.Queue()
for chunk in chunks:
chunk.update(**{"queue": self.queue,'frameprodFunc': self.get_envKernel,
'dispbar':self.dispbar})
self.chunks = chunks
self.thread = threading.Thread(target=self.listener, args=(self.queue, Niter,dispbar))
self.thread.start()
self.pool = mp.Pool(nprocess, initializer=self.worker_init,maxtasksperchild=1)
def run(self):
res = self.pool.map(self.func, self.chunks)
self.pool.close()
self.pool.join()
self.queue.put(None)
self.thread.join()
return res
@staticmethod
def listener(queue, Niter,dispbar):
print('listener ',dispbar)
tbar = tqdm_cs(total=int(Niter),desc='Env kernels',disable=dispbar)
for ii in iter(queue.get, None):
tbar.update(ii)
tbar.close()
# clean kill of the pool in interactive sessions
def worker_init(self):
def sig_int(signal_num, frame):
print('signal: %s' % signal_num)
parent = psutil.Process(self.parent_id)
for child in parent.children():
if child.pid != os.getpid():
print("killing child: %s" % child.pid)
child.kill()
print("killing parent: %s" % self.parent_id)
parent.kill()
print("suicide: %s" % os.getpid())
psutil.Process(os.getpid()).kill()
signal.signal(signal.SIGINT, sig_int)
def join_envKernel(results, slices,slices_1=None):
if slices_1 is None:
slices_1 = slices
diag = True
rr = list(set([it for sl in slices for it in sl]))
joined_results = {(it, jt): None for it in rr for jt in rr if jt >= it}
else:
diag = False
rr1 = list(set([it for sl in slices for it in sl]))
rr2 = list(set([it for sl in slices_1 for it in sl]))
joined_results = {(it, jt): None for it in rr1 for jt in rr2}
iii = 0
for nt, sl1 in enumerate(slices):
for mt, sl2 in enumerate(slices_1):
if diag is True:
if nt > mt:
continue
if np.all(sl1 == sl2):
for it, s1 in enumerate(sl1):
for jt, s2 in enumerate(sl2):
if s1 > s2:
continue
try:
joined_results[(s1, s2)] = results[iii][(it, jt)]
except:
print(s1, s2, it, jt)
else:
for it, s1 in enumerate(sl1):
for jt, s2 in enumerate(sl2):
try:
joined_results[(s1, s2)] = results[iii][(it, jt)]
except:
print(s1, s2, it, jt)
else:
for it, s1 in enumerate(sl1):
for jt, s2 in enumerate(sl2):
joined_results[(s1, s2)] = results[iii][(it, jt)]
iii += 1
return joined_results
def get_environmentalKernels_mt_mp_chunks(atoms, nocenters=None, chem_channels=True, centerweight=1.0,
gaussian_width=0.5, cutoff=3.5,cutoff_transition_width=0.5,
nmax=8, lmax=6, chemicalKernelmat=None, chemicalKernel=None,
chemicalProjection=None,
nthreads=4, nprocess=2, nchunks=2,islow_memory=False,isDeltaKernel=True,
dispbar=False,is_fast_average=False):
if nocenters is None:
nocenters = []
# Builds the kernel matrix from the species present in the frames and a specified chemical
# kernel function
if chemicalKernelmat is not None:
pass
elif chemicalProjection is not None:
pass
elif (chemicalKernelmat is None) and (chemicalKernel is not None):
chemicalKernelmat = Atoms2ChemicalKernelmat(atoms, chemicalKernel=chemicalKernel)
else:
raise ValueError('wrong chemicalKernelmat and/or chemicalKernel input')
Natoms = len(atoms)
NenvKernels = Natoms * (Natoms + 1) / 2.
# fpointers = [frame._fpointer.copy() for frame in atoms]
# chunks1d, slices = chunk_list(fpointers, nchunks=nchunks)
# cut atomsList in chunks
if islow_memory:
frames = get_Soaps(atoms, nocenters=nocenters, chem_channels=chem_channels, centerweight=centerweight,
gaussian_width=gaussian_width, cutoff=cutoff,is_fast_average=is_fast_average,
chemicalProjection=chemicalProjection,
cutoff_transition_width=cutoff_transition_width, nmax=nmax, lmax=lmax, nprocess=nprocess)
chunks1d, slices = chunk_list(frames, nchunks=nchunks)
else:
chunks1d, slices = chunk_list(atoms, nchunks=nchunks)
soap_params = {'centerweight': centerweight, 'gaussian_width': gaussian_width,
'cutoff': cutoff, 'cutoff_transition_width': cutoff_transition_width,
'nmax': nmax, 'lmax': lmax, 'chemicalKernelmat': chemicalKernelmat,
'chemicalProjection':chemicalProjection,
'chem_channels': chem_channels, 'nocenters': nocenters, 'is_fast_average':is_fast_average,
}
# create inputs for each block of the global kernel matrix
chunks = chunks1d_2_chuncks2d(chunks1d, **soap_params)
# new_atoms1 = {}
# new_atoms2 = {}
# for it,chunk in enumerate(chunks):
# atoms1 = chunk.pop('atoms1')
# atoms2 = chunk.pop('atoms2')
# # new_atoms1[it] = [qp.Atoms().copy_from(frame) for frame in atoms1]
# new_atoms1[it] = [frame.copy() for frame in atoms1]
# fpointers1 = [frame._fpointer.copy() for frame in new_atoms1[it]]
# if atoms2 is not None:
# # new_atoms2[it] = [qp.Atoms().copy_from(frame) for frame in atoms2]
# new_atoms2[it] = [frame.copy() for frame in atoms2]
# fpointers2 = [frame._fpointer.copy() for frame in new_atoms2[it]]
# else:
# fpointers2 = None
#
# chunk.update(**{'fpointers1':fpointers1,'fpointers2':fpointers2})
    # get a list of environmental kernels
pool = mp_framesprod(chunks, nprocess, nthreads, NenvKernels,
isDeltaKernel=isDeltaKernel,dispbar=dispbar)
results = pool.run()
    # reorder the list of environmental kernels into a dictionary whose keys are the (i,j) indices of the global kernel matrix
environmentalKernels = join_envKernel(results, slices)
return environmentalKernels
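# A minimal usage sketch (hypothetical inputs: `frames` would be a list of
# Atoms objects and `my_chemical_kernel` a user-supplied callable, from which
# the chemicalKernelmat is built via Atoms2ChemicalKernelmat):
#
#   envkernels = get_environmentalKernels_mt_mp_chunks(
#       frames, chemicalKernel=my_chemical_kernel,
#       nprocess=2, nthreads=4, nchunks=4)
#   block_01 = envkernels[(0, 1)]  # environmental kernel block between frames 0 and 1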
def get_environmentalKernels_singleprocess(atoms, nocenters=None, chem_channels=True, centerweight=1.0,
gaussian_width=0.5, cutoff=3.5, cutoff_transition_width=0.5,
nmax=8, lmax=6, chemicalKernelmat=None, chemicalKernel=None,
chemicalProjection=None,
nthreads=4, nprocess=0, nchunks=0,isDeltaKernel=True,
dispbar=False,is_fast_average=False):
if nocenters is None:
nocenters = []
# Chooses the function to use to compute the kernel between two frames
get_envKernel = choose_envKernel_func(nthreads,isDeltaKernel)
# Builds the kernel matrix from the species present in the frames and a specified chemical
# kernel function
if chemicalKernelmat is not None:
pass
elif chemicalProjection is not None:
pass
elif chemicalKernelmat is None and chemicalKernel is not None:
chemicalKernelmat = Atoms2ChemicalKernelmat(atoms, chemicalKernel=chemicalKernel)
else:
raise ValueError('wrong chemicalKernelmat and/or chemicalKernel input')
    # get the SOAP representation of every local environment
frames = get_Soaps(atoms, nocenters=nocenters, chem_channels=chem_channels, centerweight=centerweight,
gaussian_width=gaussian_width, cutoff=cutoff, cutoff_transition_width=cutoff_transition_width,
nmax=nmax, lmax=lmax, nprocess=nprocess,chemicalProjection=chemicalProjection,
dispbar=dispbar,is_fast_average=is_fast_average)
# get the environmental kernels as a dictionary
environmentalKernels = framesprod(frames, frameprodFunc=get_envKernel, chemicalKernelmat=chemicalKernelmat,
dispbar=dispbar)
return environmentalKernels
|
pdb-aggregator-server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import logging
import psutil
import subprocess
import os
import yaml
from threading import Thread
import srvdb
import requests as orig_requests
import time
from flask import Flask
from flask import request
from two1.commands.config import Config
from two1.wallet.two1_wallet import Wallet
from two1.bitserv.flask import Payment
from two1.bitrequests import BitTransferRequests
# set up bitrequest client for BitTransfer requests
wallet = Wallet()
username = Config().username
requests = BitTransferRequests(wallet, username)
app = Flask(__name__)
# app.debug = True
# setup wallet
wallet = Wallet()
payment = Payment(app, wallet)
# logging
logger = logging.getLogger('werkzeug')
# db handle
db = srvdb.SrvDb("./pdb-aggregator.db")
def get_payment_amt(request):
"""
    Return the payment amount for the request: a base charge plus the advertised price of each node that will be queried.
"""
print(request.data)
user_input = json.loads(request.data.decode('UTF-8'))
cost = 1000
nodes = db.get_cheapest_nodes(user_input['nodes'])
for node in nodes:
cost = cost + node['price']
return cost
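# Illustrative pricing example (hypothetical node prices): with the base charge
# of 1000 and three returned nodes priced 50, 75 and 100, get_payment_amt
# returns 1000 + 50 + 75 + 100 = 1225.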
@app.route('/', methods=['POST'])
@payment.required(get_payment_amt)
def ping():
"""
    Get the cheapest N nodes running ping21 and run them against the specified URL.
"""
user_input = json.loads(request.data.decode('UTF-8'))
if 'nodes' not in user_input:
ret_obj = {'success': False, "message": "Missing nodes parameter in post data."}
ret = json.dumps(ret_obj, indent=2)
return (ret, 200, {'Content-length': len(ret), 'Content-type': 'application/json'})
if 'website' not in user_input:
ret_obj = {'success': False, "message": "Missing website parameter in post data."}
ret = json.dumps(ret_obj, indent=2)
return (ret, 200, {'Content-length': len(ret), 'Content-type': 'application/json'})
# Get the amount of nodes the user requested + 10 in case one of them fails
requested_count = user_input['nodes']
nodes = db.get_cheapest_nodes(requested_count + 10)
# Iterate over the nodes returned from the DB
vals = []
successful_requests = 0
for node in nodes:
# If we have already found as many nodes as the user requested, bail out
if successful_requests >= requested_count:
break
try:
# Get the ping data from the node.
# Use the uri from the user in the request.
# Use the maxprice from the db (last time we saw it), so we don't get suckered.
ret = requests.get(node['url'] + "?uri=" + user_input['website'], max_price=node['price'])
# Get the json for the response
ret_obj = ret.json()
ret_obj['price_paid'] = node['price']
# Strip out sensitive info
del ret_obj['server']['ip']
del ret_obj['server']['hostname']
# Save it off
vals.append(ret_obj)
# Update the success count
successful_requests = successful_requests + 1
except Exception as err:
logger.error("Failure: {0}".format(err))
ret = json.dumps(vals, indent=2)
return (ret, 200, {'Content-length': len(ret), 'Content-type': 'application/json'})
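# A minimal client sketch (hypothetical target URL; assumes the server runs
# locally on port 5019 as configured below). The bitserv payment decorator
# demands payment before the handler runs, so a plain POST without a
# payment-capable client is expected to be refused.
#
#   import json, requests
#   payload = {"nodes": 3, "website": "https://example.com"}
#   resp = requests.post("http://localhost:5019/", data=json.dumps(payload))
#   print(resp.status_code, resp.text)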
def gather_ping_node_stats():
"""
Iterates over nodes and updates the prices and status.
"""
while True:
# Sleep for 8 hours before reloading the node stats
time.sleep(60 * 60 * 8)
nodes = db.get_node_ips()
for node in nodes:
logger.info("\n\nChecking for ping server on {}".format(node))
node_up = False
# First try port 6002
url = "http://{}:6002/".format(node)
manifest_url = url + "manifest"
try:
                # If the manifest comes back and it mentions ping21, the node is up
logger.info("Checking on port 6002 with url: {}".format(manifest_url))
manifest = orig_requests.get(manifest_url, timeout=1)
logger.debug("Got back the manifest")
if "ping21" in manifest.text:
node_up = True
logger.debug("Ping21 is running on 6002 on this node")
else:
logger.debug("Ping21 was not found in the manifest")
except:
node_up = False
# Not found on standard node, see if it is running as a microservice
if not node_up:
url = "http://{}:8080/ping/".format(node)
manifest_url = url + "manifest"
try:
                    # If the manifest comes back and it mentions ping21, the node is up
logger.debug("Checking on port 8080")
manifest = orig_requests.get(manifest_url, timeout=1)
logger.debug("Got back the manifest")
if "ping21" in manifest.text:
node_up = True
logger.debug("Ping21 is running on 8080 on this node")
else:
logger.debug("Ping21 was not found on this node")
except:
node_up = False
# if we didn't find the ping21 service, mark the node as down
if not node_up:
logger.debug("Marking this node as down since Ping21 was not found")
db.update_node(node, False, 0, "")
continue
# We found the node and it is running ping21, so hit the endpoint to get the price
try:
                # Hit the ping endpoint and read the advertised price from the response headers
logger.debug("Getting ping url: {}".format(url))
ping_res = orig_requests.get(url)
price = int(ping_res.headers['Price'])
db.update_node(node, True, price, url)
logger.debug("Updated the price from the endpoint: {}".format(price))
except Exception as err:
logger.error("Failure: {0}".format(err))
db.update_node(node, False, 0, url)
if __name__ == '__main__':
import click
@click.command()
@click.option("-d", "--daemon", default=False, is_flag=True, help="Run in daemon mode.")
@click.option("-l", "--log", default="ERROR", help="Logging level to use (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
def run(daemon, log):
"""
Run the service.
"""
# Set logging level
numeric_level = getattr(logging, log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % log)
logging.basicConfig(level=numeric_level)
if daemon:
pid_file = './ping-aggregator.pid'
if os.path.isfile(pid_file):
pid = int(open(pid_file).read())
os.remove(pid_file)
try:
p = psutil.Process(pid)
p.terminate()
except:
pass
try:
p = subprocess.Popen(['python3', 'ping-aggregator-E16-server.py'])
open(pid_file, 'w').write(str(p.pid))
except subprocess.CalledProcessError:
raise ValueError("error starting ping-aggregator-E16-server.py daemon")
else:
# Start cleanup thread
cleaner = Thread(target=gather_ping_node_stats, daemon=True)
cleaner.start()
print("Server running...")
app.run(host='0.0.0.0', port=5019)
run()
|
fy.py
|
import argparse
import datetime
import json
import os
import re
import sys
import threading
import huepy
import pangu
import requests
import xmltodict
from googletrans import Translator
from pony import orm
__version__ = "1.6.0"
HEADERS = {
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36"
"(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
}
ERR_MSG = "Exception occurs, check your network or just try again later"
FY_CONF_PATH = os.path.join(os.path.expanduser("~"), ".fy.json")
FY_DB_PATH = os.path.join(os.path.expanduser("~"), ".fy.sqlite")
HERE = os.path.join(os.path.abspath(os.path.dirname(__file__)), "words")
db = orm.Database()
db.bind(provider="sqlite", filename=FY_DB_PATH, create_db=True)
class Words(db.Entity):
__table__ = "words"
words = orm.PrimaryKey(str)
count = orm.Required(int)
date = orm.Required(datetime.datetime)
db.generate_mapping(create_tables=True)
@orm.db_session
def sql_update(words: str):
query = Words.get(words=words)
if query:
query.count += 1
else:
Words(
words=words,
count=1,
date=datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
)
db.commit()
def generate_config(is_force: bool = False):
conf = {
# query source, split by commas
"query_source": "google,youdao,iciba",
# youdao key: http://open.iciba.com/index.php?c=api
"youdao_key": "1945325576",
"youdao_key_from": "Youdao-dict-v21",
# iciba key: http://open.iciba.com/index.php?c=api
"iciba_key": "4B26F43688FA072E0B94F68FFCE224CF",
"enable_sound": True,
}
def _write():
with open(FY_CONF_PATH, "w+", encoding="utf8") as f:
f.write(json.dumps(conf, indent=4))
if is_force:
_write()
return
if not os.path.exists(FY_CONF_PATH):
_write()
def read_config() -> dict:
generate_config()
def _read():
with open(FY_CONF_PATH, "r", encoding="utf8") as f:
return json.load(f)
try:
conf = _read()
except:
generate_config(True)
conf = _read()
return conf
class Conf:
def __init__(self, conf: dict):
self.youdao_key = conf["youdao_key"]
self.youdao_key_from = conf["youdao_key_from"]
self.iciba_key = conf["iciba_key"]
self.query_source = conf["query_source"]
self.enable_sound = conf["enable_sound"]
# global configure
CONF = Conf(read_config())
# types
Parser = argparse.ArgumentParser
def get_parser() -> Parser:
parser = argparse.ArgumentParser(description="Translate words via command line")
parser.add_argument(
"words", metavar="WORDS", type=str, nargs="*", help="the words to translate"
)
parser.add_argument(
"-s", "--shell", action="store_true", help="spawn a query prompt shell."
)
parser.add_argument(
"-r", "--records", action="store_true", help="spawn a records prompt shell."
)
parser.add_argument(
"-R", "--reset", action="store_true", help="reset fy configuration."
)
parser.add_argument(
"-v",
"--version",
action="store_true",
help="displays the current version of fy",
)
return parser
def command_line_runner():
parser = get_parser()
args = vars(parser.parse_args())
words = " ".join(args["words"])
if args["version"]:
print(huepy.cyan("fy " + __version__))
return
if args["reset"]:
generate_config(True)
return
if args["shell"]:
query_prompt_shell()
return
if args["records"]:
records_prompt_shell()
return
if not args["words"]:
parser.print_help()
return
run(words)
def translate(words: str):
if "google" in CONF.query_source:
google_api(words)
if "youdao" in CONF.query_source:
youdao_api(words)
if "iciba" in CONF.query_source:
iciba_api(words)
if ("iciba" not in CONF.query_source) and ("youdao" not in CONF.query_source):
youdao_api(words)
iciba_api(words)
def run(words: str):
threads = [
threading.Thread(target=translate, args=(words,)),
threading.Thread(target=sql_update, args=(words,)),
]
if CONF.enable_sound:
threads.append(threading.Thread(target=say, args=(words,)))
for th in threads:
th.start()
for th in threads:
th.join()
def google_api(words: str):
print()
def switch_language():
for w in words:
if "\u4e00" <= w <= "\u9fff":
return "en"
return "zh-cn"
translator = Translator(service_urls=["translate.google.cn"])
text = pangu.spacing_text(translator.translate(words, dest=switch_language()).text)
print(" " + words + huepy.grey(" ~ translate.google.cn"))
print()
print(" - " + huepy.cyan(text))
def youdao_api(words: str):
print()
print(huepy.grey(" -------- "))
print()
url = (
"http://fanyi.youdao.com/openapi.do?keyfrom={}&key={}&"
"type=data&doctype=json&version=1.1&q={}"
)
try:
resp = requests.get(
url.format(CONF.youdao_key_from, CONF.youdao_key, words), headers=HEADERS
).json()
phonetic = ""
basic = resp.get("basic", None)
if basic and resp.get("basic").get("phonetic"):
phonetic += huepy.purple(" [ " + basic.get("phonetic") + " ]")
print(" " + words + phonetic + huepy.grey(" ~ fanyi.youdao.com"))
print()
translation = resp.get("translation", [])
if len(translation) > 0:
print(" - " + pangu.spacing_text(huepy.green(translation[0])))
if basic and basic.get("explains", None):
for item in basic.get("explains"):
print(huepy.grey(" - ") + pangu.spacing_text(huepy.green(item)))
print()
web = resp.get("web", None)
if web and len(web):
for i, item in enumerate(web):
print(
huepy.grey(
" " + str(i + 1) + ". " + highlight(item.get("key"), words)
)
)
print(" " + huepy.cyan(", ".join(item.get("value"))))
except:
print(" " + huepy.red(ERR_MSG))
def iciba_api(words: str):
print()
print(huepy.grey(" -------- "))
print()
url = "http://dict-co.iciba.com/api/dictionary.php?key={key}&w={w}&type={type}"
try:
resp = requests.get(url.format(key=CONF.iciba_key, w=words, type="xml"))
resp.encoding = "utf8"
dct = xmltodict.parse(resp.text).get("dict")
ps = dct.get("ps") or ""
print(" " + words + " " + huepy.purple(ps) + huepy.grey(" ~ iciba.com"))
print()
pos = dct.get("pos")
acceptation = dct.get("acceptation")
if pos and acceptation:
if not isinstance(pos, list) and not isinstance(acceptation, list):
pos = [pos]
acceptation = [acceptation]
            for p, a in zip(pos, acceptation):
if a and p:
print(" - " + huepy.green(p + " " + a))
print()
index = 1
sent = dct.get("sent")
if not sent:
return
if not isinstance(sent, list):
sent = [sent]
for item in sent:
for k, v in item.items():
if k == "orig":
print(highlight(huepy.grey(" {}. ".format(index) + v), words))
index += 1
elif k == "trans":
print(highlight(huepy.cyan(" " + v), words))
print()
except:
print(" " + huepy.red(ERR_MSG))
def query_prompt_shell():
try:
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
with open(os.path.join(HERE, "words.txt"), "r", encoding="utf-8") as f:
words = [w.replace("\n", "") for w in f.readlines()]
while True:
run(
prompt(
"Press <Ctrl+C> to exit shell.\nEnter words: ",
completer=WordCompleter(words),
complete_in_thread=True,
)
)
print()
except KeyboardInterrupt:
print(huepy.green("GoodBye!"))
def records_prompt_shell():
try:
from litecli.main import LiteCli
litecli = LiteCli(prompt="Type quit to exit shell.\nPrompt: ")
litecli.connect(database=FY_DB_PATH)
litecli.run_cli()
except:
print(huepy.red("sorry, it can't spawn records prompt shell."))
def highlight(text: str, keyword: str):
text = pangu.spacing_text(text)
return re.sub(
keyword,
"\33[0m" + "\33[93m" + keyword + "\33[0m" + "\33[37m",
text,
flags=re.IGNORECASE,
)
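# Illustrative only: highlight("Hello World", "world") wraps the case-insensitive
# match in ANSI yellow, yielding "Hello \33[0m\33[93mworld\33[0m\33[37m"; note the
# replacement uses the casing of the supplied keyword rather than of the match.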
def say(words: str):
if sys.platform == "win32":
try:
from win32com.client import Dispatch
speak = Dispatch("SAPI.SpVoice")
speak.Speak(words)
except:
pass
if __name__ == "__main__":
command_line_runner()
|
base.py
|
# mypy: allow-untyped-defs
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from typing import Any, Callable, ClassVar, Tuple, Type
from urllib.parse import urljoin, urlsplit, urlunsplit
from . import pytestrunner
from .actions import actions
from .protocol import Protocol, WdspecProtocol
here = os.path.dirname(__file__)
def executor_kwargs(test_type, test_environment, run_info_data, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": test_environment.config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type in ("reftest", "print-reftest"):
executor_kwargs["screenshot_cache"] = test_environment.cache_manager.dict()
executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
# By default the executor may try to cleanup windows after a test (to best
# associate any problems with the test causing them). If the user might
# want to view the results, however, the executor has to skip that cleanup.
if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
executor_kwargs["cleanup_after_test"] = False
executor_kwargs["debug_test"] = kwargs["debug_test"]
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter:
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshots(screenshots):
"""Computes the sha1 checksum of a list of base64-encoded screenshots."""
return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
for screenshot in screenshots]
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshots([item["screenshot"]])[0]
def get_pages(ranges_value, total_pages):
"""Get a set of page numbers to include in a print reftest.
:param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
:param total_pages: Integer total number of pages in the paginated output.
:retval: Set containing integer page numbers to include in the comparison e.g.
for the example ranges value and 10 total pages this would be
{1,2,4,6,7,8,9,10}"""
if not ranges_value:
return set(range(1, total_pages + 1))
rv = set()
for range_limits in ranges_value:
if len(range_limits) == 1:
range_limits = [range_limits[0], range_limits[0]]
if range_limits[0] is None:
range_limits[0] = 1
if range_limits[1] is None:
range_limits[1] = total_pages
if range_limits[0] > total_pages:
continue
rv |= set(range(range_limits[0], range_limits[1] + 1))
return rv
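# e.g. get_pages([[1, 2], [4], [6, None]], 10) == {1, 2, 4, 6, 7, 8, 9, 10},
# matching the docstring example above, and get_pages(None, 3) == {1, 2, 3}.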
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner:
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
for setup_fn in [self.set_timeout, self.before_run]:
err = setup_fn()
if err:
self.result = (False, err)
return self.result
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor:
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None # type: ClassVar[str]
# convert_result is a class variable set to a callable converter
# (e.g. reftest_result_converter) converting from an instance of
# URLManifestItem (e.g. RefTest) + type-dependent results object +
# type-dependent extra data, returning a tuple of Result and list of
# SubtestResult. For now, any callable is accepted. TODO: Make this type
# stricter when more of the surrounding code is annotated.
convert_result = None # type: ClassVar[Callable[..., Any]]
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.logger = logger
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
try:
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
result = self.do_test(test)
except Exception as e:
exception_string = traceback.format_exc()
self.logger.warning(exception_string)
result = self.result_from_exception(test, e, exception_string)
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol, subdomain=False):
scheme = "https" if protocol == "h2" else protocol
host = self.server_config["browser_host"]
if subdomain:
# The only supported subdomain filename flag is "www".
host = "{subdomain}.{host}".format(subdomain="www", host=host)
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
port=self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"],
test.subdomain), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e, exception_string):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += exception_string
return test.result_cls(status, message), []
def wait(self):
return self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, reftest_screenshot="unexpected", **kwargs):
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
self.reftest_screenshot = reftest_screenshot
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class PrintRefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = True
class RefTestImplementation:
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
self.reftest_screenshot = executor.reftest_screenshot
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi, page_ranges):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
if not success:
return False, data
screenshots = data
hash_values = hash_screenshots(data)
self.screenshot_cache[key] = (hash_values, screenshots)
rv = (hash_values, screenshots)
else:
rv = self.screenshot_cache[key]
self.message.append(f"{test.url} {rv[0]}")
return True, rv
def reset(self):
self.screenshot_cache.clear()
def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
"""Check if a test passes, and return a tuple of (pass, page_idx),
where page_idx is the zero-based index of the first page on which a
difference occurs if any, or None if there are no differences"""
assert relation in ("==", "!=")
lhs_hashes, rhs_hashes = hashes
lhs_screenshots, rhs_screenshots = screenshots
if len(lhs_hashes) != len(rhs_hashes):
self.logger.info("Got different number of pages")
return relation == "!=", -1
assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
for (page_idx, (lhs_hash,
rhs_hash,
lhs_screenshot,
rhs_screenshot)) in enumerate(zip(lhs_hashes,
rhs_hashes,
lhs_screenshots,
rhs_screenshots)):
comparison_screenshots = (lhs_screenshot, rhs_screenshot)
if not fuzzy or fuzzy == ((0, 0), (0, 0)):
equal = lhs_hash == rhs_hash
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match%s, checking pixel differences" %
("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls,
page_idx if len(hashes) > 1 else None)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
if not equal:
return (False if relation == "==" else True, page_idx)
# All screenshots were equal within the fuzziness
return (True if relation == "==" else False, -1)
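    # Illustrative only: with fuzzy == ((0, 2), (0, 10)) a page still counts as
    # equal when at most 10 pixels differ and no channel differs by more than 2;
    # with fuzzy falsy or ((0, 0), (0, 0)) the hashes (or pixels) must match exactly.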
def get_differences(self, screenshots, urls, page_idx=None):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
(count,
per_channel,
"" if page_idx is None else " on page %i" % (page_idx + 1)))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append(f"Screenshot is solid color 0x{color} for {url}\n")
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
page_ranges = test.page_ranges
self.message = []
# Depth-first search of reference tree, with the goal
        # of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
log_data = [
{"url": urls[0], "screenshot": screenshots[0][page_idx],
"hash": hashes[0][page_idx]},
relation,
{"url": urls[1], "screenshot": screenshots[1][page_idx],
"hash": hashes[1][page_idx]}
]
if is_pass:
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1])
for item in reversed(nodes[1].references)))
else:
test_result = {"status": "PASS", "message": None}
if (self.reftest_screenshot == "always" or
self.reftest_screenshot == "unexpected" and
test.expected() != "PASS"):
test_result["extra"] = {"reftest_screenshots": log_data}
# We passed
return test_result
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
if success:
screenshots[i] = screenshot
test_result = {"status": "FAIL",
"message": "\n".join(self.message)}
if (self.reftest_screenshot in ("always", "fail") or
self.reftest_screenshot == "unexpected" and
test.expected() != "FAIL"):
test_result["extra"] = {"reftest_screenshots": log_data}
return test_result
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
success, data = self.get_screenshot_list(node,
viewport_size,
dpi,
page_ranges)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
if success and not isinstance(data, list):
return success, [data]
return success, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = WdspecProtocol # type: ClassVar[Type[Protocol]]
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
super().__init__(logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
def setup(self, runner):
self.protocol = self.protocol_cls(self, self.browser)
super().setup(runner)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, path, timeout):
session_config = {"host": self.browser.host,
"port": self.browser.port,
"capabilities": self.capabilities,
"webdriver": {
"binary": self.webdriver_binary,
"args": self.webdriver_args
}}
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
class WdspecRun:
def __init__(self, func, path, timeout):
self.func = func
self.result = (None, None)
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.path, self.timeout)
except (socket.timeout, OSError):
self.result = False, ("CRASH", None)
except Exception as e:
            message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class CallbackHandler:
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,) # type: ClassVar[Tuple[Type[Exception], ...]]
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
cmd_id = payload["id"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
with ActionContext(self.logger, self.protocol, payload.get("context")):
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message(cmd_id, "complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message(cmd_id, "complete", "error")
raise
else:
self.logger.debug(f"Action {action} completed with result {result}")
return_message = {"result": result}
self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, cmd_id, message_type, status, message=None):
self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
class ActionContext:
def __init__(self, logger, protocol, context):
self.logger = logger
self.protocol = protocol
self.context = context
self.initial_window = None
def __enter__(self):
if self.context is None:
return
self.initial_window = self.protocol.base.current_window
self.logger.debug("Switching to window %s" % self.context)
self.protocol.testdriver.switch_to_window(self.context, self.initial_window)
def __exit__(self, *args):
if self.context is None:
return
self.logger.debug("Switching back to initial window")
self.protocol.base.set_window(self.initial_window)
self.initial_window = None
|
googlenet_resnet50.py
|
#!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import glob
import time
import multiprocessing as mp
from apps.aks.libs import aks
def usage(exe):
print("[INFO] Usage: ")
print("[INFO] ---------------------- ")
print("[INFO] ", exe, " <Image Directory Path>")
def main(imageDirectory, graphName, graphJson):
fileExtension = ('*.jpg', '*.JPEG', '*.png')
images = []
for ext in fileExtension:
images.extend(glob.glob(imageDirectory + '/' + ext))
kernelDir = "kernel_zoo"
sysMan = aks.SysManager()
sysMan.loadKernels(kernelDir)
sysMan.loadGraphs(graphJson)
graph = sysMan.getGraph(graphName)
print("[INFO] Starting enqueue... ")
print("[INFO] Running", len(images), "images")
t0 = time.time()
for i, img in enumerate(images):
sysMan.enqueueJob(graph, img)
sysMan.waitForAllResults()
t1 = time.time()
print("[INFO] Overall FPS : ", len(images)/(t1-t0))
sysMan.report(graph)
if __name__ == "__main__":
if (len(sys.argv) != 2):
print("[ERROR] Invalid Usage!")
usage(sys.argv[0])
exit(1)
imageDirectory = sys.argv[1]
graphs = {}
graphs['googlenet_no_runner'] = 'graph_zoo/graph_googlenet_no_runner.json'
graphs['resnet50_no_runner'] = 'graph_zoo/graph_resnet50_no_runner.json'
procs = []
for name, json in graphs.items():
print(name, json)
p = mp.Process(target=main, args=(imageDirectory, name, json,))
p.start()
procs.append(p)
for proc in procs:
proc.join()
|
bot.py
|
import os
import youtube_dl
import telepotpro
from random import randint
from multiprocessing import Process
from youtubesearchpython import VideosSearch
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
TOKEN = os.environ.get("TOKEN")
bot = telepotpro.Bot(TOKEN)
class Music:
def __init__(self, user_input, msg):
self.chat = Chat
self.user_input = user_input[6:]
def search_music(self, user_input):
return VideosSearch(user_input, limit = 1).result()
def get_link(self, result):
return result['result'][0]['link']
def get_title(self, result):
return result['result'][0]['title']
def get_duration(self, result):
result = result['result'][0]['duration'].split(':')
min_duration = int(result[0])
split_count = len(result)
return min_duration, split_count
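    # Illustrative only: "4:32" parses to (4, 2) and is accepted by
    # process_request, while "1:02:10" parses to (1, 3) and is rejected
    # because split_count >= 3 (an hour or longer).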
def download_music(self, file_name, link):
ydl_opts = {
'outtmpl': './'+file_name,
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '256',
}],
'prefer_ffmpeg': True
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=True)
pass
class Chat:
def __init__(self, msg):
self.chat_id = msg['chat']['id']
self.user_input = msg['text']
self.user_input = self.user_input.replace('@LuciferV12Bot', '')
self.user_name = msg['from']['first_name']
self.message_id = msg['message_id']
self.messages = {
'start':'🤖 Hello, '+ self.user_name +'!\n\n'
'📩 Send me:\n\n'
'"*/music* _song name_" or\n'
'"*/music* _musician name - song name_"\n\n'
'to order some music. 🎶',
'spotify_input_error':"‼️ *Oops! The bot doesn't support Spotify links!*\n"
'Try: "*/music* _song name_"\n'
'or: "*/music* _musician name - song name_"',
'invalid_command':'‼️ *Oops! Invalid command!*\n'
'Try: "*/music* _song name_"\n'
'or: "*/music* _musician name - song name_"',
'too_long':'‼️ *Oops! Video too long to convert!*\n'
'Order something 30 minutes or less.'
}
self.check_input(self.user_input, msg)
pass
def send_message(self, content):
return bot.sendMessage(self.chat_id, content, reply_to_message_id=self.message_id, parse_mode='Markdown')
def delete_message(self, message):
chat_id = message['chat']['id']
message_id = message['message_id']
bot.deleteMessage((chat_id, message_id))
pass
def send_audio(self, file_name):
bot.sendAudio(self.chat_id,audio=open(file_name,'rb'), reply_to_message_id=self.message_id)
pass
def process_request(self, user_input):
result = Music.search_music(self, user_input[6:])
min_duration, split_count = Music.get_duration(self, result)
if int(min_duration) < 30 and split_count < 3:
file_name = Music.get_title(self, result) +' - @LuciferV12Bot '+str(randint(0,999999))+'.mp3'
file_name = file_name.replace('"', '')
self.send_message(f"🎵 {Music.get_title(self, result)}\n🔗 {Music.get_link(self, result)}")
downloading_message = self.send_message('⬇️ Downloading... \n_(this may take a while.)_')
Music.download_music(self, file_name, Music.get_link(self, result))
try:
self.send_audio(file_name)
self.delete_message(downloading_message)
                self.send_message('✅ Success!')
                print("\nSuccess!\n")
except:
print("\nError")
os.remove(file_name)
pass
def check_input(self, user_input, msg):
if user_input.startswith('/start'):
self.send_message(self.messages['start'])
elif user_input.startswith('/music') and user_input[6:]!='':
if 'open.spotify.com' in user_input[6:]:
self.send_message(self.messages['spotify_input_error'])
else:
#Valid command
self.process_request(user_input)
else:
#Invalid command
self.send_message(self.messages['invalid_command'])
pass
def start_new_chat(msg):
Process(target=Chat, args=(msg,)).start()
bot.message_loop(start_new_chat, run_forever=True)
|
gdbclientutils.py
|
import ctypes
import errno
import io
import threading
import socket
import traceback
from lldbsuite.support import seven
def checksum(message):
"""
Calculate the GDB server protocol checksum of the message.
The GDB server protocol uses a simple modulo 256 sum.
"""
check = 0
for c in message:
check += ord(c)
return check % 256
def frame_packet(message):
"""
Create a framed packet that's ready to send over the GDB connection
channel.
Framing includes surrounding the message between $ and #, and appending
a two character hex checksum.
"""
return "$%s#%02x" % (message, checksum(message))
def escape_binary(message):
"""
Escape the binary message using the process described in the GDB server
protocol documentation.
    Most bytes are sent through as-is, but $, #, and } are escaped by writing
    a } followed by the original byte XORed with 0x20.
"""
out = ""
for c in message:
d = ord(c)
if d in (0x23, 0x24, 0x7d):
out += chr(0x7d)
out += chr(d ^ 0x20)
else:
out += c
return out
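# Worked example: escape_binary("a$b") == "a}" + chr(0x04) + "b", because
# "$" (0x24) is emitted as "}" followed by chr(0x24 ^ 0x20).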
def hex_encode_bytes(message):
"""
Encode the binary message by converting each byte into a two-character
hex string.
"""
out = ""
for c in message:
out += "%02x" % ord(c)
return out
def hex_decode_bytes(hex_bytes):
"""
Decode the hex string into a binary message by converting each two-character
hex string into a single output byte.
"""
out = ""
hex_len = len(hex_bytes)
i = 0
while i < hex_len - 1:
out += chr(int(hex_bytes[i:i + 2], 16))
i += 2
return out
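# Worked example: hex_encode_bytes("OK") == "4f4b" and hex_decode_bytes("4f4b") == "OK".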
class MockGDBServerResponder:
"""
A base class for handling client packets and issuing server responses for
GDB tests.
This handles many typical situations, while still allowing subclasses to
completely customize their responses.
Most subclasses will be interested in overriding the other() method, which
handles any packet not recognized in the common packet handling code.
"""
registerCount = 40
packetLog = None
class RESPONSE_DISCONNECT: pass
def __init__(self):
self.packetLog = []
def respond(self, packet):
"""
Return the unframed packet data that the server should issue in response
to the given packet received from the client.
"""
self.packetLog.append(packet)
if packet is MockGDBServer.PACKET_INTERRUPT:
return self.interrupt()
if packet == "c":
return self.cont()
if packet.startswith("vCont;c"):
return self.vCont(packet)
if packet[0] == "A":
return self.A(packet)
if packet[0] == "D":
return self.D(packet)
if packet[0] == "g":
return self.readRegisters()
if packet[0] == "G":
# Gxxxxxxxxxxx
# Gxxxxxxxxxxx;thread:1234;
return self.writeRegisters(packet[1:].split(';')[0])
if packet[0] == "p":
regnum = packet[1:].split(';')[0]
return self.readRegister(int(regnum, 16))
if packet[0] == "P":
register, value = packet[1:].split("=")
return self.writeRegister(int(register, 16), value)
if packet[0] == "m":
addr, length = [int(x, 16) for x in packet[1:].split(',')]
return self.readMemory(addr, length)
if packet[0] == "M":
location, encoded_data = packet[1:].split(":")
addr, length = [int(x, 16) for x in location.split(',')]
return self.writeMemory(addr, encoded_data)
if packet[0:7] == "qSymbol":
return self.qSymbol(packet[8:])
if packet[0:10] == "qSupported":
return self.qSupported(packet[11:].split(";"))
if packet == "qfThreadInfo":
return self.qfThreadInfo()
if packet == "qsThreadInfo":
return self.qsThreadInfo()
if packet == "qC":
return self.qC()
if packet == "QEnableErrorStrings":
return self.QEnableErrorStrings()
if packet == "?":
return self.haltReason()
if packet == "s":
return self.haltReason()
if packet[0] == "H":
tid = packet[2:]
if "." in tid:
assert tid.startswith("p")
# TODO: do we want to do anything with PID?
tid = tid.split(".", 1)[1]
return self.selectThread(packet[1], int(tid, 16))
if packet[0:6] == "qXfer:":
obj, read, annex, location = packet[6:].split(":")
offset, length = [int(x, 16) for x in location.split(',')]
data, has_more = self.qXferRead(obj, annex, offset, length)
if data is not None:
return self._qXferResponse(data, has_more)
return ""
if packet.startswith("vAttach;"):
pid = packet.partition(';')[2]
return self.vAttach(int(pid, 16))
if packet[0] == "Z":
return self.setBreakpoint(packet)
if packet.startswith("qThreadStopInfo"):
threadnum = int (packet[15:], 16)
return self.threadStopInfo(threadnum)
if packet == "QThreadSuffixSupported":
return self.QThreadSuffixSupported()
if packet == "QListThreadsInStopReply":
return self.QListThreadsInStopReply()
if packet.startswith("qMemoryRegionInfo:"):
return self.qMemoryRegionInfo(int(packet.split(':')[1], 16))
if packet == "qQueryGDBServer":
return self.qQueryGDBServer()
if packet == "qHostInfo":
return self.qHostInfo()
if packet == "qGetWorkingDir":
return self.qGetWorkingDir()
if packet == "qOffsets":
            return self.qOffsets()
if packet == "qProcessInfo":
return self.qProcessInfo()
if packet == "qsProcessInfo":
return self.qsProcessInfo()
if packet.startswith("qfProcessInfo"):
return self.qfProcessInfo(packet)
if packet.startswith("qPathComplete:"):
return self.qPathComplete()
if packet.startswith("vFile:"):
return self.vFile(packet)
if packet.startswith("vRun;"):
return self.vRun(packet)
if packet.startswith("qLaunchSuccess"):
return self.qLaunchSuccess()
if packet.startswith("QEnvironment:"):
return self.QEnvironment(packet)
if packet.startswith("QEnvironmentHexEncoded:"):
return self.QEnvironmentHexEncoded(packet)
if packet.startswith("qRegisterInfo"):
regnum = int(packet[len("qRegisterInfo"):], 16)
return self.qRegisterInfo(regnum)
if packet == "k":
return self.k()
return self.other(packet)
def qsProcessInfo(self):
return "E04"
def qfProcessInfo(self, packet):
return "E04"
def qGetWorkingDir(self):
return "2f"
def qOffsets(self):
return ""
def qProcessInfo(self):
return ""
def qHostInfo(self):
return "ptrsize:8;endian:little;"
def qQueryGDBServer(self):
return "E04"
def interrupt(self):
raise self.UnexpectedPacketException()
def cont(self):
raise self.UnexpectedPacketException()
def vCont(self, packet):
raise self.UnexpectedPacketException()
def A(self, packet):
return ""
def D(self, packet):
return "OK"
def readRegisters(self):
return "00000000" * self.registerCount
def readRegister(self, register):
return "00000000"
def writeRegisters(self, registers_hex):
return "OK"
def writeRegister(self, register, value_hex):
return "OK"
def readMemory(self, addr, length):
return "00" * length
def writeMemory(self, addr, data_hex):
return "OK"
def qSymbol(self, symbol_args):
return "OK"
def qSupported(self, client_supported):
return "qXfer:features:read+;PacketSize=3fff;QStartNoAckMode+"
def qfThreadInfo(self):
return "l"
def qsThreadInfo(self):
return "l"
def qC(self):
return "QC0"
def QEnableErrorStrings(self):
return "OK"
def haltReason(self):
# SIGINT is 2, return type is 2 digit hex string
return "S02"
def qXferRead(self, obj, annex, offset, length):
return None, False
def _qXferResponse(self, data, has_more):
return "%s%s" % ("m" if has_more else "l", escape_binary(data))
def vAttach(self, pid):
raise self.UnexpectedPacketException()
def selectThread(self, op, thread_id):
return "OK"
def setBreakpoint(self, packet):
raise self.UnexpectedPacketException()
def threadStopInfo(self, threadnum):
return ""
def other(self, packet):
# empty string means unsupported
return ""
def QThreadSuffixSupported(self):
return ""
def QListThreadsInStopReply(self):
return ""
def qMemoryRegionInfo(self, addr):
return ""
def qPathComplete(self):
return ""
def vFile(self, packet):
return ""
def vRun(self, packet):
return ""
def qLaunchSuccess(self):
return ""
def QEnvironment(self, packet):
return "OK"
def QEnvironmentHexEncoded(self, packet):
return "OK"
def qRegisterInfo(self, num):
return ""
def k(self):
return ["W01", self.RESPONSE_DISCONNECT]
"""
Raised when we receive a packet for which there is no default action.
Override the responder class to implement behavior suitable for the test at
hand.
"""
class UnexpectedPacketException(Exception):
pass
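# A minimal sketch of a custom responder (the qEcho packet contents here are
# hypothetical), using the override point described in the class docstring above:
#
#   class EchoResponder(MockGDBServerResponder):
#       def other(self, packet):
#           if packet.startswith("qEcho:"):
#               return packet[len("qEcho:"):]
#           return ""  # empty string means unsupported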
class ServerChannel:
"""
A wrapper class for TCP or pty-based server.
"""
def get_connect_address(self):
"""Get address for the client to connect to."""
def get_connect_url(self):
"""Get URL suitable for process connect command."""
def close_server(self):
"""Close all resources used by the server."""
def accept(self):
"""Accept a single client connection to the server."""
def close_connection(self):
"""Close all resources used by the accepted connection."""
def recv(self):
"""Receive a data packet from the connected client."""
def sendall(self, data):
"""Send the data to the connected client."""
class ServerSocket(ServerChannel):
def __init__(self, family, type, proto, addr):
self._server_socket = socket.socket(family, type, proto)
self._connection = None
self._server_socket.bind(addr)
self._server_socket.listen(1)
def close_server(self):
self._server_socket.close()
def accept(self):
assert self._connection is None
# accept() is stubborn and won't fail even when the socket is
# shutdown, so we'll use a timeout
self._server_socket.settimeout(30.0)
client, client_addr = self._server_socket.accept()
# The connected client inherits its timeout from self._socket,
# but we'll use a blocking socket for the client
client.settimeout(None)
self._connection = client
def close_connection(self):
assert self._connection is not None
self._connection.close()
self._connection = None
def recv(self):
assert self._connection is not None
return self._connection.recv(4096)
def sendall(self, data):
assert self._connection is not None
return self._connection.sendall(data)
class TCPServerSocket(ServerSocket):
def __init__(self):
family, type, proto, _, addr = socket.getaddrinfo(
"localhost", 0, proto=socket.IPPROTO_TCP)[0]
super().__init__(family, type, proto, addr)
def get_connect_address(self):
return "[{}]:{}".format(*self._server_socket.getsockname())
def get_connect_url(self):
return "connect://" + self.get_connect_address()
class UnixServerSocket(ServerSocket):
def __init__(self, addr):
super().__init__(socket.AF_UNIX, socket.SOCK_STREAM, 0, addr)
def get_connect_address(self):
return self._server_socket.getsockname()
def get_connect_url(self):
return "unix-connect://" + self.get_connect_address()
class PtyServerSocket(ServerChannel):
def __init__(self):
import pty
import tty
primary, secondary = pty.openpty()
tty.setraw(primary)
self._primary = io.FileIO(primary, 'r+b')
self._secondary = io.FileIO(secondary, 'r+b')
def get_connect_address(self):
libc = ctypes.CDLL(None)
libc.ptsname.argtypes = (ctypes.c_int,)
libc.ptsname.restype = ctypes.c_char_p
return libc.ptsname(self._primary.fileno()).decode()
def get_connect_url(self):
return "serial://" + self.get_connect_address()
def close_server(self):
self._secondary.close()
self._primary.close()
def recv(self):
try:
return self._primary.read(4096)
except OSError as e:
# closing the pty results in EIO on Linux, convert it to EOF
if e.errno == errno.EIO:
return b''
raise
def sendall(self, data):
return self._primary.write(data)
class MockGDBServer:
"""
A simple TCP-based GDB server that can test client behavior by receiving
commands and issuing custom-tailored responses.
Responses are generated via the .responder property, which should be an
instance of a class based on MockGDBServerResponder.
"""
responder = None
_socket = None
_thread = None
_receivedData = None
_receivedDataOffset = None
_shouldSendAck = True
def __init__(self, socket):
self._socket = socket
self.responder = MockGDBServerResponder()
def start(self):
# Start a thread that waits for a client connection.
self._thread = threading.Thread(target=self.run)
self._thread.start()
def stop(self):
self._thread.join()
self._thread = None
def get_connect_address(self):
return self._socket.get_connect_address()
def get_connect_url(self):
return self._socket.get_connect_url()
def run(self):
# For testing purposes, we only need to worry about one client
# connecting just one time.
try:
self._socket.accept()
except:
traceback.print_exc()
return
self._shouldSendAck = True
self._receivedData = ""
self._receivedDataOffset = 0
data = None
try:
while True:
data = seven.bitcast_to_string(self._socket.recv())
if data is None or len(data) == 0:
break
self._receive(data)
except self.TerminateConnectionException:
pass
except Exception as e:
print("An exception happened when receiving the response from the gdb server. Closing the client...")
traceback.print_exc()
finally:
self._socket.close_connection()
self._socket.close_server()
def _receive(self, data):
"""
Collects data, parses and responds to as many packets as exist.
Any leftover data is kept for parsing the next time around.
"""
self._receivedData += data
packet = self._parsePacket()
while packet is not None:
self._handlePacket(packet)
packet = self._parsePacket()
def _parsePacket(self):
"""
Reads bytes from self._receivedData, returning:
- a packet's contents if a valid packet is found
- the PACKET_ACK unique object if we got an ack
- None if we only have a partial packet
Raises an InvalidPacketException if unexpected data is received
or if checksums fail.
Once a complete packet is found at the front of self._receivedData,
its data is removed from self._receivedData.
"""
data = self._receivedData
i = self._receivedDataOffset
data_len = len(data)
if data_len == 0:
return None
if i == 0:
# If we're looking at the start of the received data, that means
# we're looking for the start of a new packet, denoted by a $.
# It's also possible we'll see an ACK here, denoted by a +
if data[0] == '+':
self._receivedData = data[1:]
return self.PACKET_ACK
if ord(data[0]) == 3:
self._receivedData = data[1:]
return self.PACKET_INTERRUPT
if data[0] == '$':
i += 1
else:
raise self.InvalidPacketException(
"Unexpected leading byte: %s" % data[0])
# If we're looking beyond the start of the received data, then we're
# looking for the end of the packet content, denoted by a #.
# Note that we pick up searching from where we left off last time
while i < data_len and data[i] != '#':
i += 1
# If there isn't enough data left for a checksum, just remember where
# we left off so we can pick up there the next time around
if i > data_len - 3:
self._receivedDataOffset = i
return None
# If we have enough data remaining for the checksum, extract it and
# compare to the packet contents
packet = data[1:i]
i += 1
try:
check = int(data[i:i + 2], 16)
except ValueError:
raise self.InvalidPacketException("Checksum is not valid hex")
i += 2
if check != checksum(packet):
raise self.InvalidPacketException(
"Checksum %02x does not match content %02x" %
(check, checksum(packet)))
# remove parsed bytes from _receivedData and reset offset so parsing
# can start on the next packet the next time around
self._receivedData = data[i:]
self._receivedDataOffset = 0
return packet
def _sendPacket(self, packet):
self._socket.sendall(seven.bitcast_to_bytes(frame_packet(packet)))
def _handlePacket(self, packet):
if packet is self.PACKET_ACK:
# Ignore ACKs from the client. For the future, we can consider
# adding validation code to make sure the client only sends ACKs
# when it's supposed to.
return
response = ""
# We'll handle the ack stuff here since it's not something any of the
# tests will be concerned about, and it'll get turned off quickly anyway.
if self._shouldSendAck:
self._socket.sendall(seven.bitcast_to_bytes('+'))
if packet == "QStartNoAckMode":
self._shouldSendAck = False
response = "OK"
elif self.responder is not None:
# Delegate everything else to our responder
response = self.responder.respond(packet)
if not isinstance(response, list):
response = [response]
for part in response:
if part is MockGDBServerResponder.RESPONSE_DISCONNECT:
raise self.TerminateConnectionException()
self._sendPacket(part)
PACKET_ACK = object()
PACKET_INTERRUPT = object()
class TerminateConnectionException(Exception):
pass
class InvalidPacketException(Exception):
pass
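# Illustrative usage sketch (not part of the original file): a test would typically
# subclass MockGDBServerResponder, override the packet handling it cares about, and
# attach it to a MockGDBServer. The responder and packet below are hypothetical and
# the function is never called; it only documents the intended wiring.
def _example_mock_server_usage():
    class OkResponder(MockGDBServerResponder):
        def respond(self, packet):
            if packet == "qEcho:test":
                return "OK"
            return super().respond(packet)
    server = MockGDBServer(TCPServerSocket())
    server.responder = OkResponder()
    server.start()
    print("connect a debugger client to", server.get_connect_url())
    # ... drive the client under test, then let it disconnect ...
    server.stop()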
|
tests.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import six, timezone
from django.utils._os import upath
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import doesn't exist.
"""
with self.assertRaisesRegex(ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
get_storage_class raises an error if the requested class doesn't exist.
"""
with self.assertRaises(ImportError):
get_storage_class('django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with self.assertRaisesRegex(ImportError, "No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class('django.core.files.non_existing_storage.NonExistingStorage')
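# For reference (illustrative only, hypothetical paths): the class returned by
# get_storage_class is instantiated like any storage backend, e.g.
#     storage_cls = get_storage_class('django.core.files.storage.FileSystemStorage')
#     storage = storage_cls(location='/tmp/example_media', base_url='/media/')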
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
An empty location falls back to the current working directory.
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
"""
File storage returns a datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file', ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
# should encode special chars except ~!*()'
# like the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url('test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousOperation):
self.storage.exists('..')
with self.assertRaises(SuspiciousOperation):
self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file', ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file', ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# OSErrors aside from EEXIST are still raised.
with self.assertRaises(OSError):
self.storage.save('error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
self.assertFalse(self.storage.exists('normal.file'))
# OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
with self.assertRaises(OSError):
self.storage.delete('error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
@override_settings(
MEDIA_ROOT='media_root',
MEDIA_URL='media_url/',
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
Properties using settings values as defaults should be updated on
referenced settings change while specified values should be unchanged.
"""
storage = self.storage_class(
location='explicit_location',
base_url='explicit_base_url/',
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
'MEDIA_ROOT': 'overridden_media_root',
'MEDIA_URL': 'overridden_media_url/',
'FILE_UPLOAD_PERMISSIONS': 0o333,
'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, 'explicit_location')
self.assertIn('explicit_location', storage.location)
self.assertEqual(storage.base_url, 'explicit_base_url/')
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
self.assertEqual(
defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super(DiscardingFalseContentStorage, self)._save(name, content)
return ''
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = six.StringIO('content')
self.storage.save('tests/stringio', output)
self.assertTrue(self.storage.exists('tests/stringio'))
with self.storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
# under the same name, the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have a filename
# length limit of 255; the path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
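# With the 0o027 umask set in setUp(), the expected default file mode above is
# 0o666 & ~0o027 == 0o640; the directory default tested below is 0o777 & ~0o027 == 0o750.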
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
ContentFile can be saved correctly with the filesystem storage,
if it was initialized with either bytes or unicode content.
"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file-like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
build_operation.py
|
import logging
import os
import subprocess
from craftbuildtools.operations import OperationPlugin
from craftbuildtools.utils import ChangeDir
logger = logging.getLogger("craft-buildtools")
class MavenBuildOperation(OperationPlugin):
def __init__(self):
super(MavenBuildOperation, self).__init__()
self.name = "build_operation"
self.description = "Mange your projects and generate builds (Maven Only)"
def perform(self, *args, **kwargs):
import click
failed_builds = []
successful_builds = []
invalid_project_folders = []
total_project_count = 0
build_projects = kwargs.pop('build_projects')
projects = kwargs.pop('projects')
if (build_projects is None or len(build_projects) == 0) or (projects is None or len(projects) == 0):
click.echo("There are no projects to be built")
return None, None
build_projects.sort()
from threading import Thread
def call_maven_build(bp):
with ChangeDir(bp.directory):
click.echo("Executing build command '%s' on Project %s" % (bp.build_command, bp.name))
# TODO Implement a timer to see how long the build takes, or spawn it in a subprocess.
build_process = subprocess.Popen(bp.build_command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
logger.debug("Build process has been spawned %s" % build_process.pid)
build_success = False
for line in build_process.stdout.readlines():
if b"BUILD SUCCESS" in line:
logger.debug("Maven Build Success in line: '%s'" % line)
build_success = True
break
build_process.wait()
if build_success is True:
click.echo("Project %s has been built successfully" % bp.name)
successful_builds.append(bp.name)
else:
click.echo("Project %s has failed to build" % bp.name)
failed_builds.append(bp.name)
if build_projects is None or len(build_projects) == 0:
click.echo("There were no projects specified to be built")
return None, None
for project_name in build_projects:
total_project_count += 1
project = projects[project_name]
if not os.path.exists(project.directory):
invalid_project_folders.append(project.name)
click.echo("Project %s folder (%s) doesn't exist... Is it valid?" % (project.name, project.directory))
continue
build_thread = Thread(target=call_maven_build, args=(project,))
build_thread.start()
logger.debug("Thread to build %s has been executed" % project.name)
build_thread.join()
logger.debug("Build Thread for %s has expired" % project.name)
failed_projects = len(failed_builds)
built_project = total_project_count - failed_projects - len(invalid_project_folders)
click.echo(
"BUILD OPERATION COMPLETE\nInvalid Projects: %s\nSuccessful Builds: %s\n\tNames: %s\nFailed Builds: %s\n\tNames: %s" %
(",".join(name for name in invalid_project_folders),
built_project,
",".join(name for name in successful_builds),
failed_projects,
','.join(name for name in failed_builds)
))
return successful_builds, failed_builds
build_plugin = MavenBuildOperation()
|
testDriver.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from flask import Flask, flash, request, redirect, url_for
import threading
import requests
import time
import boto3
app = Flask(__name__)
currentrate = 0.1
# get list of s3 objects from file
print("reading bucket list file")
bucketListFile = open("inputbuckets.txt", "r")
bucketList = []
for line in bucketListFile:
bucketList.append(line)
#Configure S3 buckets
s3 = boto3.resource('s3')
goes16bucket = "noaa-goes16"
loopcounter = 0
url = 'http://etl-ingest.eksfg-etl/message'
def datapump():
while True:
global loopcounter
print("Reading: ", "s3://",goes16bucket,"/",bucketList[loopcounter] , sep='')
# obj = s3.Object('njdavids-eksfg','7547259005708861619.jpg')
obj = s3.Object(goes16bucket, str(bucketList[loopcounter]).strip())
my_img = obj.get()['Body'].read()
print("Sending file to ", url)
r = requests.post(url, files={'file': my_img})
time.sleep(1/currentrate)
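# At the initial currentrate of 0.1, this sleeps 10 seconds between posts (one file
# every 10 seconds); POSTing a larger value to /rate shortens the pause accordingly.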
loopcounter = (loopcounter + 1) % len(bucketList)  # wrap around so the pump keeps running past the end of the list
@app.route('/')
def index():
return 'I am alive'
@app.route('/rate', methods = ['POST','GET'])
def rate():
global currentrate
if request.method == 'GET':
print ("GET", currentrate, "per second")
return str(currentrate) + " messages per second"
if request.method == 'POST':
print(request)
content = request.json
print(content)
newrate = content['value']
currentrate = newrate
print(newrate)
return str(newrate) + " messages per second"
if __name__ == '__main__':
x = threading.Thread(target=datapump)
print("Starting datapump thread")
x.start()
app.run(host="0.0.0.0", port=8080)
|
__init__.py
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder.remote import Process, PipeShutdownError, RemoteObjectPipe, logger, log_to_stderr
__all__ = ["RemoteObjectPipe", "start_tool", "Tool", "PipeShutdownError", "logger"]
class Tool:
def start(self, pipe):
"""Starts the tool in the tool process"""
pass
def stop(self, pipe):
"""Stops the tool in the tool process"""
pass
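# Illustrative sketch (hypothetical tool, not part of PyBuilder): a concrete Tool
# overrides start/stop, which the tool process invokes around its pipe.receive() loop, e.g.:
#     class EchoTool(Tool):
#         def start(self, pipe):
#             logger.debug("EchoTool started")
#         def stop(self, pipe):
#             logger.debug("EchoTool stopped")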
def start_tool(pyenv, tools, group=None, name=None, logging=None, tracing=None):
"""
Starts a tool process
"""
if logging:
log_to_stderr()
logger.setLevel(int(logging))
pipe = RemoteObjectPipe.new_pipe()
proc = Process(pyenv, group=group, name=name,
target=_traced_tool if tracing else _instrumented_tool, args=(tools, pipe))
try:
proc.start()
finally:
pipe.close_client_side()
pipe.receive() # Pickle protocol selection
return proc, pipe
def _traced_tool(tools, pipe):
import trace
def _print(*objects, sep=' ', end='', **kwargs):
logger.debug((sep.join(objects) + end).rstrip("\r\n"))
trace.print = _print
trace.Trace(count=0).runfunc(_instrumented_tool, tools, pipe)
def _instrumented_tool(tools, pipe):
try:
for tool in tools:
tool.start(pipe)
while True:
pipe.receive()
except PipeShutdownError:
for tool in reversed(tools):
tool.stop(pipe)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
pipe.close(e)
finally:
try:
pipe.close()
finally:
import threading
main = threading.main_thread()
current = threading.current_thread()
if main != current:
logger.warn("current thread %s is not the main %s in the tool process", current, main)
blocked_threads = False
for t in threading.enumerate():
if not t.daemon and t != current:
logger.warn("non-daemon thread %s is blocking the tool process shutdown", t)
blocked_threads = True
if blocked_threads:
import os
import atexit
try:
atexit._run_exitfuncs()
finally:
os._exit(1)
|
s3op.py
|
from __future__ import print_function
import json
import time
import math
import sys
import os
import traceback
from hashlib import sha1
from tempfile import NamedTemporaryFile
from multiprocessing import Process, Queue
from itertools import starmap, chain, islice
try:
# python2
from urlparse import urlparse
from Queue import Full as QueueFull
except:
# python3
from urllib.parse import urlparse
from queue import Full as QueueFull
import click
# s3op can be launched as a stand-alone script. We must set
# PYTHONPATH for the parent Metaflow explicitly.
sys.path.insert(0,\
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
# we use Metaflow's parallel_map instead of
# multiprocessing.Pool because https://bugs.python.org/issue31886
from metaflow.util import TempDir, url_quote, url_unquote
from metaflow.multicore_utils import parallel_map
from metaflow.datastore.util.s3util import aws_retry
NUM_WORKERS_DEFAULT = 64
class S3Url(object):
def __init__(self,
bucket, path, url, local, prefix,
content_type=None, metadata=None, range=None):
self.bucket = bucket
self.path = path
self.url = url
self.local = local
self.prefix = prefix
self.content_type = content_type
self.metadata = metadata
self.range = range
def __str__(self):
return self.url
# We use error codes instead of Exceptions, which are trickier to
# handle reliably in a multi-process world
ERROR_INVALID_URL = 4
ERROR_NOT_FULL_PATH = 5
ERROR_URL_NOT_FOUND = 6
ERROR_URL_ACCESS_DENIED = 7
ERROR_WORKER_EXCEPTION = 8
ERROR_VERIFY_FAILED = 9
ERROR_LOCAL_FILE_NOT_FOUND = 10
def format_triplet(prefix, url='', local=''):
return u' '.join(url_quote(x).decode('utf-8') for x in (prefix, url, local))
# It is not obvious what the right way to deal
# with boto errors is. This function can be replaced
# with better error handling code.
def normalize_client_error(err):
error_code = err.response['Error']['Code']
try:
return int(error_code)
except ValueError:
if error_code == 'AccessDenied':
return 403
return error_code
# S3 worker pool
def worker(result_file_name, queue, mode):
# Interpret mode, it can either be a single op or something like
# info_download or info_upload which implies:
# - for download: we need to return the information as well
# - for upload: we need to not overwrite the file if it exists
modes = mode.split('_')
pre_op_info = False
if len(modes) > 1:
pre_op_info = True
mode = modes[1]
else:
mode = modes[0]
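# For example, mode 'info_download' yields pre_op_info=True and mode='download'
# (fetch metadata before downloading), while a plain 'upload' leaves pre_op_info=False.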
def op_info(url):
try:
head = s3.head_object(Bucket=url.bucket, Key=url.path)
to_return = {
'error': None,
'size': head['ContentLength'],
'content_type': head['ContentType'],
'metadata': head['Metadata']}
except client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
to_return = {'error': ERROR_URL_NOT_FOUND, 'raise_error': err}
elif error_code == 403:
to_return = {'error': ERROR_URL_ACCESS_DENIED, 'raise_error': err}
else:
to_return = {'error': error_code, 'raise_error': err}
return to_return
with open(result_file_name, 'w') as result_file:
try:
from metaflow.datastore.util.s3util import get_s3_client
s3, client_error = get_s3_client()
while True:
url, idx = queue.get()
if url is None:
break
if mode == 'info':
result = op_info(url)
orig_error = result.get('raise_error', None)
if orig_error:
del result['raise_error']
with open(url.local, 'w') as f:
json.dump(result, f)
elif mode == 'download':
result_info = None
is_missing = False
if pre_op_info:
result_info = op_info(url)
if result_info['error'] == ERROR_URL_NOT_FOUND:
is_missing = True
result_file.write("%d %d\n" % (idx, -ERROR_URL_NOT_FOUND))
elif result_info['error'] == ERROR_URL_ACCESS_DENIED:
is_missing = True
result_file.write("%d %d\n" % (idx, -ERROR_URL_ACCESS_DENIED))
elif result_info['error'] is not None:
raise result_info['raise_error']
if is_missing:
continue
tmp = NamedTemporaryFile(dir='.', delete=False)
try:
if url.range is None:
s3.download_file(url.bucket, url.path, tmp.name)
else:
# We do get_object. We don't actually do any retries
# here because the higher levels will do the retry if
# needed
resp = s3.get_object(
Bucket=url.bucket,
Key=url.path,
Range=url.range)
code = str(resp['ResponseMetadata']['HTTPStatusCode'])
if code[0] == '2':
tmp.write(resp['Body'].read())
else:
# TODO: Better raised error
raise RuntimeError("Could not load file")
tmp.close()
os.rename(tmp.name, url.local)
except client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
pass # We skip this
else:
raise
except:
# TODO specific error message for out of disk space
tmp.close()
os.unlink(tmp.name)
raise
# If we have metadata that we retrieved, we also write it out
# to a file
if result_info:
with open('%s_meta' % url.local, mode='w') as f:
args = {'size': result_info['size']}
if result_info['content_type']:
args['content_type'] = result_info['content_type']
if result_info['metadata'] is not None:
args['metadata'] = result_info['metadata']
json.dump(args, f)
# Finally, we record the size in the result file since
# the size is used for verification and other purposes and
# we want to avoid extra file operations for this simple process
result_file.write("%d %d\n" % (idx, result_info['size']))
else:
# This is upload, if we have a pre_op, it means we do not
# want to overwrite
do_upload = False
if pre_op_info:
result_info = op_info(url)
if result_info['error'] == ERROR_URL_NOT_FOUND:
# We only upload if the file is not found
do_upload = True
else:
# No pre-op so we upload
do_upload = True
if do_upload:
extra=None
if url.content_type or url.metadata:
extra = {}
if url.content_type:
extra['ContentType'] = url.content_type
if url.metadata is not None:
extra['Metadata'] = url.metadata
s3.upload_file(url.local, url.bucket, url.path, ExtraArgs=extra)
# We indicate that the file was uploaded
result_file.write("%d %d\n" % (idx, 0))
except:
traceback.print_exc()
sys.exit(ERROR_WORKER_EXCEPTION)
def start_workers(mode, urls, num_workers):
# We start the minimum of len(urls) or num_workers to avoid starting
# workers that will definitely do nothing
num_workers = min(num_workers, len(urls))
queue = Queue(len(urls) + num_workers)
procs = {}
# 1. push sources and destinations to the queue
for idx, elt in enumerate(urls):
queue.put((elt, idx))
# 2. push end-of-queue markers
for i in range(num_workers):
queue.put((None, None))
# 3. Prepare the result structure
sz_results = [None]*len(urls)
# 4. start processes
with TempDir() as output_dir:
for i in range(num_workers):
file_path = os.path.join(output_dir, str(i))
p = Process(target=worker, args=(file_path, queue, mode))
p.start()
procs[p] = file_path
# 5. wait for the processes to finish; we continuously update procs
# to remove all processes that have finished already
while procs:
new_procs = {}
for proc, out_path in procs.items():
proc.join(timeout=1)
if proc.exitcode is not None:
if proc.exitcode != 0:
msg = 'Worker process failed (exit code %d)'\
% proc.exitcode
print('s3op failed:\n%s' % msg, file=sys.stderr)
sys.exit(proc.exitcode)
# Read the output file if all went well
with open(out_path, 'r') as out_file:
for line in out_file:
line_split = line.split(' ')
sz_results[int(line_split[0])] = int(line_split[1])
else:
# Put this process back in the processes to check
new_procs[proc] = out_path
procs = new_procs
return sz_results
def process_urls(mode, urls, verbose, num_workers):
if verbose:
print('%sing %d files..' % (mode.capitalize(), len(urls)),
file=sys.stderr)
start = time.time()
sz_results = start_workers(mode, urls, num_workers)
end = time.time()
if verbose:
total_size = sum(sz for sz in sz_results if sz is not None and sz > 0)
bw = total_size / (end - start)
print('%sed %d files, %s in total, in %d seconds (%s/s).'\
% (mode.capitalize(),
len(urls),
with_unit(total_size),
end - start,
with_unit(bw)),
file=sys.stderr)
return sz_results
# Utility functions
def with_unit(x):
if x > 1024**3:
return '%.1fGB' % (x / 1024.**3)
elif x > 1024**2:
return '%.1fMB' % (x / 1024.**2)
elif x > 1024:
return '%.1fKB' % (x / 1024.)
else:
return '%d bytes' % x
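# For example, with_unit(1536) returns '1.5KB' and with_unit(512) returns '512 bytes'.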
# S3Ops class is just a wrapper for get_size and list_prefix
# required by @aws_retry decorator, which needs the reset_client
# method. Otherwise they would be just stand-alone functions.
class S3Ops(object):
def __init__(self):
self.s3 = None
self.client_error = None
def reset_client(self, hard_reset=False):
from metaflow.datastore.util.s3util import get_s3_client
if hard_reset or self.s3 is None:
self.s3, self.client_error = get_s3_client()
@aws_retry
def get_info(self, url):
self.reset_client()
try:
head = self.s3.head_object(Bucket=url.bucket, Key=url.path)
return True, url, [(S3Url(
bucket=url.bucket,
path=url.path,
url=url.url,
local=url.local,
prefix=url.prefix,
content_type=head['ContentType'],
metadata=head['Metadata'],
range=url.range), head['ContentLength'])]
except self.client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
return False, url, ERROR_URL_NOT_FOUND
elif error_code == 403:
return False, url, ERROR_URL_ACCESS_DENIED
else:
raise
@aws_retry
def list_prefix(self, prefix_url, delimiter=''):
self.reset_client()
url_base = 's3://%s/' % prefix_url.bucket
try:
paginator = self.s3.get_paginator('list_objects_v2')
urls = []
for page in paginator.paginate(Bucket=prefix_url.bucket,
Prefix=prefix_url.path,
Delimiter=delimiter):
# note that an url may be both a prefix and an object
# - the trailing slash is significant in S3
if 'Contents' in page:
for key in page.get('Contents', []):
url = url_base + key['Key']
urlobj = S3Url(url=url,
bucket=prefix_url.bucket,
path=key['Key'],
local=generate_local_path(url),
prefix=prefix_url.url)
urls.append((urlobj, key['Size']))
if 'CommonPrefixes' in page:
# we get CommonPrefixes if Delimiter is a non-empty string
for key in page.get('CommonPrefixes', []):
url = url_base + key['Prefix']
urlobj = S3Url(url=url,
bucket=prefix_url.bucket,
path=key['Prefix'],
local=None,
prefix=prefix_url.url)
urls.append((urlobj, None))
return True, prefix_url, urls
except self.s3.exceptions.NoSuchBucket:
return False, prefix_url, ERROR_URL_NOT_FOUND
except self.client_error as err:
if err.response['Error']['Code'] == 'AccessDenied':
return False, prefix_url, ERROR_URL_ACCESS_DENIED
else:
raise
# We want to reuse an s3 client instance over multiple operations.
# This is accomplished by op_ functions below.
def op_get_info(urls):
s3 = S3Ops()
return [s3.get_info(url) for url in urls]
def op_list_prefix(prefix_urls):
s3 = S3Ops()
return [s3.list_prefix(prefix) for prefix in prefix_urls]
def op_list_prefix_nonrecursive(prefix_urls):
s3 = S3Ops()
return [s3.list_prefix(prefix, delimiter='/') for prefix in prefix_urls]
def exit(exit_code, url):
if exit_code == ERROR_INVALID_URL:
msg = 'Invalid url: %s' % url.url
elif exit_code == ERROR_NOT_FULL_PATH:
msg = 'URL not a full path: %s' % url.url
elif exit_code == ERROR_URL_NOT_FOUND:
msg = 'URL not found: %s' % url.url
elif exit_code == ERROR_URL_ACCESS_DENIED:
msg = 'Access denied to URL: %s' % url.url
elif exit_code == ERROR_WORKER_EXCEPTION:
msg = 'Download failed'
elif exit_code == ERROR_VERIFY_FAILED:
msg = 'Verification failed for URL %s, local file %s'\
% (url.url, url.local)
elif exit_code == ERROR_LOCAL_FILE_NOT_FOUND:
msg = 'Local file not found: %s' % url
else:
msg = 'Unknown error'
print('s3op failed:\n%s' % msg, file=sys.stderr)
sys.exit(exit_code)
def verify_results(urls, verbose=False):
for url, expected in urls:
if verbose:
print('verifying %s, expected %s' % (url, expected),
file=sys.stderr)
try:
got = os.stat(url.local).st_size
except OSError:
exit(ERROR_VERIFY_FAILED, url)
if expected != got:
exit(ERROR_VERIFY_FAILED, url)
if url.content_type or url.metadata:
# Verify that we also have a metadata file present
try:
os.stat('%s_meta' % url.local)
except OSError:
exit(ERROR_VERIFY_FAILED, url)
def generate_local_path(url, suffix=None):
# this function generates a safe local file name corresponding to
# an S3 URL. URLs may be longer than maximum file length limit on Linux,
# so we mostly hash the URL but retain the leaf part as a convenience
# feature to ease eyeballing
quoted = url_quote(url)
fname = quoted.split(b'/')[-1].replace(b'.', b'_').replace(b'-', b'_')
sha = sha1(quoted).hexdigest()
if suffix:
return u'-'.join((sha, fname.decode('utf-8'), suffix))
return u'-'.join((sha, fname.decode('utf-8')))
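# For example, a URL ending in '.../data.csv' maps to something like
# '<sha1-of-quoted-url>-data_csv': the hash keeps the name short and safe while the
# sanitized leaf stays readable.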
def parallel_op(op, lst, num_workers):
# parallel op divides work equally amongst num_workers
# processes. This is a good strategy if the cost is
# uniform over the units of work, e.g. op_get_info, which
# is a single HEAD request to S3.
#
# This approach is less optimal with op_list_prefix where
# the cost of S3 listing per prefix can vary drastically.
# We could optimize this case by using a worker model with
# a queue, like for downloads but the difference here is
# that we need to return a value, which would require a
# bit more work - something to consider if this turns out
# to be a bottleneck.
if lst:
num = min(len(lst), num_workers)
batch_size = math.ceil(len(lst) / float(num))
batches = []
it = iter(lst)
while True:
batch = list(islice(it, batch_size))
if batch:
batches.append(batch)
else:
break
it = parallel_map(op, batches, max_parallel=num)
for x in chain.from_iterable(it):
yield x
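# Illustrative usage sketch (assumes `urls` is a list of S3Url objects and that
# parallel_map returns each batch's results in input order):
#
#   infos = list(parallel_op(op_get_info, urls, num_workers=8))
#
# Because the batches are contiguous slices of `lst`, the yielded results
# follow the order of the input list, one result per input element.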
# CLI
@click.group()
def cli():
pass
@cli.command('list', help='List S3 objects')
@click.option('--inputs',
type=click.Path(exists=True),
help='Read input prefixes from the given file.')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--recursive/--no-recursive',
default=False,
show_default=True,
              help='List prefixes recursively.')
@click.argument('prefixes', nargs=-1)
def lst(prefixes,
inputs=None,
num_workers=None,
recursive=None):
urllist = []
for prefix, _ in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(url=prefix,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=None,
prefix=prefix)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
urllist.append(url)
op = op_list_prefix if recursive else op_list_prefix_nonrecursive
urls = []
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
else:
exit(ret, prefix_url)
for url, size in urls:
if size is None:
print(format_triplet(url.prefix, url.url))
else:
print(format_triplet(url.prefix, url.url, str(size)))
@cli.command(help='Upload files to S3')
@click.option('--file',
'files',
type=(click.Path(exists=True), str),
multiple=True,
help='Local file->S3Url pair to upload. '
'Can be specified multiple times.')
@click.option('--filelist',
type=click.Path(exists=True),
help='Read local file -> S3 URL mappings from the given file.')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--verbose/--no-verbose',
default=True,
show_default=True,
help='Print status information on stderr.')
@click.option('--overwrite/--no-overwrite',
default=True,
show_default=True,
help='Overwrite key if it already exists in S3.')
@click.option('--listing/--no-listing',
default=False,
show_default=True,
              help='Print the S3 URLs uploaded to on stdout.')
def put(files=None,
filelist=None,
num_workers=None,
verbose=None,
overwrite=True,
listing=None):
def _files():
for local, url in files:
yield url_unquote(local), url_unquote(url), None, None
if filelist:
for line in open(filelist, mode='rb'):
r = json.loads(line)
local = r['local']
url = r['url']
content_type = r.get('content_type', None)
metadata = r.get('metadata', None)
if not os.path.exists(local):
exit(ERROR_LOCAL_FILE_NOT_FOUND, local)
yield local, url, content_type, metadata
def _make_url(local, user_url, content_type, metadata):
src = urlparse(user_url)
url = S3Url(url=user_url,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=local,
prefix=None,
content_type=content_type,
metadata=metadata)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
if not src.path:
exit(ERROR_NOT_FULL_PATH, url)
return url
urls = list(starmap(_make_url, _files()))
ul_op = 'upload'
if not overwrite:
ul_op = 'info_upload'
sz_results = process_urls(ul_op, urls, verbose, num_workers)
urls = [url for url, sz in zip(urls, sz_results) if sz is not None]
if listing:
for url in urls:
print(format_triplet(url.url))
def _populate_prefixes(prefixes, inputs):
# Returns a tuple: first element is the prefix and second element
# is the optional range (or None if the entire prefix is requested)
if prefixes:
prefixes = [(url_unquote(p), None) for p in prefixes]
else:
prefixes = []
if inputs:
with open(inputs, mode='rb') as f:
for l in f:
s = l.split(b' ')
if len(s) > 1:
prefixes.append(
(url_unquote(s[0].strip()), url_unquote(s[1].strip())))
else:
prefixes.append((url_unquote(s[0].strip()), None))
return prefixes
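# Illustrative --inputs file format (an assumption based on the parsing above):
# one url-quoted S3 URL per line, optionally followed by a space and a range
# token, e.g.
#
#   s3://my-bucket/key-a
#   s3://my-bucket/key-b bytes=0-1023
#
# The exact range syntax is whatever the callers of this script pass through;
# lines without a second field yield (prefix, None).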
@cli.command(help='Download files from S3')
@click.option('--recursive/--no-recursive',
default=False,
show_default=True,
help='Download prefixes recursively.')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--inputs',
type=click.Path(exists=True),
help='Read input prefixes from the given file.')
@click.option('--verify/--no-verify',
default=True,
show_default=True,
help='Verify that files were loaded correctly.')
@click.option('--info/--no-info',
default=True,
show_default=True,
help='Return user tags and content-type')
@click.option('--allow-missing/--no-allow-missing',
default=False,
show_default=True,
help='Do not exit if missing files are detected. '\
'Implies --verify.')
@click.option('--verbose/--no-verbose',
default=True,
show_default=True,
help='Print status information on stderr.')
@click.option('--listing/--no-listing',
default=False,
show_default=True,
help='Print S3 URL -> local file mapping on stdout.')
@click.argument('prefixes', nargs=-1)
def get(prefixes,
recursive=None,
num_workers=None,
inputs=None,
verify=None,
info=None,
allow_missing=None,
verbose=None,
listing=None):
# Construct a list of URL (prefix) objects
urllist = []
for prefix, r in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(url=prefix,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=generate_local_path(prefix),
prefix=prefix,
range=r)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
if not recursive and not src.path:
exit(ERROR_NOT_FULL_PATH, url)
urllist.append(url)
# Construct a url->size mapping and get content-type and metadata if needed
op = None
dl_op = 'download'
if recursive:
op = op_list_prefix
if verify or verbose or info:
dl_op = 'info_download'
if op:
urls = []
# NOTE - we must retain the order of prefixes requested
# and the listing order returned by S3
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
elif ret == ERROR_URL_NOT_FOUND and allow_missing:
urls.append((prefix_url, None))
else:
exit(ret, prefix_url)
else:
# pretend zero size since we don't need it for anything.
# it can't be None though, to make sure the listing below
# works correctly (None denotes a missing file)
urls = [(prefix_url, 0) for prefix_url in urllist]
# exclude the non-existent files from loading
to_load = [url for url, size in urls if size is not None]
sz_results = process_urls(dl_op, to_load, verbose, num_workers)
# We check if there is any access denied
is_denied = [sz == -ERROR_URL_ACCESS_DENIED for sz in sz_results]
if any(is_denied):
# Find the first one to return that as an error
for i, b in enumerate(is_denied):
if b:
exit(ERROR_URL_ACCESS_DENIED, to_load[i])
if not allow_missing:
is_missing = [sz == -ERROR_URL_NOT_FOUND for sz in sz_results]
if any(is_missing):
# Find the first one to return that as an error
for i, b in enumerate(is_missing):
if b:
exit(ERROR_URL_NOT_FOUND, to_load[i])
# Postprocess
if verify:
# Verify only results with an actual size (so actual files)
verify_results([(url, sz) for url, sz in zip(to_load, sz_results)
if sz != -ERROR_URL_NOT_FOUND], verbose=verbose)
idx_in_sz = 0
if listing:
for url, _ in urls:
sz = None
if idx_in_sz != len(to_load) and url.url == to_load[idx_in_sz].url:
sz = sz_results[idx_in_sz] if sz_results[idx_in_sz] >= 0 else None
idx_in_sz += 1
if sz is None:
# This means that either the initial url had a None size or
# that after loading, we found a None size
print(format_triplet(url.url))
else:
print(format_triplet(url.prefix, url.url, url.local))
@cli.command(help='Get info about files from S3')
@click.option('--num-workers',
default=NUM_WORKERS_DEFAULT,
show_default=True,
help='Number of concurrent connections.')
@click.option('--inputs',
type=click.Path(exists=True),
help='Read input prefixes from the given file.')
@click.option('--verbose/--no-verbose',
default=True,
show_default=True,
help='Print status information on stderr.')
@click.option('--listing/--no-listing',
default=False,
show_default=True,
help='Print S3 URL -> local file mapping on stdout.')
@click.argument('prefixes', nargs=-1)
def info(prefixes,
num_workers=None,
inputs=None,
verbose=None,
listing=None):
# Construct a list of URL (prefix) objects
urllist = []
for prefix, _ in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(url=prefix,
bucket=src.netloc,
path=src.path.lstrip('/'),
local=generate_local_path(prefix, suffix='info'),
prefix=prefix,
range=None)
if src.scheme != 's3':
exit(ERROR_INVALID_URL, url)
urllist.append(url)
process_urls('info', urllist, verbose, num_workers)
if listing:
for url in urllist:
print(format_triplet(url.prefix, url.url, url.local))
if __name__ == '__main__':
cli(auto_envvar_prefix='S3OP')
|
test_add_vectors.py
|
import time
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
collection_id = "test_add"
ADD_TIMEOUT = 60
tag = "1970-01-01"
add_interval_time = 1.5
nb = 6000
class TestAddBase:
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, collection):
'''
target: test add vector, then check collection existence
        method: add vector and call has_collection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert assert_has_collection(connect, collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, collection):
'''
        target: test deleting collection_1 after adding a vector to collection_2
        method: add vector to collection_2, then delete collection_1
        expected: status ok
        '''
        param = {'collection_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
        status = connect.create_collection(param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(collection, vector)
        status = connect.drop_collection(param['collection_name'])
        assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
connect.flush([collection])
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, collection):
'''
        target: test deleting collection_1 a while after adding a vector to collection_2
        method: add vector, sleep, and delete collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, collection, get_simple_index):
'''
target: test build index add after vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, collection, get_simple_index):
'''
target: test build index add after vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(collection, 1, vector)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(collection, 1, vector)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2 a while
method: search collection , sleep, and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, collection):
'''
target: test add vectors in collection, use customize ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors, ids)
connect.flush([collection])
assert status.OK()
assert len(ids) == nq
status, result = connect.search_vectors(collection, top_k, query_records=vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use customize ids first, and then use no ids
expected: status not OK
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use not ids first, and then use customize ids
expected: status not OK
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
status, ids = connect.add_vectors(collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, collection):
'''
target: test add vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, collection, get_vector_id):
'''
target: test add vectors in collection, use customize ids, which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for _ in range(nq)]
with pytest.raises(Exception):
connect.add_vectors(collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the partition_tag param
expected: the collection row count equals to nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition and add vectors in it
expected: the collection row count equals to nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the not existed partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition, add vectors with the not existed partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
new_tag = "new_tag"
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=new_tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_existed(self, connect, collection):
'''
target: test add vectors in collection created before
        method: create collection and add vectors in it repeatedly, with the partition_tag param
expected: the collection row count equals to nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
for i in range(5):
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
# @pytest.mark.level(2)
# def test_add_vectors_without_connect(self, dis_connect, collection):
# '''
# target: test add vectors without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nq = 5
# vectors = gen_vectors(nq, dim)
# with pytest.raises(Exception) as e:
# status, ids = dis_connect.add_vectors(collection, vectors)
def test_add_collection_not_existed(self, connect):
'''
target: test add vectors in collection, which not existed before
method: add vectors collection not existed, check the status
expected: status not ok
'''
nq = 5
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(gen_unique_str("not_exist_collection"), vector)
assert not status.OK()
assert not ids
def test_add_vector_dim_not_matched(self, connect, collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(collection, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, collection):
'''
target: test add vectors, and search it after sleep
        method: use vectors[0] as the query vector
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors)
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
# TODO: enable
# @pytest.mark.repeat(10)
@pytest.mark.timeout(ADD_TIMEOUT)
def _test_add_vector_with_multiprocessing(self, args):
'''
        target: test add vectors with multiple processes
        method: 4 processes add vectors concurrently, 5 iterations each
        expected: status ok and the collection count equals process_num * loop_num
'''
collection = gen_unique_str()
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
milvus.create_collection(param)
vector = gen_single_vector(dim)
process_num = 4
loop_num = 5
processes = []
def add():
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
i = 0
while i < loop_num:
status, ids = milvus.add_vectors(collection, vector)
i = i + 1
# milvus.disconnect()
for i in range(process_num):
p = Process(target=add, args=())
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
time.sleep(2)
status, count = milvus.count_collection(collection)
assert count == process_num * loop_num
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_add_rows_count_multi_threading(self, args):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and add vectors in it(idmap),
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
collection = gen_unique_str()
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
milvus.create_collection(param)
vectors = gen_vectors(nb, dim)
def add(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
status, result = milvus.add_vectors(collection, records=vectors)
assert status.OK()
status = milvus.flush([collection])
assert status.OK()
for i in range(thread_num):
x = threading.Thread(target=add, args=(i, ))
threads.append(x)
x.start()
for th in threads:
th.join()
status, res = milvus.count_collection(collection)
assert res == thread_num * nb
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of L2
        method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
for j in range(5):
for i in range(20):
status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddAsync:
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, status, result):
logging.getLogger().info("In callback check status")
assert status.OK()
def check_status_not_ok(self, status, result):
logging.getLogger().info("In callback check status")
assert not status.OK()
def test_insert_async(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
future = connect.add_vectors(collection, insert_vec_list, _async=True)
status, ids = future.result()
connect.flush([collection])
assert len(ids) == nb
assert status.OK()
@pytest.mark.level(2)
def test_insert_async_false(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(collection, insert_vec_list, _async=False)
connect.flush([collection])
assert len(ids) == nb
assert status.OK()
def test_insert_async_callback(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
future = connect.add_vectors(collection, insert_vec_list, _async=True, _callback=self.check_status)
future.done()
@pytest.mark.level(2)
def test_insert_async_long(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = 50000
insert_vec_list = gen_vectors(nb, dim)
future = connect.add_vectors(collection, insert_vec_list, _async=True, _callback=self.check_status)
status, result = future.result()
assert status.OK()
assert len(result) == nb
connect.flush([collection])
status, count = connect.count_collection(collection)
assert status.OK()
logging.getLogger().info(status)
logging.getLogger().info(count)
assert count == nb
def test_insert_async_callback_timeout(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = 100000
insert_vec_list = gen_vectors(nb, dim)
future = connect.add_vectors(collection, insert_vec_list, _async=True, _callback=self.check_status, timeout=1)
future.done()
def test_insert_async_invalid_params(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
insert_vec_list = gen_vectors(nb, dim)
collection_new = gen_unique_str()
future = connect.add_vectors(collection_new, insert_vec_list, _async=True)
status, result = future.result()
assert not status.OK()
# TODO: add assertion
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
insert_vec_list = []
collection_new = gen_unique_str()
with pytest.raises(Exception) as e:
future = connect.add_vectors(collection_new, insert_vec_list, _async=True)
class TestAddIP:
"""
******************************************************************
The following cases are used to test `add_vectors / index / search / delete` mixed function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, ip_collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
param = {'collection_name': ip_collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, ip_collection):
'''
target: test add vector, then check collection existence
        method: add vector and call has_collection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert assert_has_collection(connect, ip_collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, ip_collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, ip_collection):
'''
        target: test deleting collection_1 after adding a vector to collection_2
        method: add vector to collection_2, then delete collection_1
expected: status ok
'''
param = {'collection_name': 'test_add_vector_delete_another_collection',
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, ip_collection):
'''
        target: test deleting collection_1 a while after adding a vector to collection_2
        method: add vector, sleep, and delete collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, ip_collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index add after vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index add after vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
if index_type == IndexType.IVF_PQ:
pytest.skip("Skip some PQ cases")
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, ip_collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_collection, 1, vector)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_collection, 1, vector)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
connect.flush([ip_collection])
status, result = connect.search_vectors(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
time.sleep(add_interval_time)
status, result = connect.search_vectors(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2 a while
method: search collection , sleep, and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, ip_collection):
'''
target: test add vectors in collection, use customize ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors, ids)
assert status.OK()
connect.flush([ip_collection])
assert len(ids) == nq
# check search result
status, result = connect.search_vectors(ip_collection, top_k, vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, ip_collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use customize ids first, and then use no ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(ip_collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, ip_collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use not ids first, and then use customize ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
status, ids = connect.add_vectors(ip_collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, ip_collection):
'''
target: test add vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, ip_collection, get_vector_id):
'''
target: test add vectors in collection, use customize ids, which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for i in range(nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, ip_collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
assert len(ids) == nq
# @pytest.mark.level(2)
# def test_add_vectors_without_connect(self, dis_connect, ip_collection):
# '''
# target: test add vectors without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nq = 5
# vectors = gen_vectors(nq, dim)
# with pytest.raises(Exception) as e:
# status, ids = dis_connect.add_vectors(ip_collection, vectors)
def test_add_vector_dim_not_matched(self, connect, ip_collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(ip_collection, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, ip_collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(ip_collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, ip_collection):
'''
target: test add vectors, and search it after sleep
        method: use vectors[0] as the query vector
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_collection, vectors)
time.sleep(add_interval_time)
status, result = connect.search_vectors(ip_collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of IP
        method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
for j in range(10):
for i in range(20):
status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddAdvance:
@pytest.fixture(
scope="function",
params=[
1,
1000,
6000
],
)
def insert_count(self, request):
yield request.param
def test_insert_much(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_ip(self, connect, ip_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(ip_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_jaccard(self, connect, jac_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(jac_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_hamming(self, connect, ham_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(ham_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_tanimoto(self, connect, tanimoto_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(tanimoto_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
class TestNameInvalid(object):
"""
Test adding vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(collection_name, vectors)
assert not status.OK()
@pytest.mark.level(2)
def test_add_vectors_with_invalid_tag_name(self, connect, get_collection_name, get_tag_name):
collection_name = get_collection_name
tag_name = get_tag_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(collection_name, vectors, partition_tag=tag_name)
assert not status.OK()
class TestAddCollectionVectorsInvalid(object):
single_vector = gen_single_vector(dim)
vectors = gen_vectors(2, dim)
"""
Test adding vectors with invalid vectors
"""
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def gen_vector(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vector_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_single_vector = copy.deepcopy(self.single_vector)
tmp_single_vector[0][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(collection, tmp_single_vector)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(collection, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_jaccard(self, connect, jac_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(jac_collection, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_hamming(self, connect, ham_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(ham_collection, tmp_vectors)
|
ApiController_Mouse.py
|
from AirSimClient import *
import inputs
from threading import Thread, Event
from time import sleep
import signal
import sys
import os  # used by the os._exit() calls in the exception handlers below
# Change if mouse has different absolute return values
mouse_absolute_maximum = 2000
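# For reference, the recording loop below scales raw absolute readings into angles:
# with mouse_absolute_maximum = 2000, an ABS_X value of 1000 maps to a roll of
# (1000 / 2000) * 3 = 1.5. (Illustrative arithmetic only, not an extra setting.)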
class Receiver:
def __init__(self):
self.recording_signal = Event()
self.recording_Thread = Thread(target=self.recording_function, args=[])
self.recording_Thread.daemon = True
self.default_roll = 0
self.default_pitch = 0
self.default_yaw = 0
self.default_throttle = 0
# Axis values
self.roll = self.default_roll
self.pitch = self.default_pitch
self.yaw = self.default_yaw
self.throttle = self.default_throttle
# Checks if Receiver is recording input
def recording(self):
return self.recording_signal.is_set()
# Creates the thread needed for the receiver to record data
def create_threads(self):
self.recording_Thread = Thread(target=self.recording_function, args=[])
self.recording_Thread.daemon = True
# Starts recording inputs async
def get_inputs(self):
self.recording_signal.set()
self.create_threads()
self.recording_Thread.start()
# Stops recording
def stop_inputs(self):
self.recording_signal.clear()
self.recording_Thread.join()
def reset(self):
# Stop if running
if self.recording():
self.stop_inputs()
# Set inputs to default values
self.roll = self.default_roll
self.pitch = self.default_pitch
self.yaw = self.default_yaw
self.throttle = self.default_throttle
# Runs async and records input
def recording_function(self):
try:
while self.recording_signal.is_set():
events = inputs.get_mouse()
for event in events:
if event.code == "ABS_X":
self.roll = (event.state / mouse_absolute_maximum) * 3
elif event.code == "ABS_Y":
self.pitch = (event.state / mouse_absolute_maximum) * 3
elif event.code == "BTN_LEFT" and event.state == 1:
self.yaw += 1
elif event.code == "BTN_RIGHT" and event.state == 1:
self.yaw -= 1
elif event.code == "REL_WHEEL":
self.throttle += event.state
if self.throttle < 0:
self.throttle = 0
return 0
        except Exception:
return 1
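# A minimal usage sketch for Receiver on its own, outside the AirSim loop below
# (assumes the `inputs` package can see a mouse; not part of the original control flow):
#
#     rec = Receiver()
#     rec.get_inputs()            # start the background recording thread
#     sleep(1.0)                  # let some mouse events accumulate
#     print(rec.pitch, rec.roll, rec.yaw, rec.throttle)
#     rec.stop_inputs()           # clear the flag and join the thread
#                                 # (the join may wait for one more mouse event,
#                                 # since inputs.get_mouse() blocks)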
rec = Receiver()
def signal_handler(signal, frame):
print("You pressed Ctrl+C!")
if rec.recording():
rec.stop_inputs()
print("Exiting program")
sys.exit(0)
# MAIN
if __name__ == '__main__':
# connect to the AirSim simulator
client = MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
# Assign interrupt call
signal.signal(signal.SIGINT, signal_handler)
print("-----------------------\nCONTROLS:\nMouse Y => Pitch\nMouse X => Roll\nLMB => -Yaw\nRMB => +Yaw"
"\nMouse wheel => Throttle\n^C => Exit gracefully\n-----------------------\n")
client.reset()
client.enableApiControl(True)
client.wait_key("Press any key to start")
    # Start receiving inputs
rec.get_inputs()
print("-----------------------\nStarting")
try:
while True:
print("Clients Pitch, Roll, Yaw: ")
print(client.getPitchRollYaw())
print("Pitch, Roll, Yaw, Throttle from input: ")
print([rec.pitch, rec.roll, rec.yaw, rec.throttle])
client.moveByAngleThrottle(rec.pitch - client.getPitchRollYaw()[0], rec.roll - client.getPitchRollYaw()[1],
rec.throttle, client.getPitchRollYaw()[2] + rec.yaw, 0.225)
sleep(0.225)
except SystemExit:
os._exit(0)
except Exception as e:
print("Something went horribly wrong") # some other exception got
print(str(e))
os._exit(1)
|
prueba.py
|
import threading
import time
import sys
def cuenta(n, name):
    # Count from n up to 9, printing the current value and the thread name every 2 seconds.
    count = n
    while count < 10:
        print(count, name)
        count += 1
        time.sleep(2)
t = threading.Thread(target=cuenta, args=(1, '1'))
t2 = threading.Thread(target=cuenta, args=(2, '2'))
t3 = threading.Thread(target=cuenta, args=(3, '3'))
t.start()
t2.start()
t3.start()
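# If the main thread should block until all three counters reach 10, the threads could
# be joined (a sketch only; the original script simply lets them run to completion):
#
#     for thread in (t, t2, t3):
#         thread.join()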
|
_logger.py
|
"""
.. References and links rendered by Sphinx are kept here as "module documentation" so that they can
be used in the ``Logger`` docstrings but do not pollute ``help(logger)`` output.
.. |Logger| replace:: :class:`~Logger`
.. |add| replace:: :meth:`~Logger.add()`
.. |remove| replace:: :meth:`~Logger.remove()`
.. |complete| replace:: :meth:`~Logger.complete()`
.. |catch| replace:: :meth:`~Logger.catch()`
.. |bind| replace:: :meth:`~Logger.bind()`
.. |contextualize| replace:: :meth:`~Logger.contextualize()`
.. |patch| replace:: :meth:`~Logger.patch()`
.. |opt| replace:: :meth:`~Logger.opt()`
.. |log| replace:: :meth:`~Logger.log()`
.. |level| replace:: :meth:`~Logger.level()`
.. |enable| replace:: :meth:`~Logger.enable()`
.. |disable| replace:: :meth:`~Logger.disable()`
.. |str| replace:: :class:`str`
.. |int| replace:: :class:`int`
.. |bool| replace:: :class:`bool`
.. |tuple| replace:: :class:`tuple`
.. |namedtuple| replace:: :func:`namedtuple<collections.namedtuple>`
.. |list| replace:: :class:`list`
.. |dict| replace:: :class:`dict`
.. |str.format| replace:: :meth:`str.format()`
.. |Path| replace:: :class:`pathlib.Path`
.. |match.groupdict| replace:: :meth:`re.Match.groupdict()`
.. |Handler| replace:: :class:`logging.Handler`
.. |sys.stderr| replace:: :data:`sys.stderr`
.. |sys.exc_info| replace:: :func:`sys.exc_info()`
.. |time| replace:: :class:`datetime.time`
.. |datetime| replace:: :class:`datetime.datetime`
.. |timedelta| replace:: :class:`datetime.timedelta`
.. |open| replace:: :func:`open()`
.. |logging| replace:: :mod:`logging`
.. |signal| replace:: :mod:`signal`
.. |contextvars| replace:: :mod:`contextvars`
.. |Thread.run| replace:: :meth:`Thread.run()<threading.Thread.run()>`
.. |Exception| replace:: :class:`Exception`
.. |locale.getpreferredencoding| replace:: :func:`locale.getpreferredencoding()`
.. |AbstractEventLoop| replace:: :class:`AbstractEventLoop<asyncio.AbstractEventLoop>`
.. |asyncio.get_event_loop| replace:: :func:`asyncio.get_event_loop()`
.. |asyncio.run| replace:: :func:`asyncio.run()`
.. |loop.run_until_complete| replace::
:meth:`loop.run_until_complete()<asyncio.loop.run_until_complete()>`
.. |loop.create_task| replace:: :meth:`loop.create_task()<asyncio.loop.create_task()>`
.. |logger.trace| replace:: :meth:`logger.trace()<Logger.trace()>`
.. |logger.debug| replace:: :meth:`logger.debug()<Logger.debug()>`
.. |logger.info| replace:: :meth:`logger.info()<Logger.info()>`
.. |logger.success| replace:: :meth:`logger.success()<Logger.success()>`
.. |logger.warning| replace:: :meth:`logger.warning()<Logger.warning()>`
.. |logger.error| replace:: :meth:`logger.error()<Logger.error()>`
.. |logger.critical| replace:: :meth:`logger.critical()<Logger.critical()>`
.. |file-like object| replace:: ``file-like object``
.. _file-like object: https://docs.python.org/3/glossary.html#term-file-object
.. |callable| replace:: ``callable``
.. _callable: https://docs.python.org/3/library/functions.html#callable
.. |coroutine function| replace:: ``coroutine function``
.. _coroutine function: https://docs.python.org/3/glossary.html#term-coroutine-function
.. |re.Pattern| replace:: ``re.Pattern``
.. _re.Pattern: https://docs.python.org/3/library/re.html#re-objects
.. |better_exceptions| replace:: ``better_exceptions``
.. _better_exceptions: https://github.com/Qix-/better-exceptions
.. _Pendulum: https://pendulum.eustace.io/docs/#tokens
.. _@sdispater: https://github.com/sdispater
.. _@Qix-: https://github.com/Qix-
.. _Formatting directives: https://docs.python.org/3/library/string.html#format-string-syntax
.. _reentrant: https://en.wikipedia.org/wiki/Reentrancy_(computing)
"""
import asyncio
import builtins
import contextlib
import functools
import itertools
import logging
import re
import sys
import warnings
from collections import namedtuple
from inspect import isclass, iscoroutinefunction, isgeneratorfunction
from multiprocessing import current_process
from os.path import basename, splitext
from threading import current_thread
from . import _colorama, _defaults, _filters
from ._better_exceptions import ExceptionFormatter
from ._colorizer import Colorizer
from ._datetime import aware_now
from ._error_interceptor import ErrorInterceptor
from ._file_sink import FileSink
from ._get_frame import get_frame
from ._handler import Handler
from ._locks_machinery import create_logger_lock
from ._recattrs import RecordException, RecordFile, RecordLevel, RecordProcess, RecordThread
from ._simple_sinks import AsyncSink, CallableSink, StandardSink, StreamSink
if sys.version_info >= (3, 6):
from os import PathLike
else:
from pathlib import PurePath as PathLike
if sys.version_info >= (3, 7):
from contextvars import ContextVar
elif sys.version_info >= (3, 5, 3):
from aiocontextvars import ContextVar
else:
from contextvars import ContextVar
Level = namedtuple("Level", ["name", "no", "color", "icon"])
start_time = aware_now()
context = ContextVar("loguru_context", default={})
class Core:
def __init__(self):
levels = [
Level(
"TRACE",
_defaults.LOGURU_TRACE_NO,
_defaults.LOGURU_TRACE_COLOR,
_defaults.LOGURU_TRACE_ICON,
),
Level(
"DEBUG",
_defaults.LOGURU_DEBUG_NO,
_defaults.LOGURU_DEBUG_COLOR,
_defaults.LOGURU_DEBUG_ICON,
),
Level(
"INFO",
_defaults.LOGURU_INFO_NO,
_defaults.LOGURU_INFO_COLOR,
_defaults.LOGURU_INFO_ICON,
),
Level(
"SUCCESS",
_defaults.LOGURU_SUCCESS_NO,
_defaults.LOGURU_SUCCESS_COLOR,
_defaults.LOGURU_SUCCESS_ICON,
),
Level(
"WARNING",
_defaults.LOGURU_WARNING_NO,
_defaults.LOGURU_WARNING_COLOR,
_defaults.LOGURU_WARNING_ICON,
),
Level(
"ERROR",
_defaults.LOGURU_ERROR_NO,
_defaults.LOGURU_ERROR_COLOR,
_defaults.LOGURU_ERROR_ICON,
),
Level(
"CRITICAL",
_defaults.LOGURU_CRITICAL_NO,
_defaults.LOGURU_CRITICAL_COLOR,
_defaults.LOGURU_CRITICAL_ICON,
),
]
self.levels = {level.name: level for level in levels}
self.levels_ansi_codes = {
name: Colorizer.ansify(level.color) for name, level in self.levels.items()
}
self.levels_ansi_codes[None] = ""
self.handlers_count = itertools.count()
self.handlers = {}
self.extra = {}
self.patcher = None
self.min_level = float("inf")
self.enabled = {}
self.activation_list = []
self.activation_none = True
self.lock = create_logger_lock()
def __getstate__(self):
state = self.__dict__.copy()
state["lock"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.lock = create_logger_lock()
class Logger:
"""An object to dispatch logging messages to configured handlers.
    The |Logger| is the core object of ``loguru``: every logging configuration and usage passes
    through a call to one of its methods. There is only one logger, so there is no need to retrieve
one before usage.
Once the ``logger`` is imported, it can be used to write messages about events happening in your
code. By reading the output logs of your application, you gain a better understanding of the
flow of your program and you more easily track and debug unexpected behaviors.
Handlers to which the logger sends log messages are added using the |add| method. Note that you
can use the |Logger| right after import as it comes pre-configured (logs are emitted to
    |sys.stderr| by default). Messages can be logged with different severity levels and formatted
    using brace attributes, much like the |str.format| method.
When a message is logged, a "record" is associated with it. This record is a dict which contains
information about the logging context: time, function, file, line, thread, level... It also
    contains the ``__name__`` of the module; this is why you don't need named loggers.
You should not instantiate a |Logger| by yourself, use ``from loguru import logger`` instead.
"""
def __init__(self, core, exception, depth, record, lazy, colors, raw, capture, patcher, extra):
self._core = core
self._options = (exception, depth, record, lazy, colors, raw, capture, patcher, extra)
def __repr__(self):
return "<loguru.logger handlers=%r>" % list(self._core.handlers.values())
def add(
self,
sink,
*,
level=_defaults.LOGURU_LEVEL,
format=_defaults.LOGURU_FORMAT,
filter=_defaults.LOGURU_FILTER,
colorize=_defaults.LOGURU_COLORIZE,
serialize=_defaults.LOGURU_SERIALIZE,
backtrace=_defaults.LOGURU_BACKTRACE,
diagnose=_defaults.LOGURU_DIAGNOSE,
enqueue=_defaults.LOGURU_ENQUEUE,
catch=_defaults.LOGURU_CATCH,
**kwargs
):
r"""Add a handler sending log messages to a sink adequately configured.
Parameters
----------
sink : |file-like object|_, |str|, |Path|, |callable|_, |coroutine function|_ or |Handler|
An object in charge of receiving formatted logging messages and propagating them to an
appropriate endpoint.
level : |int| or |str|, optional
The minimum severity level from which logged messages should be sent to the sink.
format : |str| or |callable|_, optional
The template used to format logged messages before being sent to the sink.
filter : |callable|_, |str| or |dict|, optional
A directive optionally used to decide for each logged message whether it should be sent
to the sink or not.
colorize : |bool|, optional
Whether the color markups contained in the formatted message should be converted to ansi
codes for terminal coloration, or stripped otherwise. If ``None``, the choice is
automatically made based on the sink being a tty or not.
serialize : |bool|, optional
Whether the logged message and its records should be first converted to a JSON string
before being sent to the sink.
backtrace : |bool|, optional
            Whether the formatted exception trace should be extended upward, beyond the catching
point, to show the full stacktrace which generated the error.
diagnose : |bool|, optional
            Whether the exception trace should display the variables' values to ease debugging.
This should be set to ``False`` in production to avoid leaking sensitive data.
enqueue : |bool|, optional
Whether the messages to be logged should first pass through a multiprocess-safe queue
before reaching the sink. This is useful while logging to a file through multiple
processes. This also has the advantage of making logging calls non-blocking.
catch : |bool|, optional
            Whether errors occurring while the sink handles log messages should be automatically
            caught. If ``True``, an exception message is displayed on |sys.stderr| but the exception
            is not propagated to the caller, preventing your app from crashing.
**kwargs
Additional parameters that are only valid to configure a coroutine or file sink (see
below).
If and only if the sink is a coroutine function, the following parameter applies:
Parameters
----------
loop : |AbstractEventLoop|, optional
The event loop in which the asynchronous logging task will be scheduled and executed. If
``None``, the loop returned by |asyncio.get_event_loop| is used.
If and only if the sink is a file path, the following parameters apply:
Parameters
----------
rotation : |str|, |int|, |time|, |timedelta| or |callable|_, optional
            A condition indicating when the current logged file should be closed and a new one
started.
retention : |str|, |int|, |timedelta| or |callable|_, optional
A directive filtering old files that should be removed during rotation or end of
program.
compression : |str| or |callable|_, optional
A compression or archive format to which log files should be converted at closure.
delay : |bool|, optional
Whether the file should be created as soon as the sink is configured, or delayed until
first logged message. It defaults to ``False``.
mode : |str|, optional
The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the
file in appending mode).
buffering : |int|, optional
The buffering policy as for built-in |open| function. It defaults to ``1`` (line
buffered file).
encoding : |str|, optional
The file encoding as for built-in |open| function. If ``None``, it defaults to
|locale.getpreferredencoding|.
**kwargs
            Other parameters are passed to the built-in |open| function.
Returns
-------
:class:`int`
An identifier associated with the added sink and which should be used to
|remove| it.
Notes
-----
Extended summary follows.
.. _sink:
.. rubric:: The sink parameter
        The ``sink`` handles incoming log messages and proceeds to write them somewhere and
        somehow. A sink can take many forms:
- A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``. Anything with
a ``.write()`` method is considered as a file-like object. Custom handlers may also
implement ``flush()`` (called after each logged message), ``stop()`` (called at sink
termination) and ``complete()`` (awaited by the eponymous method).
- A file path as |str| or |Path|. It can be parametrized with some additional parameters,
see below.
- A |callable|_ (such as a simple function) like ``lambda msg: print(msg)``. This
          allows the logging procedure to be entirely defined by user preferences and needs.
        - An asynchronous |coroutine function|_ defined with the ``async def`` statement. The
          coroutine object returned by such a function will be added to the event loop using
|loop.create_task|. The tasks should be awaited before ending the loop by using
|complete|.
- A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records
are automatically converted to the structure expected by the |logging| module.
Note that the logging functions are not `reentrant`_. This means you should avoid using
the ``logger`` inside any of your sinks or from within |signal| handlers. Otherwise, you
may face deadlock if the module's sink was not explicitly disabled.
.. _message:
.. rubric:: The logged message
The logged message passed to all added sinks is nothing more than a string of the
formatted log, to which a special attribute is associated: the ``.record`` which is a dict
containing all contextual information possibly needed (see below).
Logged messages are formatted according to the ``format`` of the added sink. This format
is usually a string containing braces fields to display attributes from the record dict.
If fine-grained control is needed, the ``format`` can also be a function which takes the
record as parameter and return the format template string. However, note that in such a
case, you should take care of appending the line ending and exception field to the returned
format, while ``"\n{exception}"`` is automatically appended for convenience if ``format`` is
a string.
The ``filter`` attribute can be used to control which messages are effectively passed to the
        sink and which ones are ignored. A function can be used, accepting the record as an
argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If
a string is used, only the records with the same ``name`` and its children will be allowed.
        One can also pass a ``dict`` mapping module names to the minimum required level. In such a
        case, each log record will search for its closest parent in the ``dict`` and use the associated
level as the filter. The ``dict`` values can be ``int`` severity, ``str`` level name or
``True`` and ``False`` to respectively authorize and discard all module logs
unconditionally. In order to set a default level, the ``""`` module name should be used as
it is the parent of all modules (it does not suppress global ``level`` threshold, though).
Note that while calling a logging method, the keyword arguments (if any) are automatically
added to the ``extra`` dict for convenient contextualization (in addition to being used for
formatting).
.. _levels:
.. rubric:: The severity levels
Each logged message is associated with a severity level. These levels make it possible to
prioritize messages and to choose the verbosity of the logs according to usages. For
        example, it allows displaying some debugging information to a developer while hiding it
        from the end user running the application.
The ``level`` attribute of every added sink controls the minimum threshold from which log
messages are allowed to be emitted. While using the ``logger``, you are in charge of
configuring the appropriate granularity of your logs. It is possible to add even more custom
levels by using the |level| method.
Here are the standard levels with their default severity value, each one is associated with
a logging method of the same name:
+----------------------+------------------------+------------------------+
| Level name | Severity value | Logger method |
+======================+========================+========================+
| ``TRACE`` | 5 | |logger.trace| |
+----------------------+------------------------+------------------------+
| ``DEBUG`` | 10 | |logger.debug| |
+----------------------+------------------------+------------------------+
| ``INFO`` | 20 | |logger.info| |
+----------------------+------------------------+------------------------+
| ``SUCCESS`` | 25 | |logger.success| |
+----------------------+------------------------+------------------------+
| ``WARNING`` | 30 | |logger.warning| |
+----------------------+------------------------+------------------------+
| ``ERROR`` | 40 | |logger.error| |
+----------------------+------------------------+------------------------+
| ``CRITICAL`` | 50 | |logger.critical| |
+----------------------+------------------------+------------------------+
.. _record:
.. rubric:: The record dict
The record is just a Python dict, accessible from sinks by ``message.record``. It contains
all contextual information of the logging call (time, function, file, line, level, etc.).
        Each of its keys can be used in the handler's ``format`` so the corresponding value is
        properly displayed in the logged message (e.g. ``"{level}"`` -> ``"INFO"``). Some of the
        record's values are objects with two or more attributes; these can be formatted with
        ``"{key.attr}"`` (``"{key}"`` would display one by default). `Formatting directives`_ like
        ``"{key: >3}"`` also work and are particularly useful for time (see below).
+------------+---------------------------------+----------------------------+
| Key | Description | Attributes |
+============+=================================+============================+
| elapsed | The time elapsed since the | See |timedelta| |
| | start of the program | |
+------------+---------------------------------+----------------------------+
| exception | The formatted exception if any, | ``type``, ``value``, |
| | ``None`` otherwise | ``traceback`` |
+------------+---------------------------------+----------------------------+
| extra | The dict of attributes | None |
| | bound by the user (see |bind|) | |
+------------+---------------------------------+----------------------------+
| file | The file where the logging call | ``name`` (default), |
| | was made | ``path`` |
+------------+---------------------------------+----------------------------+
| function | The function from which the | None |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| level | The severity used to log the | ``name`` (default), |
| | message | ``no``, ``icon`` |
+------------+---------------------------------+----------------------------+
| line | The line number in the source | None |
| | code | |
+------------+---------------------------------+----------------------------+
| message | The logged message (not yet | None |
| | formatted) | |
+------------+---------------------------------+----------------------------+
| module | The module where the logging | None |
| | call was made | |
+------------+---------------------------------+----------------------------+
| name | The ``__name__`` where the | None |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| process | The process in which the | ``name``, ``id`` (default) |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| thread | The thread in which the | ``name``, ``id`` (default) |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| time | The aware local time when the | See |datetime| |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
.. _time:
.. rubric:: The time formatting
To use your favorite time representation, you can set it directly in the time formatter
specifier of your handler format, like for example ``format="{time:HH:mm:ss} {message}"``.
Note that this datetime represents your local time, and it is also made timezone-aware,
so you can display the UTC offset to avoid ambiguities.
The time field can be formatted using more human-friendly tokens. These constitute a subset
        of those used by the `Pendulum`_ library of `@sdispater`_. To escape a token, just add
square brackets around it, for example ``"[YY]"`` would display literally ``"YY"``.
If you prefer to display UTC rather than local time, you can add ``"!UTC"`` at the very end
of the time format, like ``{time:HH:mm:ss!UTC}``. Doing so will convert the ``datetime``
to UTC before formatting.
If no time formatter specifier is used, like for example if ``format="{time} {message}"``,
the default one will use ISO 8601.
+------------------------+---------+----------------------------------------+
| | Token | Output |
+========================+=========+========================================+
| Year | YYYY | 2000, 2001, 2002 ... 2012, 2013 |
| +---------+----------------------------------------+
| | YY | 00, 01, 02 ... 12, 13 |
+------------------------+---------+----------------------------------------+
| Quarter | Q | 1 2 3 4 |
+------------------------+---------+----------------------------------------+
| Month | MMMM | January, February, March ... |
| +---------+----------------------------------------+
| | MMM | Jan, Feb, Mar ... |
| +---------+----------------------------------------+
| | MM | 01, 02, 03 ... 11, 12 |
| +---------+----------------------------------------+
| | M | 1, 2, 3 ... 11, 12 |
+------------------------+---------+----------------------------------------+
| Day of Year | DDDD | 001, 002, 003 ... 364, 365 |
| +---------+----------------------------------------+
| | DDD | 1, 2, 3 ... 364, 365 |
+------------------------+---------+----------------------------------------+
| Day of Month | DD | 01, 02, 03 ... 30, 31 |
| +---------+----------------------------------------+
| | D | 1, 2, 3 ... 30, 31 |
+------------------------+---------+----------------------------------------+
| Day of Week | dddd | Monday, Tuesday, Wednesday ... |
| +---------+----------------------------------------+
| | ddd | Mon, Tue, Wed ... |
| +---------+----------------------------------------+
| | d | 0, 1, 2 ... 6 |
+------------------------+---------+----------------------------------------+
| Days of ISO Week | E | 1, 2, 3 ... 7 |
+------------------------+---------+----------------------------------------+
| Hour | HH | 00, 01, 02 ... 23, 24 |
| +---------+----------------------------------------+
| | H | 0, 1, 2 ... 23, 24 |
| +---------+----------------------------------------+
| | hh | 01, 02, 03 ... 11, 12 |
| +---------+----------------------------------------+
| | h | 1, 2, 3 ... 11, 12 |
+------------------------+---------+----------------------------------------+
| Minute | mm | 00, 01, 02 ... 58, 59 |
| +---------+----------------------------------------+
| | m | 0, 1, 2 ... 58, 59 |
+------------------------+---------+----------------------------------------+
| Second | ss | 00, 01, 02 ... 58, 59 |
| +---------+----------------------------------------+
| | s | 0, 1, 2 ... 58, 59 |
+------------------------+---------+----------------------------------------+
| Fractional Second | S | 0 1 ... 8 9 |
| +---------+----------------------------------------+
| | SS | 00, 01, 02 ... 98, 99 |
| +---------+----------------------------------------+
| | SSS | 000 001 ... 998 999 |
| +---------+----------------------------------------+
| | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]|
| +---------+----------------------------------------+
| | SSSSSS | 000000 000001 ... 999998 999999 |
+------------------------+---------+----------------------------------------+
| AM / PM | A | AM, PM |
+------------------------+---------+----------------------------------------+
| Timezone | Z | -07:00, -06:00 ... +06:00, +07:00 |
| +---------+----------------------------------------+
| | ZZ | -0700, -0600 ... +0600, +0700 |
| +---------+----------------------------------------+
| | zz | EST CST ... MST PST |
+------------------------+---------+----------------------------------------+
| Seconds timestamp | X | 1381685817, 1234567890.123 |
+------------------------+---------+----------------------------------------+
| Microseconds timestamp | x | 1234567890123 |
+------------------------+---------+----------------------------------------+
.. _file:
.. rubric:: The file sinks
If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs.
The path can also contain a special ``"{time}"`` field that will be formatted with the
current date at file creation.
The ``rotation`` check is made before logging each message. If there is already an existing
        file with the same name as the file to be created, then the existing file is renamed by
appending the date to its basename to prevent file overwriting. This parameter accepts:
        - an |int| which corresponds to the maximum file size in bytes before the current
          logged file is closed and a new one is started.
- a |timedelta| which indicates the frequency of each new rotation.
- a |time| which specifies the hour when the daily rotation should occur.
- a |str| for human-friendly parametrization of one of the previously enumerated types.
Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``,
``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ...
- a |callable|_ which will be invoked before logging. It should accept two arguments: the
logged message and the file object, and it should return ``True`` if the rotation should
happen now, ``False`` otherwise.
The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files are
selected if they match the pattern ``"basename(.*).ext(.*)"`` (possible time fields are
beforehand replaced with ``.*``) based on the sink file. This parameter accepts:
- an |int| which indicates the number of log files to keep, while older files are removed.
- a |timedelta| which specifies the maximum age of files to keep.
- a |str| for human-friendly parametrization of the maximum age of files to keep.
Examples: ``"1 week, 3 days"``, ``"2 months"``, ...
        - a |callable|_ which will be invoked before the retention process. It should accept the
          list of log files as argument and do whatever it wants with them (moving files, removing
          them, etc.).
The ``compression`` happens at rotation or at sink stop if rotation is ``None``. This
parameter accepts:
- a |str| which corresponds to the compressed or archived file extension. This can be one
of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``,
``"tar.xz"``, ``"zip"``.
        - a |callable|_ which will be invoked before file termination. It should accept the path of
          the log file as argument and do whatever it wants with it (custom compression, network
          sending, removing it, etc.).
Either way, if you use a custom function designed according to your preferences, you must be
very careful not to use the ``logger`` within your function. Otherwise, there is a risk that
        your program hangs because of a deadlock.
.. _color:
.. rubric:: The color markups
To add colors to your logs, you just have to enclose your format string with the appropriate
tags (e.g. ``<red>some message</red>``). These tags are automatically removed if the sink
doesn't support ansi codes. For convenience, you can use ``</>`` to close the last opening
tag without repeating its name (e.g. ``<red>another message</>``).
The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to
the configured color of the logged message level.
Tags which are not recognized will raise an exception during parsing, to inform you about
possible misuse. If you wish to display a markup tag literally, you can escape it by
prepending a ``\`` like for example ``\<blue>``. If, for some reason, you need to escape a
string programmatically, note that the regex used internally to parse markup tags is
``r"\\?</?((?:[fb]g\s)?[^<>\s]*)>"``.
Note that when logging a message with ``opt(colors=True)``, color tags present in the
formatting arguments (``args`` and ``kwargs``) are completely ignored. This is important if
you need to log strings containing markups that might interfere with the color tags (in this
case, do not use f-string).
Here are the available tags (note that compatibility may vary depending on terminal):
+------------------------------------+--------------------------------------+
| Color (abbr) | Styles (abbr) |
+====================================+======================================+
| Black (k) | Bold (b) |
+------------------------------------+--------------------------------------+
| Blue (e) | Dim (d) |
+------------------------------------+--------------------------------------+
| Cyan (c) | Normal (n) |
+------------------------------------+--------------------------------------+
| Green (g) | Italic (i) |
+------------------------------------+--------------------------------------+
| Magenta (m) | Underline (u) |
+------------------------------------+--------------------------------------+
| Red (r) | Strike (s) |
+------------------------------------+--------------------------------------+
| White (w) | Reverse (v) |
+------------------------------------+--------------------------------------+
| Yellow (y) | Blink (l) |
+------------------------------------+--------------------------------------+
| | Hide (h) |
+------------------------------------+--------------------------------------+
Usage:
+-----------------+-------------------------------------------------------------------+
| Description | Examples |
| +---------------------------------+---------------------------------+
| | Foreground | Background |
+=================+=================================+=================================+
| Basic colors | ``<red>``, ``<r>`` | ``<GREEN>``, ``<G>`` |
+-----------------+---------------------------------+---------------------------------+
| Light colors | ``<light-blue>``, ``<le>`` | ``<LIGHT-CYAN>``, ``<LC>`` |
+-----------------+---------------------------------+---------------------------------+
| 8-bit colors | ``<fg 86>``, ``<fg 255>`` | ``<bg 42>``, ``<bg 9>`` |
+-----------------+---------------------------------+---------------------------------+
| Hex colors | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` |
+-----------------+---------------------------------+---------------------------------+
| RGB colors | ``<fg 0,95,0>`` | ``<bg 72,119,65>`` |
+-----------------+---------------------------------+---------------------------------+
| Stylizing | ``<bold>``, ``<b>``, ``<underline>``, ``<u>`` |
+-----------------+-------------------------------------------------------------------+
.. _env:
.. rubric:: The environment variables
The default values of sink parameters can be entirely customized. This is particularly
useful if you don't like the log format of the pre-configured sink.
        Each of the |add| default parameters can be modified by setting the ``LOGURU_[PARAM]``
environment variable. For example on Linux: ``export LOGURU_FORMAT="{time} - {message}"``
or ``export LOGURU_DIAGNOSE=NO``.
The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]``
environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"``
or ``setx LOGURU_TRACE_ICON "🚀"``. If you use the ``set`` command, do not include quotes
but escape special symbol as needed, e.g. ``set LOGURU_DEBUG_COLOR=^<blue^>``.
If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT``
variable to ``False``.
On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On
Windows, don't forget to restart your terminal for the change to be taken into account.
Examples
--------
>>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
>>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB")
>>> def debug_only(record):
... return record["level"].name == "DEBUG"
...
>>> logger.add("debug.log", filter=debug_only) # Other levels are filtered out
>>> def my_sink(message):
... record = message.record
... update_db(message, time=record["time"], level=record["level"])
...
>>> logger.add(my_sink)
>>> level_per_module = {
... "": "DEBUG",
... "third.lib": "WARNING",
... "anotherlib": False
... }
>>> logger.add(lambda m: print(m, end=""), filter=level_per_module, level=0)
>>> async def publish(message):
... await api.post(message)
...
>>> logger.add(publish, serialize=True)
>>> from logging import StreamHandler
>>> logger.add(StreamHandler(sys.stderr), format="{message}")
>>> class RandomStream:
... def __init__(self, seed, threshold):
... self.threshold = threshold
... random.seed(seed)
... def write(self, message):
... if random.random() > self.threshold:
... print(message)
...
>>> stream_object = RandomStream(seed=12345, threshold=0.25)
>>> logger.add(stream_object, level="INFO")
"""
with self._core.lock:
handler_id = next(self._core.handlers_count)
error_interceptor = ErrorInterceptor(catch, handler_id)
if colorize is None and serialize:
colorize = False
if isinstance(sink, (str, PathLike)):
path = sink
name = "'%s'" % path
if colorize is None:
colorize = False
wrapped_sink = FileSink(path, **kwargs)
kwargs = {}
encoding = wrapped_sink.encoding
terminator = "\n"
exception_prefix = ""
elif hasattr(sink, "write") and callable(sink.write):
name = getattr(sink, "name", None) or repr(sink)
if colorize is None:
colorize = _colorama.should_colorize(sink)
if colorize is True and _colorama.should_wrap(sink):
stream = _colorama.wrap(sink)
else:
stream = sink
wrapped_sink = StreamSink(stream)
encoding = getattr(sink, "encoding", None)
terminator = "\n"
exception_prefix = ""
elif isinstance(sink, logging.Handler):
name = repr(sink)
if colorize is None:
colorize = False
wrapped_sink = StandardSink(sink)
encoding = getattr(sink, "encoding", None)
terminator = ""
exception_prefix = "\n"
elif iscoroutinefunction(sink) or iscoroutinefunction(getattr(sink, "__call__", None)):
name = getattr(sink, "__name__", None) or repr(sink)
if colorize is None:
colorize = False
loop = kwargs.pop("loop", None)
# The worker thread needs an event loop, it can't create a new one internally because it
# has to be accessible by the user while calling "complete()", instead we use the global
# one when the sink is added. If "enqueue=False" the event loop is dynamically retrieved
# at each logging call, which is much more convenient. However, coroutine can't access
# running loop in Python 3.5.2 and earlier versions, see python/asyncio#452.
if enqueue and loop is None:
loop = asyncio.get_event_loop()
coro = sink if iscoroutinefunction(sink) else sink.__call__
wrapped_sink = AsyncSink(coro, loop, error_interceptor)
encoding = "utf8"
terminator = "\n"
exception_prefix = ""
elif callable(sink):
name = getattr(sink, "__name__", None) or repr(sink)
if colorize is None:
colorize = False
wrapped_sink = CallableSink(sink)
encoding = "utf8"
terminator = "\n"
exception_prefix = ""
else:
raise TypeError("Cannot log to objects of type '%s'" % type(sink).__name__)
if kwargs:
raise TypeError("add() got an unexpected keyword argument '%s'" % next(iter(kwargs)))
if filter is None:
filter_func = None
elif filter == "":
filter_func = _filters.filter_none
elif isinstance(filter, str):
parent = filter + "."
length = len(parent)
filter_func = functools.partial(_filters.filter_by_name, parent=parent, length=length)
elif isinstance(filter, dict):
level_per_module = {}
for module, level_ in filter.items():
if module is not None and not isinstance(module, str):
raise TypeError(
"The filter dict contains an invalid module, "
"it should be a string (or None), not: '%s'" % type(module).__name__
)
if level_ is False:
levelno_ = False
elif level_ is True:
levelno_ = 0
elif isinstance(level_, str):
try:
levelno_ = self.level(level_).no
except ValueError:
raise ValueError(
"The filter dict contains a module '%s' associated to a level name "
"which does not exist: '%s'" % (module, level_)
)
elif isinstance(level_, int):
levelno_ = level_
else:
raise TypeError(
"The filter dict contains a module '%s' associated to an invalid level, "
"it should be an integer, a string or a boolean, not: '%s'"
% (module, type(level_).__name__)
)
if levelno_ < 0:
raise ValueError(
"The filter dict contains a module '%s' associated to an invalid level, "
"it should be a positive integer, not: '%d'" % (module, levelno_)
)
level_per_module[module] = levelno_
filter_func = functools.partial(
_filters.filter_by_level, level_per_module=level_per_module
)
elif callable(filter):
if filter == builtins.filter:
raise ValueError(
"The built-in 'filter()' function cannot be used as a 'filter' parameter, "
"this is most likely a mistake (please double-check the arguments passed "
"to 'logger.add()')."
)
filter_func = filter
else:
raise TypeError(
"Invalid filter, it should be a function, a string or a dict, not: '%s'"
% type(filter).__name__
)
if isinstance(level, str):
levelno = self.level(level).no
elif isinstance(level, int):
levelno = level
else:
raise TypeError(
"Invalid level, it should be an integer or a string, not: '%s'"
% type(level).__name__
)
if levelno < 0:
raise ValueError(
"Invalid level value, it should be a positive integer, not: %d" % levelno
)
if isinstance(format, str):
try:
formatter = Colorizer.prepare_format(format + terminator + "{exception}")
except ValueError as e:
raise ValueError(
"Invalid format, color markups could not be parsed correctly"
) from e
is_formatter_dynamic = False
elif callable(format):
if format == builtins.format:
raise ValueError(
"The built-in 'format()' function cannot be used as a 'format' parameter, "
"this is most likely a mistake (please double-check the arguments passed "
"to 'logger.add()')."
)
formatter = format
is_formatter_dynamic = True
else:
raise TypeError(
"Invalid format, it should be a string or a function, not: '%s'"
% type(format).__name__
)
if not isinstance(encoding, str):
encoding = "ascii"
with self._core.lock:
exception_formatter = ExceptionFormatter(
colorize=colorize,
encoding=encoding,
diagnose=diagnose,
backtrace=backtrace,
hidden_frames_filename=self.catch.__code__.co_filename,
prefix=exception_prefix,
)
handler = Handler(
name=name,
sink=wrapped_sink,
levelno=levelno,
formatter=formatter,
is_formatter_dynamic=is_formatter_dynamic,
filter_=filter_func,
colorize=colorize,
serialize=serialize,
enqueue=enqueue,
id_=handler_id,
error_interceptor=error_interceptor,
exception_formatter=exception_formatter,
levels_ansi_codes=self._core.levels_ansi_codes,
)
handlers = self._core.handlers.copy()
handlers[handler_id] = handler
self._core.min_level = min(self._core.min_level, levelno)
self._core.handlers = handlers
return handler_id
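    # Usage sketch for add(), condensed from the docstring examples above and not part
    # of the library itself (assumes `from loguru import logger` and `import sys`):
    #
    #     logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
    #     logger.add("file_{time}.log", level="TRACE", rotation="100 MB")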
def remove(self, handler_id=None):
"""Remove a previously added handler and stop sending logs to its sink.
Parameters
----------
handler_id : |int| or ``None``
The id of the sink to remove, as it was returned by the |add| method. If ``None``, all
handlers are removed. The pre-configured handler is guaranteed to have the index ``0``.
Raises
------
ValueError
If ``handler_id`` is not ``None`` but there is no active handler with such id.
Examples
--------
>>> i = logger.add(sys.stderr, format="{message}")
>>> logger.info("Logging")
Logging
>>> logger.remove(i)
>>> logger.info("No longer logging")
"""
if not (handler_id is None or isinstance(handler_id, int)):
raise TypeError(
"Invalid handler id, it should be an integer as returned "
"by the 'add()' method (or None), not: '%s'" % type(handler_id).__name__
)
with self._core.lock:
handlers = self._core.handlers.copy()
if handler_id is not None and handler_id not in handlers:
raise ValueError("There is no existing handler with id %d" % handler_id) from None
if handler_id is None:
handler_ids = list(handlers.keys())
else:
handler_ids = [handler_id]
for handler_id in handler_ids:
handler = handlers.pop(handler_id)
# This needs to be done first in case "stop()" raises an exception
levelnos = (h.levelno for h in handlers.values())
self._core.min_level = min(levelnos, default=float("inf"))
self._core.handlers = handlers
handler.stop()
def complete(self):
"""Wait for the end of enqueued messages and asynchronous tasks scheduled by handlers.
This method proceeds in two steps: first it waits for all logging messages added to handlers
with ``enqueue=True`` to be processed, then it returns an object that can be awaited to
finalize all logging tasks added to the event loop by coroutine sinks.
It can be called from non-asynchronous code. This is especially recommended when the
``logger`` is utilized with ``multiprocessing`` to ensure messages put to the internal
queue have been properly transmitted before leaving a child process.
The returned object should be awaited before the end of a coroutine executed by
|asyncio.run| or |loop.run_until_complete| to ensure all asynchronous logging messages are
        processed. The function |asyncio.get_event_loop| is called beforehand; only tasks scheduled
        in the same loop as the current one will be awaited by the method.
Returns
-------
:term:`awaitable`
An awaitable object which ensures all asynchronous logging calls are completed when
awaited.
Examples
--------
>>> async def sink(message):
... await asyncio.sleep(0.1) # IO processing...
... print(message, end="")
...
>>> async def work():
... logger.info("Start")
... logger.info("End")
... await logger.complete()
...
>>> logger.add(sink)
1
>>> asyncio.run(work())
Start
End
>>> def process():
... logger.info("Message sent from the child")
... logger.complete()
...
>>> logger.add(sys.stderr, enqueue=True)
1
>>> process = multiprocessing.Process(target=process)
>>> process.start()
>>> process.join()
Message sent from the child
"""
with self._core.lock:
handlers = self._core.handlers.copy()
for handler in handlers.values():
handler.complete_queue()
class AwaitableCompleter:
def __await__(self_):
with self._core.lock:
handlers = self._core.handlers.copy()
for handler in handlers.values():
yield from handler.complete_async().__await__()
return AwaitableCompleter()
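    # Usage sketch for complete() with a coroutine sink, condensed from the docstring
    # above and not part of the library itself (assumes `from loguru import logger`):
    #
    #     async def sink(message):
    #         print(message, end="")
    #
    #     async def work():
    #         logger.info("Start")
    #         logger.info("End")
    #         await logger.complete()
    #
    #     logger.add(sink)
    #     asyncio.run(work())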
def catch(
self,
exception=Exception,
*,
level="ERROR",
reraise=False,
onerror=None,
exclude=None,
default=None,
message="An error has been caught in function '{record[function]}', "
"process '{record[process].name}' ({record[process].id}), "
"thread '{record[thread].name}' ({record[thread].id}):",
**kwargs
):
"""Return a decorator to automatically log possibly caught error in wrapped function.
This is useful to ensure unexpected exceptions are logged, the entire program can be
wrapped by this method. This is also very useful to decorate |Thread.run| methods while
using threads to propagate errors to the main logger thread.
Note that the visibility of variables values (which uses the great |better_exceptions|_
library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink.
The returned object can also be used as a context manager.
Parameters
----------
exception : |Exception|, optional
The type of exception to intercept. If several types should be caught, a tuple of
exceptions can be used too.
level : |str| or |int|, optional
The level name or severity with which the message should be logged.
reraise : |bool|, optional
Whether the exception should be raised again and hence propagated to the caller.
onerror : |callable|_, optional
A function that will be called if an error occurs, once the message has been logged.
            It should accept the exception instance as its sole argument.
exclude : |Exception|, optional
A type of exception (or a tuple of types) that will be purposely ignored and hence
propagated to the caller without being logged.
default : optional
The value to be returned by the decorated function if an error occurred without being
re-raised.
message : |str|, optional
The message that will be automatically logged if an exception occurs. Note that it will
be formatted with the ``record`` attribute.
Returns
-------
:term:`decorator` / :term:`context manager`
An object that can be used to decorate a function or as a context manager to log
exceptions possibly caught.
Examples
--------
>>> @logger.catch
... def f(x):
... 100 / x
...
>>> def g():
... f(10)
... f(0)
...
>>> g()
ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398):
Traceback (most recent call last):
File "program.py", line 12, in <module>
g()
└ <function g at 0x7f225fe2bc80>
> File "program.py", line 10, in g
f(0)
└ <function f at 0x7f225fe2b9d8>
File "program.py", line 6, in f
100 / x
└ 0
ZeroDivisionError: division by zero
>>> with logger.catch(message="Because we never know..."):
... main() # No exception, no logs
>>> # Use 'onerror' to prevent the program exit code to be 0 (if 'reraise=False') while
>>> # also avoiding the stacktrace to be duplicated on stderr (if 'reraise=True').
>>> @logger.catch(onerror=lambda _: sys.exit(1))
... def main():
... 1 / 0
"""
if callable(exception) and (
not isclass(exception) or not issubclass(exception, BaseException)
):
return self.catch()(exception)
class Catcher:
def __init__(self_, from_decorator):
self_._from_decorator = from_decorator
def __enter__(self_):
return None
def __exit__(self_, type_, value, traceback_):
if type_ is None:
return
if not issubclass(type_, exception):
return False
if exclude is not None and issubclass(type_, exclude):
return False
from_decorator = self_._from_decorator
_, depth, _, *options = self._options
if from_decorator:
depth += 1
catch_options = [(type_, value, traceback_), depth, True] + options
level_id, static_level_no = self._dynamic_level(level)
self._log(level_id, static_level_no, from_decorator, catch_options, message, (), kwargs)
if onerror is not None:
onerror(value)
return not reraise
def __call__(_, function):
catcher = Catcher(True)
if iscoroutinefunction(function):
async def catch_wrapper(*args, **kwargs):
with catcher:
return await function(*args, **kwargs)
return default
elif isgeneratorfunction(function):
def catch_wrapper(*args, **kwargs):
with catcher:
return (yield from function(*args, **kwargs))
return default
else:
def catch_wrapper(*args, **kwargs):
with catcher:
return function(*args, **kwargs)
return default
functools.update_wrapper(catch_wrapper, function)
return catch_wrapper
return Catcher(False)
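    # Usage sketch for catch(), condensed from the docstring examples above and not part
    # of the library itself: decorate an entry point so unexpected errors are logged
    # before the process exits with a non-zero code.
    #
    #     @logger.catch(onerror=lambda _: sys.exit(1))
    #     def main():
    #         1 / 0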
def opt(
self,
*,
exception=None,
record=False,
lazy=False,
colors=False,
raw=False,
capture=True,
depth=0,
ansi=False
):
r"""Parametrize a logging call to slightly change generated log message.
Note that it's not possible to chain |opt| calls, the last one takes precedence over the
others as it will "reset" the options to their default values.
Parameters
----------
exception : |bool|, |tuple| or |Exception|, optional
If it does not evaluate as ``False``, the passed exception is formatted and added to the
log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple,
otherwise the exception information is retrieved from |sys.exc_info|.
record : |bool|, optional
If ``True``, the record dict contextualizing the logging call can be used to format the
message by using ``{record[key]}`` in the log message.
lazy : |bool|, optional
            If ``True``, the logging call attributes used to format the message should be functions;
            they will be called only if the level is high enough. This can be used to avoid expensive
            calls when not necessary.
colors : |bool|, optional
If ``True``, logged message will be colorized according to the markups it possibly
contains.
raw : |bool|, optional
If ``True``, the formatting of each sink will be bypassed and the message will be sent
as is.
capture : |bool|, optional
            If ``False``, the ``**kwargs`` of the logged message will not automatically populate
the ``extra`` dict (although they are still used for formatting).
depth : |int|, optional
Specify which stacktrace should be used to contextualize the logged message. This is
useful while using the logger from inside a wrapped function to retrieve worthwhile
information.
ansi : |bool|, optional
Deprecated since version 0.4.1: the ``ansi`` parameter will be removed in Loguru 1.0.0,
it is replaced by ``colors`` which is a more appropriate name.
Returns
-------
:class:`~Logger`
            A logger wrapping the core logger, but transforming logged messages adequately before
sending.
Examples
--------
>>> try:
... 1 / 0
... except ZeroDivisionError:
... logger.opt(exception=True).debug("Exception logged with debug level:")
...
[18:10:02] DEBUG in '<module>' - Exception logged with debug level:
Traceback (most recent call last, catch point marked):
> File "<stdin>", line 2, in <module>
ZeroDivisionError: division by zero
>>> logger.opt(record=True).info("Current line is: {record[line]}")
[18:10:33] INFO in '<module>' - Current line is: 1
>>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5))
[18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000
>>> logger.opt(colors=True).warning("We got a <red>BIG</red> problem")
[18:11:30] WARNING in '<module>' - We got a BIG problem
>>> logger.opt(raw=True).debug("No formatting\n")
No formatting
>>> logger.opt(capture=False).info("Displayed but not captured: {value}", value=123)
[18:11:41] Displayed but not captured: 123
>>> def wrapped():
... logger.opt(depth=1).info("Get parent context")
...
>>> def func():
... wrapped()
...
>>> func()
        [18:11:54] INFO in 'func' - Get parent context
"""
if ansi:
colors = True
warnings.warn(
"The 'ansi' parameter is deprecated, please use 'colors' instead",
DeprecationWarning,
)
args = self._options[-2:]
return Logger(self._core, exception, depth, record, lazy, colors, raw, capture, *args)
def bind(__self, **kwargs):
"""Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
            A logger wrapping the core logger, but which sends records with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
"""
*options, extra = __self._options
return Logger(__self._core, *options, {**extra, **kwargs})
@contextlib.contextmanager
def contextualize(__self, **kwargs):
"""Bind attributes to the context-local ``extra`` dict while inside the ``with`` block.
        Contrary to |bind|, there is no ``logger`` returned; the ``extra`` dict is modified in-place
        and updated globally. Most importantly, it uses |contextvars|, which means that
        contextualized values are unique to each thread and asynchronous task.
        The ``extra`` dict will recover its initial state once the context manager is exited.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the context-local ``extra`` dict.
Returns
-------
:term:`context manager` / :term:`decorator`
A context manager (usable as a decorator too) that will bind the attributes once entered
and restore the initial state of the ``extra`` dict while exited.
Examples
--------
>>> logger.add(sys.stderr, format="{message} | {extra}")
1
>>> def task():
... logger.info("Processing!")
...
>>> with logger.contextualize(task_id=123):
... task()
...
Processing! | {'task_id': 123}
>>> logger.info("Done.")
Done. | {}
"""
with __self._core.lock:
new_context = {**context.get(), **kwargs}
token = context.set(new_context)
try:
yield
finally:
with __self._core.lock:
context.reset(token)
def patch(self, patcher):
"""Attach a function to modify the record dict created by each logging call.
The ``patcher`` may be used to update the record on-the-fly before it's propagated to the
handlers. This allows the "extra" dict to be populated with dynamic values and also permits
advanced modifications of the record emitted while logging a message. The function is called
once before sending the log message to the different handlers.
        It is recommended to apply modifications on the ``record["extra"]`` dict rather than on the
        ``record`` dict itself, as some values are used internally by `Loguru`, and modifying them may
        produce unexpected results.
Parameters
----------
patcher: |callable|_
The function to which the record dict will be passed as the sole argument. This function
            is in charge of updating the record in-place; it does not need to return any
            value, as the modified record object will be re-used.
Returns
-------
:class:`~Logger`
            A logger wrapping the core logger, but whose records are passed through the ``patcher``
function before being sent to the added handlers.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[utc]} {message}")
>>> logger = logger.patch(lambda record: record["extra"].update(utc=datetime.utcnow()))
>>> logger.info("That way, you can log messages with the time displayed in UTC")
>>> def wrapper(func):
... @functools.wraps(func)
... def wrapped(*args, **kwargs):
... logger.patch(lambda r: r.update(function=func.__name__)).info("Wrapped!")
... return func(*args, **kwargs)
... return wrapped
>>> def recv_record_from_network(pipe):
... record = pickle.loads(pipe.read())
... level, message = record["level"], record["message"]
... logger.patch(lambda r: r.update(record)).log(level, message)
"""
*options, _, extra = self._options
return Logger(self._core, *options, patcher, extra)
def level(self, name, no=None, color=None, icon=None):
"""Add, update or retrieve a logging level.
Logging levels are defined by their ``name``, to which a severity ``no``, an ANSI ``color``
tag and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
level, you must use its name; the severity number is not linked back to the level
name (this implies that several levels can share the same severity).
To add a new level, its ``name`` and its ``no`` are required. A ``color`` and an ``icon``
can also be specified or will be empty by default.
To update an existing level, pass its ``name`` with the parameters to be changed. It is not
possible to modify the ``no`` of a level once it has been added.
To retrieve level information, the ``name`` alone suffices.
Parameters
----------
name : |str|
The name of the logging level.
no : |int|
The severity of the level to be added or updated.
color : |str|
The color markup of the level to be added or updated.
icon : |str|
The icon of the level to be added or updated.
Returns
-------
``Level``
A |namedtuple| containing information about the level.
Raises
------
ValueError
If there is no level registered with such ``name``.
Examples
--------
>>> level = logger.level("ERROR")
>>> print(level)
Level(name='ERROR', no=40, color='<red><bold>', icon='❌')
>>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
1
>>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
Level(name='CUSTOM', no=15, color='<blue>', icon='@')
>>> logger.log("CUSTOM", "Logging...")
15 @ Logging...
>>> logger.level("WARNING", icon=r"/!\\")
Level(name='WARNING', no=30, color='<yellow><bold>', icon='/!\\\\')
>>> logger.warning("Updated!")
30 /!\\ Updated!
"""
if not isinstance(name, str):
raise TypeError(
"Invalid level name, it should be a string, not: '%s'" % type(name).__name__
)
if no is color is icon is None:
try:
return self._core.levels[name]
except KeyError:
raise ValueError("Level '%s' does not exist" % name) from None
if name not in self._core.levels:
if no is None:
raise ValueError(
"Level '%s' does not exist, you have to create it by specifying a level no"
% name
)
else:
old_color, old_icon = "", " "
elif no is not None:
raise TypeError("Level '%s' already exists, you can't update its severity no" % name)
else:
_, no, old_color, old_icon = self.level(name)
if color is None:
color = old_color
if icon is None:
icon = old_icon
if not isinstance(no, int):
raise TypeError(
"Invalid level no, it should be an integer, not: '%s'" % type(no).__name__
)
if no < 0:
raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no)
ansi = Colorizer.ansify(color)
level = Level(name, no, color, icon)
with self._core.lock:
self._core.levels[name] = level
self._core.levels_ansi_codes[name] = ansi
for handler in self._core.handlers.values():
handler.update_format(name)
return level
def disable(self, name):
"""Disable logging of messages coming from ``name`` module and its children.
Developers of libraries using `Loguru` should absolutely disable it to avoid disrupting
users with unrelated log messages.
Note that in some rare circumstances, it is not possible for `Loguru` to
determine the module's ``__name__`` value. In such a situation, ``record["name"]`` will be
equal to ``None``, which is why ``None`` is also a valid argument.
Parameters
----------
name : |str| or ``None``
The name of the parent module to disable.
Examples
--------
>>> logger.info("Allowed message by default")
[22:21:55] Allowed message by default
>>> logger.disable("my_library")
>>> logger.info("While publishing a library, don't forget to disable logging")
"""
self._change_activation(name, False)
def enable(self, name):
"""Enable logging of messages coming from ``name`` module and its children.
Logging is generally disabled by imported libraries using `Loguru`, hence this function
allows users to receive these messages anyway.
To enable all logs regardless of the module they are coming from, an empty string ``""`` can
be passed.
Parameters
----------
name : |str| or ``None``
The name of the parent module to re-allow.
Examples
--------
>>> logger.disable("__main__")
>>> logger.info("Disabled, so nothing is logged.")
>>> logger.enable("__main__")
>>> logger.info("Re-enabled, messages are logged.")
[22:46:12] Re-enabled, messages are logged.
"""
self._change_activation(name, True)
def configure(self, *, handlers=None, levels=None, extra=None, patcher=None, activation=None):
"""Configure the core logger.
It should be noted that ``extra`` values set using this function are available across all
modules, so this is the best way to set overall default values.
Parameters
----------
handlers : |list| of |dict|, optional
A list of each handler to be added. The list should contain dicts of params passed to
the |add| function as keyword arguments. If not ``None``, all previously added
handlers are first removed.
levels : |list| of |dict|, optional
A list of each level to be added or updated. The list should contain dicts of params
passed to the |level| function as keyword arguments. This will never remove previously
created levels.
extra : |dict|, optional
A dict containing additional parameters bound to the core logger, useful to share
common properties if you call |bind| in several of your modules. If not ``None``,
this will replace the previously configured ``extra`` dict.
patcher : |callable|_, optional
A function that will be applied to the record dict of each logged message across all
modules using the logger. It should modify the dict in-place without returning anything.
The function is executed prior to the one possibly added by the |patch| method. If not
``None``, this will replace previously configured ``patcher`` function.
activation : |list| of |tuple|, optional
A list of ``(name, state)`` tuples which denote which loggers should be enabled (if
``state`` is ``True``) or disabled (if ``state`` is ``False``). The calls to |enable|
and |disable| are made according to the list order. This will not modify previously
activated loggers, so if you need a fresh start prepend your list with ``("", False)``
or ``("", True)``.
Returns
-------
:class:`list` of :class:`int`
A list containing the identifiers of added sinks (if any).
Examples
--------
>>> logger.configure(
... handlers=[
... dict(sink=sys.stderr, format="[{time}] {message}"),
... dict(sink="file.log", enqueue=True, serialize=True),
... ],
... levels=[dict(name="NEW", no=13, icon="¤", color="")],
... extra={"common_to_all": "default"},
... patcher=lambda record: record["extra"].update(some_value=42),
... activation=[("my_module.secret", False), ("another_library.module", True)],
... )
[1, 2]
>>> # Set a default "extra" dict to logger across all modules, without "bind()"
>>> extra = {"context": "foo"}
>>> logger.configure(extra=extra)
>>> logger.add(sys.stderr, format="{extra[context]} - {message}")
>>> logger.info("Context without bind")
>>> # => "foo - Context without bind"
>>> logger.bind(context="bar").info("Suppress global context")
>>> # => "bar - Suppress global context"
"""
if handlers is not None:
self.remove()
else:
handlers = []
if levels is not None:
for params in levels:
self.level(**params)
if patcher is not None:
with self._core.lock:
self._core.patcher = patcher
if extra is not None:
with self._core.lock:
self._core.extra.clear()
self._core.extra.update(extra)
if activation is not None:
for name, state in activation:
if state:
self.enable(name)
else:
self.disable(name)
return [self.add(**params) for params in handlers]
def _change_activation(self, name, status):
if not (name is None or isinstance(name, str)):
raise TypeError(
"Invalid name, it should be a string (or None), not: '%s'" % type(name).__name__
)
with self._core.lock:
enabled = self._core.enabled.copy()
if name is None:
for n in enabled:
if n is None:
enabled[n] = status
self._core.activation_none = status
self._core.enabled = enabled
return
if name != "":
name += "."
activation_list = [
(n, s) for n, s in self._core.activation_list if n[: len(name)] != name
]
parent_status = next((s for n, s in activation_list if name[: len(n)] == n), None)
if parent_status != status and not (name == "" and status is True):
activation_list.append((name, status))
def modules_depth(x):
return x[0].count(".")
activation_list.sort(key=modules_depth, reverse=True)
for n in enabled:
if n is not None and (n + ".")[: len(name)] == name:
enabled[n] = status
self._core.activation_list = activation_list
self._core.enabled = enabled
@staticmethod
def parse(file, pattern, *, cast={}, chunk=2 ** 16):
"""Parse raw logs and extract each entry as a |dict|.
The logging format has to be specified as the regex ``pattern``; it will then be
used to parse the ``file`` and retrieve each entry based on the named groups present
in the regex.
Parameters
----------
file : |str|, |Path| or |file-like object|_
The path of the log file to be parsed, or an already opened file object.
pattern : |str| or |re.Pattern|_
The regex to use for parsing logs; it should contain named groups which will be included
in the returned dict.
cast : |callable|_ or |dict|, optional
A function that should convert the parsed regex groups in-place (a dict of string
values) to more appropriate types. If a dict is passed, it should be a mapping between
keys of the parsed log dict and the functions that should be used to convert the
associated values.
chunk : |int|, optional
The number of bytes read while iterating through the logs; this avoids having to load
the whole file into memory.
Yields
------
:class:`dict`
The dict mapping regex named groups to matched values, as returned by |match.groupdict|
and optionally converted according to ``cast`` argument.
Examples
--------
>>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)" # If log format is "{level.no} - {message}"
>>> for e in logger.parse("file.log", reg): # A file line could be "10 - A debug message"
... print(e) # => {'lvl': '10', 'msg': 'A debug message'}
>>> caster = dict(lvl=int) # Parse 'lvl' key as an integer
>>> for e in logger.parse("file.log", reg, cast=caster):
... print(e) # => {'lvl': 10, 'msg': 'A debug message'}
>>> def cast(groups):
... if "date" in groups:
... groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
...
>>> with open("file.log") as file:
... for log in logger.parse(file, reg, cast=cast):
... print(log["date"], log["something_else"])
"""
if isinstance(file, (str, PathLike)):
should_close = True
fileobj = open(str(file))
elif hasattr(file, "read") and callable(file.read):
should_close = False
fileobj = file
else:
raise TypeError(
"Invalid file, it should be a string path or a file object, not: '%s'"
% type(file).__name__
)
if isinstance(cast, dict):
def cast_function(groups):
for key, converter in cast.items():
if key in groups:
groups[key] = converter(groups[key])
elif callable(cast):
cast_function = cast
else:
raise TypeError(
"Invalid cast, it should be a function or a dict, not: '%s'" % type(cast).__name__
)
try:
regex = re.compile(pattern)
except TypeError:
raise TypeError(
"Invalid pattern, it should be a string or a compiled regex, not: '%s'"
% type(pattern).__name__
) from None
matches = Logger._find_iter(fileobj, regex, chunk)
for match in matches:
groups = match.groupdict()
cast_function(groups)
yield groups
if should_close:
fileobj.close()
@staticmethod
def _find_iter(fileobj, regex, chunk):
buffer = fileobj.read(0)
while 1:
text = fileobj.read(chunk)
buffer += text
matches = list(regex.finditer(buffer))
if not text:
yield from matches
break
if len(matches) > 1:
end = matches[-2].end()
buffer = buffer[end:]
yield from matches[:-1]
def _log(self, level_id, static_level_no, from_decorator, options, message, args, kwargs):
core = self._core
if not core.handlers:
return
(exception, depth, record, lazy, colors, raw, capture, patcher, extra) = options
frame = get_frame(depth + 2)
try:
name = frame.f_globals["__name__"]
except KeyError:
name = None
try:
if not core.enabled[name]:
return
except KeyError:
enabled = core.enabled
if name is None:
status = core.activation_none
enabled[name] = status
if not status:
return
else:
dotted_name = name + "."
for dotted_module_name, status in core.activation_list:
if dotted_name[: len(dotted_module_name)] == dotted_module_name:
if status:
break
enabled[name] = False
return
enabled[name] = True
current_datetime = aware_now()
if level_id is None:
level_icon = " "
level_no = static_level_no
level_name = "Level %d" % level_no
else:
try:
level_name, level_no, _, level_icon = core.levels[level_id]
except KeyError:
raise ValueError("Level '%s' does not exist" % level_id) from None
if level_no < core.min_level:
return
code = frame.f_code
file_path = code.co_filename
file_name = basename(file_path)
thread = current_thread()
process = current_process()
elapsed = current_datetime - start_time
if exception:
if isinstance(exception, BaseException):
type_, value, traceback = (type(exception), exception, exception.__traceback__)
elif isinstance(exception, tuple):
type_, value, traceback = exception
else:
type_, value, traceback = sys.exc_info()
exception = RecordException(type_, value, traceback)
else:
exception = None
log_record = {
"elapsed": elapsed,
"exception": exception,
"extra": {**core.extra, **context.get(), **extra},
"file": RecordFile(file_name, file_path),
"function": code.co_name,
"level": RecordLevel(level_name, level_no, level_icon),
"line": frame.f_lineno,
"message": str(message),
"module": splitext(file_name)[0],
"name": name,
"process": RecordProcess(process.ident, process.name),
"thread": RecordThread(thread.ident, thread.name),
"time": current_datetime,
}
if lazy:
args = [arg() for arg in args]
kwargs = {key: value() for key, value in kwargs.items()}
if capture and kwargs:
log_record["extra"].update(kwargs)
if record:
if "record" in kwargs:
raise TypeError(
"The message can't be formatted: 'record' shall not be used as a keyword "
"argument while logger has been configured with '.opt(record=True)'"
)
kwargs.update(record=log_record)
if colors:
if args or kwargs:
colored_message = Colorizer.prepare_message(message, args, kwargs)
else:
colored_message = Colorizer.prepare_simple_message(str(message))
log_record["message"] = colored_message.stripped
elif args or kwargs:
colored_message = None
log_record["message"] = message.format(*args, **kwargs)
else:
colored_message = None
if core.patcher:
core.patcher(log_record)
if patcher:
patcher(log_record)
for handler in core.handlers.values():
handler.emit(log_record, level_id, from_decorator, raw, colored_message)
def trace(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'TRACE'``."""
__self._log("TRACE", None, False, __self._options, __message, args, kwargs)
def debug(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'DEBUG'``."""
__self._log("DEBUG", None, False, __self._options, __message, args, kwargs)
def info(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'INFO'``."""
__self._log("INFO", None, False, __self._options, __message, args, kwargs)
def success(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'SUCCESS'``."""
__self._log("SUCCESS", None, False, __self._options, __message, args, kwargs)
def warning(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'WARNING'``."""
__self._log("WARNING", None, False, __self._options, __message, args, kwargs)
def error(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'ERROR'``."""
__self._log("ERROR", None, False, __self._options, __message, args, kwargs)
def critical(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'CRITICAL'``."""
__self._log("CRITICAL", None, False, __self._options, __message, args, kwargs)
def exception(__self, __message, *args, **kwargs):
r"""Convenience method for logging an ``'ERROR'`` with exception information."""
options = (True,) + __self._options[1:]
__self._log("ERROR", None, False, options, __message, args, kwargs)
def log(__self, __level, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``level``."""
level_id, static_level_no = __self._dynamic_level(__level)
__self._log(level_id, static_level_no, False, __self._options, __message, args, kwargs)
@staticmethod
@functools.lru_cache(maxsize=32)
def _dynamic_level(level):
if isinstance(level, str):
return (level, None)
if isinstance(level, int):
if level < 0:
raise ValueError(
"Invalid level value, it should be a positive integer, not: %d" % level
)
return (None, level)
raise TypeError(
"Invalid level, it should be an integer or a string, not: '%s'" % type(level).__name__
)
def start(self, *args, **kwargs):
"""Deprecated function to |add| a new handler.
Warnings
--------
.. deprecated:: 0.2.2
``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
confusing name.
"""
warnings.warn(
"The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning
)
return self.add(*args, **kwargs)
def stop(self, *args, **kwargs):
"""Deprecated function to |remove| an existing handler.
Warnings
--------
.. deprecated:: 0.2.2
``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less
confusing name.
"""
warnings.warn(
"The 'stop()' method is deprecated, please use 'remove()' instead", DeprecationWarning
)
return self.remove(*args, **kwargs)
|
test_p2p_grpform.py
|
# P2P group formation test cases
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import threading
import Queue
import os
import hostapd
import hwsim_utils
import utils
from utils import HwsimSkip
from wpasupplicant import WpaSupplicant
def check_grpform_results(i_res, r_res):
if i_res['result'] != 'success' or r_res['result'] != 'success':
raise Exception("Failed group formation")
if i_res['ssid'] != r_res['ssid']:
raise Exception("SSID mismatch")
if i_res['freq'] != r_res['freq']:
raise Exception("freq mismatch")
if 'go_neg_freq' in r_res and i_res['go_neg_freq'] != r_res['go_neg_freq']:
raise Exception("go_neg_freq mismatch")
if i_res['freq'] != i_res['go_neg_freq']:
raise Exception("freq/go_neg_freq mismatch")
if i_res['role'] != i_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if 'go_neg_role' in r_res and r_res['role'] != r_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if i_res['go_dev_addr'] != r_res['go_dev_addr']:
raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent)
logger.debug("i_res: " + str(i_res))
except Exception as e:
i_res = None
logger.info("go_neg_init thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=20)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None, expect_failure=False, i_go_neg_status=None, i_method='enter', r_method='display', test_data=True, i_freq=None, r_freq=None):
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, freq=r_freq)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent, expect_failure=expect_failure, freq=i_freq)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if i_go_neg_status:
if i_res['result'] != 'go-neg-failed':
raise Exception("Expected GO Negotiation failure not reported")
if i_res['status'] != i_go_neg_status:
raise Exception("Expected GO Negotiation status not seen")
if expect_failure:
return
logger.info("Group formed")
if test_data:
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
return [i_res, r_res]
def go_neg_init_pbc(i_dev, r_dev, i_intent, res, freq, provdisc):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc",
timeout=20, go_intent=i_intent, freq=freq,
provdisc=provdisc)
logger.debug("i_res: " + str(i_res))
except Exception as e:
i_res = None
logger.info("go_neg_init_pbc thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None, i_freq=None, r_freq=None, provdisc=False, r_listen=False):
if r_listen:
r_dev.p2p_listen()
else:
r_dev.p2p_find(social=True)
i_dev.p2p_find(social=True)
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res, i_freq, provdisc))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
# Allow some time for the GO Neg Resp to go out before initiating a new
# GO Negotiation.
time.sleep(0.2)
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, timeout=20, freq=r_freq)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init_pbc thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pbc_authorized(i_dev, r_dev, i_intent=None, r_intent=None,
expect_failure=False, i_freq=None, r_freq=None):
i_dev.p2p_listen()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, freq=r_freq)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc", timeout=20,
go_intent=i_intent,
expect_failure=expect_failure, freq=i_freq)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if expect_failure:
return
logger.info("Group formed")
return [i_res, r_res]
def remove_group(dev1, dev2):
dev1.remove_group()
try:
dev2.remove_group()
except:
pass
def test_grpform(dev):
"""P2P group formation using PIN and authorized connection (init -> GO)"""
try:
dev[0].global_request("SET p2p_group_idle 2")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
dev[1].remove_group()
ev = dev[0].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
if ev is None:
raise Exception("GO did not remove group on idle timeout")
if "GO reason=IDLE" not in ev:
raise Exception("Unexpected group removal event: " + ev)
finally:
dev[0].global_request("SET p2p_group_idle 0")
def test_grpform_a(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (init: group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in i_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_b(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (resp: group iface)"""
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in r_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_c(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in i_res['ifname']:
raise Exception("Unexpected group interface name")
if "p2p-wlan" not in r_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform2(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO)"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
remove_group(dev[0], dev[1])
def test_grpform2_c(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO) (group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform3(dev):
"""P2P group formation using PIN and re-init GO Negotiation"""
go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
def test_grpform3_c(dev):
"""P2P group formation using PIN and re-init GO Negotiation (group iface)"""
dev[0].global_request("SET p2p_no_group_iface 0")
dev[1].global_request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_pbc(dev):
"""P2P group formation using PBC and re-init GO Negotiation"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
if i_res['role'] != 'GO' or r_res['role'] != 'client':
raise Exception("Unexpected device roles")
remove_group(dev[0], dev[1])
def test_grpform_pd(dev):
"""P2P group formation with PD-before-GO-Neg workaround"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1], r_listen=True)
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
def test_grpform_ext_listen(dev):
"""P2P group formation with extended listen timing enabled"""
addr0 = dev[0].p2p_dev_addr()
try:
if "FAIL" not in dev[0].global_request("P2P_EXT_LISTEN 100"):
raise Exception("Invalid P2P_EXT_LISTEN accepted")
if "OK" not in dev[0].global_request("P2P_EXT_LISTEN 300 1000"):
raise Exception("Failed to set extended listen timing")
if "OK" not in dev[1].global_request("P2P_EXT_LISTEN 200 40000"):
raise Exception("Failed to set extended listen timing")
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1],
r_listen=True, i_freq="2417", r_freq="2417",
i_intent=1, r_intent=15)
check_grpform_results(i_res, r_res)
peer1 = dev[0].get_peer(dev[1].p2p_dev_addr())
if peer1['ext_listen_interval'] != "40000":
raise Exception("Extended listen interval not discovered correctly")
if peer1['ext_listen_period'] != "200":
raise Exception("Extended listen period not discovered correctly")
peer0 = dev[1].get_peer(dev[0].p2p_dev_addr())
if peer0['ext_listen_interval'] != "1000":
raise Exception("Extended listen interval not discovered correctly")
if peer0['ext_listen_period'] != "300":
raise Exception("Extended listen period not discovered correctly")
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer during ext listen")
remove_group(dev[0], dev[1])
finally:
if "OK" not in dev[0].global_request("P2P_EXT_LISTEN"):
raise Exception("Failed to clear extended listen timing")
if "OK" not in dev[1].global_request("P2P_EXT_LISTEN"):
raise Exception("Failed to clear extended listen timing")
def test_both_go_intent_15(dev):
"""P2P GO Negotiation with both devices using GO intent 15"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=15, expect_failure=True, i_go_neg_status=9)
def test_both_go_neg_display(dev):
"""P2P GO Negotiation with both devices trying to display PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='display', r_method='display')
def test_both_go_neg_enter(dev):
"""P2P GO Negotiation with both devices trying to enter PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='enter', r_method='enter')
def test_go_neg_pbc_vs_pin(dev):
"""P2P GO Negotiation with one device using PBC and the other PIN"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth"):
raise Exception("Failed to authorize GO Neg")
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " 12345670 display"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
if "status=10" not in ev:
raise Exception("Unexpected failure reason: " + ev)
def test_go_neg_pin_vs_pbc(dev):
"""P2P GO Negotiation with one device using PIN and the other PBC"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display auth"):
raise Exception("Failed to authorize GO Neg")
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
if "status=10" not in ev:
raise Exception("Unexpected failure reason: " + ev)
def test_grpform_per_sta_psk(dev):
"""P2P group formation with per-STA PSKs"""
dev[0].global_request("P2P_SET per_sta_psk 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
pin = dev[2].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
c_res = dev[2].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60)
check_grpform_results(i_res, c_res)
if r_res['psk'] == c_res['psk']:
raise Exception("Same PSK assigned for both clients")
hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
dev[0].remove_group()
dev[1].wait_go_ending_session()
dev[2].wait_go_ending_session()
def test_grpform_per_sta_psk_wps(dev):
"""P2P group formation with per-STA PSKs with non-P2P WPS STA"""
dev[0].global_request("P2P_SET per_sta_psk 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
dev[0].p2p_go_authorize_client_pbc()
dev[2].request("WPS_PBC")
dev[2].wait_connected(timeout=30)
hwsim_utils.test_connectivity_p2p_sta(dev[1], dev[2])
dev[0].remove_group()
dev[2].request("DISCONNECT")
dev[1].wait_go_ending_session()
def test_grpform_force_chan_go(dev):
"""P2P group formation forced channel selection by GO"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
i_freq=2432,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2432":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_cli(dev):
"""P2P group formation forced channel selection by client"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2417,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2417":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_conflict(dev):
"""P2P group formation fails due to forced channel mismatch"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15, r_freq=2427,
expect_failure=True, i_go_neg_status=7)
def test_grpform_pref_chan_go(dev):
"""P2P group formation preferred channel selection by GO"""
dev[0].request("SET p2p_pref_chan 81:7")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2442":
raise Exception("Unexpected channel - did not follow GO's p2p_pref_chan")
remove_group(dev[0], dev[1])
def test_grpform_pref_chan_go_overridden(dev):
"""P2P group formation preferred channel selection by GO overridden by client"""
dev[1].request("SET p2p_pref_chan 81:7")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2422,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2422":
raise Exception("Unexpected channel - did not follow client's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_forcing_chan(dev):
"""P2P group formation with no-GO freq forcing channel"""
dev[1].request("SET p2p_no_go_freq 100-200,300,4000-6000")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow no-GO freq")
remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_conflict(dev):
"""P2P group formation fails due to no-GO range forced by client"""
dev[1].request("SET p2p_no_go_freq 2000-3000")
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15,
expect_failure=True, i_go_neg_status=7)
def test_grpform_no_5ghz_world_roaming(dev):
"""P2P group formation with world roaming regulatory"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=14,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli2(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=14,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli3(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (intent 15)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli4(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse; intent 15)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_incorrect_pin(dev):
"""P2P GO Negotiation with incorrect PIN"""
dev[1].p2p_listen()
addr1 = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(addr1):
raise Exception("Peer not found")
res = dev[1].global_request("P2P_CONNECT " + dev[0].p2p_dev_addr() + " pin auth go_intent=0")
if "FAIL" in res:
raise Exception("P2P_CONNECT failed to generate PIN")
logger.info("PIN from P2P_CONNECT: " + res)
dev[0].global_request("P2P_CONNECT " + addr1 + " 00000000 enter go_intent=15")
ev = dev[0].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
if ev is None:
raise Exception("GO Negotiation did not complete successfully(0)")
ev = dev[1].wait_global_event(["P2P-GO-NEG-SUCCESS"], timeout=15)
if ev is None:
raise Exception("GO Negotiation did not complete successfully(1)")
ev = dev[1].wait_global_event(["WPS-FAIL"], timeout=15)
if ev is None:
raise Exception("WPS failure not reported(1)")
if "msg=8 config_error=18" not in ev:
raise Exception("Unexpected WPS failure(1): " + ev)
ev = dev[0].wait_global_event(["WPS-FAIL"], timeout=15)
if ev is None:
raise Exception("WPS failure not reported")
if "msg=8 config_error=18" not in ev:
raise Exception("Unexpected WPS failure: " + ev)
ev = dev[1].wait_global_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=10)
if ev is None:
raise Exception("Group formation failure timed out")
ev = dev[0].wait_global_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=5)
if ev is None:
raise Exception("Group formation failure timed out")
def test_grpform_reject(dev):
"""User rejecting group formation attempt by a P2P peer"""
addr0 = dev[0].p2p_dev_addr()
dev[0].p2p_listen()
dev[1].p2p_go_neg_init(addr0, None, "pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
if "OK" in dev[0].global_request("P2P_REJECT foo"):
raise Exception("Invalid P2P_REJECT accepted")
if "FAIL" in dev[0].global_request("P2P_REJECT " + ev.split(' ')[1]):
raise Exception("P2P_REJECT failed")
dev[1].request("P2P_STOP_FIND")
dev[1].p2p_go_neg_init(addr0, None, "pbc")
ev = dev[1].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("Rejection not reported")
if "status=11" not in ev:
raise Exception("Unexpected status code in rejection")
def test_grpform_pd_no_probe_resp(dev):
"""GO Negotiation after PD, but no Probe Response"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Peer not found")
dev[1].p2p_stop_find()
dev[0].p2p_stop_find()
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] == '0':
raise Exception("Peer listen frequency not learned from Probe Request")
time.sleep(0.3)
dev[0].request("P2P_FLUSH")
dev[0].p2p_listen()
dev[1].global_request("P2P_PROV_DISC " + addr0 + " display")
ev = dev[0].wait_global_event(["P2P-PROV-DISC-SHOW-PIN"], timeout=5)
if ev is None:
raise Exception("PD Request timed out")
ev = dev[1].wait_global_event(["P2P-PROV-DISC-ENTER-PIN"], timeout=5)
if ev is None:
raise Exception("PD Response timed out")
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] != '0':
raise Exception("Peer listen frequency learned unexpectedly from PD Request")
pin = dev[0].wps_read_pin()
if "FAIL" in dev[1].global_request("P2P_CONNECT " + addr0 + " " + pin + " enter"):
raise Exception("P2P_CONNECT on initiator failed")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("GO Negotiation start timed out")
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] == '0':
raise Exception("Peer listen frequency not learned from PD followed by GO Neg Req")
if "FAIL" in dev[0].global_request("P2P_CONNECT " + addr1 + " " + pin + " display"):
raise Exception("P2P_CONNECT on responder failed")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
def test_go_neg_two_peers(dev):
"""P2P GO Negotiation rejected due to already started negotiation with another peer"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[1].p2p_listen()
dev[2].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
if not dev[0].discover_peer(addr2):
raise Exception("Could not discover peer")
if "OK" not in dev[0].request("P2P_CONNECT " + addr2 + " pbc auth"):
raise Exception("Failed to authorize GO Neg")
dev[0].p2p_listen()
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("timeout on GO Neg RX event")
dev[2].request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[2].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("Rejection not reported")
if "status=5" not in ev:
raise Exception("Unexpected status code in rejection: " + ev)
def clear_pbc_overlap(dev, ifname):
hapd_global = hostapd.HostapdGlobal()
hapd_global.remove(ifname)
dev[0].request("P2P_CANCEL")
dev[1].request("P2P_CANCEL")
dev[0].p2p_stop_find()
dev[1].p2p_stop_find()
dev[0].dump_monitor()
dev[1].dump_monitor()
time.sleep(0.1)
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
time.sleep(0.1)
def test_grpform_pbc_overlap(dev, apdev):
"""P2P group formation during PBC overlap"""
params = { "ssid": "wps", "eap_server": "1", "wps_state": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
hapd.request("WPS_PBC")
time.sleep(0.1)
# Since the P2P Client scan case is now optimized to use a specific SSID, the
# WPS AP will not reply to that and the scan after GO Negotiation can quite
# likely miss the AP due to the dwell time being short enough to miss the Beacon
# frame. This has made the test case somewhat pointless, but keep it here
# for now with an additional scan to confirm that PBC detection works if
# there is a BSS entry for an overlapping AP.
for i in range(0, 5):
dev[0].scan(freq="2412")
if dev[0].get_bss(apdev[0]['bssid']) is not None:
break
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].global_request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
raise Exception("Failed to authorize GO Neg")
if "OK" not in dev[1].global_request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED"], timeout=15)
if ev is None:
raise Exception("PBC overlap not reported")
clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_pbc_overlap_group_iface(dev, apdev):
"""P2P group formation during PBC overlap using group interfaces"""
# Note: Need to include P2P IE from the AP to get the P2P interface BSS
# update to use this information.
params = { "ssid": "wps", "eap_server": "1", "wps_state": "1",
"beacon_int": "15", 'manage_p2p': '1' }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
hapd.request("WPS_PBC")
dev[0].request("SET p2p_no_group_iface 0")
dev[1].request("SET p2p_no_group_iface 0")
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_stop_find()
dev[0].scan(freq="2412")
dev[0].p2p_listen()
if "OK" not in dev[0].global_request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
raise Exception("Failed to authorize GO Neg")
if "OK" not in dev[1].global_request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED",
"P2P-GROUP-FORMATION-SUCCESS"], timeout=15)
if ev is None or "WPS-OVERLAP-DETECTED" not in ev:
# Do not report this as failure since the P2P group formation case
# using a separate group interface has limited chances of "seeing" the
# overlapping AP due to a per-SSID scan and no prior scan operations on
# the group interface.
logger.info("PBC overlap not reported")
clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_goneg_fail_with_group_iface(dev):
"""P2P group formation fails while using group interface"""
dev[0].request("SET p2p_no_group_iface 0")
dev[1].p2p_listen()
peer = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(peer):
raise Exception("Peer " + peer + " not found")
if "OK" not in dev[1].request("P2P_REJECT " + dev[0].p2p_dev_addr()):
raise Exception("P2P_REJECT failed")
if "OK" not in dev[0].request("P2P_CONNECT " + peer + " pbc"):
raise Exception("P2P_CONNECT failed")
ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
def test_grpform_cred_ready_timeout(dev, apdev, params):
"""P2P GO Negotiation wait for credentials to become ready [long]"""
if not params['long']:
raise HwsimSkip("Skip test case with long duration due to --long not specified")
dev[1].p2p_listen()
addr1 = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found")
if not dev[2].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found(2)")
start = os.times()[4]
cmd = "P2P_CONNECT " + addr1 + " 12345670 display"
if "OK" not in dev[0].global_request(cmd):
raise Exception("Failed to initiate GO Neg")
if "OK" not in dev[2].global_request(cmd):
raise Exception("Failed to initiate GO Neg(2)")
# First, check with p2p_find
ev = dev[2].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=30)
if ev is not None:
raise Exception("Too early GO Negotiation timeout reported(2)")
dev[2].dump_monitor()
logger.info("Starting p2p_find to change state")
dev[2].p2p_find()
ev = dev[2].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=100)
if ev is None:
raise Exception("GO Negotiation failure timed out(2)")
dev[2].dump_monitor()
end = os.times()[4]
logger.info("GO Negotiation wait time: {} seconds(2)".format(end - start))
if end - start < 120:
raise Exception("Too short GO Negotiation wait time(2): {}".format(end - start))
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
wpas.p2p_listen()
ev = dev[2].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Did not discover new device after GO Negotiation failure")
if wpas.p2p_dev_addr() not in ev:
raise Exception("Unexpected device found: " + ev)
dev[2].p2p_stop_find()
wpas.p2p_stop_find()
# Finally, verify without p2p_find
ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=120)
if ev is None:
raise Exception("GO Negotiation failure timed out")
end = os.times()[4]
logger.info("GO Negotiation wait time: {} seconds".format(end - start))
if end - start < 120:
raise Exception("Too short GO Negotiation wait time: {}".format(end - start))
def test_grpform_no_wsc_done(dev):
"""P2P group formation with WSC-Done not sent"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
for i in range(0, 2):
dev[0].request("SET ext_eapol_frame_io 1")
dev[1].request("SET ext_eapol_frame_io 1")
dev[0].p2p_listen()
dev[1].p2p_go_neg_auth(addr0, "12345670", "display", 0)
dev[1].p2p_listen()
dev[0].p2p_go_neg_init(addr1, "12345670", "enter", timeout=20,
go_intent=15, wait_group=False)
mode = None
while True:
ev = dev[0].wait_event(["EAPOL-TX"], timeout=15)
if ev is None:
raise Exception("Timeout on EAPOL-TX from GO")
if not mode:
mode = dev[0].get_status_field("mode")
res = dev[1].request("EAPOL_RX " + addr0 + " " + ev.split(' ')[2])
if "OK" not in res:
raise Exception("EAPOL_RX failed")
ev = dev[1].wait_event(["EAPOL-TX"], timeout=15)
if ev is None:
raise Exception("Timeout on EAPOL-TX from P2P Client")
msg = ev.split(' ')[2]
if msg[46:56] == "102200010f":
logger.info("Drop WSC_Done")
dev[0].request("SET ext_eapol_frame_io 0")
dev[1].request("SET ext_eapol_frame_io 0")
# Fake EAP-Failure to complete session on the client
id = msg[10:12]
dev[1].request("EAPOL_RX " + addr0 + " 0300000404" + id + "0004")
break
res = dev[0].request("EAPOL_RX " + addr1 + " " + msg)
if "OK" not in res:
raise Exception("EAPOL_RX failed")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out on GO")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out on P2P Client")
dev[0].remove_group()
dev[1].wait_go_ending_session()
if mode != "P2P GO - group formation":
raise Exception("Unexpected mode on GO during group formation: " + mode)
def test_grpform_wait_peer(dev):
"""P2P group formation wait for peer to become ready"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found")
dev[0].request("SET extra_roc_dur 500")
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display go_intent=15"):
raise Exception("Failed to initiate GO Neg")
time.sleep(3)
dev[1].request("P2P_CONNECT " + addr0 + " 12345670 enter go_intent=0")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
dev[0].request("SET extra_roc_dur 0")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
dev[0].remove_group()
def test_invalid_p2p_connect_command(dev):
"""P2P_CONNECT error cases"""
id = dev[0].add_network()
for cmd in [ "foo",
"00:11:22:33:44:55",
"00:11:22:33:44:55 pbc persistent=123",
"00:11:22:33:44:55 pbc persistent=%d" % id,
"00:11:22:33:44:55 pbc go_intent=-1",
"00:11:22:33:44:55 pbc go_intent=16",
"00:11:22:33:44:55 pin",
"00:11:22:33:44:55 pbc freq=0" ]:
if "FAIL" not in dev[0].request("P2P_CONNECT " + cmd):
raise Exception("Invalid P2P_CONNECT command accepted: " + cmd)
if "FAIL-INVALID-PIN" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 1234567"):
raise Exception("Invalid PIN was not rejected")
if "FAIL-CHANNEL-UNSUPPORTED" not in dev[0].request("P2P_CONNECT 00:11:22:33:44:55 pin freq=3000"):
raise Exception("Unsupported channel not reported")
def test_p2p_unauthorize(dev):
"""P2P_UNAUTHORIZE to unauthorize a peer"""
if "FAIL" not in dev[0].request("P2P_UNAUTHORIZE foo"):
raise Exception("Invalid P2P_UNAUTHORIZE accepted")
if "FAIL" not in dev[0].request("P2P_UNAUTHORIZE 00:11:22:33:44:55"):
raise Exception("P2P_UNAUTHORIZE for unknown peer accepted")
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
pin = dev[0].wps_read_pin()
dev[0].p2p_go_neg_auth(addr1, pin, "display")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_UNAUTHORIZE " + addr1):
raise Exception("P2P_UNAUTHORIZE failed")
dev[1].p2p_go_neg_init(addr0, pin, "keypad", timeout=0)
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=10)
if ev is None:
raise Exception("No GO Negotiation Request RX reported")
def test_grpform_pbc_multiple(dev):
"""P2P group formation using PBC multiple times in a row"""
try:
dev[1].request("SET passive_scan 1")
for i in range(5):
[i_res, r_res] = go_neg_pbc_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
finally:
dev[1].request("SET passive_scan 0")
dev[1].flush_scan_cache()
def test_grpform_not_ready(dev):
"""Not ready for GO Negotiation (listen)"""
addr0 = dev[0].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].global_request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("No P2P-GO-NEG-REQUEST event")
dev[0].dump_monitor()
time.sleep(5)
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer(2)")
for i in range(3):
dev[i].p2p_stop_find()
def test_grpform_not_ready2(dev):
"""Not ready for GO Negotiation (search)"""
addr0 = dev[0].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[0].p2p_find(social=True)
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].global_request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("No P2P-GO-NEG-REQUEST event")
dev[0].dump_monitor()
time.sleep(1)
dev[2].p2p_listen()
ev = dev[0].wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Peer not discovered after GO Neg Resp(status=1) TX")
if addr2 not in ev:
raise Exception("Unexpected peer discovered: " + ev)
for i in range(3):
dev[i].p2p_stop_find()
|
exit_sentinel.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Two Sigma Open Source, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""Cook sidecar exit sentinel file watcher thread logic."""
import logging
import os
import signal
import threading
import time
def watch_for_file(sentinel_file_path, started_event):
def daemon_routine():
# wait for other components to finish starting
logging.info(f'Waiting for all components to start...')
started_event.wait()
# wait for sentinel file to appear
logging.info(f'Watching for sentinel file: {sentinel_file_path}')
while not os.path.exists(sentinel_file_path):
time.sleep(0.1)
# trigger this process's termination handler
logging.info(f'Sidecar termination triggered by sentinel file: {sentinel_file_path}')
os.kill(os.getpid(), signal.SIGTERM)
threading.Thread(target=daemon_routine, args=(), daemon=True).start()
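# A minimal usage sketch (the sentinel path below is hypothetical, not taken from this
# file): create the shared event, arm the watcher, finish starting the other components,
# then set the event so the watcher starts polling for the file.
#
#   started = threading.Event()
#   watch_for_file('/var/run/cook/exit-sentinel', started)   # hypothetical path
#   ...                                                       # start remaining components
#   started.set()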
|
test_proxies.py
|
'''
Modifier
Date: 21/03/2021
Author: AlexxLy
Description: Reads a file that contains a list of proxies and determines whether or not that list is good.
Each line in the file must be in the format of ip:port
'''
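# Hypothetical example of the expected proxies.txt contents (one ip:port per line; the
# addresses below are illustrative only):
#
#   203.0.113.7:8080
#   198.51.100.23:3128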
import platform
from os import system
from time import sleep
from requests import Session
from threading import Thread, RLock
proxy_list = 'proxies.txt'
target_site = 'https://instagram.com'
def get_proxies():
proxies = []
with open(proxy_list, 'rt', encoding='utf-8') as proxies_file:
for line in proxies_file:
            # strip the trailing newline/CR and skip blank lines
            line = line.strip()
            if not line:
                continue
            ip, port = line.split(':')
port = int(port)
proxy = {'ip': ip, 'port': port}
proxies.append(proxy)
return proxies
class TestProxies:
def __init__(self, proxies):
self.worked = 0
self.failed = 0
self.lock = RLock()
self.active_brs = 0
self.is_alive = True
self.proxies = proxies
self.total = len(proxies)
self.test_link = target_site
def display(self):
system('cls' if platform.system() == 'Windows' else 'clear')
worked, failed, total = self.worked, self.failed, self.total
worked_per = round((worked/total) * 100, 2)
failed_per = round((failed/total) * 100, 2)
complete = round(worked_per + failed_per, 2)
print(f'Complete: {complete}%')
print(f'Active browsers: {self.active_brs}')
print(f'Proxies worked: {worked_per}% [{worked}]')
print(f'Proxies failed: {failed_per}% [{failed}]')
def test_proxy(self, proxy):
br = Session()
addr = '{}:{}'.format(proxy['ip'], proxy['port'])
addr = {'http': addr, 'https': addr}
br.proxies.update(addr)
try:
br.get(self.test_link, timeout=(10, 15))
with self.lock:
self.worked += 1
except:
with self.lock:
self.failed += 1
finally:
br.close()
if self.is_alive:
with self.lock:
self.display()
self.active_brs -= 1
def start(self):
for proxy in self.proxies:
while self.is_alive and self.active_brs >= 512:
                sleep(0.01)  # yield instead of a hot busy-wait while all worker slots are in use
if not self.is_alive:
break
with self.lock:
self.active_brs += 1
Thread(target=self.test_proxy, args=[proxy], daemon=True).start()
while self.is_alive and self.active_brs:
sleep(0.5)
self.display()
def stop(self):
self.is_alive = False
while self.active_brs:
try:
with self.lock:
self.display()
sleep(0.5)
except KeyboardInterrupt:
break
def examine(self):
failed = self.failed / self.total
worked = self.worked / self.total
if worked == 0:
print('Bad proxy list')
elif (failed - worked) >= 0.1:
print('Bad proxy list')
elif (failed - worked) == 0:
print('Bad proxy list')
else:
print('Good proxy list')
if __name__ == '__main__':
test_proxies = TestProxies(get_proxies())
try:
test_proxies.start()
except KeyboardInterrupt:
test_proxies.stop()
finally:
test_proxies.examine()
|
run_tests.py
|
# Adapted from a Karma test startup script
# developed by the Jupyter team here:
# https://github.com/jupyter/jupyter-js-services/blob/master/test/run_test.py
#
# Also uses the flow where we assign a os process group id and shut down the
# server based on that - since the subprocess actually executes the kbase-narrative
# script.
# (recipe here)
# http://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
from __future__ import print_function
import subprocess
import sys
import argparse
import threading
import time
import os
import signal
KARMA_PORT = 9876
JUPYTER_PORT = 9999
argparser = argparse.ArgumentParser(
description='Run KBase Narrative unit tests'
)
argparser.add_argument('-b', '--browsers', default='Firefox',
help="Browsers to use for Karma test")
argparser.add_argument('-d', '--debug', action='store_true',
help="Whether to enter debug mode in Karma")
options = argparser.parse_args(sys.argv[1:])
nb_command = ['kbase-narrative', '--no-browser', '--NotebookApp.allow_origin="*"', '--port={}'.format(JUPYTER_PORT)]
if not hasattr(sys, 'real_prefix'):
nb_command[0] = 'narrative-venv/bin/kbase-narrative'
nb_server = subprocess.Popen(nb_command,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
preexec_fn = os.setsid
)
# wait for notebook server to start up
while 1:
line = nb_server.stdout.readline().decode('utf-8').strip()
if not line:
continue
print(line)
if 'The Jupyter Notebook is running at: http://localhost:{}/'.format(JUPYTER_PORT) in line:
break
if 'is already in use' in line:
os.killpg(os.getpgid(nb_server.pid), signal.SIGTERM)
# nb_server.terminate()
raise ValueError(
'The port {} was already taken, kill running notebook servers'.format(JUPYTER_PORT)
)
def readlines():
"""Print the notebook server output."""
while 1:
line = nb_server.stdout.readline().decode('utf-8').strip()
if line:
print(line)
thread = threading.Thread(target=readlines)
thread.daemon = True
thread.start()
# time.sleep(15)
test_command = ['grunt', 'test']
resp = 1
try:
print("Jupyter server started, starting test script.")
resp = subprocess.check_call(test_command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
pass
finally:
print("Done running tests, killing server.")
os.killpg(os.getpgid(nb_server.pid), signal.SIGTERM)
# nb_server.terminate()
sys.exit(resp)
|
client.py
|
import socket
import threading
import random
import json
import sys
from RSA import RSA
class Client:
SERVER_UDP_IP_ADDRESS = "127.0.0.1"
SERVER_UDP_PORT_NO = 6789
user = ""
room = "geral"
clientSock = None
def __init__(self, ip):
self.SERVER_UDP_IP_ADDRESS = ip
self.room = 'lobby'
def autenticate(self):
usr = input('Insira seu nickname: ')
if(usr == ''):
usr = 'Visitante'+str(random.randint(1000, 2000))
self.user = usr
print("Autenticado como " + self.user)
def sendMessage(self, message):
messagePackage = {'user': self.user, 'room': self.room, 'connecting': False,
'message': self.RSA.encryptString(message, self.serverPK)}
self.clientSock.sendto(json.dumps(messagePackage).encode(
'utf-8'), (self.SERVER_UDP_IP_ADDRESS, self.SERVER_UDP_PORT_NO))
def changeRoom(self, room):
self.room = room
def connectToServer(self):
self.clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
messagePackage = {'user': self.user, 'room': self.room,
'connecting': True, 'message': '', 'key': self.RSA.getPublicKey()}
self.clientSock.sendto(json.dumps(messagePackage).encode(
'utf-8'), (self.SERVER_UDP_IP_ADDRESS, self.SERVER_UDP_PORT_NO))
def listenMessages(self):
while True:
data, addr = self.clientSock.recvfrom(1024)
incoming = json.loads(data.decode('utf-8'))
if('keys' in incoming):
self.serverPK = incoming['keys']
continue
msg = self.RSA.decryptString(
incoming['message'], self.RSA.getPrivateKey())
if(incoming['user'] == self.SERVER_UDP_IP_ADDRESS+str(self.SERVER_UDP_PORT_NO)):
if(msg[0:5].strip() == 'nick'):
newUser = msg[5:]
self.user = newUser
print(
'[SERVER] -> Nome de usuario em uso! Seu novo nome e ' + newUser)
elif(msg[0:5].strip() == 'room'):
newRoom = msg[5:]
self.room = newRoom
print('[SERVER] -> Sala alterada para ' + newRoom)
else:
sys.stdout.write('\r'+'['+incoming['user']+'] -> '+msg)
sys.stdout.write('\n['+self.user+']: ')
def chat(self):
while True:
data = input("[" + self.user + "]: ")
if data == 'croom':
sys.stdout.write("\033[F")
newRoom = input("Digite a nova sala: ")
self.room = newRoom
self.sendMessage('croom ' + newRoom)
continue
elif data == '':
continue
elif data == 'disconnect':
self.sendMessage(data)
print('Desconectado do servidor')
break
sys.stdout.write("\033[F")
print('['+self.user+'] -> ' + data)
self.sendMessage(data)
def initClient(self):
self.RSA = RSA()
self.autenticate()
self.connectToServer()
threading.Thread(target=self.listenMessages).start()
threading.Thread(target=self.chat).start()
if len(sys.argv) == 1:
print('Para iniciar -> client.py server-ip')
elif len(sys.argv) == 2:
client = Client(sys.argv[1])
client.initClient()
|
main.py
|
import wave
import sys
import sound
from pynput.keyboard import KeyCode, Key, Controller, Listener
import threading
import time
#
keyboard = Controller()
global play
play = False
global track
track = 0
def on_press(key):
global play
global track
# print('{0} press'.format(key))
try:
if key == KeyCode.from_char('a'):
play = True
track = 0
if key == KeyCode.from_char('w'):
play = True
track = 1
if key == KeyCode.from_char('s'):
play = True
track = 2
if key == KeyCode.from_char('e'):
play = True
track = 3
if key == KeyCode.from_char('d'):
play = True
track = 4
if key == KeyCode.from_char('f'):
play = True
track = 5
if key == KeyCode.from_char('t'):
play = True
track = 6
if key == KeyCode.from_char('g'):
play = True
track = 7
if key == KeyCode.from_char('y'):
play = True
track = 8
if key == KeyCode.from_char('h'):
play = True
track = 9
if key == KeyCode.from_char('u'):
play = True
track = 10
if key == KeyCode.from_char('j'):
play = True
track = 11
except AttributeError:
print('special key {0} pressed'.format(key))
def on_release(key):
global play
global track
# print('{0} release'.format(key))
try:
if key == Key.esc:
return False
if key == KeyCode.from_char('a'):
play = False
if key == KeyCode.from_char('w'):
play = False
if key == KeyCode.from_char('s'):
play = False
if key == KeyCode.from_char('e'):
play = False
if key == KeyCode.from_char('d'):
play = False
if key == KeyCode.from_char('f'):
play = False
if key == KeyCode.from_char('t'):
play = False
if key == KeyCode.from_char('g'):
play = False
        if key == KeyCode.from_char('y'):
            play = False
        if key == KeyCode.from_char('h'):
            play = False
if key == KeyCode.from_char('u'):
play = False
if key == KeyCode.from_char('j'):
play = False
except AttributeError:
        print('special key {0} released'.format(key))
def task():
if len(sys.argv) < 2:
print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0])
sys.exit(-1)
wf = []
for index in range(1,len(sys.argv)):
wf.append(wave.open(sys.argv[index], 'rb'))
# wf.append(wave.open(wavefile, 'rb'))
# wf[1] = wave.open(sys.argv[1], 'rb')
channels = []
for wavefile in wf:
channels.append(wavefile.getnchannels())
# channels[1]=wf[1].getnchannels()
rate=[]
for wavefile in wf:
rate.append(wavefile.getframerate())
# rate[1]=wf[1].getframerate()
# for_mat=p.get_format_from_width(wf.getsampwidth())
file_open = 0
#
try:
global play
global track
data, stream, py_audio_handle = sound.OpenStream(wf[track])
file_open = 1
prev_track = track
while len(data) > 0:
if play==True and track==prev_track:
if file_open==0:
data, stream, py_audio_handle = sound.OpenStream(wf[track])
file_open = 1
data, stream, wf[track] = sound.play(data, stream, wf[track])
if play==False or track!=prev_track:
sound.stop(stream, py_audio_handle)
file_open = 0
prev_track = track
finally:
sound.stop(stream, py_audio_handle)
listener = Listener(on_press=on_press, on_release=on_release)
t = threading.Thread(target=task)
listener.start()
t.start()
try:
listener.join()
t.join()
finally:
listener.stop()
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
qualified_jobs = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
qualified_job = []
pc_job = []
epoch_end_job = []
K80_node = 'c2180'
V100_node = 'd1020'
host_node = 'c0175'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
QUALIFY_TIME = 300 # qualification threshold in seconds (300s = 5 min)
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def max_power_to_idle(V100_free, promote_list):
power_dict = {}
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
pool_dict = {}
for job in promote_list:
if 'job'+job in power_dict:
pool_dict[job] = power_dict['job'+job]
    # sort the jobs by measured power
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_free]
# return the jobs promoted to idle V100s
return sorted_pool
def max_power_one(K80_free, single_job, promote_list, force_demote):
power_dict = {}
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
# first check if job is force demote
if single_job in force_demote:
if len(promote_list) > 0: # promotion and demotion
pool_dict = {}
for job in promote_list:
if 'job'+job in power_dict:
pool_dict[job] = power_dict['job'+job]
# sort the jobs by power size
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:1]
promoted = list(set(promote_list).intersection(sorted_pool))
demoted = list(set([single_job]).difference(sorted_pool))
return promoted, demoted
elif K80_free > 0: # demotion only
return [], [single_job]
else: # no migration
return [], []
else: # compare power
if len(promote_list) > 0:
V100_pool = list(set([single_job]).union(promote_list))
pool_dict = {}
for job in V100_pool:
if 'job'+job in power_dict:
pool_dict[job] = power_dict['job'+job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:1]
promoted = list(set(promote_list).intersection(sorted_pool))
demoted = list(set([single_job]).difference(sorted_pool))
return promoted, demoted
else:
return [], []
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
send_signal(node, 'save ' + job)
global ovhd_start
ovhd_start[job] = time.time()
# after sending checkpoint signal, wait for it to finish
while True:
time.sleep(5)
with open('checkpoint.json', 'r') as fp2:
checkpoint_dict = json.load(fp2)
if checkpoint_dict['job'+job] == 1: # checkpoint has finished
print('checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp2:
fp2.write(json_file)
break
# also check if job has already finished
global finish_dict
if finish_dict['job'+job] == 1:
break
def kill_job(node, job): # kill_job('c2176', '50')
send_signal(node, 'kill ' + job)
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# start job
def start_job(node, gpu, job):
# first wait for pid.json to show up, rename pid.json to pid_lock.json
# then in jobx.py, modify pid_lock.json, rename it to pid.json
# then wait for pid.json to show up
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# measure job
def measure_job(node, gpu, job):
cmd = 'measure ' + job + ' gpu ' + gpu
send_signal(node, cmd)
############### first clear finish status of all jobs ####################
pid_dict = {}
with open('pid.json', 'r') as fp:
pid_dict = json.load(fp)
for key in pid_dict:
pid_dict[key] = 0
json_file = json.dumps(pid_dict)
with open('pid.json', 'w') as fp:
fp.write(json_file)
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
for key in checkpoint_dict:
checkpoint_dict[key] = 0
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file)
power_dict = {}
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
for key in power_dict:
power_dict[key] = 0
json_file = json.dumps(power_dict)
with open('power.json', 'w') as fp:
fp.write(json_file)
run_pid_dict = {}
with open('run_pid.json', 'r') as fp:
run_pid_dict = json.load(fp)
for key in run_pid_dict:
run_pid_dict[key] = 0
json_file = json.dumps(run_pid_dict)
with open('run_pid.json', 'w') as fp:
fp.write(json_file)
# initialize all parameters to 0
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
for key in epoch_waste_dict:
epoch_waste_dict[key] = 0
json_file = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file)
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
if 'param' in data_str:
pass
elif 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
finish_dict[job_name] = 1
elif 'epoch_end' in data_str:
global epoch_end_job
job_name = data_str.split(' ')[0]
if job_name not in epoch_end_job:
epoch_end_job.append(job_name)
print(epoch_end_job)
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
# if the job is not qualified for promotion, kill its run.sh processes
if job not in qualified_job:
kill_job(K80_node, job)
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
################ check run time of current K80 job, update qualified_job #################
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
for job in list(K80_job.values()):
if job not in qualified_job and job != 'idle':
pwr_meas = power_dict['job'+job]
if pwr_meas > 0:
qualified_job.append(job)
print('job' + job + ' has been qualified for promotion')
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
# first for empty gpus, promote from promote_list
promote_idle = max_power_to_idle(V100_free, promote_list)
promote_list_new = list(set(promote_list).difference(promote_idle))
if len(promote_idle) > 0:
print('promoted jobs to idle: ', promote_idle)
# stop all promoted jobs on K80
for gpu, job in K80_job.items():
if job in promote_idle:
save_job(K80_node, job)
K80_job[gpu] = 'idle'
K80_used -= 1
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promote_idle[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
V100_job[gpu] = job_new
promote_idle.remove(job_new)
V100_used += 1
break
else: # job that has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
promote_idle.remove(job_new)
K80_free = K80_cap - K80_used
V100_epoch = []
if len(epoch_end_job) > 0:
job = epoch_end_job[0]
epoch_end_job.remove(job)
if job in V100_job.values() and job not in promote_idle:
V100_epoch.append(job)
if len(V100_epoch) > 0:
promoted_job, demoted_job = max_power_one(K80_free, V100_epoch[0], promote_list_new, force_demote)
if len(promoted_job) > 0:
print('promoted jobs: ', promoted_job)
for gpu, job in K80_job.items():
if job in promoted_job:
save_job(K80_node, job)
K80_job[gpu] = 'idle'
K80_used -= 1
if len(demoted_job) > 0:
print('demoted jobs: ', demoted_job)
for gpu, job in V100_job.items():
if job in demoted_job:
save_job(V100_node, job)
V100_job[gpu] = 'idle'
V100_used -= 1
if len(promoted_job) > 0:
job_new = promoted_job[0]
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
V100_job[gpu] = job_new
promoted_job.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
promoted_job.remove(job_new)
if len(demoted_job) > 0:
job_new = demoted_job[0]
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted_job.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
demoted_job.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted_job) > 0 or len(demoted_job) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
start_job(K80_node, gpu, job_new)
measure_job(K80_node, gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
############### wait for next iteration
time.sleep(1)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
|
semihost.py
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import io
import logging
import time
import datetime
import threading
import socket
import traceback
import six
import pyOCD
from ..gdbserver.gdb_socket import GDBSocket
from ..gdbserver.gdb_websocket import GDBWebSocket
from ..core import exceptions
# Debug logging options
LOG_SEMIHOST = True
## bkpt #0xab instruction
BKPT_INSTR = 0xbeab
# ARM semihosting request numbers.
TARGET_SYS_OPEN = 0x01
TARGET_SYS_CLOSE = 0x02
TARGET_SYS_WRITEC = 0x03
TARGET_SYS_WRITE0 = 0x04
TARGET_SYS_WRITE = 0x05
TARGET_SYS_READ = 0x06
TARGET_SYS_READC = 0x07
TARGET_SYS_ISERROR = 0x08
TARGET_SYS_ISTTY = 0x09
TARGET_SYS_SEEK = 0x0a
TARGET_SYS_FLEN = 0x0c
TARGET_SYS_TMPNAM = 0x0d
TARGET_SYS_REMOVE = 0x0e
TARGET_SYS_RENAME = 0x0f
TARGET_SYS_CLOCK = 0x10
TARGET_SYS_TIME = 0x11
TARGET_SYS_SYSTEM = 0x12
TARGET_SYS_ERRNO = 0x13
TARGET_SYS_GET_CMDLINE = 0x15
TARGET_SYS_HEAPINFO = 0x16
angel_SWIreason_EnterSVC = 0x17
TARGET_SYS_EXIT = 0x18 # Also called angel_SWIreason_ReportException
TARGET_SYS_ELAPSED = 0x30
TARGET_SYS_TICKFREQ = 0x31
# Pseudo-file descriptor numbers. The fds must be non-zero according to the
# ARM semihosting spec.
STDIN_FD = 1
STDOUT_FD = 2
STDERR_FD = 3
## Maximum length of a null-terminated string we'll attempt to read from target memory.
#
# The length is limited in case the string isn't terminated.
#
# @see SemihostAgent::_get_string()
MAX_STRING_LENGTH = 2048
##
# @brief Interface for semihosting file I/O handlers.
#
# This class is also used as the default I/O handler if none is provided to SemihostAgent.
# In this case, all file I/O requests are rejected.
class SemihostIOHandler(object):
def __init__(self):
self.agent = None
self._errno = 0
def cleanup(self):
pass
@property
def errno(self):
return self._errno
## @brief Helper for standard I/O open requests.
#
# In the ARM semihosting spec, standard I/O files are opened using a filename of ":tt"
# with the open mode specifying which standard I/O file to open. This method takes care
# of these special open requests, and is intended to be used by concrete I/O handler
# subclasses.
#
# @return A 2-tuple of the file descriptor and filename. The filename is returned so it
# only has to be read from target memory once if the request is not for standard I/O.
    # The returned file descriptor may be one of 1, 2, or 3 for the standard I/O files,
# -1 if an invalid combination was requested, or None if the request was not for
# a standard I/O file (i.e., the filename was not ":tt"). If None is returned for the
# file descriptor, the caller must handle the open request.
def _std_open(self, fnptr, fnlen, mode):
filename = self.agent._get_string(fnptr, fnlen)
logging.debug("Semihost: open '%s' mode %s", filename, mode)
# Handle standard I/O.
if filename == ':tt':
if mode == 'r':
fd = STDIN_FD
elif mode == 'w':
fd = STDOUT_FD
elif mode == 'a':
fd = STDERR_FD
else:
logging.warning("Unrecognized semihosting console open file combination: mode=%s", mode)
return -1, filename
return fd, filename
return None, filename
def open(self, fnptr, fnlen, mode):
raise NotImplementedError()
def close(self, fd):
raise NotImplementedError()
def write(self, fd, ptr, length):
raise NotImplementedError()
def read(self, fd, ptr, length):
raise NotImplementedError()
def readc(self):
raise NotImplementedError()
def istty(self, fd):
raise NotImplementedError()
def seek(self, fd, pos):
raise NotImplementedError()
def flen(self, fd):
raise NotImplementedError()
def remove(self, ptr, length):
raise NotImplementedError()
def rename(self, oldptr, oldlength, newptr, newlength):
raise NotImplementedError()
##
# @brief Implements semihosting requests directly in the Python process.
#
# This class maintains its own list of pseudo-file descriptors for files opened by the
# debug target. By default, this class uses the system stdin, stdout, and stderr file objects
# for file descriptors 1, 2, and 3.
class InternalSemihostIOHandler(SemihostIOHandler):
def __init__(self):
super(InternalSemihostIOHandler, self).__init__()
self.next_fd = STDERR_FD + 1
# Go ahead and connect standard I/O.
self.open_files = {
STDIN_FD : sys.stdin,
STDOUT_FD : sys.stdout,
STDERR_FD : sys.stderr
}
def _is_valid_fd(self, fd):
return fd in self.open_files and self.open_files[fd] is not None
def cleanup(self):
for f in (self.open_files[k] for k in self.open_files if k > STDERR_FD):
f.close()
def open(self, fnptr, fnlen, mode):
fd, filename = self._std_open(fnptr, fnlen, mode)
if fd is not None:
return fd
try:
fd = self.next_fd
self.next_fd += 1
f = io.open(filename, mode)
self.open_files[fd] = f
return fd
except IOError as e:
self._errno = e.errno
logging.error("Semihost: failed to open file '%s'", filename)
traceback.print_exc()
return -1
def close(self, fd):
if fd > STDERR_FD:
if not self._is_valid_fd(fd):
return -1
f = self.open_files.pop(fd)
try:
f.close()
except OSError:
# Ignore errors closing files.
pass
return 0
def write(self, fd, ptr, length):
if not self._is_valid_fd(fd):
# Return byte count not written.
return length
data = self.agent._get_string(ptr, length)
try:
f = self.open_files[fd]
if 'b' not in f.mode:
data = six.text_type(data)
f.write(data)
f.flush()
return 0
except IOError as e:
self._errno = e.errno
logging.debug("Semihost: exception: %s", e)
return -1
def read(self, fd, ptr, length):
if not self._is_valid_fd(fd):
# Return byte count not read.
return length
try:
f = self.open_files[fd]
data = f.read(length)
if 'b' not in f.mode:
data = data.encode()
except IOError as e:
self._errno = e.errno
logging.debug("Semihost: exception: %s", e)
return -1
data = bytearray(data)
self.agent.context.writeBlockMemoryUnaligned8(ptr, data)
return length - len(data)
def readc(self):
try:
f = self.open_files[STDIN_FD]
if f is not None:
data = f.read(1)
if 'b' not in f.mode:
data = data.encode()
return data
else:
return 0
except OSError as e:
self._errno = e.errno
return 0
def istty(self, fd):
if not self._is_valid_fd(fd):
return -1
# Just assume that stdio is a terminal and other files aren't.
return int(not fd > STDERR_FD)
def seek(self, fd, pos):
if not self._is_valid_fd(fd):
return -1
try:
self.open_files[fd].seek(pos)
return 0
except IOError as e:
self._errno = e.errno
return -1
def flen(self, fd):
if not self._is_valid_fd(fd):
return -1
try:
info = os.fstat(self.open_files[fd].fileno())
return info.st_size
except OSError as e:
self._errno = e.errno
return -1
##
# @brief Serves a telnet connection for semihosting.
#
# Not all semihost requests are supported. This class is meant to be used only for the
# debug console. Pass an instance for the @i console parameter of the SemihostAgent
# constructor.
#
# The server thread will automatically be started by the constructor. To shut down the
# server and its thread, call the stop() method.
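# A minimal usage sketch (the port number and teardown order are assumptions, not taken
# from this file): serve the semihost console over telnet and pass the handler as the
# console parameter of SemihostAgent, then shut both down when finished.
#
#   telnet = TelnetSemihostIOHandler(4444, serve_local_only=True)
#   agent = SemihostAgent(context, io_handler=InternalSemihostIOHandler(), console=telnet)
#   ...
#   agent.cleanup()
#   telnet.stop()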
class TelnetSemihostIOHandler(SemihostIOHandler):
def __init__(self, port_or_url, serve_local_only=True):
super(TelnetSemihostIOHandler, self).__init__()
self._abstract_socket = None
self._wss_server = None
self._port = 0
        if isinstance(port_or_url, str):
self._wss_server = port_or_url
self._abstract_socket = GDBWebSocket(self._wss_server)
else:
self._port = port_or_url
self._abstract_socket = GDBSocket(self._port, 4096)
if serve_local_only:
self._abstract_socket.host = 'localhost'
self._buffer = bytearray()
self._buffer_lock = threading.Lock()
self.connected = None
self._shutdown_event = threading.Event()
self._thread = threading.Thread(target=self._server, name="semihost-telnet")
self._thread.daemon = True
self._thread.start()
def stop(self):
self._shutdown_event.set()
self._thread.join()
def _server(self):
logging.info("Telnet: server started on port %s", str(self._port))
self.connected = None
try:
while not self._shutdown_event.is_set():
# Wait for a client to connect.
# TODO support multiple client connections
while not self._shutdown_event.is_set():
self.connected = self._abstract_socket.connect()
if self.connected is not None:
logging.debug("Telnet: client connected")
break
if self._shutdown_event.is_set():
break
# Set timeout on new connection.
self._abstract_socket.setTimeout(0.1)
# Keep reading from the client until we either get a shutdown event, or
# the client disconnects. The incoming data is appended to our read buffer.
while not self._shutdown_event.is_set():
try:
data = self._abstract_socket.read()
if len(data) == 0:
# Client disconnected.
self._abstract_socket.close()
self.connected = None
break
self._buffer_lock.acquire()
self._buffer += bytearray(data)
self._buffer_lock.release()
except socket.timeout:
pass
finally:
self._abstract_socket.close()
logging.info("Telnet: server stopped")
def write(self, fd, ptr, length):
# If nobody is connected, act like all data was written anyway.
if self.connected is None:
return 0
data = self.agent._get_string(ptr, length)
remaining = len(data)
while remaining:
count = self._abstract_socket.write(data)
remaining -= count
if remaining:
data = data[count:]
return 0
## @brief Extract requested amount of data from the read buffer.
def _get_input(self, length):
self._buffer_lock.acquire()
try:
actualLength = min(length, len(self._buffer))
if actualLength:
data = self._buffer[:actualLength]
self._buffer = self._buffer[actualLength:]
else:
data = bytearray()
return data
finally:
self._buffer_lock.release()
def read(self, fd, ptr, length):
if self.connected is None:
return -1
# Extract requested amount of data from the read buffer.
data = self._get_input(length)
# Stuff data into provided buffer.
if data:
self.agent.context.writeBlockMemoryUnaligned8(ptr, data)
result = length - len(data)
if not data:
self._errno = 5
return -1
return result
def readc(self):
if self.connected is None:
return -1
data = self._get_input(1)
if data:
return data[0]
else:
return -1
##
# @brief Handler for ARM semihosting requests.
#
# Semihosting requests are made by the target by executing a 'bkpt #0xab' instruction. The
# requested operation is specified by R0 and any arguments by R1. Many requests use a block
# of word-sized arguments pointed to by R1. The return value is passed back to the target
# in R0.
#
# This class does not handle any file-related requests by itself. It uses I/O handler objects
# passed in to the constructor. The requests handled directly by this class are #TARGET_SYS_CLOCK
# and #TARGET_SYS_TIME.
#
# There are two types of I/O handlers used by this class. The main I/O handler, set
# with the constructor's @i io_handler parameter, is used for most file operations.
# You may optionally pass another I/O handler for the @i console constructor parameter. The
# console handler is used solely for standard I/O and debug console I/O requests. If no console
# handler is provided, the main handler is used instead. TARGET_SYS_OPEN requests are not
# passed to the console handler in any event, they are always passed to the main handler.
#
# If no main I/O handler is provided, the class will use SemihostIOHandler, which causes all
# file I/O requests to be rejected as an error.
#
# The SemihostAgent assumes standard I/O file descriptor numbers are #STDIN_FD, #STDOUT_FD,
# and #STDERR_FD. When it receives a read or write request for one of these descriptors, it
# passes the request to the console handler. This means the main handler must return these
# numbers for standard I/O open requests (those with a file name of ":tt").
#
# Not all semihosting requests are supported. Those that are not implemented are:
# - TARGET_SYS_TMPNAM
# - TARGET_SYS_SYSTEM
# - TARGET_SYS_GET_CMDLINE
# - TARGET_SYS_HEAPINFO
# - TARGET_SYS_EXIT
# - TARGET_SYS_ELAPSED
# - TARGET_SYS_TICKFREQ
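# A minimal driving-loop sketch (the halt/resume helpers are hypothetical; only the
# SemihostAgent calls are taken from this file): construct the agent once, and after each
# target halt ask it whether the stop was a semihosting 'bkpt #0xab' request.
#
#   agent = SemihostAgent(context, io_handler=InternalSemihostIOHandler())
#   while debugging:                                   # hypothetical outer debug loop
#       wait_for_halt()                                # hypothetical helper
#       if agent.check_and_handle_semihost_request():
#           resume_target()                            # hypothetical helper; resume right away
#       else:
#           report_stop_to_user()                      # a genuine breakpoint or fault
#   agent.cleanup()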
class SemihostAgent(object):
## Index into this array is the file open mode argument to TARGET_SYS_OPEN.
OPEN_MODES = ['r', 'rb', 'r+', 'r+b', 'w', 'wb', 'w+', 'w+b', 'a', 'ab', 'a+', 'a+b']
EPOCH = datetime.datetime(1970, 1, 1)
def __init__(self, context, io_handler=None, console=None):
self.context = context
self.start_time = time.time()
self.io_handler = io_handler or SemihostIOHandler()
self.io_handler.agent = self
self.console = console or self.io_handler
self.console.agent = self
self.request_map = {
TARGET_SYS_OPEN : self.handle_sys_open,
TARGET_SYS_CLOSE : self.handle_sys_close,
TARGET_SYS_WRITEC : self.handle_sys_writec,
TARGET_SYS_WRITE0 : self.handle_sys_write0,
TARGET_SYS_WRITE : self.handle_sys_write,
TARGET_SYS_READ : self.handle_sys_read,
TARGET_SYS_READC : self.handle_sys_readc,
TARGET_SYS_ISERROR : self.handle_sys_iserror,
TARGET_SYS_ISTTY : self.handle_sys_istty,
TARGET_SYS_SEEK : self.handle_sys_seek,
TARGET_SYS_FLEN : self.handle_sys_flen,
TARGET_SYS_TMPNAM : self.handle_sys_tmpnam,
TARGET_SYS_REMOVE : self.handle_sys_remove,
TARGET_SYS_RENAME : self.handle_sys_rename,
TARGET_SYS_CLOCK : self.handle_sys_clock,
TARGET_SYS_TIME : self.handle_sys_time,
TARGET_SYS_SYSTEM : self.handle_sys_system,
TARGET_SYS_ERRNO : self.handle_sys_errno,
TARGET_SYS_GET_CMDLINE : self.handle_sys_get_cmdline,
TARGET_SYS_HEAPINFO : self.handle_sys_heapinfo,
TARGET_SYS_EXIT : self.handle_sys_exit,
TARGET_SYS_ELAPSED : self.handle_sys_elapsed,
TARGET_SYS_TICKFREQ : self.handle_sys_tickfreq
}
## @brief Handle a semihosting request.
#
# This method should be called after the target has halted, to check if the halt was
# due to a semihosting request. It first checks to see if the target halted because
# of a breakpoint. If so, it reads the instruction at PC to make sure it is a 'bkpt #0xAB'
# instruction. If so, the target is making a semihosting request. If not, nothing more is done.
#
# After the request is handled, the PC is advanced to the next instruction after the 'bkpt'.
# A boolean is return indicating whether a semihosting request was handled. If True, the
# caller should resume the target immediately.
#
# @retval True A semihosting request was handled.
# @retval False The target halted for a reason other than semihosting, i.e. a user-installed
# debugging breakpoint.
def check_and_handle_semihost_request(self):
# Nothing to do if this is not a bkpt.
if (self.context.read32(pyOCD.coresight.cortex_m.CortexM.DFSR) &
pyOCD.coresight.cortex_m.CortexM.DFSR_BKPT) == 0:
return False
pc = self.context.readCoreRegister('pc')
# Are we stopped due to one of our own breakpoints?
bp = self.context.core.findBreakpoint(pc)
if bp:
return False
# Get the instruction at the breakpoint.
instr = self.context.read16(pc)
# Check for semihost bkpt.
if instr != BKPT_INSTR:
return False
# Advance PC beyond the bkpt instruction.
self.context.writeCoreRegister('pc', pc + 2)
# Get args
op = self.context.readCoreRegister('r0')
args = self.context.readCoreRegister('r1')
# Handle request
handler = self.request_map.get(op, None)
if handler:
try:
result = handler(args)
except NotImplementedError:
logging.warning("Semihost: unimplemented request pc=%x r0=%x r1=%x", pc, op, args)
result = -1
except Exception as e:
logging.warning("Exception while handling semihost request: %s", e)
                traceback.print_exc()
result = -1
else:
result = -1
# Set return value.
self.context.writeCoreRegister('r0', result)
return True
## @brief Clean up any resources allocated by semihost requests.
#
# @note May be called more than once.
def cleanup(self):
self.io_handler.cleanup()
if self.console is not self.io_handler:
self.console.cleanup()
def _get_args(self, args, count):
args = self.context.readBlockMemoryAligned32(args, count)
if count == 1:
return args[0]
else:
return args
def _get_string(self, ptr, length=None):
if length is not None:
data = self.context.readBlockMemoryUnaligned8(ptr, length)
return str(bytearray(data))
target_str = ''
# TODO - use memory map to make sure we don't try to read off the end of memory
# Limit string size in case it isn't terminated.
while len(target_str) < MAX_STRING_LENGTH:
try:
# Read 32 bytes at a time for efficiency.
data = self.context.readBlockMemoryUnaligned8(ptr, 32)
terminator = data.index(0)
# Found a null terminator, append data up to but not including the null
# and then exit the loop.
target_str += str(bytearray(data[:terminator]))
break
except exceptions.TransferError:
# Failed to read some or all of the string.
break
except ValueError:
# No null terminator was found. Append all of data.
target_str += str(bytearray(data))
ptr += 32
return target_str
def handle_sys_open(self, args):
fnptr, mode, fnlen = self._get_args(args, 3)
if mode >= len(self.OPEN_MODES):
return -1
mode = self.OPEN_MODES[mode]
if LOG_SEMIHOST:
logging.debug("Semihost: open %x/%x, mode %s", fnptr, fnlen, mode)
return self.io_handler.open(fnptr, fnlen, mode)
def handle_sys_close(self, args):
fd = self._get_args(args, 1)
if LOG_SEMIHOST:
logging.debug("Semihost: close fd=%d", fd)
return self.io_handler.close(fd)
def handle_sys_writec(self, args):
if LOG_SEMIHOST:
logging.debug("Semihost: writec %x", args)
return self.console.write(STDOUT_FD, args, 1)
def handle_sys_write0(self, args):
msg = self._get_string(args)
if LOG_SEMIHOST:
logging.debug("Semihost: write0 msg='%s'", msg)
return self.console.write(STDOUT_FD, args, len(msg))
def handle_sys_write(self, args):
fd, data_ptr, length = self._get_args(args, 3)
if LOG_SEMIHOST:
logging.debug("Semihost: write fd=%d ptr=%x len=%d", fd, data_ptr, length)
if fd in (STDOUT_FD, STDERR_FD):
return self.console.write(fd, data_ptr, length)
else:
return self.io_handler.write(fd, data_ptr, length)
def handle_sys_read(self, args):
fd, ptr, length = self._get_args(args, 3)
if LOG_SEMIHOST:
logging.debug("Semihost: read fd=%d ptr=%x len=%d", fd, ptr, length)
if fd == STDIN_FD:
return self.console.read(fd, ptr, length)
else:
return self.io_handler.read(fd, ptr, length)
def handle_sys_readc(self, args):
if LOG_SEMIHOST:
logging.debug("Semihost: readc")
return self.console.readc()
def handle_sys_iserror(self, args):
raise NotImplementedError()
def handle_sys_istty(self, args):
fd = self._get_args(args, 1)
if LOG_SEMIHOST:
logging.debug("Semihost: istty fd=%d", fd)
return self.io_handler.istty(fd)
def handle_sys_seek(self, args):
fd, pos = self._get_args(args, 2)
if LOG_SEMIHOST:
logging.debug("Semihost: seek fd=%d pos=%d", fd, pos)
return self.io_handler.seek(fd, pos)
def handle_sys_flen(self, args):
fd = self._get_args(args, 1)
if LOG_SEMIHOST:
logging.debug("Semihost: flen fd=%d", fd)
return self.io_handler.flen(fd)
def handle_sys_tmpnam(self, args):
raise NotImplementedError()
def handle_sys_remove(self, args):
ptr, length = self._get_args(args, 2)
return self.io_handler.remove(ptr, length)
def handle_sys_rename(self, args):
oldptr, oldlength, newptr, newlength = self._get_args(args, 4)
return self.io_handler.rename(oldptr, oldlength, newptr, newlength)
def handle_sys_clock(self, args):
now = time.time()
delta = now - self.start_time
return int(delta * 100)
def handle_sys_time(self, args):
now = datetime.datetime.now()
delta = now - self.EPOCH
seconds = (delta.days * 86400) + delta.seconds
return seconds
def handle_sys_system(self, args):
raise NotImplementedError()
def handle_sys_errno(self, args):
return self.io_handler.errno
def handle_sys_get_cmdline(self, args):
raise NotImplementedError()
def handle_sys_heapinfo(self, args):
raise NotImplementedError()
def handle_sys_exit(self, args):
raise NotImplementedError()
def handle_sys_elapsed(self, args):
raise NotImplementedError()
def handle_sys_tickfreq(self, args):
raise NotImplementedError()
|
__init__.py
|
import time
import threading
import math
import arrow
import click
import pydash
from jesse.models import Candle
from jesse.exceptions import CandleNotFoundInExchange
from jesse.modes.import_candles_mode.drivers.interface import CandleExchange
from jesse.modes.import_candles_mode.drivers import drivers
import jesse.helpers as jh
def run(exchange: str, symbol: str, start_date_str: str, skip_confirmation=False):
try:
start_timestamp = jh.arrow_to_timestamp(arrow.get(start_date_str, 'YYYY-MM-DD'))
except:
raise ValueError('start_date must be a string representing a date before today. ex: 2020-01-17')
# more start_date validations
today = arrow.utcnow().floor('day').timestamp * 1000
if start_timestamp == today:
raise ValueError("Today's date is not accepted. start_date must be a string a representing date BEFORE today.")
elif start_timestamp > today:
raise ValueError("Future's date is not accepted. start_date must be a string a representing date BEFORE today.")
click.clear()
symbol = symbol.upper()
until_date = arrow.utcnow().floor('day')
start_date = arrow.get(start_timestamp / 1000)
days_count = jh.date_diff_in_days(start_date, until_date)
candles_count = days_count * 1440
exchange = exchange.title()
try:
driver: CandleExchange = drivers[exchange]()
except KeyError:
raise ValueError('entered exchange is not supported')
loop_length = int(candles_count / driver.count) + 1
# ask for confirmation
if not skip_confirmation:
click.confirm(
'Importing {} days candles from "{}" for "{}". Duplicates will be skipped. All good?'
.format(days_count, exchange, symbol), abort=True, default=True)
with click.progressbar(length=loop_length, label='Importing candles...') as progressbar:
for _ in range(candles_count):
temp_start_timestamp = start_date.timestamp * 1000
temp_end_timestamp = temp_start_timestamp + (driver.count - 1) * 60000
# to make sure it won't try to import candles from the future! LOL
if temp_start_timestamp > jh.now():
break
            # prevent duplicate calls to boost performance
count = Candle.select().where(
Candle.timestamp.between(temp_start_timestamp, temp_end_timestamp),
Candle.symbol == symbol,
Candle.exchange == exchange
).count()
already_exists = count == driver.count
if not already_exists:
                # temp_end_timestamp in the future means we're importing today's candles; clamp to the last closed minute
if temp_end_timestamp > jh.now():
temp_end_timestamp = arrow.utcnow().floor('minute').timestamp * 1000 - 60000
# fetch from market
candles = driver.fetch(symbol, temp_start_timestamp)
if not len(candles):
click.clear()
first_existing_timestamp = driver.get_starting_time(symbol)
# if driver can't provide accurate get_starting_time()
if first_existing_timestamp is None:
raise CandleNotFoundInExchange(
'No candles exists in the market for this day: {} \n'
'Try another start_date'.format(
jh.timestamp_to_time(temp_start_timestamp)[:10],
)
)
# handle when there's missing candles during the period
if temp_start_timestamp > first_existing_timestamp:
# see if there are candles for the same date for the backup exchange,
# if so, get those, if not, download from that exchange.
driver.init_backup_exchange()
if driver.backup_exchange is not None:
candles = _get_candles_from_backup_exchange(
exchange, driver.backup_exchange, symbol, temp_start_timestamp, temp_end_timestamp
)
else:
if not skip_confirmation:
print(jh.color('No candle exists in the market for {}\n'.format(
jh.timestamp_to_time(temp_start_timestamp)[:10]), 'yellow'))
click.confirm(
'First present candle is since {}. Would you like to continue?'.format(
jh.timestamp_to_time(first_existing_timestamp)[:10]
)
, abort=True, default=True)
run(exchange, symbol, jh.timestamp_to_time(first_existing_timestamp)[:10], True)
return
# fill absent candles (if there's any)
candles = _fill_absent_candles(candles, temp_start_timestamp, temp_end_timestamp)
# store in the database
if skip_confirmation:
_insert_to_database(candles)
else:
threading.Thread(target=_insert_to_database, args=[candles]).start()
# add as much as driver's count to the temp_start_time
start_date = start_date.shift(minutes=driver.count)
progressbar.update(1)
# sleep so that the exchange won't get angry at us
if not already_exists:
time.sleep(driver.sleep_time)
def _get_candles_from_backup_exchange(
exchange: str,
backup_driver: CandleExchange,
symbol: str,
start_timestamp: int,
end_timestamp: int
):
total_candles = []
# try fetching from database first
backup_candles = Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.timestamp.between(start_timestamp, end_timestamp),
Candle.exchange == backup_driver.name,
Candle.symbol == symbol).order_by(Candle.timestamp.asc()).tuples()
already_exists = len(backup_candles) == (end_timestamp - start_timestamp) / 60_000 + 1
if already_exists:
# loop through them and set new ID and exchange
for c in backup_candles:
total_candles.append({
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': c[0],
'open': c[1],
'close': c[2],
'high': c[3],
'low': c[4],
'volume': c[5]
})
return total_candles
# try fetching from market now
days_count = jh.date_diff_in_days(jh.timestamp_to_arrow(start_timestamp), jh.timestamp_to_arrow(end_timestamp))
# make sure it's rounded up so that we import maybe more candles, but not less
if days_count < 1:
days_count = 1
if type(days_count) is float and not days_count.is_integer():
days_count = math.ceil(days_count)
candles_count = days_count * 1440
start_date = jh.timestamp_to_arrow(start_timestamp).floor('day')
for _ in range(candles_count):
temp_start_timestamp = start_date.timestamp * 1000
temp_end_timestamp = temp_start_timestamp + (backup_driver.count - 1) * 60000
# to make sure it won't try to import candles from the future! LOL
if temp_start_timestamp > jh.now():
break
# prevent duplicates
count = Candle.select().where(
Candle.timestamp.between(temp_start_timestamp, temp_end_timestamp),
Candle.symbol == symbol,
Candle.exchange == backup_driver.name
).count()
already_exists = count == backup_driver.count
if not already_exists:
            # temp_end_timestamp in the future means we're importing today's candles; clamp to the last closed minute
if temp_end_timestamp > jh.now():
temp_end_timestamp = arrow.utcnow().floor('minute').timestamp * 1000 - 60000
# fetch from market
candles = backup_driver.fetch(symbol, temp_start_timestamp)
if not len(candles):
raise CandleNotFoundInExchange(
'No candles exists in the market for this day: {} \n'
'Try another start_date'.format(
jh.timestamp_to_time(temp_start_timestamp)[:10],
)
)
# fill absent candles (if there's any)
candles = _fill_absent_candles(candles, temp_start_timestamp, temp_end_timestamp)
# store in the database
_insert_to_database(candles)
# add as much as driver's count to the temp_start_time
start_date = start_date.shift(minutes=backup_driver.count)
# sleep so that the exchange won't get angry at us
if not already_exists:
time.sleep(backup_driver.sleep_time)
# now try fetching from database again. Why? because we might have fetched more
    # than what's needed, but we only want as much as was requested. Don't worry, the next
# request will probably fetch from database and there won't be any waste!
backup_candles = Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.timestamp.between(start_timestamp, end_timestamp),
Candle.exchange == backup_driver.name,
Candle.symbol == symbol).order_by(Candle.timestamp.asc()).tuples()
already_exists = len(backup_candles) == (end_timestamp - start_timestamp) / 60_000 + 1
if already_exists:
# loop through them and set new ID and exchange
for c in backup_candles:
total_candles.append({
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': c[0],
'open': c[1],
'close': c[2],
'high': c[3],
'low': c[4],
'volume': c[5]
})
return total_candles
def _fill_absent_candles(temp_candles, start_timestamp, end_timestamp):
if len(temp_candles) == 0:
raise CandleNotFoundInExchange(
'No candles exists in the market for this day: {} \n'
'Try another start_date'.format(
jh.timestamp_to_time(start_timestamp)[:10],
)
)
symbol = temp_candles[0]['symbol']
exchange = temp_candles[0]['exchange']
candles = []
first_candle = temp_candles[0]
started = False
loop_length = ((end_timestamp - start_timestamp) / 60000) + 1
i = 0
while i < loop_length:
candle_for_timestamp = pydash.find(
temp_candles, lambda c: c['timestamp'] == start_timestamp)
if candle_for_timestamp is None:
if started:
last_close = candles[-1]['close']
candles.append({
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': start_timestamp,
'open': last_close,
'high': last_close,
'low': last_close,
'close': last_close,
'volume': 0
})
else:
candles.append({
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': start_timestamp,
'open': first_candle['open'],
'high': first_candle['open'],
'low': first_candle['open'],
'close': first_candle['open'],
'volume': 0
})
# candle is present
else:
started = True
candles.append(candle_for_timestamp)
start_timestamp += 60000
i += 1
return candles
def _insert_to_database(candles):
Candle.insert_many(candles).on_conflict_ignore().execute()
|
model_inference_server.py
|
"""
model_inference_server.py
Server-side logic to handle model inference requests from the client web_server.py.
Delivers the model's playthrough frame by frame to web_server.py through a TCP/IP connection.
author: @justjoshtings
created: 3/16/2022
"""
import socket
import threading
import numpy as np
import cv2
import pickle
import struct
from time import sleep
import time
import copy
from GameModels import DQNAgentService
from ImageProcessor import PostProcessor
import socket_server_credentials
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(0)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
def start():
'''
Start server listening procedure and passes connections to handle_client() with multithread.
'''
PORT = socket_server_credentials.PORT
HOST_NAME = socket.gethostname()
print(HOST_NAME)
print(PORT)
HOST_IP = socket.gethostbyname('localhost')
#HOST_IP = socket.gethostbyname(HOST_NAME) # use this when on servers, not local
HOST_IP = get_ip()
ADDR = (HOST_IP, PORT)
print("[STARTING] Sever is starting...")
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
server.listen()
print(f"[LISTENING] Server listening on IP: {HOST_IP} and PORT: {PORT}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
def handle_client(conn, addr):
'''
Handles new client connection and sends message.
Params:
conn (socket.socket object) : object representing connection to client
addr (tuple) : (IP address of client connection as str, port number as int)
'''
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
try:
while connected:
for frame in stream_gameplay():
serialise_frame = pickle.dumps(frame)
message = struct.pack("Q",len(serialise_frame))+serialise_frame #Q: unsigned long long format, 8 bytes size
# message consists of first n bytes of message + byte stream of image frame
print('Sending',len(message))
conn.sendall(message)
# For testing, send static
# for i in range(100):
# img = np.random.randint(0,255,size=(400,400))
# serialise_img = pickle.dumps(img)
# message = struct.pack("Q",len(serialise_img))+serialise_img #Q: unsigned long long format, 8 bytes size
# # message consists of first n bytes of message + byte stream of image frame
# conn.sendall(message)
# img = img.astype(np.uint8)
connected = False
except BrokenPipeError:
print(f"[LOST CONNECTION] Lost/Broken connection to {addr}.")
conn.close()
print(f"[CLOSED CONNECTION] {addr} successfully closed.")
def load_model(model_path, env_name):
# Model parameters
window_length = 4
input_shape = (105, 105)
ms_pacman_model = DQNAgentService(model_height=input_shape[0], model_width=input_shape[1], env_name=env_name, window_length=window_length, model_name='Final_Model', model_channels=0)
ms_pacman_model.build_model()
ms_pacman_model.build_agent(policy_value_max=1., policy_value_min=.1, policy_value_test=.1, policy_nb_steps=8000,
enable_double_dqn=False, enable_dueling_network=True, dueling_type='avg', nb_steps_warmup=2500)
ms_pacman_model.load_weights(model_path)
return window_length, input_shape, ms_pacman_model
def stream_gameplay():
MODEL_WEIGHTS_PATH = './models/Dueling_DQN_Round2_weights_final_steps15000.h5f'
GAME_ENV_NAME = 'ALE/MsPacman-v5'
# Load model and environment
window_length, input_shape, ms_pacman_model = load_model(MODEL_WEIGHTS_PATH, GAME_ENV_NAME)
# Init objects to calculate and maintain fps
n_frame = 1
n_frames = []
start_time = time.time()
fps_maintain = 20
last_n_frames = 5
# Post-processor on observations returned from gameplay
broadcast_processor = PostProcessor()
# Start gameplay
for observation, observation_deprocessed, action, done in ms_pacman_model.play_gen():
# Calc most recent last_n_frames fps
if len(n_frames) < last_n_frames:
fps = 0
else:
fps = round(last_n_frames/sum(n_frames[-last_n_frames:]))
# Post-processor on observations returned from gameplay
observation_broadcast = copy.deepcopy(observation_deprocessed)
observation_broadcast = broadcast_processor.broadcast_ready(observation_broadcast, n_frame, fps)
yield observation_broadcast
# Maintain fps of fps_maintain
processing_end_time = time.time()
processing_time = processing_end_time - start_time
sleep_time = (1/fps_maintain) - (processing_time)
if sleep_time < 0:
sleep_time = 0
time.sleep(sleep_time)
frame_end_time = time.time()
n_frames.append(frame_end_time-start_time)
start_time = time.time()
n_frame += 1
if done:
break
if __name__ == "__main__":
start()
|
test_remote.py
|
# -*- coding: utf-8 -*-
import logging
import os
import socket
import sys
import time
from copy import deepcopy
from multiprocessing import Process, Queue
import env
import pytest
import plumbum
from plumbum import (
NOHUP,
CommandNotFound,
ProcessExecutionError,
ProcessTimedOut,
RemotePath,
SshMachine,
local,
)
from plumbum._testtools import skip_on_windows, skip_without_chown
from plumbum.lib import six
from plumbum.machines.session import HostPublicKeyUnknown, IncorrectLogin
try:
import paramiko
except ImportError:
paramiko = None
else:
from plumbum.machines.paramiko_machine import ParamikoMachine
pytestmark = pytest.mark.ssh
def strassert(one, two):
assert str(one) == str(two)
# TEST_HOST = "192.168.1.143"
TEST_HOST = "127.0.0.1"
if TEST_HOST not in ("::1", "127.0.0.1", "localhost"):
plumbum.local.env.path.append("c:\\Program Files\\Git\\bin")
@pytest.fixture(scope="session")
def sshpass():
try:
return plumbum.local["sshpass"]
except CommandNotFound:
pytest.skip("Test requires sshpass")
@skip_on_windows
def test_connection():
SshMachine(TEST_HOST)
@pytest.mark.skipif(
    env.LINUX and env.PY[:2] == (3, 5), reason="Doesn't work on 3.5 on Linux on GHA"
)
def test_incorrect_login(sshpass):
with pytest.raises(IncorrectLogin):
SshMachine(
TEST_HOST,
password="swordfish",
ssh_opts=[
"-o",
"PubkeyAuthentication=no",
"-o",
"PreferredAuthentications=password",
],
)
@pytest.mark.xfail(env.LINUX, reason="TODO: no idea why this fails on linux")
def test_hostpubkey_unknown(sshpass):
with pytest.raises(HostPublicKeyUnknown):
SshMachine(
TEST_HOST,
password="swordfish",
ssh_opts=["-o", "UserKnownHostsFile=/dev/null", "-o", "UpdateHostKeys=no"],
)
@skip_on_windows
class TestRemotePath:
def _connect(self):
return SshMachine(TEST_HOST)
def test_name(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").name
assert isinstance(name, six.string_types)
assert "file.txt" == str(name)
def test_dirname(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").dirname
assert isinstance(name, RemotePath)
assert "/some/long/path/to" == str(name)
def test_uri(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
assert "ftp://" == p1.as_uri("ftp")[:6]
assert "ssh://" == p1.as_uri("ssh")[:6]
assert "/some/long/path/to/file.txt" == p1.as_uri()[-27:]
def test_stem(self):
p = RemotePath(self._connect(), "/some/long/path/to/file.txt")
assert p.stem == "file"
p = RemotePath(self._connect(), "/some/long/path/")
assert p.stem == "path"
def test_suffix(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
assert p1.suffix == ".txt"
assert p1.suffixes == [".txt"]
assert p2.suffix == ".gz"
assert p2.suffixes == [".tar", ".gz"]
strassert(
p1.with_suffix(".tar.gz"),
RemotePath(self._connect(), "/some/long/path/to/file.tar.gz"),
)
strassert(
p2.with_suffix(".other"), RemotePath(self._connect(), "file.tar.other")
)
strassert(
p2.with_suffix(".other", 2), RemotePath(self._connect(), "file.other")
)
strassert(
p2.with_suffix(".other", 0),
RemotePath(self._connect(), "file.tar.gz.other"),
)
strassert(
p2.with_suffix(".other", None), RemotePath(self._connect(), "file.other")
)
def test_newname(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
strassert(
p1.with_name("something.tar"),
RemotePath(self._connect(), "/some/long/path/to/something.tar"),
)
strassert(
p2.with_name("something.tar"), RemotePath(self._connect(), "something.tar")
)
@skip_without_chown
def test_chown(self):
with self._connect() as rem:
with rem.tempdir() as dir:
p = dir / "foo.txt"
p.write(six.b("hello"))
# because we're connected to localhost, we expect UID and GID to be the same
assert p.uid == os.getuid()
assert p.gid == os.getgid()
p.chown(p.uid.name)
assert p.uid == os.getuid()
def test_parent(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = p1.parent
assert str(p2) == "/some/long/path/to"
def test_mkdir(self):
# (identical to test_local.TestLocalPath.test_mkdir)
with self._connect() as rem:
with rem.tempdir() as tmp:
(tmp / "a").mkdir(exist_ok=False, parents=False)
assert (tmp / "a").exists()
assert (tmp / "a").is_dir()
(tmp / "a").mkdir(exist_ok=True, parents=False)
(tmp / "a").mkdir(exist_ok=True, parents=True)
with pytest.raises(OSError):
(tmp / "a").mkdir(exist_ok=False, parents=False)
with pytest.raises(OSError):
(tmp / "a").mkdir(exist_ok=False, parents=True)
(tmp / "b" / "bb").mkdir(exist_ok=False, parents=True)
assert (tmp / "b" / "bb").exists()
assert (tmp / "b" / "bb").is_dir()
assert not tmp.exists()
@pytest.mark.xfail(
reason="mkdir's mode argument is not yet implemented " "for remote paths",
strict=True,
)
def test_mkdir_mode(self):
# (identical to test_local.TestLocalPath.test_mkdir_mode)
with self._connect() as rem:
with rem.tempdir() as tmp:
# just verify that mode argument works the same way it does for
# Python's own os.mkdir, which takes into account the umask
# (different from shell mkdir mode argument!); umask on my
# system is 022 by default, so 033 is ok for testing this
try:
(tmp / "pb_333").mkdir(exist_ok=False, parents=False, mode=0o333)
rem.python(
"-c",
"import os; os.mkdir({}, 0o333)".format(
repr(str(tmp / "py_333"))
),
)
pb_final_mode = oct((tmp / "pb_333").stat().st_mode)
py_final_mode = oct((tmp / "py_333").stat().st_mode)
assert pb_final_mode == py_final_mode
finally:
# we have to revert this so the tempdir deletion works
if (tmp / "pb_333").exists():
(tmp / "pb_333").chmod(0o777)
if (tmp / "py_333").exists():
(tmp / "py_333").chmod(0o777)
assert not tmp.exists()
def test_copy(self):
"""
tests `RemotePath.copy` for the following scenarios:
* copying a simple file from `file_a` to `copy_of_a` succeeds
* copying file `file_a` into a directory `a_dir/copy_of_a` succeeds
* copying a directory `a_dir` over an existing directory path with
`override=False` fails
* copying a directory `a_dir` over an existing directory path with
`override=True` succeeds
"""
with self._connect() as rem:
with rem.tempdir() as tmp:
# setup a file and make sure it exists...
(tmp / "file_a").touch()
assert (tmp / "file_a").exists()
assert (tmp / "file_a").is_file()
# setup a directory for copying into...
(tmp / "a_dir").mkdir(exist_ok=False, parents=False)
assert (tmp / "a_dir").exists()
assert (tmp / "a_dir").is_dir()
# setup a 2nd directory for testing `override=False`
(tmp / "b_dir").mkdir(exist_ok=False, parents=False)
assert (tmp / "b_dir").exists()
assert (tmp / "b_dir").is_dir()
# copying a simple file
(tmp / "file_a").copy(tmp / "copy_of_a")
assert (tmp / "copy_of_a").exists()
assert (tmp / "copy_of_a").is_file()
# copying into a directory
(tmp / "file_a").copy(tmp / "a_dir/copy_of_a")
assert (tmp / "a_dir/copy_of_a").exists()
assert (tmp / "a_dir/copy_of_a").is_file()
# copying a directory on top of an existing directory using
# `override=False` (should fail with TypeError)
with pytest.raises(TypeError):
(tmp / "a_dir").copy(tmp / "b_dir", override=False)
# copying a directory on top of an existing directory using
# `override=True` (should copy transparently)
(tmp / "a_dir").copy(tmp / "b_dir", override=True)
assert "copy_of_a" in (tmp / "b_dir")
assert not tmp.exists()
class BaseRemoteMachineTest(object):
TUNNEL_PROG_AF_INET = r"""import sys, socket
s = socket.socket()
s.bind(("", 0))
s.listen(1)
sys.stdout.write("{0}\n".format(s.getsockname()[1]))
sys.stdout.flush()
s2, _ = s.accept()
data = s2.recv(100)
s2.send(b"hello " + data)
s2.close()
s.close()
"""
TUNNEL_PROG_AF_UNIX = r"""import sys, socket, tempfile
s = socket.socket(family=socket.AF_UNIX)
socket_location = tempfile.NamedTemporaryFile()
socket_location.close()
s.bind(socket_location.name)
s.listen(1)
sys.stdout.write("{0}\n".format(s.getsockname()))
sys.stdout.flush()
s2, _ = s.accept()
data = s2.recv(100)
s2.send(b"hello " + data)
s2.close()
s.close()
"""
def test_basic(self):
with self._connect() as rem:
r_ssh = rem["ssh"]
r_ls = rem["ls"]
r_grep = rem["grep"]
lines = r_ls("-a").splitlines()
assert ".bashrc" in lines or ".bash_profile" in lines
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
cmd = r_ssh[
"localhost", "cd", rem.cwd, "&&", r_ls, "|", r_grep["\\.py"]
]
assert "'|'" in str(cmd)
assert "test_remote.py" in cmd()
assert "test_remote.py" in [f.name for f in rem.cwd // "*.py"]
# Testing for #271
def test_double_chdir(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem["ls"]()
with rem.cwd("/tmp"):
rem["pwd"]()
def test_glob(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*.py", "*.bash")]
assert "test_remote.py" in filenames
assert "slow_process.bash" in filenames
def test_glob_spaces(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*space.txt")]
assert "file with space.txt" in filenames
filenames = [f.name for f in rem.cwd // ("*with space.txt")]
assert "file with space.txt" in filenames
def test_cmd(self):
with self._connect() as rem:
rem.cmd.ls("/tmp")
@pytest.mark.usefixtures("testdir")
def test_download_upload(self):
with self._connect() as rem:
rem.upload("test_remote.py", "/tmp")
r_ls = rem["ls"]
r_rm = rem["rm"]
assert "test_remote.py" in r_ls("/tmp").splitlines()
rem.download("/tmp/test_remote.py", "/tmp/test_download.txt")
r_rm("/tmp/test_remote.py")
r_rm("/tmp/test_download.txt")
def test_session(self):
with self._connect() as rem:
sh = rem.session()
for _ in range(4):
_, out, _ = sh.run("ls -a")
assert ".bashrc" in out or ".bash_profile" in out
def test_env(self):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError):
rem.python("-c", "import os;os.environ['FOOBAR72']")
with rem.env(FOOBAR72="lala"):
with rem.env(FOOBAR72="baba"):
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == "baba"
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == "lala"
# path manipulation
with pytest.raises(CommandNotFound):
rem.which("dummy-executable")
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem.env.path.insert(0, rem.cwd / "not-in-path")
p = rem.which("dummy-executable")
assert p == rem.cwd / "not-in-path" / "dummy-executable"
@pytest.mark.parametrize(
"env",
[
"lala",
"-Wl,-O2 -Wl,--sort-common",
"{{}}",
"''",
"!@%_-+=:",
"'",
"`",
"$",
"\\",
],
)
def test_env_special_characters(self, env):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError):
rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
rem.env["FOOBAR72"] = env
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == env
def test_read_write(self):
with self._connect() as rem:
with rem.tempdir() as dir:
assert dir.is_dir()
data = six.b("hello world")
(dir / "foo.txt").write(data)
assert (dir / "foo.txt").read() == data
assert not dir.exists()
def test_contains(self):
with self._connect() as rem:
assert "ls" in rem
def test_iter_lines_timeout(self):
with self._connect() as rem:
try:
for i, (out, err) in enumerate(
rem["ping"]["-i", 0.5, "127.0.0.1"].popen().iter_lines(timeout=4)
):
print("out:", out)
print("err:", err)
except NotImplementedError:
try:
pytest.skip(str(sys.exc_info()[1]))
except AttributeError:
return
except ProcessTimedOut:
assert i > 3
else:
pytest.fail("Expected a timeout")
def test_iter_lines_error(self):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError) as ex:
for i, lines in enumerate(rem["ls"]["--bla"].popen()):
pass
assert i == 1
assert "/bin/ls: " in ex.value.stderr
def test_touch(self):
with self._connect() as rem:
rfile = rem.cwd / "sillyfile"
assert not rfile.exists()
rfile.touch()
assert rfile.exists()
rfile.delete()
@skip_on_windows
class TestRemoteMachine(BaseRemoteMachineTest):
def _connect(self):
return SshMachine(TEST_HOST)
def test_tunnel(self):
for tunnel_prog in (self.TUNNEL_PROG_AF_INET, self.TUNNEL_PROG_AF_UNIX):
with self._connect() as rem:
p = (rem.python["-u"] << tunnel_prog).popen()
port_or_socket = p.stdout.readline().decode("ascii").strip()
try:
port_or_socket = int(port_or_socket)
dhost = "localhost"
except ValueError:
dhost = None
with rem.tunnel(12222, port_or_socket, dhost=dhost) as tun:
s = socket.socket()
s.connect(("localhost", 12222))
s.send(six.b("world"))
data = s.recv(100)
s.close()
print(p.communicate())
assert data == b"hello world"
def test_reverse_tunnel(self):
def serve_reverse_tunnel(queue):
s = socket.socket()
s.bind(("", 12222))
s.listen(1)
s2, _ = s.accept()
data = s2.recv(100).decode("ascii").strip()
queue.put(data)
s2.close()
s.close()
with self._connect() as rem:
get_unbound_socket_remote = """import sys, socket
s = socket.socket()
s.bind(("", 0))
s.listen(1)
sys.stdout.write(str(s.getsockname()[1]))
sys.stdout.flush()
s.close()
"""
p = (rem.python["-u"] << get_unbound_socket_remote).popen()
remote_socket = p.stdout.readline().decode("ascii").strip()
queue = Queue()
tunnel_server = Process(target=serve_reverse_tunnel, args=(queue,))
tunnel_server.start()
message = str(time.time_ns())
with rem.tunnel(12222, remote_socket, dhost="localhost", reverse=True):
remote_send_af_inet = """import sys, socket
s = socket.socket()
s.connect(("localhost", {}))
s.send("{}".encode("ascii"))
s.close()
""".format(remote_socket, message)
(rem.python["-u"] << remote_send_af_inet).popen()
tunnel_server.join()
assert queue.get() == message
def test_get(self):
with self._connect() as rem:
assert str(rem["ls"]) == str(rem.get("ls"))
assert str(rem["ls"]) == str(rem.get("not_a_valid_process_234", "ls"))
assert "ls" in rem
assert "not_a_valid_process_234" not in rem
def test_list_processes(self):
with self._connect() as rem:
assert list(rem.list_processes())
def test_pgrep(self):
with self._connect() as rem:
assert list(rem.pgrep("ssh"))
def test_nohup(self):
with self._connect() as rem:
sleep = rem["sleep"]
sleep["5.793817"] & NOHUP(stdout=None, append=False)
time.sleep(0.5)
print(rem["ps"]("aux"))
assert list(rem.pgrep("5.793817"))
time.sleep(6)
assert not list(rem.pgrep("5.793817"))
def test_bound_env(self):
with self._connect() as rem:
printenv = rem["printenv"]
with rem.env(FOO="hello"):
assert printenv.with_env(BAR="world")("FOO") == "hello\n"
assert printenv.with_env(BAR="world")("BAR") == "world\n"
assert printenv.with_env(FOO="sea", BAR="world")("FOO") == "sea\n"
assert printenv.with_env(FOO="sea", BAR="world")("BAR") == "world\n"
assert rem.cmd.pwd.with_cwd("/")() == "/\n"
assert rem.cmd.pwd["-L"].with_env(A="X").with_cwd("/")() == "/\n"
@pytest.mark.skipif(
"useradd" not in local, reason="System does not have useradd (Mac?)"
)
def test_sshpass(self):
with local.as_root():
local["useradd"]("-m", "-b", "/tmp", "testuser")
try:
with local.as_root():
try:
(local["passwd"] << "123456")("--stdin", "testuser")
except ProcessExecutionError:
# some versions of passwd don't support --stdin, nothing to do in this case
logging.warning("passwd failed")
return
with SshMachine("localhost", user="testuser", password="123456") as rem:
assert rem["pwd"]().strip() == "/tmp/testuser"
finally:
with local.as_root():
local["userdel"]("-r", "testuser")
@skip_on_windows
class TestParamikoMachine(BaseRemoteMachineTest):
def _connect(self):
if paramiko is None:
pytest.skip("System does not have paramiko installed")
return ParamikoMachine(TEST_HOST, missing_host_policy=paramiko.AutoAddPolicy())
def test_tunnel(self):
with self._connect() as rem:
p = rem.python["-c", self.TUNNEL_PROG_AF_INET].popen()
try:
port = int(p.stdout.readline().strip())
except ValueError:
print(p.communicate())
raise
s = rem.connect_sock(port)
s.send(b"world")
data = s.recv(100)
s.close()
print(p.communicate())
assert data == b"hello world"
def test_piping(self):
with self._connect() as rem:
try:
cmd = rem["ls"] | rem["cat"]
except NotImplementedError:
pass
else:
pytest.fail("Should not pipe")
    @pytest.mark.xfail(reason="Not working yet")
def test_encoding(self):
with self._connect() as rem:
unicode_half = b"\xc2\xbd".decode("utf8")
ret = rem["bash"]("-c", 'echo -e "\xC2\xBD"')
assert ret == "%s\n" % unicode_half
ret = list(rem["bash"]["-c", 'echo -e "\xC2\xBD"'].popen())
assert ret == [["%s\n" % unicode_half, None]]
def test_path_open_remote_write_local_read(self):
with self._connect() as rem:
# TODO: once Python 2.6 support is dropped, the nested
# with-statements below can be combined using "with x as a, y as b"
with rem.tempdir() as remote_tmpdir:
with local.tempdir() as tmpdir:
assert remote_tmpdir.is_dir()
assert tmpdir.is_dir()
data = six.b("hello world")
with (remote_tmpdir / "bar.txt").open("wb") as f:
f.write(data)
rem.download((remote_tmpdir / "bar.txt"), (tmpdir / "bar.txt"))
assert (tmpdir / "bar.txt").open("rb").read() == data
assert not remote_tmpdir.exists()
assert not tmpdir.exists()
def test_path_open_local_write_remote_read(self):
with self._connect() as rem:
# TODO: cf. note on Python 2.6 support above
with rem.tempdir() as remote_tmpdir:
with local.tempdir() as tmpdir:
assert remote_tmpdir.is_dir()
assert tmpdir.is_dir()
data = six.b("hello world")
with (tmpdir / "bar.txt").open("wb") as f:
f.write(data)
rem.upload((tmpdir / "bar.txt"), (remote_tmpdir / "bar.txt"))
assert (remote_tmpdir / "bar.txt").open("rb").read() == data
assert not remote_tmpdir.exists()
assert not tmpdir.exists()
|
opencachehttp.py
|
#!/usr/bin/env python2.7
"""opencachehttp.py: HTTP Server - serves cached HTTP objects to requesting clients."""
import BaseHTTPServer
import collections
import hashlib
import httplib
import os
import signal
import SocketServer
import sys
import threading
import opencache.lib.opencachelib as lib
import opencache.node.state.opencachemongodb as database
import zmq
TAG = 'server'
class Server:
_server_path = None
_server = None
_port = None
_ipc_socket = None
_context = None
_node = None
_expr = None
_load = 0
_load_data = None
def __init__(self, node, expr, port):
"""Initialise server instance.
Creates new connection manager. Creates new HTTP server. Passes objects to the server to facilitate
callbacks. Sets server status to 'start'. Runs server until terminated.
"""
self._setup_signal_handling()
self._database = node.database
self._node = node
self._expr = expr
self._port = port
self._load_data = collections.deque(maxlen=int(self._node.config["stat_refresh"]))
self._set_path(expr)
lib.create_directory(self._server_path)
self._server = self.ThreadedHTTPServer(('', self._port), self.HandlerClass)
#self._server = self.ThreadedHTTPServer((self._node.config["node_host"], self._port), self.HandlerClass)
self._server._setup_signal_handling()
self._server._server = self
self._server._node = self._node
self._server._expr = self._expr
self._server._server_path = self._server_path
threading.Thread(target=self._conn_manager, args=(expr, )).start()
threading.Thread(target=self._load_monitor, args=()).start()
threading.Thread(target=self._stat_reporter, args=()).start()
self._start()
self._server.serve_forever()
def _setup_signal_handling(self):
"""Setup signal handling for SIGQUIT and SIGINT events"""
signal.signal(signal.SIGINT, self._exit_server)
signal.signal(signal.SIGQUIT, self._exit_server)
def _exit_server(self, signal, frame):
raise SystemExit
def _conn_manager(self, expr):
"""Manage inter-process communication (IPC) connections.
Receives messages from the OpenCache node process, instructing it call start/stop/pause/stat methods.
"""
self._context = zmq.Context()
self._ipc_socket = self._context.socket(zmq.SUB)
self._ipc_socket.connect("ipc://oc")
self._ipc_socket.setsockopt_string(zmq.SUBSCRIBE, expr.decode('ascii'))
while True:
string = self._ipc_socket.recv_string()
expr, call, path, transaction = string.split()
if transaction == '?' or path == '?':
getattr(self, "_" + str(call))()
else:
getattr(self, "_" + str(call))(path, transaction)
def _send_message_to_controller(self, message):
"""Send given message to controller notification port."""
context = zmq.Context()
ipc_socket = context.socket(zmq.PUSH)
ipc_socket.connect("tcp://" + self._node.config["controller_host"] + ":" + self._node.config["notification_port"])
ipc_socket.send_json(message)
def _stat_reporter(self):
"""Report statistics back to the controller periodcially."""
threading.Timer(interval=int(self._node.config["stat_refresh"]), function=self._stat_reporter, args=()).start()
self._stat()
def _load_monitor(self):
"""Monitor the request load every second. Send alert to controller if it exceeds a configured amount."""
threading.Timer(interval=int(1), function=self._load_monitor, args=()).start()
self._current_load = self._server._load
self._load_data.append(self._current_load)
self._server._load = 0
if int(self._current_load) > int(self._node.config["alert_load"]):
self._send_message_to_controller(self._get_alert('load', self._current_load))
def _get_average_load(self):
"""Calculate load average over given time."""
average = 0
for data_point in self._load_data:
average += int(data_point)
average = average/int(self._node.config["stat_refresh"])
return average
def _get_alert(self, alert_type, value):
"""Get message body for an alert notification to the controller."""
alert = dict()
alert['method'] = 'alert'
alert['id'] = None
alert['params'] = dict()
alert['params']['expr'] = self._server._expr
alert['params']['node_id'] = self._node.node_id
alert['params']['type'] = alert_type
alert['params']['value'] = value
return alert
def _start(self):
"""Start the HTTP server.
Create directory for cached content to be stored and allow HTTP server to start receiving requests.
Set status to indicate new state.
"""
self._server._stop = False
self._server._status = 'start'
self._send_message_to_controller(self._get_redirect('add'))
def _stop(self):
"""Stop the HTTP server.
Stop HTTP server from receiving requests and remove directory used to store cached content.
Set status to indicate new state.
"""
self._send_message_to_controller(self._get_redirect('remove'))
self._server._stop = True
self._database.remove({'expr' : self._expr})
lib.delete_directory(self._server_path)
self._server._status = 'stop'
self._stat()
def _pause(self):
"""Pause the HTTP server.
Pause HTTP server, temporarily preventing the receipt of requests. Set status to indicate new state.
"""
self._send_message_to_controller(self._get_redirect('remove'))
self._server._stop = True
self._server._status = 'pause'
def _get_redirect(self, action):
"""Get message body for a redirect notification to the controller."""
redirect = dict()
redirect['method'] = 'redir'
redirect['id'] = None
redirect['params'] = dict()
redirect['params']['expr'] = self._server._expr
redirect['params']['node_id'] = self._node.node_id
redirect['params']['host'] = self._node.config['node_host']
redirect['params']['port'] = self._port
redirect['params']['action'] = action
return redirect
def _stat(self):
"""Retrieve statistics for this HTTP server and send them to the controller."""
self._send_message_to_controller(self._get_stats())
def _get_stats(self):
"""Get message body for a statistics notification to the controller.
The statistics returned to the controller include:
status -- current status of the HTTP server
expr -- the OpenCache expression to which this node is serving
node_id -- the ID number given to the node by the OpenCache controller
cache_miss -- number of cache miss (content not found in cache) events (one per request)
cache_miss_size -- number of bytes served whilst handling cache miss (content not found in cache) events
cache_hit -- number of cache hit (content already found in cache) events (one per request)
cache_hit_size -- number of bytes served whilst handling cache hit (content already found in cache) events
cache_object -- number of objects currently stored by the cache
cache_object_size -- size of cached objects on disk (actual, in bytes)
"""
statistics = dict()
statistics['method'] = 'stat'
statistics['id'] = None
statistics['params'] = dict()
statistics['params']['status'] = self._server._status
statistics['params']['avg_load'] = self._get_average_load()
statistics['params']['expr'] = self._server._expr
statistics['params']['node_id'] = self._node.node_id
statistics['params']['cache_miss'] = self._server._cache_miss
statistics['params']['cache_miss_size'] = self._server._cache_miss_size
statistics['params']['cache_hit'] = self._server._cache_hit
statistics['params']['cache_hit_size'] = self._server._cache_hit_size
statistics['params']['cache_object'] = len(self._database.lookup({}))
dir_size = get_dir_size(self._server_path)
statistics['params']['cache_object_size'] = dir_size
return statistics
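    # Example (editor addition): a statistics message built above might look roughly
    # like the following (all values are illustrative):
    #
    #     {'method': 'stat', 'id': None,
    #      'params': {'status': 'start', 'avg_load': 3, 'expr': 'example.com',
    #                 'node_id': 1, 'cache_miss': 10, 'cache_miss_size': 52480,
    #                 'cache_hit': 40, 'cache_hit_size': 209920,
    #                 'cache_object': 12, 'cache_object_size': 734003}}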
def _set_path(self, expr):
"""Set the path used to store cached content specific to this HTTP server's expression."""
self._server_path = self._node.config["cache_path"] + hashlib.sha224(expr).hexdigest()
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Create a threaded HTTP server."""
allow_reuse_address = True
daemon_threads = True
_stop = True
_status = 'start'
_cache_hit_size = 0
_cache_miss_size = 0
_cache_hit = 0
_cache_miss = 0
_load = 0
_status = None
_node = None
_server_path = None
_expr = None
_server = None
def _setup_signal_handling(self):
"""Setup signal handling for SIGQUIT and SIGINT events"""
signal.signal(signal.SIGINT, self._exit_server)
signal.signal(signal.SIGQUIT, self._exit_server)
def _exit_server(self, signal, frame):
raise SystemExit
def serve_forever (self):
"""Overide default behaviour to handle one request at a time until state is changed.
Serve content as long as the HTTP server is a 'start' state. When 'paused' or 'stopped',
requests will not be handled.
"""
while True:
if self._stop != True:
self.handle_request()
self._load += 1
class HandlerClass(BaseHTTPServer.BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def log_message( self, format, *args ):
"""Ignore log messages."""
pass
def do_GET(self):
"""Handle incoming GET messages from clients.
Calculate hash value for content request. Check to see if this has already been cached.
If it has, a cache hit occurs. If the content is not present on the disk or has not
been cached previously, a cache miss occurs.
"""
key = hashlib.sha224(self.path).hexdigest()
if len(self.server._server._database.lookup({'key' : key})) == 1:
try:
self._cache_hit(key)
except (IOError, OSError) as e:
self.server._node.print_warn(TAG, ('Could not retrieve content from filesystem, cache miss\'ing instead: %s' % e))
self._cache_miss(key)
else:
self._cache_miss(key)
def do_POST(self):
"""Ignore POST messages."""
pass
def _cache_hit(self, key):
"""The content has been seen before, and should be sent to the client using the cached copy.
Statistics updated accordingly.
"""
path = self.server._server._database.lookup({'key' : key})[0]['path']
try:
self.server._node.print_debug(TAG, 'cache hit: %s%s' %(self.server._expr, self.path))
f = open(path, 'r')
local_object = f.read()
self._send_object(local_object)
f.close()
self.server._cache_hit += 1
self.server._cache_hit_size += sys.getsizeof(local_object)
except IOError:
raise
def _cache_miss(self, key):
"""The content has not been seen before, and needs to be retrieved before it can be
sent to the client.
Once the content has been delivered, the object can be stored on disk to serve future
cache requests. Statistics updated accordingly.
"""
self.server._node.print_debug(TAG, 'cache miss: %s%s' %(self.server._expr, self.path))
remote_object = self._fetch_and_send_object(self.server._expr)
if self._disk_check():
lookup = self.server._server._database.lookup({'key' : key})
if len(lookup) == 1:
object_path = lookup[0]['path']
else:
object_path = self.server._server_path + "/" + key
try:
f = open(object_path, 'w')
f.write(remote_object)
f.close()
self.server._server._database.create({'expr' : self.server._expr, 'key' : key, 'path' : object_path})
except (IOError, OSError) as e:
self.server._node.print_warn(TAG, ('Could not save content to filesystem: %s' % e))
else:
self.server._node.print_info(TAG, 'Cache instance has reached maximum disk usage and cannot store object: %s%s' %(self.server._expr, self.path))
self.server._cache_miss += 1
self.server._cache_miss_size += sys.getsizeof(remote_object)
def _disk_check(self):
"""Check if it possible to write a given object to disk.
If the current directory size is greater than the 'alert_disk' configuration setting, send an alert to the controller.
"""
dir_size = get_dir_size(self.server._server_path)
if int(dir_size) > int(self.server._node.config["alert_disk"]):
self.server._server._send_message_to_controller(self.server._server._get_alert('disk', dir_size))
if int(dir_size) > int(self.server._node.config["max_disk"]):
return False
return True
def _fetch_and_send_object(self, url):
"""Fetch the object from the original external location and deliver this to the client. """
connection = httplib.HTTPConnection(url)
connection.request("GET", self.path)
response = connection.getresponse()
length = int(response.getheader('content-length'))
self.send_response(200)
            self.send_header('Content-type', 'text/html')
self.send_header('Content-length', length)
self.end_headers()
total_payload = ""
bytes_read = 0
while True:
try:
read_payload = response.read(1448)
except Exception as e:
self.server._node.print_error(TAG, 'Could not retrieve content from origin server: %s', e)
break
try:
self.wfile.write(read_payload)
except Exception as e:
self.server._node.print_error(TAG, 'Could not deliver fetched content to client: %s', e)
break
total_payload += read_payload
bytes_read += 1448
if bytes_read > length:
break
connection.close()
self.server._node.print_debug(TAG, 'cache fetched: %s%s at approx. %s bytes' %(url, self.path, bytes_read))
return total_payload
def _send_object(self, data):
"""Deliver the cached object to the client"""
self.send_response(200)
            self.send_header('Content-type', 'text/html')
self.send_header('Content-length', len(data))
self.end_headers()
try:
self.wfile.write(data)
except Exception as e:
self.server._node.print_error(TAG, 'Could not deliver cached content to client: %s', e)
return
def get_dir_size(path):
"""Get size of files (actual, in bytes) for given path"""
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
|
eventrecorder.py
|
'''The MIT License (MIT)
Copyright (c) 2021, Demetrius Almada
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
from collections import deque
from queue import Queue
from threading import Thread
import numpy as np
import cv2
import time
class EventRecorder:
def __init__(self, buffer_size=64, timeout=1.0):
self.buffer_size = buffer_size
self.timeout = timeout
self.frame_buffer = deque(maxlen=buffer_size)
self.frame_queue = None
self.is_recording = None
self.thread = None
self.writer = None
self.frames_without_motion = 0
self.consecutive_frames = 0
self.frames_since_screenshot = np.inf
def start(self, output_path, video_codec, fps):
self.is_recording = True
self.frame_queue = Queue()
(height, width, _) = self.frame_buffer[0].shape
self.writer = cv2.VideoWriter(
output_path,
video_codec,
fps,
            (width, height)  # cv2.VideoWriter expects the frame size as (width, height)
)
for i in range(len(self.frame_buffer), 0, -1):
self.frame_queue.put(self.frame_buffer[i - 1])
self.thread = Thread(target=self.record_video, args=())
self.thread.daemon = True
self.thread.start()
def update(self, frame):
        '''
        Add the newest frame to the rolling pre-event buffer and, if a recording
        is in progress, also queue it for the writer thread.
        '''
self.frame_buffer.appendleft(frame)
if self.is_recording:
self.frame_queue.put(frame)
self.consecutive_frames += 1
def record_video(self):
while True:
if not self.is_recording:
return
if not self.frame_queue.empty():
frame = self.frame_queue.get()
self.writer.write(frame)
else:
time.sleep(self.timeout)
def take_screenshot(self, image, screenshot_path, delay=30):
if self.frames_since_screenshot >= delay:
cv2.imwrite(screenshot_path, image)
self.frames_since_screenshot = 0
self.frames_since_screenshot += 1
def stop(self):
self.is_recording = False
self.consecutive_frames = 0
self.thread.join()
while not self.frame_queue.empty():
frame = self.frame_queue.get()
self.writer.write(frame)
self.writer.release()
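# --- Usage sketch (editor addition, not part of the original module) ---
# A minimal example of driving EventRecorder from an OpenCV capture loop. The
# camera index, codec, fps, output path, and the "start immediately" trigger are
# illustrative assumptions; a real caller would start() only when motion is detected.
if __name__ == "__main__":
    recorder = EventRecorder(buffer_size=64, timeout=1.0)
    capture = cv2.VideoCapture(0)  # assumed: default webcam
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    try:
        for _ in range(300):  # roughly 10 seconds at ~30 fps
            grabbed, frame = capture.read()
            if not grabbed:
                break
            recorder.update(frame)  # always keep the rolling pre-event buffer fresh
            if not recorder.is_recording:
                recorder.start("event.avi", fourcc, 30)
    finally:
        if recorder.is_recording:
            recorder.stop()
        capture.release()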
|
fuse.py
|
from __future__ import print_function
import os
import stat
import threading
import time
from errno import EIO, ENOENT
from fuse import FUSE, FuseOSError, Operations
class FUSEr(Operations):
def __init__(self, fs, path):
self.fs = fs
self.cache = {}
self.root = path.rstrip("/") + "/"
self.counter = 0
def getattr(self, path, fh=None):
path = "".join([self.root, path.lstrip("/")]).rstrip("/")
try:
info = self.fs.info(path)
except FileNotFoundError:
raise FuseOSError(ENOENT)
data = {"st_uid": 1000, "st_gid": 1000}
perm = 0o777
if info["type"] != "file":
data["st_mode"] = stat.S_IFDIR | perm
data["st_size"] = 0
data["st_blksize"] = 0
else:
data["st_mode"] = stat.S_IFREG | perm
data["st_size"] = info["size"]
data["st_blksize"] = 5 * 2 ** 20
data["st_nlink"] = 1
data["st_atime"] = time.time()
data["st_ctime"] = time.time()
data["st_mtime"] = time.time()
return data
def readdir(self, path, fh):
path = "".join([self.root, path.lstrip("/")])
files = self.fs.ls(path, False)
files = [os.path.basename(f.rstrip("/")) for f in files]
return [".", ".."] + files
def mkdir(self, path, mode):
path = "".join([self.root, path.lstrip("/")])
self.fs.mkdir(path)
return 0
def rmdir(self, path):
path = "".join([self.root, path.lstrip("/")])
self.fs.rmdir(path)
return 0
def read(self, path, size, offset, fh):
f = self.cache[fh]
f.seek(offset)
out = f.read(size)
return out
def write(self, path, data, offset, fh):
f = self.cache[fh]
f.write(data)
return len(data)
def create(self, path, flags, fi=None):
fn = "".join([self.root, path.lstrip("/")])
f = self.fs.open(fn, "wb")
self.cache[self.counter] = f
self.counter += 1
return self.counter - 1
def open(self, path, flags):
fn = "".join([self.root, path.lstrip("/")])
if flags % 2 == 0:
# read
mode = "rb"
else:
# write/create
mode = "wb"
self.cache[self.counter] = self.fs.open(fn, mode)
self.counter += 1
return self.counter - 1
def truncate(self, path, length, fh=None):
fn = "".join([self.root, path.lstrip("/")])
if length != 0:
raise NotImplementedError
# maybe should be no-op since open with write sets size to zero anyway
self.fs.touch(fn)
def unlink(self, path):
fn = "".join([self.root, path.lstrip("/")])
try:
self.fs.rm(fn, False)
except (IOError, FileNotFoundError):
raise FuseOSError(EIO)
def release(self, path, fh):
try:
if fh in self.cache:
f = self.cache[fh]
f.close()
self.cache.pop(fh)
except Exception as e:
print(e)
return 0
def chmod(self, path, mode):
raise NotImplementedError
def run(fs, path, mount_point, foreground=True, threads=False):
"""Mount stuff in a local directory
This uses fusepy to make it appear as if a given path on an fsspec
instance is in fact resident within the local file-system.
This requires that fusepy by installed, and that FUSE be available on
the system (typically requiring a package to be installed with
apt, yum, brew, etc.).
Parameters
----------
fs: file-system instance
From one of the compatible implementations
path: str
Location on that file-system to regard as the root directory to
mount. Note that you typically should include the terminating "/"
character.
mount_point: str
An empty directory on the local file-system where the contents of
the remote path will appear
foreground: bool
Whether or not calling this function will block. Operation will
typically be more stable if True.
threads: bool
Whether or not to create threads when responding to file operations
        within the mounted directory. Operation will typically be more
stable if False.
"""
func = lambda: FUSE(
FUSEr(fs, path), mount_point, nothreads=not threads, foreground=foreground
)
if foreground is False:
th = threading.Thread(target=func)
th.daemon = True
th.start()
return th
else: # pragma: no cover
try:
func()
except KeyboardInterrupt:
pass
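if __name__ == "__main__":  # pragma: no cover
    # Usage sketch (editor addition): mount an in-memory fsspec filesystem at a
    # local directory. Requires fusepy plus a working FUSE installation; the
    # mount point below is an assumption and must be an existing, empty directory.
    import fsspec

    mem_fs = fsspec.filesystem("memory")
    mem_fs.pipe("/hello.txt", b"hello world")  # give the mount something to show
    run(mem_fs, "/", "/tmp/fsspec-mount", foreground=True)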
|
Analysis.py
|
"""
This module contains the ``analysis`` class.
It includes common classes for file management and messaging and all
calls to AEDT modules like the modeler, mesh, postprocessing, and setup.
"""
from __future__ import absolute_import # noreorder
import os
import shutil
import threading
import warnings
from collections import OrderedDict
from pyaedt.application.Design import Design
from pyaedt.application.JobManager import update_hpc_option
from pyaedt.generic.constants import AXIS
from pyaedt.generic.constants import CoordinateSystemAxis
from pyaedt.generic.constants import CoordinateSystemPlane
from pyaedt.generic.constants import GRAVITY
from pyaedt.generic.constants import GravityDirection
from pyaedt.generic.constants import PLANE
from pyaedt.generic.constants import Plane
from pyaedt.generic.constants import SETUPS
from pyaedt.generic.constants import SOLUTIONS
from pyaedt.generic.constants import VIEW
from pyaedt.generic.general_methods import filter_tuple
from pyaedt.generic.general_methods import generate_unique_name
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modules.Boundary import NativeComponentObject
from pyaedt.modules.DesignXPloration import OptimizationSetups
from pyaedt.modules.DesignXPloration import ParametricSetups
from pyaedt.modules.MaterialLib import Materials
from pyaedt.modules.SolveSetup import Setup
class Analysis(Design, object):
"""Contains all common analysis functions.
    This class is inherited in the caller application and is accessible through it (e.g. ``hfss.method_name``).
It is automatically initialized by a call from an application, such as HFSS or Q3D.
See the application function for its parameter descriptions.
Parameters
----------
application : str
Application that is to initialize the call.
projectname : str
Name of the project to select or the full path to the project
or AEDTZ archive to open.
designname : str
Name of the design to select.
solution_type : str
Solution type to apply to the design.
setup_name : str
Name of the setup to use as the nominal.
specified_version : str
Version of AEDT to use.
    non_graphical : bool
Whether to run AEDT in the non-graphical mode.
new_desktop_session : bool
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine.
close_on_exit : bool
Whether to release AEDT on exit.
student_version : bool
Whether to enable the student version of AEDT.
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
):
self.setups = []
Design.__init__(
self,
application,
projectname,
designname,
solution_type,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
)
self.logger.info("Design Loaded")
self._setup = None
if setup_name:
self.analysis_setup = setup_name
self.solution_type = solution_type
self._materials = Materials(self)
self.logger.info("Materials Loaded")
self._available_variations = self.AvailableVariations(self)
if "HFSS 3D Layout Design" in self.design_type:
self._oanalysis = self._odesign.GetModule("SolveSetups")
elif "EMIT" in self.design_type or "Maxwell Circuit" in self.design_type:
self._oanalysis = None
elif "Circuit Design" in self.design_type or "Twin Builder" in self.design_type:
self._oanalysis = self._odesign.GetModule("SimSetup")
else:
self._oanalysis = self._odesign.GetModule("AnalysisSetup")
if self.design_type != "Maxwell Circuit":
self._ooptimetrics = self._odesign.GetModule("Optimetrics")
self._ooutput_variable = self._odesign.GetModule("OutputVariable")
self.setups = [self.get_setup(setup_name) for setup_name in self.setup_names]
self.parametrics = ParametricSetups(self)
self.optimizations = OptimizationSetups(self)
self.native_components = self._get_native_data()
self.SOLUTIONS = SOLUTIONS()
self.SETUPS = SETUPS()
self.AXIS = AXIS()
self.PLANE = PLANE()
self.VIEW = VIEW()
self.GRAVITY = GRAVITY()
@property
def ooptimetrics(self):
"""AEDT Optimetrics Module.
References
----------
>>> oDesign.GetModule("Optimetrics")
"""
return self._ooptimetrics
@property
def ooutput_variable(self):
"""AEDT Output Variable Module.
References
----------
>>> oDesign.GetModule("OutputVariable")
"""
return self._ooutput_variable
@property
def oanalysis(self):
"""Analysis AEDT Module.
References
----------
>>> oDesign.GetModule("SolveSetups")
>>> oDesign.GetModule("SimSetup")
>>> oDesign.GetModule("AnalysisSetup")
"""
return self._oanalysis
@property
def output_variables(self):
"""List of output variables.
Returns
-------
list of str
References
----------
>>> oModule.GetOutputVariables()
"""
return self.ooutput_variable.GetOutputVariables()
@property
def materials(self):
"""Materials in the project.
Returns
-------
:class:`pyaedt.modules.MaterialLib.Materials`
Materials in the project.
"""
return self._materials
@property
def Position(self):
"""Position of the object.
Returns
-------
type
Position object.
"""
return self.modeler.Position
@property
def available_variations(self):
"""Available variation object.
Returns
-------
:class:`pyaedt.application.Analysis.Analysis.AvailableVariations`
Available variation object.
"""
return self._available_variations
@property
def CoordinateSystemAxis(self):
"""Coordinate system axis constant.
.. deprecated:: 0.4.8
Use :attr:`AXIS` instead.
Returns
-------
:class:`pyaedt.modeler.constants.AXIS`
Coordinate system axis constants tuple (.X, .Y, .Z).
"""
return CoordinateSystemAxis()
@property
def CoordinateSystemPlane(self):
"""Coordinate system plane constants.
.. deprecated:: 0.4.8
Use :attr:`PLANE` instead.
Returns
-------
:class:`pyaedt.modeler.constants.PLANE`
Coordinate system plane constants tuple (.XY, .YZ, .XZ).
"""
return CoordinateSystemPlane()
@property
def View(self):
"""Planes.
.. deprecated:: 0.4.8
Use :attr:`VIEW` instead.
Returns
-------
:class:`pyaedt.modeler.constants.PLANE`
Coordinate system plane string tuple ("XY", "YZ", "XZ").
"""
return Plane()
@property
def GravityDirection(self):
"""Gravity direction.
.. deprecated:: 0.4.8
Use :attr:`GRAVITY` instead.
Returns
-------
tuple
Gravity direction tuple (XNeg, YNeg, ZNeg, XPos, YPos, ZPos).
"""
return GravityDirection()
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modeler.Modeler.Modeler`
Modeler object.
"""
return self._modeler
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.Mesh.Mesh`
Mesh object.
"""
return self._mesh
@property
def post(self):
"""PostProcessor.
Returns
-------
:class:`pyaedt.modules.AdvancedPostProcessing.PostProcessor`
PostProcessor object.
"""
return self._post
@property
def analysis_setup(self):
"""Analysis setup.
Returns
-------
str
Name of the active or first analysis setup.
References
----------
>>> oModule.GetAllSolutionSetups()
"""
if self._setup:
return self._setup
elif self.existing_analysis_setups:
return self.existing_analysis_setups[0]
else:
self._setup = None
return self._setup
@analysis_setup.setter
def analysis_setup(self, setup_name):
setup_list = self.existing_analysis_setups
if setup_list:
assert setup_name in setup_list, "Invalid setup name {}".format(setup_name)
self._setup = setup_name
else:
self._setup = setup_list[0]
@property
def existing_analysis_sweeps(self):
"""Existing analysis sweeps.
Returns
-------
list of str
List of all analysis sweeps in the design.
References
----------
        >>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
setup_list = self.existing_analysis_setups
sweep_list = []
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweep_list = self.oanalysis.GetAllSolutionNames()
sweep_list = [i for i in sweep_list if "Adaptive Pass" not in i]
sweep_list.reverse()
else:
for el in setup_list:
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
                    sweeps = self.oanalysis.GetAllSolutionNames()
else:
setuptype = self.design_solutions.default_adaptive
if setuptype:
sweep_list.append(el + " : " + setuptype)
else:
sweep_list.append(el)
try:
sweeps = list(self.oanalysis.GetSweeps(el))
except:
sweeps = []
for sw in sweeps:
if el + " : " + sw not in sweep_list:
sweep_list.append(el + " : " + sw)
return sweep_list
@property
def nominal_adaptive(self):
"""Nominal adaptive sweep.
Returns
-------
str
Name of the nominal adaptive sweep.
References
----------
        >>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
if len(self.existing_analysis_sweeps) > 0:
return self.existing_analysis_sweeps[0]
else:
return ""
@property
def nominal_sweep(self):
"""Nominal sweep.
Returns
-------
str
Name of the last adaptive sweep if a sweep is available or
the name of the nominal adaptive sweep if present.
References
----------
        >>> oModule.GetAllSolutionNames
>>> oModule.GetSweeps
"""
if len(self.existing_analysis_sweeps) > 1:
return self.existing_analysis_sweeps[1]
else:
return self.nominal_adaptive
@property
def existing_analysis_setups(self):
"""Existing analysis setups.
Returns
-------
list of str
List of all analysis setups in the design.
References
----------
>>> oModule.GetSetups
"""
setups = list(self.oanalysis.GetSetups())
return setups
@property
def setup_names(self):
"""Setup names.
Returns
-------
list of str
List of names of all analysis setups in the design.
References
----------
>>> oModule.GetSetups
"""
return self.oanalysis.GetSetups()
@property
def SimulationSetupTypes(self):
"""Simulation setup types.
Returns
-------
SETUPS
List of all simulation setup types categorized by application.
"""
return SETUPS()
@property
def SolutionTypes(self):
"""Solution types.
Returns
-------
SOLUTIONS
List of all solution type categorized by application.
"""
return SOLUTIONS()
@property
def excitations(self):
"""Get all excitation names.
Returns
-------
list
List of excitation names. Excitations with multiple modes will return one
excitation for each mode.
References
----------
>>> oModule.GetExcitations
"""
try:
list_names = list(self.oboundary.GetExcitations())
del list_names[1::2]
return list_names
except:
return []
@pyaedt_function_handler()
def get_excitations_name(self):
"""Get all excitation names.
.. deprecated:: 0.4.27
Use :func:`excitations` property instead.
Returns
-------
list
List of excitation names. Excitations with multiple modes will return one
excitation for each mode.
References
----------
>>> oModule.GetExcitations
"""
warnings.warn("`get_excitations_name` is deprecated. Use `excitations` property instead.", DeprecationWarning)
return self.excitations
@pyaedt_function_handler()
def get_traces_for_plot(
self,
get_self_terms=True,
get_mutual_terms=True,
first_element_filter=None,
second_element_filter=None,
category="dB(S",
):
"""Retrieve a list of traces of specified designs ready to use in plot reports.
Parameters
----------
get_self_terms : bool, optional
Whether to return self terms. The default is ``True``.
get_mutual_terms : bool, optional
Whether to return mutual terms. The default is ``True``.
first_element_filter : str, optional
Filter to apply to the first element of the equation. This parameter accepts ``*``
and ``?`` as special characters. The default is ``None``.
second_element_filter : str, optional
Filter to apply to the second element of the equation. This parameter accepts ``*``
and ``?`` as special characters. The default is ``None``.
category : str
            Plot category name as in the report (including the operator). The default is
            ``"dB(S"``, which produces S-parameter traces in dB.
Returns
-------
list
List of traces of specified designs ready to use in plot reports.
Examples
--------
>>> from pyaedt import Q3d
        >>> q3d = Q3d(project_path)
        >>> q3d.get_traces_for_plot(first_element_filter="Bo?1",
        ...                         second_element_filter="GND*", category="dB(S")
"""
if not first_element_filter:
first_element_filter = "*"
if not second_element_filter:
second_element_filter = "*"
list_output = []
end_str = ")" * (category.count("(") + 1)
if get_self_terms:
for el in self.excitations:
value = "{}({},{}{}".format(category, el, el, end_str)
if filter_tuple(value, first_element_filter, second_element_filter):
list_output.append(value)
if get_mutual_terms:
for el1 in self.excitations:
for el2 in self.excitations:
if el1 != el2:
value = "{}({},{}{}".format(category, el1, el2, end_str)
if filter_tuple(value, first_element_filter, second_element_filter):
list_output.append(value)
return list_output
@pyaedt_function_handler()
def analyze_all(self):
"""Analyze all setups in a design.
Returns
-------
bool
``True`` when simulation is finished.
"""
self.odesign.AnalyzeAll()
return True
@pyaedt_function_handler()
def list_of_variations(self, setup_name=None, sweep_name=None):
"""Retrieve a list of active variations for input setup.
Parameters
----------
setup_name : str, optional
Setup name. The default is ``None``, in which case the nominal adaptive
is used.
sweep_name : str, optional
            Sweep name. The default is ``None``, in which case the nominal adaptive
is used.
Returns
-------
list
List of active variations for input setup.
References
----------
>>> oModule.ListVariations
"""
if not setup_name and ":" in self.nominal_sweep:
setup_name = self.nominal_adaptive.split(":")[0].strip()
elif not setup_name:
self.logger.warning("No Setup defined.")
return False
if not sweep_name and ":" in self.nominal_sweep:
sweep_name = self.nominal_adaptive.split(":")[1].strip()
elif not sweep_name:
self.logger.warning("No Sweep defined.")
return False
if (
self.solution_type == "HFSS3DLayout"
or self.solution_type == "HFSS 3D Layout Design"
or self.design_type == "2D Extractor"
):
try:
return list(self.osolution.ListVariations("{0} : {1}".format(setup_name, sweep_name)))
except:
return [""]
else:
try:
return list(self.odesign.ListVariations("{0} : {1}".format(setup_name, sweep_name)))
except:
return [""]
@pyaedt_function_handler()
def export_results(self, analyze=False, export_folder=None):
"""Export all available reports to a file, including sNp, profile, and convergence.
Parameters
----------
analyze : bool
Whether to analyze before export. Solutions must be present for the design.
export_folder : str, optional
Full path to the project folder. The default is ``None``, in which case the
working directory is used.
Returns
-------
list
List of all exported files.
References
----------
>>> oModule.GetAllPortsList
>>> oDesign.ExportProfile
>>> oModule.ExportToFile
>>> oModule.ExportConvergence
>>> oModule.ExportNetworkData
"""
exported_files = []
if not export_folder:
export_folder = self.working_directory
if analyze:
self.analyze_all()
setups = self.oanalysis.GetSetups()
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
excitations = len(self.oexcitation.GetAllPortsList())
elif self.design_type == "2D Extractor":
excitations = self.oboundary.GetNumExcitations("SignalLine")
elif self.design_type == "Q3D Extractor":
excitations = self.oboundary.GetNumExcitations("Source")
else:
excitations = self.oboundary.GetNumExcitations()
reportnames = self.post.oreportsetup.GetAllReportNames()
for report_name in reportnames:
name_no_space = report_name.replace(" ", "_")
self.post.oreportsetup.UpdateReports([str(report_name)])
export_path = os.path.join(
export_folder, "{0}_{1}_{2}.csv".format(self.project_name, self.design_name, name_no_space)
)
try:
self.post.oreportsetup.ExportToFile(str(report_name), export_path)
self.logger.info("Export Data: {}".format(export_path))
except:
pass
exported_files.append(export_path)
for s in setups:
sweeps = self.oanalysis.GetSweeps(s)
if len(sweeps) == 0:
sweeps = ["LastAdaptive"]
else:
pass
for sweep in sweeps:
variation_array = self.list_of_variations(s, sweep)
if len(variation_array) == 1:
export_path = os.path.join(export_folder, "{}.prof".format(self.project_name))
result = self.export_profile(s, variation_array[0], export_path)
if result:
exported_files.append(export_path)
export_path = os.path.join(export_folder, "{}.conv".format(self.project_name))
result = self.export_convergence(s, variation_array[0], export_path)
if result:
exported_files.append(export_path)
if self.solution_type in ["HFSS3DLayout", "HFSS 3D Layout Design", "HFSS", "Circuit"]:
try:
export_path = os.path.join(
export_folder, "{0}.s{1}p".format(self.project_name, excitations)
)
self.osolution.ExportNetworkData(
variation_array[0],
["{0}:{1}".format(s, sweep)],
3,
export_path,
["All"],
True,
50,
"S",
-1,
0,
15,
True,
False,
False,
)
exported_files.append(export_path)
self.logger.info("Exported Touchstone: %s", export_path)
except:
self.logger.warning("Export SnP failed: no solutions found")
else:
varCount = 0
for variation in variation_array:
varCount += 1
export_path = os.path.join(export_folder, "{0}_{1}.prof".format(self.project_name, varCount))
result = self.export_profile(s, variation, export_path)
if result:
exported_files.append(export_path)
export_path = os.path.join(export_folder, "{0}_{1}.conv".format(self.project_name, varCount))
self.logger.info("Export Convergence: %s", export_path)
result = self.export_convergence(s, variation, export_path)
if result:
exported_files.append(export_path)
if self.solution_type in ["HFSS3DLayout", "HFSS 3D Layout Design", "HFSS", "Circuit"]:
try:
export_path = os.path.join(
export_folder, "{0}_{1}.s{2}p".format(self.project_name, varCount, excitations)
)
self.logger.info("Export SnP: {}".format(export_path))
self.osolution.ExportNetworkData(
variation,
["{0}:{1}".format(s, sweep)],
3,
export_path,
["All"],
True,
50,
"S",
-1,
0,
15,
True,
False,
False,
)
exported_files.append(export_path)
self.logger.info("Exported Touchstone: %s", export_path)
except:
self.logger.warning("Export SnP failed: no solutions found")
return exported_files
@pyaedt_function_handler()
def export_convergence(self, setup_name, variation_string="", file_path=None):
"""Export a solution convergence to a file.
Parameters
----------
setup_name : str
Setup name. For example, ``'Setup1'``.
variation_string : str
Variation string with values. For example, ``'radius=3mm'``.
file_path : str, optional
Full path to the PROF file. The default is ``None``, in which
case the working directory is used.
Returns
-------
str
File path if created.
References
----------
>>> oModule.ExportConvergence
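        Examples
        --------
        A minimal usage sketch; the setup name is illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.export_convergence("Setup1")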
"""
if not file_path:
file_path = os.path.join(self.working_directory, generate_unique_name("Convergence") + ".prop")
if not variation_string:
val_str = []
for el, val in self.available_variations.nominal_w_values_dict.items():
val_str.append("{}={}".format(el, val))
variation_string = ",".join(val_str)
if self.design_type == "2D Extractor":
for setup in self.setups:
if setup.name == setup_name:
if "CGDataBlock" in setup.props:
file_path = os.path.splitext(file_path)[0] + "CG" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "CG", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
if "RLDataBlock" in setup.props:
file_path = os.path.splitext(file_path)[0] + "RL" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "RL", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
break
elif self.design_type == "Q3D Extractor":
for setup in self.setups:
if setup.name == setup_name:
if "Cap" in setup.props:
file_path = os.path.splitext(file_path)[0] + "CG" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "CG", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
if "AC" in setup.props:
file_path = os.path.splitext(file_path)[0] + "ACRL" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "AC RL", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
if "DC" in setup.props:
file_path = os.path.splitext(file_path)[0] + "DC" + os.path.splitext(file_path)[1]
self.odesign.ExportConvergence(setup_name, variation_string, "DC RL", file_path, True)
self.logger.info("Export Convergence to %s", file_path)
break
else:
self.odesign.ExportConvergence(setup_name, variation_string, file_path)
self.logger.info("Export Convergence to %s", file_path)
return file_path
@pyaedt_function_handler()
def _get_native_data(self):
"""Retrieve Native Components data."""
boundaries = []
try:
data_vals = self.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"][
"SubModelDefinitions"
]["NativeComponentDefinition"]
if not isinstance(data_vals, list) and isinstance(data_vals, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
data_vals["NativeComponentDefinitionProvider"]["Type"],
data_vals["BasicComponentInfo"]["ComponentName"],
data_vals,
)
)
for ds in data_vals:
try:
if isinstance(ds, (OrderedDict, dict)):
boundaries.append(
NativeComponentObject(
self,
ds["NativeComponentDefinitionProvider"]["Type"],
ds["BasicComponentInfo"]["ComponentName"],
ds,
)
)
except:
pass
except:
pass
return boundaries
class AvailableVariations(object):
def __init__(self, app):
"""Contains available variations.
Parameters
----------
app :
Inherited parent object.
Returns
-------
object
Parent object.
"""
self._app = app
@property
def variables(self):
"""Variables.
Returns
-------
list of str
List of names of independent variables.
"""
return [i for i in self._app.variable_manager.independent_variables]
@pyaedt_function_handler()
def variations(self, setup_sweep=None):
"""Variations.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of lists
List of variation families.
References
----------
>>> oModule.GetAvailableVariations
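        Examples
        --------
        A minimal usage sketch using the first existing analysis sweep of the design.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.available_variations.variations()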
"""
if not setup_sweep:
setup_sweep = self._app.existing_analysis_sweeps[0]
vs = self._app.osolution.GetAvailableVariations(setup_sweep)
families = []
for v in vs:
variations = v.split(" ")
family = []
for el in self.variables:
family.append(el + ":=")
i = 0
while i < len(variations):
if variations[i][0 : len(el)] == el:
family.append([variations[i][len(el) + 2 : -1]])
i += 1
families.append(family)
return families
@pyaedt_function_handler()
def get_variation_strings(self, setup_sweep=None):
"""Return variation strings.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list of str
List of variation families.
References
----------
>>> oModule.GetAvailableVariations
"""
if not setup_sweep:
setup_sweep = self._app.existing_analysis_sweeps[0]
return self._app.osolution.GetAvailableVariations(setup_sweep)
@property
def nominal(self):
"""Nominal."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["Nominal"])
return families
@property
def nominal_w_values(self):
"""Nominal with values.
References
----------
>>> oDesign.GetChildObject('Variables').GetChildNames
>>> oDesign.GetVariables
>>> oDesign.GetVariableValue
        >>> oDesign.GetNominalVariation
        """
families = []
if self._app.design_type == "HFSS 3D Layout Design":
if self._app._is_object_oriented_enabled():
listvar = list(self._app._odesign.GetChildObject("Variables").GetChildNames())
else:
listvar = list(self._app._odesign.GetVariables())
for el in listvar:
families.append(el + ":=")
families.append([self._app._odesign.GetVariableValue(el)])
else:
variation = self._app._odesign.GetNominalVariation()
for el in self.variables:
families.append(el + ":=")
families.append([self._app._odesign.GetVariationVariableValue(variation, el)])
return families
@property
def nominal_w_values_dict(self):
"""Nominal with values in a dictionary.
References
----------
>>> oDesign.GetChildObject('Variables').GetChildNames
>>> oDesign.GetVariables
>>> oDesign.GetVariableValue
        >>> oDesign.GetNominalVariation
        """
families = {}
if self._app.design_type in ["HFSS 3D Layout Design", "Circuit Design", "Twin Builder"]:
if self._app._is_object_oriented_enabled():
listvar = list(self._app._odesign.GetChildObject("Variables").GetChildNames())
else:
listvar = list(self._app._odesign.GetVariables())
for el in listvar:
families[el] = self._app._odesign.GetVariableValue(el)
else:
variation = self._app._odesign.GetNominalVariation()
for el in self.variables:
families[el] = self._app._odesign.GetVariationVariableValue(variation, el)
return families
@property
def all(self):
"""All."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["All"])
return families
class AxisDir(object):
"""Contains constants for the axis directions."""
(XNeg, YNeg, ZNeg, XPos, YPos, ZPos) = range(0, 6)
@pyaedt_function_handler()
def get_setups(self):
"""Retrieve setups.
Returns
-------
list of str
List of names of all setups.
References
----------
>>> oModule.GetSetups
"""
setups = self.oanalysis.GetSetups()
return list(setups)
@pyaedt_function_handler()
def get_nominal_variation(self):
"""Retrieve the nominal variation.
Returns
-------
list of str
List of nominal variations.
"""
return self.available_variations.nominal
@pyaedt_function_handler()
def get_sweeps(self, name):
"""Retrieve all sweeps for a setup.
Parameters
----------
name : str
Name of the setup.
Returns
-------
list of str
List of names of all sweeps for the setup.
References
----------
>>> oModule.GetSweeps
"""
sweeps = self.oanalysis.GetSweeps(name)
return list(sweeps)
@pyaedt_function_handler()
def export_parametric_results(self, sweepname, filename, exportunits=True):
"""Export a list of all parametric variations solved for a sweep to a CSV file.
Parameters
----------
sweepname : str
Name of the optimetrics sweep.
filename : str
Full path and name for the CSV file.
exportunits : bool, optional
Whether to export units with the value. The default is ``True``. When ``False``,
only the value is exported.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.ExportParametricResults
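        Examples
        --------
        A minimal usage sketch; the sweep name and file path are illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.export_parametric_results("ParametricSetup1", "C:/temp/parametric.csv")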
"""
self.ooptimetrics.ExportParametricResults(sweepname, filename, exportunits)
return True
@pyaedt_function_handler()
def analyze_from_initial_mesh(self):
"""Revert the solution to the initial mesh and re-run the solve.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.RevertSetupToInitial
>>> oDesign.Analyze
"""
self.oanalysis.RevertSetupToInitial(self._setup)
self.analyze_nominal()
return True
@pyaedt_function_handler()
def analyse_nominal(self):
"""Solve the nominal design.
.. deprecated:: 0.4.0
Use :func:`Analysis.analyze_nominal` instead.
"""
warnings.warn("`analyse_nominal` is deprecated. Use `analyze_nominal` instead.", DeprecationWarning)
self.analyze_nominal()
@pyaedt_function_handler()
def analyze_nominal(self, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None):
"""Solve the nominal design.
Parameters
----------
num_cores : int, optional
Number of simulation cores.
num_tasks : int, optional
Number of simulation tasks.
num_gpu : int, optional
Number of simulation graphic processing units to use.
acf_file : str, optional
Full path to the custom ACF file.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.Analyze
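        Examples
        --------
        A minimal usage sketch; the core count is illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.analyze_nominal(num_cores=4)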
"""
return self.analyze_setup(self.analysis_setup, num_cores, num_tasks, num_gpu, acf_file)
@pyaedt_function_handler()
def generate_unique_setup_name(self, setup_name=None):
"""Generate a new setup with an unique name.
Parameters
----------
setup_name : str, optional
Name of the setup. The default is ``None``.
Returns
-------
str
Name of the setup.
"""
if not setup_name:
setup_name = "Setup"
index = 2
while setup_name in self.existing_analysis_setups:
setup_name = setup_name + "_{}".format(index)
index += 1
return setup_name
@pyaedt_function_handler()
def create_setup(self, setupname="MySetupAuto", setuptype=None, props=None):
"""Create a setup.
Parameters
----------
setupname : str, optional
Name of the setup. The default is ``"MySetupAuto"``.
setuptype : optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
props : dict, optional
Dictionary of analysis properties appropriate for the design and analysis.
If no values are passed, default values are used.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
References
----------
>>> oModule.InsertSetup
Examples
--------
        Create a setup for an SBR+ design using advanced Doppler
        processing for automotive radar.
>>> import pyaedt
>>> hfss = pyaedt.Hfss(solution_type='SBR+')
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> setup1.props["IsSbrRangeDoppler"] = True
>>> setup1.props["SbrRangeDopplerTimeVariable"] = "time_var"
>>> setup1.props["SbrRangeDopplerCenterFreq"] = "76.5GHz"
>>> setup1.props["SbrRangeDopplerRangeResolution"] = "0.15meter"
>>> setup1.props["SbrRangeDopplerRangePeriod"] = "100meter"
>>> setup1.props["SbrRangeDopplerVelocityResolution"] = "0.2m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMin"] = "-30m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMax"] = "30m_per_sec"
>>> setup1.props["DopplerRayDensityPerWavelength"] = "0.2"
>>> setup1.props["MaxNumberOfBounces"] = "3"
>>> setup1.update()
...
pyaedt info: Sweep was created correctly.
"""
        if props is None:
props = {}
if setuptype is None:
setuptype = self.design_solutions.default_setup
name = self.generate_unique_setup_name(setupname)
setup = Setup(self, setuptype, name)
if self.design_type == "HFSS" and not self.excitations and "MaxDeltaS" in setup.props:
new_dict = OrderedDict()
for k, v in setup.props.items():
if k == "MaxDeltaS":
new_dict["MaxDeltaE"] = 0.01
else:
new_dict[k] = v
setup.props = new_dict
setup.create()
if props:
for el in props:
setup.props[el] = props[el]
setup.update()
self.analysis_setup = name
self.setups.append(setup)
return setup
@pyaedt_function_handler()
def delete_setup(self, setupname):
"""Delete a setup.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.DeleteSetups
Examples
--------
Create a setup and then delete it.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> hfss.delete_setup(setupname='Setup1')
...
pyaedt info: Sweep was deleted correctly.
"""
if setupname in self.existing_analysis_setups:
self.oanalysis.DeleteSetups([setupname])
for s in self.setups:
if s.name == setupname:
self.setups.remove(s)
return True
return False
@pyaedt_function_handler()
def edit_setup(self, setupname, properties_dict):
"""Modify a setup.
Parameters
----------
setupname : str
Name of the setup.
properties_dict : dict
Dictionary containing the property to update with the value.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
References
----------
>>> oModule.EditSetup
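        Examples
        --------
        A minimal usage sketch; the setup name and property are illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.edit_setup("Setup1", {"MaximumPasses": 10})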
"""
setuptype = self.design_solutions.default_setup
setup = Setup(self, setuptype, setupname, isnewsetup=False)
setup.update(properties_dict)
self.analysis_setup = setupname
return setup
@pyaedt_function_handler()
def get_setup(self, setupname):
"""Get the setup from the current design.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = self.design_solutions.default_setup
setup = Setup(self, setuptype, setupname, isnewsetup=False)
if setup.props:
self.analysis_setup = setupname
return setup
@pyaedt_function_handler()
def create_output_variable(self, variable, expression):
"""Create or modify an output variable.
Parameters
----------
variable : str
Name of the variable.
        expression : str
            Expression for the output variable.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oModule.CreateOutputVariable
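        Examples
        --------
        A minimal usage sketch; the variable name and expression are illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.create_output_variable("s11_db", "dB(S(1,1))")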
"""
oModule = self.ooutput_variable
if variable in self.output_variables:
oModule.EditOutputVariable(
variable, expression, variable, self.existing_analysis_sweeps[0], self.solution_type, []
)
else:
oModule.CreateOutputVariable(variable, expression, self.existing_analysis_sweeps[0], self.solution_type, [])
return True
@pyaedt_function_handler()
def get_output_variable(self, variable):
"""Retrieve the value of the output variable.
Parameters
----------
variable : str
Name of the variable.
Returns
-------
type
Value of the output variable.
References
----------
>>> oDesign.GetNominalVariation
>>> oModule.GetOutputVariableValue
"""
assert variable in self.output_variables, "Output variable {} does not exist.".format(variable)
nominal_variation = self.odesign.GetNominalVariation()
value = self.ooutput_variable.GetOutputVariableValue(
variable, nominal_variation, self.existing_analysis_sweeps[0], self.solution_type, []
)
return value
@pyaedt_function_handler()
def get_object_material_properties(self, object_list=None, prop_names=None):
"""Retrieve the material properties for a list of objects and return them in a dictionary.
This high-level function ignores objects with no defined material properties.
Parameters
----------
object_list : list, optional
List of objects to get material properties for. The default is ``None``,
in which case material properties are retrieved for all objects.
prop_names : str or list
Property or list of properties to export. The default is ``None``, in
which case all properties are exported.
Returns
-------
dict
Dictionary of objects with material properties.
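        Examples
        --------
        A minimal usage sketch; the object and property names are illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> props = hfss.get_object_material_properties(["Box1"], "permittivity")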
"""
if object_list:
if not isinstance(object_list, list):
object_list = [object_list]
else:
object_list = self.modeler.object_names
if prop_names:
if not isinstance(prop_names, list):
prop_names = [prop_names]
        dict_out = {}
        for entry in object_list:
            mat_name = self.modeler[entry].material_name
            mat_props = self._materials[mat_name]
            if prop_names is None:
                dict_out[entry] = mat_props._props
            else:
                dict_out[entry] = {}
                for prop_name in prop_names:
                    dict_out[entry][prop_name] = mat_props._props[prop_name]
        return dict_out
@pyaedt_function_handler()
def analyze_setup(self, name, num_cores=None, num_tasks=None, num_gpu=None, acf_file=None):
"""Analyze a design setup.
Parameters
----------
name : str
            Name of the setup, which can be an optimetrics setup or a simple setup.
num_cores : int, optional
Number of simulation cores. The default is ``None.``
num_tasks : int, optional
Number of simulation tasks. The default is ``None.``
num_gpu : int, optional
Number of simulation graphics processing units. The default is ``None.``
acf_file : str, optional
Full path to custom ACF file. The default is ``None.``
Returns
-------
bool
``True`` when successful, ``False`` when failed.
References
----------
>>> oDesign.Analyze
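        Examples
        --------
        A minimal usage sketch; the setup name and core count are illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.analyze_setup("Setup1", num_cores=4)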
"""
set_custom_dso = False
active_config = self._desktop.GetRegistryString(r"Desktop/ActiveDSOConfigurations/" + self.design_type)
if acf_file:
self._desktop.SetRegistryFromFile(acf_file)
name = ""
with open(acf_file, "r") as f:
lines = f.readlines()
for line in lines:
if "ConfigName" in line:
name = line.strip().split("=")[1]
break
if name:
try:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, name)
set_custom_dso = True
except:
pass
elif num_gpu or num_tasks or num_cores:
config_name = "pyaedt_config"
source_name = os.path.join(self.pyaedt_dir, "misc", "pyaedt_local_config.acf")
target_name = os.path.join(self.working_directory, config_name + ".acf")
shutil.copy2(source_name, target_name)
if num_cores:
update_hpc_option(target_name, "NumCores", num_cores, False)
if num_gpu:
update_hpc_option(target_name, "NumGPUs", num_gpu, False)
if num_tasks:
update_hpc_option(target_name, "NumEngines", num_tasks, False)
update_hpc_option(target_name, "ConfigName", config_name, True)
update_hpc_option(target_name, "DesignType", self.design_type, True)
if self.design_type == "Icepak":
update_hpc_option(target_name, "UseAutoSettings", self.design_type, False)
try:
self._desktop.SetRegistryFromFile(target_name)
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, config_name)
set_custom_dso = True
except:
pass
if name in self.existing_analysis_setups:
try:
self.logger.info("Solving design setup %s", name)
self.odesign.Analyze(name)
except:
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.error("Error in Solving Setup %s", name)
return False
else:
try:
self.logger.info("Solving Optimetrics")
self.ooptimetrics.SolveSetup(name)
except:
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.error("Error in Solving or Missing Setup %s", name)
return False
if set_custom_dso:
self.set_registry_key(r"Desktop/ActiveDSOConfigurations/" + self.design_type, active_config)
self.logger.info("Design setup %s solved correctly", name)
return True
@pyaedt_function_handler()
def solve_in_batch(self, filename=None, machine="local", run_in_thread=False):
"""Analyze a design setup in batch mode.
.. note::
To use this function, the project must be closed.
Parameters
----------
        filename : str, optional
            Full path to the project file to solve. The default is ``None``, in which case
            the active project is solved.
machine : str, optional
Name of the machine if remote. The default is ``"local"``.
run_in_thread : bool, optional
Whether to submit the batch command as a thread. The default is
``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
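        Examples
        --------
        A minimal usage sketch; the method closes the project before launching the batch solve.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> hfss.solve_in_batch(machine="local")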
"""
if not filename:
filename = self.project_file
self.close_project()
if machine == "local":
# -Monitor option used as workaround for R2 BatchSolve not exiting properly at the end of the Batch job
options = " -ng -BatchSolve -Monitor "
else:
options = " -ng -distribute -machinelist list=" + machine + " -Batchsolve "
self.logger.info("Batch Solve Options: " + options)
if os.name == "posix":
batch_run = os.path.join(
self.desktop_install_dir + "/ansysedt" + chr(34) + options + chr(34) + filename + chr(34)
)
else:
batch_run = (
chr(34) + self.desktop_install_dir + "/ansysedt.exe" + chr(34) + options + chr(34) + filename + chr(34)
)
"""
check for existing solution directory and delete if present so we
dont have old .asol files etc
"""
self.logger.info("Solving model in batch mode on " + machine)
self.logger.info("Batch Job command:" + batch_run)
if run_in_thread:
def thread_run():
""" """
os.system(batch_run)
x = threading.Thread(target=thread_run)
x.start()
else:
os.system(batch_run)
self.logger.info("Batch job finished.")
return True
@pyaedt_function_handler()
def submit_job(
self, clustername, aedt_full_exe_path=None, numnodes=1, numcores=32, wait_for_license=True, setting_file=None
):
"""Submit a job to be solved on a cluster.
Parameters
----------
clustername : str
Name of the cluster to submit the job to.
aedt_full_exe_path : str, optional
Full path to the AEDT executable file. The default is ``None``, in which
case ``"/clustername/AnsysEM/AnsysEM2x.x/Win64/ansysedt.exe"`` is used.
numnodes : int, optional
Number of nodes. The default is ``1``.
numcores : int, optional
Number of cores. The default is ``32``.
wait_for_license : bool, optional
Whether to wait for the license to be validated. The default is ``True``.
setting_file : str, optional
Name of the file to use as a template. The default value is ``None``.
Returns
-------
type
ID of the job.
References
----------
>>> oDesktop.SubmitJob
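        Examples
        --------
        A minimal usage sketch; the cluster name is illustrative.
        >>> import pyaedt
        >>> hfss = pyaedt.Hfss()
        >>> job_id = hfss.submit_job("cluster01", numnodes=1, numcores=32)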
"""
project_file = self.project_file
project_path = self.project_path
if not aedt_full_exe_path:
version = self.odesktop.GetVersion()[2:6]
if os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Win64\ansysedt.exe".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Win64\\\\ansysedt.exe".format(version)
)
elif os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Linux64\ansysedt".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Linux64\\\\ansysedt".format(version)
)
else:
self.logger.error("AEDT path does not exist. Please provide a full path.")
return False
else:
if not os.path.exists(aedt_full_exe_path):
self.logger.error("Aedt Path doesn't exists. Please provide a full path")
return False
            aedt_full_exe_path = aedt_full_exe_path.replace("\\", "\\\\")
self.close_project()
path_file = os.path.dirname(__file__)
destination_reg = os.path.join(project_path, "Job_settings.areg")
if not setting_file:
setting_file = os.path.join(path_file, "..", "misc", "Job_Settings.areg")
shutil.copy(setting_file, destination_reg)
f1 = open(destination_reg, "w")
with open(setting_file) as f:
lines = f.readlines()
for line in lines:
if "\\ $begin" == line[:8]:
lin = "\\ $begin \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "\\ $end" == line[:6]:
lin = "\\ $end \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "NumCores" in line:
lin = "\\ \\ \\ \\ NumCores={}\\\n".format(numcores)
f1.write(lin)
elif "NumNodes=1" in line:
lin = "\\ \\ \\ \\ NumNodes={}\\\n".format(numnodes)
f1.write(lin)
elif "ProductPath" in line:
lin = "\\ \\ ProductPath =\\'{}\\'\\\n".format(aedt_full_exe_path)
f1.write(lin)
elif "WaitForLicense" in line:
lin = "\\ \\ WaitForLicense={}\\\n".format(str(wait_for_license).lower())
f1.write(lin)
else:
f1.write(line)
f1.close()
return self.odesktop.SubmitJob(os.path.join(project_path, "Job_settings.areg"), project_file)
|
PH EC NEW GUI AJE.py
|
import RPi.GPIO as GPIO
import tkinter as tk
import time
import threading
from AtlasOEM_PH import AtlasOEM_PH
from AtlasOEM_EC import AtlasOEM_EC
from ATLAS_OEM_Calibration4 import main
from tkinter import messagebox
from PIL import ImageTk, Image
#c = Canvas(top, bg="blue", height=250, width=300)
# Return to windowed mode
# Automatically resize font size based on window size
def read_sensor():
PH = AtlasOEM_PH(name = "PH") # create an OEM PH object
EC = AtlasOEM_EC(name = "EC") # create an OEM EC object
#DO = AtlasOEM_DO(name = "DO") # create an OEM DO object
PH.write_active_hibernate(1) # tell the circuits to start taking readings
EC.write_active_hibernate(1)
#DO.write_active_hibernate(1)
def get_OEM_reading(OEM_circuit, readfunction): # creates a closure to take readings for each circuit
reading = [1] # we use a list to approximate a static variable to cache previous readings
def OEM_reading_closure(): # make a custom function to do the readings
if OEM_circuit.read_new_reading_available(): # if we have a new reading
reading[0] = readfunction() # get it from the circuit
#print("OEM " + OEM_circuit.get_name() + \
# " reading: " + str(reading)) # print the reading
OEM_circuit.write_new_reading_available(0) # then clear the new reading register
# so the circuit can set the register
# high again when it acquires a new reading
return reading[0] # return the value in the list
return OEM_reading_closure # return the custom function without calling it, so we can call it when we want readings
    def get_all_EC_values(): # get the EC reading (TDS and salinity readings are currently commented out)
EC_val = EC.read_EC_reading()
#TDS_val = EC.read_TDS_reading()
#sal_val = EC.read_salinitiy_reading()
return EC_val #,TDS_val, sal_val]
read_pH = get_OEM_reading(PH, PH.read_PH_reading) #assign the closures so we can call them to get readings
read_EC = get_OEM_reading(EC, get_all_EC_values)
#read_DO = get_OEM_reading(DO, DO.read_DO_reading)
time.sleep(.5) # give circuits time to get the initial readings
while True:
ec_val = read_EC() #take readings from the closures
ph_val = read_pH()
#do_val = read_DO()
var.set(f'PH : {ph_val : }')
var1.set(f'EC : {ec_val : }')
print("EC: " + str(ec_val)#", TDS: " + str(ec_val[1]) # print the readings
+ "\t PH: " + str(ph_val))
# wait 1 second to get more readings
time.sleep(.5)
# create the thread
task = threading.Thread(target=read_sensor, daemon=True)
task1 = threading.Thread(target=main, daemon=True)
#task1 = threading.Thread(target=read_sensor, daemon=True)
root = tk.Tk()
root.title("PH EC Controller")
var = tk.StringVar()
var1 = tk.StringVar()
lbl = tk.Label(root, textvariable=var, width=40, height=5, font=('Consolas', 24, 'bold'))
lbl.pack()
lbl1 = tk.Label(root, textvariable=var1, width=40, height=5, font=('Consolas', 24, 'bold'))
lbl1.pack()
task.start()
#root.attributes('-fullscreen',True)
root.mainloop()
|
inference_worker.py
|
import collections
import logging
import threading
import time
from typing import Dict
import torch
from torch import nn
from torch.distributed import rpc
from hearthstone.training.pytorch.worker.distributed.tensorize_batch import _tensorize_batch, _untensorize_batch
logger = logging.getLogger(__name__)
class InferenceWorker:
def __init__(self, max_batch_size: int, num_inference_threads: int, device):
self.id = rpc.get_worker_info().id
self.max_batch_size = max_batch_size
self.num_inference_threads = num_inference_threads
self.device = device
self.nets: Dict[str, nn.Module] = {}
self.queued_tasks_by_name = collections.defaultdict(list)
self.inference_example_count = 0
self.inference_count = 0
# These are the only variables accessed from multiple threads.
self.communication_queue = collections.deque()
self.communication_event = threading.Event()
self.done_event = threading.Event()
self.inference_thread_lock = threading.Lock()
def set_nets(self, nets: Dict[str, nn.Module]):
self.nets = nets
for name, net in nets.items():
net.to(self.device)
@rpc.functions.async_execution
def infer(self, net_name: str, args):
future = rpc.Future()
self.communication_queue.append((net_name, future, args))
self.communication_event.set()
return future
def _unload_communication_queue(self):
logger.debug("unloading queue size {}".format(len(self.communication_queue)))
while self.communication_queue:
net_name, future, args = self.communication_queue.popleft()
self.queued_tasks_by_name[net_name].append((future, args))
logger.debug("queued task size {} {}".format(len(self.queued_tasks_by_name),
sum([len(v) for k, v in self.queued_tasks_by_name.items()])))
def _worker_thread(self):
while True:
with self.inference_thread_lock:
self.communication_event.clear()
self._unload_communication_queue()
# Select the longest queue
if self.queued_tasks_by_name:
net_name, _ = max(self.queued_tasks_by_name.items(),
key=lambda kv: len(kv[1]))
tasks = self.queued_tasks_by_name.pop(net_name)
                    # Take up to max_batch_size tasks from the tail of the net-specific queue
length = min(len(tasks), self.max_batch_size)
batched_tasks = [tasks.pop() for _ in range(length)]
self.queued_tasks_by_name[net_name] += tasks
else:
length = 0
if length:
# Run inference on batched tensor
batch_args = [args for _, args in batched_tasks]
t = time.time()
state_batch, valid_actions_batch, chosen_actions_batch = _tensorize_batch(batch_args
, self.device)
self.inference_count += 1
self.inference_example_count += state_batch[0].shape[0]
logger.debug("Inference #{}: {} requests, {} total batch size, {} average batch size".format(
self.inference_count, len(batched_tasks),
state_batch[0].shape[0],
float(self.inference_example_count) / self.inference_count))
net = self.nets[net_name]
output_actions, action_log_probs, value, debug_info = net(state_batch, valid_actions_batch,
chosen_actions_batch)
for (future, _), unbatched in zip(
batched_tasks,
_untensorize_batch(batch_args, output_actions, action_log_probs, value, debug_info,
torch.device('cpu'))):
future.set_result(unbatched)
logger.debug(f"Time taken is {time.time() - t}")
self.communication_event.wait(1)
if self.done_event.is_set():
return
def start_worker_thread(self):
for _ in range(self.num_inference_threads):
inference_thread = threading.Thread(target=self._worker_thread)
inference_thread.start()
def kill_worker_thread(self):
self.done_event.set()
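# Illustrative usage sketch (not part of the original module). InferenceWorker is intended to run
# inside a torch.distributed.rpc worker process; the arguments, the net name "policy", and
# MyPolicyNet below are hypothetical placeholders.
#
#   worker = InferenceWorker(max_batch_size=32, num_inference_threads=2, device=torch.device("cpu"))
#   worker.set_nets({"policy": MyPolicyNet()})
#   worker.start_worker_thread()
#   # remote callers obtain results via worker.infer("policy", args), which returns an rpc future
#   worker.kill_worker_thread()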
|
test_xmlrpc.py
|
import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import re
import io
import contextlib
from test import support
try:
import gzip
except ImportError:
gzip = None
try:
import threading
except ImportError:
threading = None
alist = [{'astring': 'foo@bar.baz.spam',
          'afloat': 7283.43,
          'anint': 2 ** 20,
          'ashortlong': 2,
          'anotherlist': ['.zyx.41'],
          'abase64': xmlrpclib.Binary(b'my dog has fleas'),
          'b64bytes': b'my dog has fleas',
          'b64bytearray': bytearray(b'my dog has fleas'),
          'boolean': False,
          'unicode': '䀀怀耀',
          'ukey䀀': 'regular value',
          'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
          'datetime2': xmlrpclib.DateTime((2005, 2, 10, 11, 41, 23, 0, 1, -1)),
          'datetime3': xmlrpclib.DateTime(datetime.datetime(2005, 2, 10, 11, 41, 23))}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
newdt, = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
newdt, = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
newdt, = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
newdt, = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
newdt, = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
newdt, = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912(self):
d = xmlrpclib.DateTime()
(new_d,), dummy = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = 'Hello'
(t2,), dummy = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2 ** 99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1, 2, 3): 1},))
def test_dump_recursive_seq(self):
l = [1, 2, 3]
t = [3, 4, 5, l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1': 1, '2': 1}
t = {'3': 3, 'd': d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2 ** 31 - 1:
self.assertRaises(OverflowError, xmlrpclib.dumps, (int(2 ** 34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT +
1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT -
1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT + 1,
dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT - 1,
dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT), float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42), float(xmlrpclib.
MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = alist + [None],
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value, xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_encoding(self):
value = {'key€¤': 'value€¤'}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = 'method€¤'
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15', methodname
=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
def test_dump_bytes(self):
sample = b'my dog has fleas'
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in (bytes, bytearray, xmlrpclib.Binary):
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
newvalue, = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
newvalue, = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = (
'<params><param><value><array><value><spam/></value></array></value></param></params>'
)
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = (
'<params><param><value><struct><member><name>a</name><value><spam/></value></member><member><name>b</name><value><spam/></value></member></struct></value></param></params>'
)
self.assertRaises(ResponseError, xmlrpclib.loads, data)
def check_loads(self, s, value, **kwargs):
dump = '<params><param><value>%s</value></param></params>' % s
result, m = xmlrpclib.loads(dump, **kwargs)
newvalue, = result
self.assertEqual(newvalue, value)
self.assertIs(type(newvalue), type(value))
self.assertIsNone(m)
def test_load_standard_types(self):
check = self.check_loads
check('string', 'string')
check('<string>string</string>', 'string')
check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
check('<int>2056183947</int>', 2056183947)
check('<int>-2056183947</int>', -2056183947)
check('<i4>2056183947</i4>', 2056183947)
check('<double>46093.78125</double>', 46093.78125)
check('<boolean>0</boolean>', False)
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>', xmlrpclib.Binary(
b'\x00byte string\xff'))
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
b'\x00byte string\xff', use_builtin_types=True)
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
xmlrpclib.DateTime('20050210T11:41:23'))
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
datetime.datetime(2005, 2, 10, 11, 41, 23), use_builtin_types=True)
check(
'<array><data><value><int>1</int></value><value><int>2</int></value></data></array>'
, [1, 2])
check(
'<struct><member><name>b</name><value><int>2</int></value></member><member><name>a</name><value><int>1</int></value></member></struct>'
, {'a': 1, 'b': 2})
def test_load_extension_types(self):
check = self.check_loads
check('<nil/>', None)
check('<ex:nil/>', None)
check('<i1>205</i1>', 205)
check('<i2>20561</i2>', 20561)
check('<i8>9876543210</i8>', 9876543210)
check('<biginteger>98765432100123456789</biginteger>',
98765432100123456789)
check('<float>93.78125</float>', 93.78125)
check('<bigdecimal>9876543210.0123456789</bigdecimal>', decimal.
Decimal('9876543210.0123456789'))
def test_get_host_info(self):
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info('user@host.tld'), ('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except OSError:
self.assertTrue(has_ssl)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_keepalive_disconnect(self):
class RequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
handled = False
def do_POST(self):
length = int(self.headers.get('Content-Length'))
self.rfile.read(length)
if self.handled:
self.close_connection = True
return
response = xmlrpclib.dumps((5,), methodresponse=True)
response = response.encode()
self.send_response(http.HTTPStatus.OK)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response)
self.handled = True
self.close_connection = False
def run_server():
server.socket.settimeout(float(1))
server.handle_request()
server.handle_request()
server = http.server.HTTPServer((support.HOST, 0), RequestHandler)
self.addCleanup(server.server_close)
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join)
url = 'http://{}:{}/'.format(*server.server_address)
with xmlrpclib.ServerProxy(url) as p:
self.assertEqual(p.method(), 5)
self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
class DispatchExc(Exception):
"""Raised inside the dispatched functions when checking for
chained exceptions"""
def test_call_registered_func(self):
"""Calls explicitly registered function"""
exp_params = 1, 2, 3
def dispatched_func(*params):
raise self.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(dispatched_func)
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_instance_func(self):
"""Calls a registered instance attribute as a function"""
exp_params = 1, 2, 3
class DispatchedClass:
def dispatched_func(self, *params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(DispatchedClass())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_dispatch_func(self):
"""Calls the registered instance's `_dispatch` function"""
exp_method = 'method'
exp_params = 1, 2, 3
class TestInstance:
def _dispatch(self, method, params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(method, params
)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(TestInstance())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch(exp_method, exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_registered_func_is_none(self):
"""Calls explicitly registered function which is None"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(None, name='method')
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_instance_has_no_func(self):
"""Attempts to call nonexistent function on a registered instance"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(object())
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_cannot_locate_func(self):
"""Calls a function that the dispatcher cannot locate"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
def test_escape(self):
        self.assertEqual(xmlrpclib.escape('a&b'), 'a&amp;b')
        self.assertEqual(xmlrpclib.escape('a<b'), 'a&lt;b')
        self.assertEqual(xmlrpclib.escape('a>b'), 'a&gt;b')
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
self.assertRaises(AttributeError, xmlrpc.server.
resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time([2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t), time.strftime('%Y%m%dT%H:%M:%S',
localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime('%Y%m%dT%H:%M:%S', time.
localtime(d)))
def test_time_tuple(self):
d = 2007, 6, 9, 10, 38, 50, 5, 160, 0
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime('%Y%m%dT%H:%M:%S', d))
def test_datetime_datetime(self):
d = datetime.datetime(2007, 1, 2, 3, 4, 5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007, 1, 2, 3, 4, 5)
t = xmlrpclib.DateTime(d)
val = "<DateTime '20070102T03:04:05' at %#x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007, 9, 8, 7, 11, 13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
dstr = now.strftime('%Y%m%dT%H:%M:%S')
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
with self.assertRaises(TypeError):
dtime == 1970
with self.assertRaises(TypeError):
dtime != dbytes
with self.assertRaises(TypeError):
dtime == bytearray(dbytes)
with self.assertRaises(TypeError):
dtime != dtuple
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, 'latin-1'))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, 'latin-1'))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, 'latin-1'))
ADDR = PORT = URL = None
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
class Fixture:
@staticmethod
def getData():
return '42'
def my_function():
"""This is my function"""
return True
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(('localhost', 0), requestHandler, encoding=
encoding, logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
URL = 'http://%s:%d' % (ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x, y: x + y, 'add')
serv.register_function(lambda x: x, 'têšt')
serv.register_function(my_function)
testInstance = TestInstanceClass()
serv.register_instance(testInstance, allow_dotted_names=True)
evt.set()
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
"""This is my function"""
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError('broken dispatcher')
serv = MyXMLRPCServer(('localhost', 0), MyRequestHandler, logRequests=
False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
URL = 'http://%s:%d' % (ADDR, PORT)
serv.server_activate()
paths = ['/foo', '/foo/bar']
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.
SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x, y: x + y,
'add')
serv.add_dispatcher('/is/broken', BrokenDispatcher())
evt.set()
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def is_unavailable_exception(e):
"""Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets."""
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
def make_request_and_skipIf(condition, reason):
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
serv_args = self.evt, self.request_count, self.requestHandler
threading.Thread(target=self.threadFunc, args=serv_args).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6, 8), 6 ** 8)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_nonascii(self):
start_string = 'Pŷt'
end_string = 'hơn'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string), start_string +
end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_client_encoding(self):
start_string = '€'
end_string = '¤'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string), start_string +
end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def XXXtest_404(self):
        conn = http.client.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall', 'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_introspection2(self):
try:
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
@make_request_and_skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -O2 and above')
def test_introspection3(self):
try:
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_introspection4(self):
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2, 3)
multicall.pow(6, 8)
multicall.div(127, 42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2 + 3)
self.assertEqual(pow_result, 6 ** 8)
self.assertEqual(div_result, 127 // 42)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" is not supported'
)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_dotted_attribute(self):
self.assertRaises(AttributeError, xmlrpc.server.
resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
self.test_simple1()
def test_allow_dotted_names_true(self):
server = xmlrpclib.ServerProxy('http://%s:%d/RPC2' % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
def test_unicode_host(self):
server = xmlrpclib.ServerProxy('http://%s:%d/RPC2' % (ADDR, PORT))
self.assertEqual(server.add('a', 'é'), 'aé')
def test_partial_post(self):
conn = http.client.HTTPConnection(ADDR, PORT)
conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye'
)
conn.close()
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection, (None, None))
self.assertEqual(server('transport')._connection, (None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 'a')
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection, (None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
def test_server_encoding(self):
start_string = '€'
end_string = '¤'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string), start_string +
end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL + '/foo')
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL + '/foo/bar')
self.assertEqual(p.add(6, 8), 6 + 8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL + '/is/broken')
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
class BaseKeepaliveServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests) - 1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertEqual(p.pow(6, 8), 6 ** 8)
p('close')()
self.assertEqual(len(self.RequestHandler.myRequests), 1)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
request_count = 2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertEqual(p.pow(6, 8), 6 ** 8)
p('close')()
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertEqual(p.pow(6, 8), 6 ** 8)
self.assertEqual(p.pow(6, 8), 6 ** 8)
p('close')()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6, 8), 6 ** 8)
p('transport').close()
self.assertEqual(p.pow(6, 8), 6 ** 8)
p('close')()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
self.__class__.content_length = int(self.headers['content-length'])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
fake_gzip = False
def parse_response(self, response):
self.response_length = int(response.getheader('content-length', 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
connection.putheader('Content-Encoding', 'gzip')
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6, 8), 6 ** 8)
a = self.RequestHandler.content_length
t.encode_threshold = 0
self.assertEqual(p.pow(6, 8), 6 ** 8)
b = self.RequestHandler.content_length
self.assertTrue(a > b)
p('close')()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError, re.compile(
'\\b400\\b'))
with cm:
p.pow(6, 8)
p('close')()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None
self.assertEqual(p.pow(6, 8), 6 ** 8)
a = t.response_length
self.requestHandler.encode_threshold = 0
self.assertEqual(p.pow(6, 8), 6 ** 8)
p('close')()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a > b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = b'\x00' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = b'\x00' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegex(ValueError,
'max gzipped payload length exceeded'):
xmlrpclib.gzip_decode(encoded)
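# A negative max_decode disables the size check, so the same payload
# decodes without raising.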
xmlrpclib.gzip_decode(encoded, max_decode=-1)
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
if threading:
self.url = URL
else:
self.url = 'http://'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
serv_args = self.evt, 1
threading.Thread(target=http_server, args=serv_args).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6, 8), 6 ** 8)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
self.fail('%s\n%s' % (e, getattr(e, 'headers', '')))
def test_fail_no_info(self):
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = (
FailingMessageClass)
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6, 8)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e) and hasattr(e, 'headers'):
self.assertTrue(e.headers.get('X-exception') is None)
self.assertTrue(e.headers.get('X-traceback') is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = (
FailingMessageClass)
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6, 8)
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e) and hasattr(e, 'headers'):
expected_err = (
"invalid literal for int() with base 10: 'I am broken'")
self.assertEqual(e.headers.get('X-exception'), expected_err)
self.assertTrue(e.headers.get('X-traceback') is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with support.EnvironmentVarGuard() as env, captured_stdout(encoding
=self.cgi.encoding) as data_out, support.captured_stdin(
) as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
handle = data_out.read()
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
content = handle[handle.find('<?xml'):]
self.assertEqual(int(re.search('Content-Length: (\\d+)', handle).
group(1)), len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
self.log = []
expected_bytes = b'my dog has fleas'
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(allow_none=True,
encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(('localhost', 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
@support.reap_threads
def test_main():
support.run_unittest(XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, UseBuiltinTypesTestCase,
SimpleServerTestCase, SimpleServerEncodingTestCase,
KeepaliveServerTestCase1, KeepaliveServerTestCase2,
GzipServerTestCase, GzipUtilTestCase, MultiPathServerTestCase,
ServerProxyTestCase, FailingServerTestCase, CGIHandlerTestCase,
SimpleXMLRPCDispatcherTestCase)
if __name__ == '__main__':
test_main()
|
server.py
|
# -*- coding: utf-8 -*-
"""Ironworks module"""
import sys
import os
import subprocess
import threading
import keyring
from cherrypy import wsgiserver
from logger import IronworksLogger
from lib.apscheduler.scheduler import Scheduler
import serverTools
class IronworksServer:
def __init__(self, DAEMON, VERBOSE, PIDFILE, RUNDIR, DATA_DIR, FULL_PATH, ARGS, PORT, LOG_FILE, DEVELOPMENT, DATABASE, WEBROOT, HOST, KIOSK, UPDATER, APP):
self.FULL_PATH = FULL_PATH
self.RUNDIR = RUNDIR
self.ARGS = ARGS
self.DAEMON = DAEMON
self.PIDFILE = PIDFILE
self.VERBOSE = VERBOSE
self.LOG_FILE = LOG_FILE
self.LOG_LIST = []
self.PORT = PORT
self.INIT_LOCK = threading.Lock()
self.__INITIALIZED__ = False
self.DEVELOPMENT = DEVELOPMENT
self.SCHEDULE = Scheduler()
self.DATABASE = DATABASE
self.WEBROOT = WEBROOT
self.logger = None
self.SERVER = None
self.HOST = HOST
self.KIOSK = KIOSK
self.DATA_DIR = DATA_DIR
self.SCRIPT_DIR = None
self.THREADS = []
self.APP = APP
self.AUTH = {
'username': None,
'password': None,
}
self.UPDATER = UPDATER
self.USE_GIT = False
self.version_file = None
self.CURRENT_COMMIT = None
self.LATEST_COMMIT = None
self.COMMITS_BEHIND = 0
self.COMMITS_COMPARE_URL = ''
self.FIRST_RUN = 0
serverTools.setApp(self.APP)
serverTools.setWebroot(self.WEBROOT)
serverTools.setRunDir(self.RUNDIR)
serverTools.setDataDir(self.DATA_DIR)
serverTools.setThreads(self.THREADS)
serverTools.setHost(self.HOST)
serverTools.setPort(self.PORT)
serverTools.setPrefsDb(self.DATABASE)
def initialize(self):
"""Init function for this module"""
with self.INIT_LOCK:
if self.__INITIALIZED__:
return False
# Set up logger
if not self.LOG_FILE:
self.LOG_FILE = os.path.join(self.DATA_DIR, 'logs', 'ironworks.log')
FILENAME = os.path.basename(self.LOG_FILE)
LOG_DIR = self.LOG_FILE[:-len(FILENAME)]
if not os.path.exists(LOG_DIR):
try:
os.makedirs(LOG_DIR)
except OSError:
if self.VERBOSE:
print('Unable to create the log directory.')
serverTools.setLogList(self.LOG_LIST)
serverTools.setLogFile(self.LOG_FILE)
self.logger = IronworksLogger(self.LOG_FILE, self.VERBOSE, self.DEVELOPMENT)
serverTools.setLogger(self.logger)
#set up script dir
if not self.SCRIPT_DIR:
self.SCRIPT_DIR = os.path.join(self.RUNDIR, 'scripts')
if self.KIOSK:
self.logger.log('Running in KIOSK Mode, settings disabled.', 'INFO')
#Check if a version file exists. If not assume latest revision.
self.version_file = os.path.join(self.DATA_DIR, 'Version.txt')
if not os.path.exists(self.version_file):
self.FIRST_RUN = 1
serverTools.setFirstRun(self.FIRST_RUN)
# check if database exists or create it
try:
self.logger.log('Checking if PATH exists: %s' % (self.DATABASE), 'WARNING')
dbpath = os.path.dirname(self.DATABASE)
if not os.path.exists(dbpath):
try:
self.logger.log('It does not exist, creating it...', 'WARNING')
os.makedirs(dbpath)
except:
self.logger.log('Could not create %s.' % (self.DATABASE), 'CRITICAL')
print('Could not create %s.' % self.DATABASE)
quit()
except:
self.logger.log('Could not create %s.' % (self.DATABASE), 'CRITICAL')
quit()
self.logger.log('Database successfully initialised', 'INFO')
if self.WEBROOT:
if self.WEBROOT[0] != '/':
self.WEBROOT = '/' + self.WEBROOT
d = wsgiserver.WSGIPathInfoDispatcher({self.WEBROOT: self.APP})
else:
d = wsgiserver.WSGIPathInfoDispatcher({'/': self.APP})
self.SERVER = wsgiserver.CherryPyWSGIServer((self.HOST, self.PORT), d)
self.__INITIALIZED__ = True
return True
def init_updater(self):
from ironworks.updater import checkGithub, gitCurrentVersion
if self.UPDATER:
if os.name == 'nt':
self.USE_GIT = False
else:
self.USE_GIT = os.path.isdir(os.path.join(self.RUNDIR, '.git'))
if self.USE_GIT:
gitCurrentVersion()
self.version_file = os.path.join(self.DATA_DIR, 'Version.txt')
if os.path.isfile(self.version_file):
f = open(self.version_file, 'r')
self.CURRENT_COMMIT = f.read()
f.close()
else:
self.COMMITS_BEHIND = -1
threading.Thread(target=checkGithub).start()
serverTools.setCommitsBehind(self.COMMITS_BEHIND)
serverTools.setCommitsCompareURL(self.COMMITS_COMPARE_URL)
serverTools.setUseGit(self.USE_GIT)
serverTools.setCurrentCommit(self.CURRENT_COMMIT)
def start_schedules(self):
"""Add all periodic jobs to the scheduler"""
if self.UPDATER:
# check every 6 hours for a new version
from ironworks.updater import checkGithub
self.SCHEDULE.add_interval_job(checkGithub, hours=6)
self.SCHEDULE.start()
def start(self):
"""Start the actual server"""
if self.__INITIALIZED__:
self.start_schedules()
if not self.DEVELOPMENT:
try:
self.logger.log('Starting IRONWORKS on %s:%i%s' % (self.HOST, self.PORT, self.WEBROOT), 'INFO')
self.SERVER.start()
# CherryPyWSGIServer.start() blocks here until the server is stopped.
except KeyboardInterrupt:
self.stop()
else:
self.logger.log('Starting IRONWORKS development server on port: %i' % (self.PORT), 'INFO')
self.logger.log(' ##### IMPORTANT : WEBROOT DOES NOT WORK UNDER THE DEV SERVER #######', 'INFO')
self.APP.run(debug=True, port=self.PORT, host=self.HOST)
def stop(self):
"""Shutdown Ironworks"""
self.logger.log('Shutting down IRONWORKS...', 'INFO')
if not self.DEVELOPMENT:
self.SERVER.stop()
else:
from flask import request
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
self.SCHEDULE.shutdown(wait=False)
if self.PIDFILE:
self.logger.log('Removing pidfile: %s' % str(self.PIDFILE), 'INFO')
os.remove(self.PIDFILE)
def restart(self):
"""Restart Ironworks"""
self.SERVER.stop()
popen_list = [sys.executable, self.FULL_PATH]
popen_list += self.ARGS
self.logger.log('Restarting IRONWORKS with: %s' % popen_list, 'INFO')
self.SCHEDULE.shutdown(wait=False)
subprocess.Popen(popen_list, cwd=self.RUNDIR)
def daemonize(self):
"""Start Ironworks as a daemon"""
if threading.activeCount() != 1:
self.logger.log('There are %s active threads. Daemonizing may cause strange behavior.' % threading.activeCount(), 'WARNING')
sys.stdout.flush()
sys.stderr.flush()
try:
pid = os.fork()
if pid == 0:
pass
else:
self.logger.log('Forking once...', 'DEBUG')
os._exit(0)
except OSError as e:
sys.exit('1st fork failed: %s [%d]' % (e.strerror, e.errno))
os.chdir('/')
os.umask(0)
os.setsid()
try:
pid = os.fork()
if pid > 0:
self.logger.log('Forking twice...', 'DEBUG')
os._exit(0)
except OSError as e:
sys.exit('2nd fork failed: %s [%d]' % (e.strerror, e.errno))
pid = os.getpid()
self.logger.log('Daemonized to PID: %s' % pid, 'INFO')
if self.PIDFILE:
self.logger.log('Writing PID %s to %s' % (pid, self.PIDFILE), 'INFO')
open(self.PIDFILE, 'w').write("%s\n" % pid)
def setLoginDb(self, host, userName, dbName):
#userPassword = self.checkDbKey("Ironworks-Login-", userName)
userPassword = 'your db key goes here'
db = serverTools.getLoginDb()
if db is None:
serverTools.setLoginDb(host, userName, userPassword, dbName)
def setSystemDb(self, host, userName, dbName):
#userPassword = self.checkDbKey("Ironworks-MySQL-", userName)
userPassword = 'your db key goes here'
db = serverTools.getSystemDb()
if db is None:
serverTools.setSystemDb(host, userName, userPassword, dbName)
def setPyEMONCMSDb(self, host, userName, dbName):
#userPassword = self.checkDbKey("Ironworks-PyEMONCMS-", userName)
userPassword = 'your db key goes here'
db = serverTools.getPyEMONCMSDb()
if db is None:
serverTools.setPyEMONCMSDb(host, userName, userPassword, dbName)
def checkDbKey(self, prefix, userName):
userName = userName
try:
try:
dbPassword = keyring.get_password(prefix + userName, userName)
except:
dbPassword = None
if dbPassword is None:
password = 'your db key goes here'
keyring.set_password(prefix + userName, userName, password)
self.logger.log('Initial database password added to keyring.', "INFO")
elif str(dbPassword) == 'your db key goes here':
self.logger.log('Initial database password in keyring.', "WARNING")
self.logger.log('Please change your password.', "WARNING")
else:
self.logger.log('User-defined database password set.', "INFO")
return dbPassword
except:
self.logger.log('Either could not access keyring or an entry could not be made.', "ERROR")
return ""
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "I'm playing Unital Ring"
def run():
app.run(host="0.0.0.0")
def keep_alive():
server = Thread(target=run)
server.start()
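# A minimal sketch: running this module directly starts the keep-alive web
# server on its own so it can be tested in isolation.
if __name__ == '__main__':
    keep_alive()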
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
import commands
import binascii
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-rtr process.
#
lisp_send_sockets = [None, None, None]
lisp_trace_listen_socket = None
lisp_ipc_listen_socket = None
lisp_ipc_punt_socket = None
lisp_ephem_listen_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None
lisp_raw_v6_socket = None
lisp_periodic_timer = None
lisp_threads = []
#
# In AWS, the source RLOC must be a private address or AWS will not forward
# encapsulated packets outbound. dmidecode MUST BE installed in the AWS VM
# so we can tell if lispers.net is running on AWS. And if a container is
# running on an AWS VM, dmidecode must be installed in the container. Do this
# by using "apt-get install dmidecode".
#
lisp_rtr_source_rloc = None
#
# Check if fast python data-plane should run.
#
lisp_rtr_fast_mode = (os.getenv("LISP_RTR_FAST_DATA_PLANE") != None)
lisp_rtr_latency_debug = (os.getenv("LISP_RTR_LATENCY_DEBUG") != None)
#------------------------------------------------------------------------------
#
# lisp_rtr_show_command
#
# Display state in an RTR.
#
def lisp_rtr_show_command(parameter):
global lisp_threads
return(lispconfig.lisp_itr_rtr_show_command(parameter, "RTR",
lisp_threads))
#enddef
#
# lisp_rtr_show_command_dns
#
# Display state in an RTR but pass in boolean to not do a DNS lookup.
#
def lisp_rtr_show_command_dns(parameter):
global lisp_threads
return(lispconfig.lisp_itr_rtr_show_command(parameter, "RTR", lisp_threads,
True))
#enddef
#
# lisp_rtr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_rtr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("RTR"))
#enddef
#
# lisp_rtr_database_mapping_command
#
# Add database-mapping entry so RTR can sign Map-Requests.
#
def lisp_rtr_database_mapping_command(kv_pair):
lispconfig.lisp_database_mapping_command(kv_pair)
#enddef
#
# lisp_rtr_glean_mapping_command
#
# Add a configured glean_mapping to the lisp_glean_mapping array.
#
def lisp_rtr_glean_mapping_command(kv_pair):
entry = { "rloc-probe" : False, "igmp-query" : False }
for kw in kv_pair.keys():
value = kv_pair[kw]
if (kw == "instance-id"):
v = value.split("-")
entry["instance-id"] = [0, 0]
if (len(v) == 1):
entry["instance-id"][0] = int(v[0])
entry["instance-id"][1] = int(v[0])
else:
entry["instance-id"][0] = int(v[0])
entry["instance-id"][1] = int(v[1])
#endif
#endif
if (kw == "eid-prefix"):
eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
eid.store_prefix(value)
entry["eid-prefix"] = eid
#endif
if (kw == "group-prefix"):
geid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
geid.store_prefix(value)
entry["group-prefix"] = geid
#endif
if (kw == "rloc-prefix"):
rloc = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
rloc.store_prefix(value)
entry["rloc-prefix"] = rloc
#endif
if (kw == "rloc-probe"):
entry["rloc-probe"] = (value == "yes")
#endif
if (kw == "igmp-query"):
entry["igmp-query"] = (value == "yes")
#endif
#endfor
#
# Check if entry already exists. If so, just return.
#
for e in lisp.lisp_glean_mappings:
if (e.has_key("eid-prefix") ^ entry.has_key("eid-prefix")): continue
if (e.has_key("eid-prefix") and entry.has_key("eid-prefix")):
old = e["eid-prefix"]
new = entry["eid-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("group-prefix") ^ entry.has_key("group-prefix")):
continue
#endif
if (e.has_key("group-prefix") and entry.has_key("group-prefix")):
old = e["group-prefix"]
new = entry["group-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("rloc-prefix") ^ entry.has_key("rloc-prefix")): continue
if (e.has_key("rloc-prefix") and entry.has_key("rloc-prefix")):
old = e["rloc-prefix"]
new = entry["rloc-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("instance-id") ^ entry.has_key("instance-id")): continue
if (e.has_key("instance-id") and entry.has_key("instance-id")):
old = e["instance-id"]
new = entry["instance-id"]
if (old != new): continue
#endif
#
# Found a match. Do not append existing entry to array.
#
return
#endfor
#
# Add dictionary array to array.
#
lisp.lisp_glean_mappings.append(entry)
#enddef
#
# lisp_rtr_show_rloc_probe_command
#
# Display RLOC-probe list state in an RTR.
#
def lisp_rtr_show_rloc_probe_command(parameter):
return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("RTR"))
#enddef
#
# lisp_fix_rloc_encap_state_entry
#
# Examine one map-cache entry.
#
def lisp_fix_rloc_encap_state_entry(mc, parms):
lisp_sockets, rloc, port, hostname = parms
addr = "{}:{}".format(rloc.print_address_no_iid(), port)
eid = lisp.green(mc.print_eid_tuple(), False)
msg = "Changed '{}' translated address:port to {} for EID {}, {} {}". \
format(hostname, lisp.red(addr, False), eid, "{}", "{}")
for rloc_entry in mc.rloc_set:
if (rloc_entry.rle):
for rle_node in rloc_entry.rle.rle_nodes:
if (rle_node.rloc_name != hostname): continue
rle_node.store_translated_rloc(rloc, port)
old_addr = rle_node.address.print_address_no_iid() + ":" + \
str(rle_node.translated_port)
lisp.lprint(msg.format("RLE", old_addr))
#endfor
#endif
if (rloc_entry.rloc_name != hostname): continue
#
# Update lisp-crypto encap array. Put keys in new dictionary array
# location since translated address and port changed. We don't want
# to rekey because of a NAT change.
#
old_addr = rloc_entry.rloc.print_address_no_iid() + ":" + \
str(rloc_entry.translated_port)
if (lisp.lisp_crypto_keys_by_rloc_encap.has_key(old_addr)):
keys = lisp.lisp_crypto_keys_by_rloc_encap[old_addr]
lisp.lisp_crypto_keys_by_rloc_encap[addr] = keys
#endif
#
# Update translated information with new information.
#
rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
rloc_entry.store_translated_rloc(rloc, port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
lisp.lprint(msg.format("RLOC", old_addr))
#
# Trigger RLOC-probe if enabled.
#
if (lisp.lisp_rloc_probing):
seid = None if (mc.group.is_null()) else mc.eid
deid = mc.eid if (mc.group.is_null()) else mc.group
lisp.lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc_entry)
#endif
#endfor
#
# Write change to external data-plane.
#
lisp.lisp_write_ipc_map_cache(True, mc)
return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state_walk
#
# Walk main cache and source-cache for each entry to handle multicast entries.
#
def lisp_fix_rloc_encap_state_walk(mc, parms):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_fix_rloc_encap_state_entry(mc, parms))
if (mc.source_cache == None): return(True, parms)
#
# There is (source, group) state so walk all sources for this group
# entry.
#
mc.source_cache.walk_cache(lisp_fix_rloc_encap_state_entry, parms)
return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state
#
# Walk map-cache looking for supplied RLOC and change its encap-port to
# the supplied port passed to this function.
#
def lisp_fix_rloc_encap_state(sockets, hostname, rloc, port):
lisp.lisp_map_cache.walk_cache(lisp_fix_rloc_encap_state_walk,
[sockets, rloc, port, hostname])
return
#enddef
#
# lisp_fast_debug
#
# Print out debug for lisp_rtr_fast_data_plane().
#
def lisp_fast_debug(sred, packet):
if (lisp.lisp_data_plane_logging == False): return
if (sred in ["Send", "Receive"]):
p = binascii.hexlify(packet[0:20])
lisp.lprint("Fast-{}: ip {} {} {} {} {}".format(sred, p[0:8], p[8:16],
p[16:24], p[24:32], p[32:40]))
elif (sred in ["Encap", "Decap"]):
p = binascii.hexlify(packet[0:36])
lisp.lprint("Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}". \
format(sred, p[0:8], p[8:16], p[16:24], p[24:32], p[32:40],
p[40:48], p[48:56], p[56:64], p[64:72]))
#endif
#enddef
#
# lisp_fast_lookup_debug
#
# Print out lisp_rtr_fast_data_plane() lookup information.
#
def lisp_fast_lookup_debug(dest, mc):
if (lisp.lisp_data_plane_logging == False): return
hm = "miss" if mc == None else "hit!"
lisp.lprint("Fast-Lookup {} {}".format(dest.print_address(), hm))
#enddef
#
# lisp_latency_debug
#
# Set or print latency timing. Used by both lisp_rtr_data_plane() and lisp_
# rtr_fast_data_plane().
#
def lisp_latency_debug(ts, msg):
global lisp_rtr_latency_debug
if (lisp_rtr_latency_debug == False): return(None)
#
# Return the initial timestamp when requested.
#
if (ts == None): return(time.time())
#
# Compute elapsed time from initial timestamp.
#
ts = (time.time() - ts) * 1000000
lisp.lprint("{}-Latency: {} usecs".format(msg, round(ts, 1)), "force")
return(None)
#enddef
#
# lisp_fast_address_to_binary
#
# Convert 4-byte address from packet format to binary. Used to store in
# lisp_address.address for other support functions to be used.
#
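# For example, the packet bytes '\x0a\x00\x00\x01' (10.0.0.1) become the
# integer 0x0a000001 (167772161).
#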
def lisp_fast_address_to_binary(a):
binary = ord(a[0]) << 24 | ord(a[1]) << 16 | ord(a[2]) << 8 | ord(a[3])
return(binary)
#enddef
#
# lisp_rtr_fast_data_plane
#
# This is a python fast data plane that is limited in features and processes
# packets in a raw manner. That is, there are no library calls and no byte
# swaps done. It is designed to make the gleaning RTR data-plane with LISP
# to non-LISP interworking go faster.
#
# Any non-fast operation returns False to allow lisp_rtr_data_plane() to
# process the packet normally.
#
# The first byte of 'packet' is assumed to be either the first byte of the
# LISP encapsulated packet (coming from an ITR) or a regular IP packet
# (arriving from a non-LISP source). For all other packets (like LISP
# control-plane packets, which can arrive either encapsulated or not) we
# return False so lisp_rtr_data_plane() processes them.
#
lisp_seid_cached = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
lisp_deid_cached = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
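#
# These two lisp_address objects are allocated once and reused for every
# packet so the fast path avoids per-packet object construction.
#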
def lisp_rtr_fast_data_plane(packet):
global lisp_map_cache, lisp_raw_socket
ts = lisp_latency_debug(None, "Fast")
#
# Check the UDP ports for any type of LISP packet. Strip the outer headers
# if this is a LISP encapsulated packet.
#
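# Byte 9 of the outer IPv4 header is the protocol (0x11 = UDP). The port
# byte-pairs 0x10f5 and 0x10f6 are the LISP data port 4341 and control port
# 4342. Stripping 36 bytes removes the outer IP (20), UDP (8), and LISP (8)
# headers.
#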
iid = 0
srloc = None
if (packet[9] == '\x11'):
if (packet[20:22] == '\x10\xf6'): return(False)
if (packet[22:24] == '\x10\xf6'): return(False)
if (packet[20:22] == '\x10\xf5' or packet[22:24] == '\x10\xf5'):
srloc = packet[12:16]
iid = packet[32:35]
iid = ord(iid[0]) << 16 | ord(iid[1]) << 8 | ord(iid[2])
if (iid == 0xffffff): return(False)
lisp_fast_debug("Decap", packet)
packet = packet[36::]
#endif
#endif
lisp_fast_debug("Receive", packet)
#
# Get destination in a form for map_cache lookup.
#
dest = lisp_fast_address_to_binary(packet[16:20])
lisp_deid_cached.instance_id = iid
lisp_deid_cached.address = dest
#
# Don't switch multicast for now.
#
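# The 0xe0000000 test matches anything at or above 224.0.0.0 (multicast and
# class E); those packets take the slow path.
#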
if ((dest & 0xe0000000) == 0xe0000000): return(False)
#
# Do map-cache lookup.
#
dest = lisp_deid_cached
mc = lisp.lisp_map_cache.lookup_cache(dest, False)
lisp_fast_lookup_debug(dest, mc)
if (mc == None): return(False)
#
# Check for source gleaning. If gleaned entry and RLOC changes from SRLOC
# return to do more general processing.
#
if (srloc != None):
src = lisp_fast_address_to_binary(packet[12:16])
lisp_seid_cached.instance_id = iid
lisp_seid_cached.address = src
src_mc = lisp.lisp_map_cache.lookup_cache(lisp_seid_cached, False)
if (src_mc == None):
allow, x, y = lisp.lisp_allow_gleaning(lisp_seid_cached, None,
None)
if (allow): return(False)
elif (src_mc.gleaned):
srloc = lisp_fast_address_to_binary(srloc)
if (src_mc.rloc_set[0].rloc.address != srloc): return(False)
#endif
#
# Cache source for map-cache display.
#
mc.add_recent_source(lisp_seid_cached)
#endif
#
# Need this check for interworking.
#
if (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION and
mc.eid.instance_id == 0):
dest.instance_id = lisp.lisp_default_secondary_iid
mc = lisp.lisp_map_cache.lookup_cache(dest, False)
lisp_fast_lookup_debug(dest, mc)
if (mc == None): return(False)
#endif
#
# Determine if new LISP encap is to be prepended or we are forwarding
# a decapsulated packet.
#
if (mc.action != lisp.LISP_NATIVE_FORWARD_ACTION):
if (mc.best_rloc_set == []): return(False)
dest = mc.best_rloc_set[0]
if (dest.state != lisp.LISP_RLOC_UP_STATE): return(False)
iid = mc.eid.instance_id
port = dest.translated_port
stats = dest.stats
dest = dest.rloc
drloc = dest.address
srloc = lisp.lisp_myrlocs[0].address
#
# Build outer IPv4 header.
#
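# Field layout of the fixed bytes below: version/IHL 0x45, ToS 0, total
# length (inner + 20 IP + 8 UDP + 8 LISP), ID 0xffff, flags/fragment 0x4000
# (DF), TTL 0x10, protocol 0x11 (UDP), and a zero checksum filled in by
# lisp_ip_checksum().
#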
outer = '\x45\x00'
length = len(packet) + 20 + 8 + 8
outer += chr((length >> 8) & 0xff) + chr(length & 0xff)
outer += '\xff\xff\x40\x00\x10\x11\x00\x00'
outer += chr((srloc >> 24) & 0xff)
outer += chr((srloc >> 16) & 0xff)
outer += chr((srloc >> 8) & 0xff)
outer += chr(srloc & 0xff)
outer += chr((drloc >> 24) & 0xff)
outer += chr((drloc >> 16) & 0xff)
outer += chr((drloc >> 8) & 0xff)
outer += chr(drloc & 0xff)
outer = lisp.lisp_ip_checksum(outer)
#
# Build UDP and LISP headers.
#
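# The UDP source port is 0xff00 when encapsulating to the standard data port
# 4341, otherwise 4341 itself; the UDP checksum is left zero. The first LISP
# header byte 0x08 sets the instance-ID (I) bit, followed by filler in the
# nonce field, the 3-byte instance ID, and a zero LSB byte.
#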
udplen = length - 20
udplisp = '\xff\x00' if (port == 4341) else '\x10\xf5'
udplisp += chr((port >> 8) & 0xff) + chr(port & 0xff)
udplisp += chr((udplen >> 8) & 0xff) + chr(udplen & 0xff) + '\x00\x00'
udplisp += '\x08\xdf\xdf\xdf'
udplisp += chr((iid >> 16) & 0xff)
udplisp += chr((iid >> 8) & 0xff)
udplisp += chr(iid & 0xff)
udplisp += '\x00'
#
# Append all outer headers.
#
packet = outer + udplisp + packet
lisp_fast_debug("Encap", packet)
else:
length = len(packet)
stats = mc.stats
lisp_fast_debug("Send", packet)
#endif
#
# Increment stats.
#
mc.last_refresh_time = time.time()
stats.increment(length)
#
# Send it.
#
dest = dest.print_address_no_iid()
lisp_raw_socket.sendto(packet, (dest, 0))
lisp_latency_debug(ts, "Fast")
return(True)
#enddef
#
# lisp_rtr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulated it.
#
def lisp_rtr_data_plane(lisp_packet, thread_name):
global lisp_send_sockets, lisp_ephem_prot, lisp_data_packet
global lisp_raw_socket, lisp_raw_v6_socket
global lisp_trace_listen_socket
global lisp_rtr_source_rloc
global lisp_rtr_fast_mode
ts = lisp_latency_debug(None, "RTR")
#
# Try switching packet fast.
#
if (lisp_rtr_fast_mode):
if (lisp_rtr_fast_data_plane(lisp_packet.packet)): return
#endif
#
# Feature-rich forwarding path.
#
packet = lisp_packet
is_lisp_packet = packet.is_lisp_packet(packet.packet)
#
# Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
#
if (is_lisp_packet == False):
orig_packet = packet.packet
pkt, source, port, ttl = lisp.lisp_is_rloc_probe(orig_packet, -1)
if (orig_packet != pkt):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, pkt, source, port, ttl)
return
#endif
#endif
#
# First check if we are assembling IPv4 fragments.
#
packet.packet = lisp.lisp_reassemble(packet.packet)
if (packet.packet == None): return
#
# We need to cache the input encapsulated packet as well as the output
# encapsulated packet.
#
if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
#
# If we are a PITR as well, we are receiving non encapsulated packets
# via return packets from doing LISP-NAT. Print some useful header fields
# and strip outer headers. Strip outer headers if LISP encapsulated packet
# and start inner header forwarding logic.
#
if (is_lisp_packet):
if (packet.decode(True, None, lisp.lisp_decap_stats) == None): return
packet.print_packet("Receive-({})".format(thread_name), True)
packet.strip_outer_headers()
else:
if (packet.decode(False, None, None) == None): return
packet.print_packet("Receive-({})".format(thread_name), False)
#endif
#
# If instance-id is 0xffffff, this is an Info-Request packet encapsulated
# to port 4341. We need to store the source port and source RLOC for
# NAT-traversal reasons.
#
# We don't need to send an Info-Reply from the 4341 data port. There is no
# information the xTR needs. It has the translated address from the
# map-server, and the NAT is ready for packets from port 4341 since we
# received this Info-Request.
#
if (is_lisp_packet and packet.lisp_header.get_instance_id() == 0xffffff):
header = lisp.lisp_control_header()
header.decode(packet.packet)
if (header.is_info_request()):
info = lisp.lisp_info()
info.decode(packet.packet)
info.print_info()
#
# Store/refresh NAT state and Fix map-cache entries if there was
# a change.
#
h = info.hostname if (info.hostname != None) else ""
s = packet.outer_source
p = packet.udp_sport
if (lisp.lisp_store_nat_info(h, s, p)):
lisp_fix_rloc_encap_state(lisp_send_sockets, h, s, p)
#endif
else:
source = packet.outer_source.print_address_no_iid()
ttl = packet.outer_ttl
packet = packet.packet
if (lisp.lisp_is_rloc_probe_request(packet[28]) == False and
lisp.lisp_is_rloc_probe_reply(packet[28]) == False): ttl = -1
packet = packet[28::]
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, 0, ttl)
#endif
return
#endif
#
# Packets are arriving on pcap interface. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
if (is_lisp_packet):
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#endif
#
# Process inner header (checksum and decrement ttl).
#
igmp = None
if (packet.inner_dest.is_mac()):
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
packet.encap_port = lisp.LISP_VXLAN_DATA_PORT
elif (packet.inner_version == 4):
igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
else:
lisp.dprint("Cannot parse inner packet header")
return
#endif
#
# Process decap node trace function.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
packet.outer_source.afi = lisp.LISP_AFI_NONE
packet.outer_dest.afi = lisp.LISP_AFI_NONE
#endif
#
# Should we glean source information from packet and add it to the
# map-cache??
#
allow, x, y = lisp.lisp_allow_gleaning(packet.inner_source, None,
packet.outer_source)
if (allow):
igmp_packet = packet.packet if (igmp) else None
lisp.lisp_glean_map_cache(packet.inner_source, packet.outer_source,
packet.udp_sport, igmp_packet)
if (igmp): return
#endif
#
# Is the destination gleaned? If so, suppress a mapping-system lookup and
# prefer the gleaned entry.
#
deid = packet.inner_dest
if (deid.is_multicast_address()):
gleaned_dest, x, y = lisp.lisp_allow_gleaning(packet.inner_source,
deid, None)
else:
gleaned_dest, x, y = lisp.lisp_allow_gleaning(deid, None, None)
#endif
packet.gleaned_dest = gleaned_dest
#
# Do map-cache lookup. If no entry found, send Map-Request.
#
mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
if (mc): mc.add_recent_source(packet.inner_source)
#
# Check if we are doing secondary-instance-ids only when we have a
# map-cache entry in the IID that is possibly a non-LISP site.
#
if (mc and (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION or
mc.eid.address == 0)):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
if (db and db.secondary_iid):
dest_eid = packet.inner_dest
dest_eid.instance_id = db.secondary_iid
mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
if (mc):
packet.gleaned_dest = mc.gleaned
mc.add_recent_source(packet.inner_source)
else:
gleaned_dest, x, y = lisp.lisp_allow_gleaning(dest_eid, None,
None)
packet.gleaned_dest = gleaned_dest
#endif
#endif
#endif
#
# Map-cache lookup miss. Do not send Map-Request to mapping system if
# dest-EID is configured to be gleaned. We want to give preference to
# the gleaned mapping and not the mapping in the mapping system.
#
if (mc == None and gleaned_dest):
lisp.lprint("Suppress Map-Request for gleaned EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
return
#endif
if (mc == None or mc.action == lisp.LISP_SEND_MAP_REQUEST_ACTION):
if (lisp.lisp_rate_limit_map_request(packet.inner_source,
packet.inner_dest)): return
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "map-cache miss"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
#
# Send Map-Request to see if there is a RLOC change or to refresh an
# entry that is about to time out.
#
if (mc and mc.is_active() and mc.has_ttl_elapsed() and
mc.gleaned == False):
lisp.lprint("Refresh map-cache entry {}".format( \
lisp.green(mc.print_eid_tuple(), False)))
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
#endif
#
# Update stats for entry. Stats per RLOC is done in lisp_mapping.select_
# rloc().
#
mc.stats.increment(len(packet.packet))
#
# Encapsulate or native forward packet.
#
dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
mc.select_rloc(packet, None)
if (dest_rloc == None and rle == None):
if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
lisp.dprint("Natively forwarding")
packet.send_packet(lisp_raw_socket, packet.inner_dest)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "not an EID"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
lisp_latency_debug(ts, "RTR")
return
#endif
r = "No reachable RLOCs found"
lisp.dprint(r)
if (packet.is_trace()):
s = lisp_trace_listen_socket
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
if (dest_rloc and dest_rloc.is_null()):
lisp.dprint("Drop action RLOC found")
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "drop action"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
#
# Set up the outer header for either unicast or multicast transmission.
#
packet.outer_tos = packet.inner_tos
packet.outer_ttl = packet.inner_ttl
#
# Do unicast encapsulation.
#
if (dest_rloc):
packet.encap_port = dest_port
if (dest_port == 0): packet.encap_port = lisp.LISP_DATA_PORT
packet.outer_dest.copy_address(dest_rloc)
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp_rtr_source_rloc if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
s = lisp_trace_listen_socket
if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry,
lisp_socket=s) == False): return
#endif
#
# Encode new LISP, UDP, and outer header.
#
if (packet.encode(nonce) == None): return
if (len(packet.packet) <= 1500): packet.print_packet("Send", True)
#
# Send out on raw socket.
#
raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
packet.send_packet(raw_socket, packet.outer_dest)
elif (rle):
#
# Do replication if an RLE is returned.
#
orig_len = len(packet.packet)
for node in rle.rle_forwarding_list:
packet.outer_dest.copy_address(node.address)
packet.encap_port = lisp.LISP_DATA_PORT if \
node.translated_port == 0 else node.translated_port
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp_rtr_source_rloc if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "replicate"
if (lisp.lisp_trace_append(packet, reason=r, lisp_socket=s) \
== False): return
#endif
if (packet.encode(None) == None): return
packet.print_packet("Replicate-to-L{}".format(node.level), True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#
# We need to strip the encapsulation header so we can add a new
# one for the next replication.
#
strip_len = len(packet.packet) - orig_len
packet.packet = packet.packet[strip_len::]
if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
#endfor
#endif
#
# Don't need packet structure anymore.
#
del(packet)
lisp_latency_debug(ts, "RTR")
return
#enddef
#
# lisp_rtr_worker_thread
#
# This function runs for each thread started.
#
def lisp_rtr_worker_thread(lisp_thread):
lisp.lisp_set_exception()
while (True):
#
# Dequeue packet from pcap's enqueue.
#
packet = lisp_thread.input_queue.get()
#
# Count input packets and bytes.
#
lisp_thread.input_stats.increment(len(packet))
#
# Use pre-defined packet data structure, store packet buffer in it.
#
lisp_thread.lisp_packet.packet = packet
#
# Decap and encap, go, go, go.
#
lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
#endwhile
return
#enddef
#
# lisp_triage
#
# Decide which RTR thread should process the packet. Do a modulus on the
# timestamp to randomly have a single thread process a received packet.
# to randomly have a single thread process a received packet.
#
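# For example, with 4 pcap threads a timestamp of 1234.7 gives
# int(1234.7 % 4) == 2, so only the thread numbered 2 keeps the packet.
#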
def lisp_triage(thread):
seed = (time.time() % thread.number_of_pcap_threads)
return(int(seed) == thread.thread_number)
#enddef
#
# lisp_rtr_pcap_process_packet
#
# Receive a LISP encapsulated packet from pcap.loop() and either hand it to
# a worker thread's input queue or process it inline in this thread.
#
def lisp_rtr_pcap_process_packet(parms, not_used, packet):
if (lisp_triage(parms[1]) == False): return
device = parms[0]
lisp_thread = parms[1]
use_workers = lisp_thread.number_of_worker_threads
lisp_thread.input_stats.increment(len(packet))
#
# Jump over the link-layer header: 4 bytes for the loopback device, 14 bytes
# for an Ethernet header on macOS, or 16 bytes for the Linux cooked capture
# used with the "any" device.
#
offset = 4 if device == "lo0" else (14 if lisp.lisp_is_macos() else 16)
packet = packet[offset::]
#
# If we are using worker threads, queue packet so they can process packet.
#
if (use_workers):
index = lisp_thread.input_stats.packet_count % use_workers
index = index + (len(lisp_threads) - use_workers)
thread = lisp_threads[index]
thread.input_queue.put(packet)
else:
lisp_thread.lisp_packet.packet = packet
lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
#endif
return
#enddef
#
# lisp_rtr_pcap_thread
#
# Set up pcap filters for this thread to receive packets in
# lisp_rtr_pcap_process_packet().
#
def lisp_rtr_pcap_thread(lisp_thread):
lisp.lisp_set_exception()
if (lisp.lisp_myrlocs[0] == None): return
device = "lo0" if lisp.lisp_is_macos() else "any"
pcap = pcappy.open_live(device, 9000, 0, 100)
#
# If "lisp-nat = yes" is configured, then a PETR is co-located with this
# RTR functionality. We need to pcap *all* packets (0.0.0.0/0 and 0::/0).
#
lisp_nat = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ")
pfilter = "(dst host "
afilter = ""
for addr in lisp.lisp_get_all_addresses():
pfilter += "{} or ".format(addr)
afilter += "{} or ".format(addr)
#endfor
pfilter = pfilter[0:-4]
pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
"(ip[6]&0xe0 == 0 and ip[7] != 0))))"
#
# For RLOC-probe messages that come via pcap interface so we have the
# IP header to grab the TTL.
#
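# ip[28] is the first byte of the UDP payload (20-byte IP header plus 8-byte
# UDP header), i.e. the first LISP control byte: 0x12 is a Map-Request with
# the probe bit set and 0x28 is a Map-Reply with the probe bit set.
#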
afilter = afilter[0:-4]
pfilter += (" or (not (src host {}) and " + \
"((udp src port 4342 and ip[28] == 0x28) or " + \
"(udp dst port 4342 and ip[28] == 0x12)))").format(afilter)
if (lisp_nat):
pfilter += (" or (dst net 0.0.0.0/0 and " + \
"not (host {} or src net 127.0.0.0/8))").format(afilter)
#endif
lisp.lprint("Capturing packets for: '{}'".format(pfilter))
pcap.filter = pfilter
#
# Enter receive loop.
#
pcap.loop(-1, lisp_rtr_pcap_process_packet, [device, lisp_thread])
return
#enddef
#
# lisp_encapsulate_igmp_query
#
# LISP encapsulate an IGMP query to the RLOC of the EID that has joined any
# group.
#
def lisp_encapsulate_igmp_query(lisp_raw_socket, eid, geid, igmp):
#
# Setup fields we need for lisp_packet.encode().
#
packet = lisp.lisp_packet(igmp)
#
# Get RLOC of EID from RLE record.
#
mc = lisp.lisp_map_cache_lookup(eid, geid)
if (mc == None): return
if (mc.rloc_set == []): return
if (mc.rloc_set[0].rle == None): return
eid_name = eid.print_address_no_iid()
for rle_node in mc.rloc_set[0].rle.rle_nodes:
if (rle_node.rloc_name == eid_name):
packet.outer_dest.copy_address(rle_node.address)
packet.encap_port = rle_node.translated_port
break
#endif
#endfor
if (packet.outer_dest.is_null()): return
packet.outer_source.copy_address(lisp.lisp_myrlocs[0])
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 32
packet.inner_source.copy_address(lisp.lisp_myrlocs[0])
packet.inner_dest.store_address("[{}]224.0.0.1".format(geid.instance_id))
packet.inner_ttl = 1
e = lisp.green(eid.print_address(), False)
r = lisp.red("{}:{}".format(packet.outer_dest.print_address_no_iid(),
packet.encap_port), False)
q = lisp.bold("IGMP Query", False)
lisp.lprint("Data encapsulate {} to gleaned EID {}, RLOC {}".format( \
q, e, r))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#enddef
#
# lisp_send_igmp_queries
#
# Send General Query to each EID that has joiined groups. The Group Address
# field below is set to 0.0.0.0 and the Number of Sources is set to 0.
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x11 | Max Resp Code | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Resv |S| QRV | QQIC | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_send_igmp_queries(lisp_raw_socket):
if (lisp.lisp_gleaned_groups == {}): return
#
# Build an IP header and checksum it. Put Router-Alert option after
# destination address.
#
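# The fixed bytes below: 0x46 = IPv4 with a 24-byte header (room for the
# Router Alert option), ToS 0xc0, total length 0x0024 = 36 (24 IP + 12 IGMP),
# DF set, TTL 1, protocol 2 (IGMP), checksum filled in below.
#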
ip = "\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
myrloc = lisp.lisp_myrlocs[0]
rloc = myrloc.address
ip += chr((rloc >> 24) & 0xff)
ip += chr((rloc >> 16) & 0xff)
ip += chr((rloc >> 8) & 0xff)
ip += chr(rloc & 0xff)
ip += "\xe0\x00\x00\x01"
ip += "\x94\x04\x00\x00"
ip = lisp.lisp_ip_checksum(ip, 24)
#
# Build an IGMP query and checksum it. The mrc is 100 (10 secs), qrv is 2,
# and qqic is 60. Just like cisco would send.
#
igmp = "\x11\x64\x00\x00" + "\x00\x00\x00\x00" + "\x02\x3c\x00\x00"
igmp = lisp.lisp_igmp_checksum(igmp)
#
# Send to EIDs that have joined group and that we have configured to send
# queries to.
#
seid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
geid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
for eid in lisp.lisp_gleaned_groups:
seid.store_address(eid)
for group in lisp.lisp_gleaned_groups[eid]:
geid.store_address(group)
x, y, query = lisp.lisp_allow_gleaning(seid, geid, None)
if (query == False): continue
lisp_encapsulate_igmp_query(lisp_raw_socket, seid, geid, ip + igmp)
#endfor
#endfor
#enddef
#
# lisp_timeout_gleaned_groups
#
# Go through the lisp_gleaned_groups{} array to see if any timers are older
# than 3 minutes.
#
def lisp_timeout_gleaned_groups():
seid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
geid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
delete_list = []
for eid in lisp.lisp_gleaned_groups:
for group in lisp.lisp_gleaned_groups[eid]:
last_refresh = lisp.lisp_gleaned_groups[eid][group]
elapsed = time.time() - last_refresh
if (elapsed < lisp.LISP_IGMP_TIMEOUT_INTERVAL): continue
delete_list.append([eid, group])
#endfor
#endfor
#
# Remove from rloc-set and lisp_gleaned_groups (since we are not
# traversing it anymore).
#
to_str = lisp.bold("timed out", False)
for eid, group in delete_list:
seid.store_address(eid)
geid.store_address(group)
e = lisp.green(eid, False)
g = lisp.green(group, False)
lisp.lprint("{} RLE {} for gleaned group {}".format(e, to_str, g))
lisp.lisp_remove_gleaned_multicast(seid, geid)
#endfor
#enddef
#
# lisp_rtr_process_timer
#
# Call general timeout routine to process the RTR map-cache.
#
def lisp_rtr_process_timer(lisp_raw_socket):
lisp.lisp_set_exception()
#
# Remove nonce entries from crypto-list.
#
for keys in lisp.lisp_crypto_keys_by_nonce.values():
for key in keys: del(key)
#endfor
lisp.lisp_crypto_keys_by_nonce.clear()
lisp.lisp_crypto_keys_by_nonce = {}
#
# Walk map-cache.
#
lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)
#
# Clear the LISP-Trace cache so we can optimize memory usage. There is only
# a one-time use for the cached entries.
#
lisp.lisp_rtr_nat_trace_cache.clear()
lisp.lisp_rtr_nat_trace_cache = {}
#
# Process gleaned groups refresh timer. If IGMP reports have not been
# received, remove RLE from (*,G) and (S,G) map-cache entries.
#
lisp_timeout_gleaned_groups()
#
# Send IGMP queries to gleaned EIDs that have joined groups.
#
lisp_send_igmp_queries(lisp_raw_socket)
#
# Restart periodic timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer,
[lisp_raw_socket])
lisp_periodic_timer.start()
return
#enddef
#
# lisp_rtr_startup
#
# Initialize this LISP RTR process. Returns True on success and False
# otherwise.
#
def lisp_rtr_startup():
global lisp_ipc_listen_socket, lisp_send_sockets, lisp_ephem_listen_socket
global lisp_raw_socket, lisp_raw_v6_socket, lisp_threads
global lisp_ipc_punt_socket, lisp_trace_listen_socket
global lisp_rtr_source_rloc
lisp.lisp_i_am("rtr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("RTR starting up")
#
# Get local address for source RLOC for encapsulation.
#
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Use the eth0 interface address for the RTR source RLOC when running on AWS.
# It should be the private, translatable address behind the AWS NAT.
#
lisp_rtr_source_rloc = lisp.lisp_myrlocs[0]
if (lisp.lisp_on_aws()):
lisp_rtr_source_rloc = lisp.lisp_get_interface_address("eth0")
#endif
#
# Open network send socket and internal listen socket. For an RTR, that
# may be behind a NAT, all Map-Requests are sent with the ephemeral port
# so the Map-Request port and the ECM port will be the same.
#
address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp_ephem_port))
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-rtr")
lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
lisp_send_sockets[0] = lisp_ephem_listen_socket
# lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_send_sockets[2] = lisp_ipc_listen_socket
#
# Open up raw socket so we can send with IP headers after decapsulation.
# There is a special case where the RTR's lisp_send_sockets array is of
# size 4 since we need to pass the raw socket through the lisp.py module
# to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
# The test is in lisp_send_map_request() for this. This is the case in
# ETRs as well. All other components use an array of size 3.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
lisp_send_sockets.append(lisp_raw_socket)
#
# Open up a listen socket on the LISP-Trace port so the RTR can cache
# translated RLOC information from an ltr client program.
#
lisp_trace_listen_socket = lisp.lisp_open_listen_socket("0.0.0.0",
str(lisp.LISP_TRACE_PORT))
if (lisp.lisp_is_raspbian() == False):
lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
socket.IPPROTO_UDP)
#endif
pcap_threads = os.getenv("LISP_PCAP_THREADS")
pcap_threads = 1 if (pcap_threads == None) else int(pcap_threads)
worker_threads = os.getenv("LISP_WORKER_THREADS")
worker_threads = 0 if (worker_threads == None) else int(worker_threads)
#
# Setup packet capture.
#
for i in range(pcap_threads):
t = lisp.lisp_thread("pcap-{}".format(i))
t.thread_number = i
t.number_of_pcap_threads = pcap_threads
t.number_of_worker_threads = worker_threads
lisp_threads.append(t)
threading.Thread(target=lisp_rtr_pcap_thread, args=[t]).start()
#endfor
#
# Start worker threads. The count comes from the LISP_WORKER_THREADS
# environment variable (default 0).
#
for i in range(worker_threads):
t = lisp.lisp_thread("worker-{}".format(i))
lisp_threads.append(t)
threading.Thread(target=lisp_rtr_worker_thread, args=[t]).start()
#endfor
#
# Load map-cache from checkpoint file before we start writing to it.
#
lisp.lisp_load_checkpoint()
#
# Should we load-split pings?
#
lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)
#
# Start map-cache timeout timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer,
[lisp_raw_socket])
lisp_periodic_timer.start()
return(True)
#enddef
#
# lisp_rtr_shutdown
#
# Shut down this process.
#
def lisp_rtr_shutdown():
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-rtr")
lisp.lisp_close_socket(lisp_ephem_listen_socket, "")
lisp.lisp_close_socket(lisp_trace_listen_socket, "")
lisp.lisp_close_socket(lisp_ipc_punt_socket, "lispers.net-itr")
lisp_raw_socket.close()
return
#enddef
#
# lisp_rtr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_rtr_map_resolver_command(kv_pair):
global lisp_send_sockets
global lisp_ephem_port
lispconfig.lisp_map_resolver_command(kv_pair)
if (lisp.lisp_test_mr_timer == None or
lisp.lisp_test_mr_timer.is_alive() == False):
lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
[lisp_send_sockets, lisp_ephem_port])
lisp.lisp_test_mr_timer.start()
#endif
return
#enddef
#
# lisp_rtr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to start
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_rtr_xtr_command(kv_pair):
global lisp_ephem_listen_socket, lisp_raw_socket, lisp_ephem_port
rloc_probing = lisp.lisp_rloc_probing
#
# Execute command.
#
lispconfig.lisp_xtr_command(kv_pair)
#
# Trigger if "rloc-probing = yes" just happened and it was previously
# set to "no".
#
if (rloc_probing == False and lisp.lisp_rloc_probing):
lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket,
None, lisp_raw_socket]
lisp.lisp_start_rloc_probe_timer(1, lisp_sockets)
entry = { "type" : "itr-crypto-port", "port" : lisp_ephem_port }
lisp.lisp_write_to_dp_socket(entry)
#endif
#
# Write to external data-plane if enabled.
#
lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
lisp.lisp_data_plane_logging)
return
#enddef
#
# RTR commands processed by this process.
#
lisp_rtr_commands = {
"lisp xtr-parameters" : [lisp_rtr_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"dynamic-eid-device" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-resolver" : [lisp_rtr_map_resolver_command, {
"mr-name" : [True],
"ms-name" : [True],
"dns-name" : [True],
"address" : [True] }],
"lisp map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"send-map-request" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp rtr-map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp json" : [lispconfig.lisp_json_command, {
"json-name" : [False],
"json-string" : [False] }],
"lisp database-mapping" : [lisp_rtr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp glean-mapping" : [lisp_rtr_glean_mapping_command, {
"instance-id" : [False],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc-prefix" : [True],
"rloc-probe" : [True, "yes", "no"],
"igmp-query" : [True, "yes", "no"] }],
"show rtr-rloc-probing" : [lisp_rtr_show_rloc_probe_command, { }],
"show rtr-keys" : [lisp_rtr_show_keys_command, {}],
"show rtr-map-cache" : [lisp_rtr_show_command, {}],
"show rtr-map-cache-dns" : [lisp_rtr_show_command_dns, {}]
}
#
# lisp_rtr_process_trace_packet
#
# Process RLOC-based LISP-Trace message.
#
def lisp_rtr_process_trace_packet(lisp_socket):
#
# Read from listen socket for port 2434 and parse LISP-Trace packet.
#
opcode, source, port, packet = lisp.lisp_receive(lisp_socket, False)
trace = lisp.lisp_trace()
if (trace.decode(packet) == False): return
#
# Cache the translated information. Will use local addressing info to
# find translated information in lisp_trace_append().
#
trace.rtr_cache_nat_trace(source, port)
#enddef
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_rtr_startup() == False):
lisp.lprint("lisp_rtr_startup() failed")
lisp.lisp_print_banner("RTR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
lisp_ipc_punt_socket, lisp_trace_listen_socket]
ephem_sockets = [lisp_ephem_listen_socket] * 3
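#
# Note: the 3-element ephem_sockets list appears to mirror the [IPv4, IPv6,
# IPC] socket layout used for lisp_send_sockets above; here the same
# ephemeral listen socket is reused for all three slots when parsing
# Map-Replies.
#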
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Punt signal message from another data-plane (snabb).
#
if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
lisp_ephem_port)
#endif
#
# LISP-TRACE messages coming from an ltr client program.
#
if (lisp_trace_listen_socket in ready_list):
lisp_rtr_process_trace_packet(lisp_trace_listen_socket)
#endif
#
# Process Map-Reply messages received on ephemeral port.
#
if (lisp_ephem_listen_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
continue
#endif
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
if (packet == "clear"):
lisp.lisp_clear_map_cache()
continue
#endif
if (packet.find("clear%") != -1):
lispconfig.lisp_clear_decap_stats(packet)
continue
#endif
lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
packet, "lisp-rtr", [lisp_rtr_commands])
elif (opcode == "api"):
lisp.lisp_process_api("lisp-rtr", lisp_ipc_listen_socket, packet)
elif (opcode == "data-packet"):
lisp_rtr_data_plane(packet, "")
else:
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
continue
#endif
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_rtr_shutdown()
lisp.lisp_print_banner("RTR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
util.py
|
#! /usr/bin/env jython
# Copyright (C) 2011 Sun Ning<classicning@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import time
import queue
import threading
import logging
JIP_USER_AGENT = 'jip-koalanlp/1.0'
BUF_SIZE = 4096
# Logging setup
logging.basicConfig(level=logging.INFO, format="[%(name)s] %(message)s")
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("py4j").setLevel(logging.WARNING)
logger = logging.getLogger('koalanlp.jip')
class DownloadException(Exception):
pass
def download(url, target, asynchronous=False, close_target=False, quiet=True):
import requests
# download file to target (target is a file-like object)
if asynchronous:
pool.submit(url, target)
else:
try:
t0 = time.time()
source = requests.get(url, headers={'User-Agent': JIP_USER_AGENT})
source.raise_for_status()
            size = source.headers.get('Content-Length', 'unknown')
if not quiet:
logger.info('[Downloading] %s %s bytes to download' % (url, size))
for buf in source.iter_content(BUF_SIZE):
target.write(buf)
source.close()
if close_target:
target.close()
t1 = time.time()
if not quiet:
logger.info('[Downloading] Download %s completed in %f secs' % (url, (t1 - t0)))
except requests.exceptions.RequestException:
_, e, _ = sys.exc_info()
raise DownloadException(url, e)
def download_string(url):
import requests
try:
response = requests.get(url, headers={'User-Agent': JIP_USER_AGENT})
response.raise_for_status()
return response.text
except requests.exceptions.RequestException:
_, e, _ = sys.exc_info()
raise DownloadException(url, e)
def wait_until_download_finished():
pool.join()
class DownloadThreadPool(object):
def __init__(self, size=3):
self.queue = queue.Queue()
self.workers = [threading.Thread(target=self._do_work) for _ in range(size)]
self.initialized = False
def init_threads(self):
for worker in self.workers:
            worker.daemon = True
worker.start()
self.initialized = True
def _do_work(self):
while True:
url, target = self.queue.get()
download(url, target, close_target=True, quiet=False)
self.queue.task_done()
def join(self):
self.queue.join()
def submit(self, url, target):
if not self.initialized:
self.init_threads()
self.queue.put((url, target))
pool = DownloadThreadPool(3)
__all__ = ['DownloadException', 'download', 'download_string', 'wait_until_download_finished', 'logger']
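#
# Minimal usage sketch (assumptions: the URL and file names below are
# placeholders, not part of the real jip/koalanlp configuration; 'requests'
# must be installed and the URL reachable).
#
if __name__ == '__main__':
    example_url = 'https://example.org/some-artifact.jar'  # hypothetical URL
    # Synchronous download straight into an open file object.
    with open('artifact.jar', 'wb') as fh:
        download(example_url, fh, quiet=False)
    # Asynchronous variant: hand the work to the module-level thread pool,
    # then block until the queue drains.
    target = open('artifact-async.jar', 'wb')
    download(example_url, target, asynchronous=True)
    wait_until_download_finished()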
|
quantize_pvalite-CLN.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils.timer import Timer
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant', dest='prototxt_quantized',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--act_analysis', dest='act_analysis',
help='input and output analysis file',
default=None, type=str)
parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
help='adder and multiplier analysis file',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--error_margin', dest='error_margin',
help='tolerance error of quantized network',
default=0.1, type=float)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
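# Example invocation (illustrative only -- the model and prototxt paths below
# are placeholders, not files shipped with this script):
#
#   python quantize_pvalite-CLN.py --gpu 0 \
#       --def models/pvalite/test.prototxt \
#       --def_quant models/pvalite/test_quant.prototxt \
#       --def_quant_BAC models/pvalite/test_quant_bac.prototxt \
#       --net output/pvalite.caffemodel \
#       --imdb voc_2007_test --error_margin 0.01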
def analyze_network(net_proto):
has_fc = False
has_deconv = False
has_conv = False
for l in net_proto.layer:
if l.type == 'Convolution':
has_conv = True
elif l.type == 'Deconvolution':
has_deconv = True
elif l.type =='InnerProduct':
has_fc = True
return has_conv, has_deconv, has_fc
# convert network to quantized network with 32 bit width
def convert_net_to_qnet(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
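    #
    # Rename in-place blobs: whenever a layer's top equals its bottom, append
    # a '/t' suffix so every blob gets a unique name, and rewrite downstream
    # bottoms to the renamed blob. This matters later because the activation
    # analysis asserts that top and bottom names differ for every layer.
    #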
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='InnerProduct':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='Deconvolution':
l.type = 'DeconvolutionRistretto'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
write_to_prototxt(net_proto, q_net_path)
# convert network to quantized network with 32 bit width
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
write_to_prototxt(net_proto, q_net_path)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
bw_layer_in, fl_layer_in,
bw_layer_out, fl_layer_out,
bw_params, fl_params,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.precision = 0
l.quantization_param.bw_layer_in = int(bw_layer_in)
l.quantization_param.bw_layer_out = int(bw_layer_out)
l.quantization_param.bw_params = int(bw_params)
l.quantization_param.bw_add = int(bw_add)
l.quantization_param.bw_multiply = int(bw_multiply)
l.quantization_param.fl_layer_in = int(fl_layer_in)
l.quantization_param.fl_layer_out= int(fl_layer_out)
l.quantization_param.fl_params = int(fl_params)
l.quantization_param.fl_add = int(fl_add)
l.quantization_param.fl_multiply = int(fl_multiply)
return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.bw_add = bw_add
l.quantization_param.fl_add = fl_add
l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
return net_proto
def change_layer_bottom_name(net_proto, layer_name,
layer_bottom_name):
for l in net_proto.layer:
if l.name == layer_name:
l.bottom = layer_bottom_name
return net_proto
def change_layer_top_name(net_proto, layer_name,
layer_top_name):
for l in net_proto.layer:
if l.name == layer_name:
l.top = layer_top_name
return net_proto
#calculate needed Integer Length of layer parameters
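# IL here is ceil(log2(|max value|)) + 1: enough integer bits (including the
# sign bit) to represent the largest weight/bias magnitude of the layer.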
def calc_layer_param_IL(net,layer):
layer_param = net.params[layer.name]
max_weight = max(layer_param[0].data[...].max(), layer_param[0].data[...].min(), key=abs)
if layer.convolution_param.bias_term:
max_bias = max(layer_param[1].data[...].max(), layer_param[1].data[...].min(), key=abs)
else:
max_bias = 0
max_param = max(max_weight, max_bias, key=abs)
return math.ceil(math.log(abs(max_param), 2)) + 1
def analyze_net_param_IL(net, net_proto):
net_param_IL = dict()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_param_IL[layer.name] = calc_layer_param_IL(net, layer)
return net_param_IL
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
layer_output = net.blobs[layer_bottom_name].data
layer_output_max = abs(max(layer_output.max(), layer_output.min(), key=abs))
#if layer_bottom_name == 'data':
# print net.blobs[layer_bottom_name].data
# print math.ceil(math.log(layer_output_max, 2)) + 1
return math.ceil(math.log(layer_output_max, 2)) + 1
def analyze_net_output_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
num_images = len(imdb.image_index)
_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
net_output_IL = dict()
net_input_IL = dict()
for layer in net_proto.layer:
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = -sys.maxint - 1
net_input_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_images):
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
scores, boxes = im_detect(net, im, _t, box_proposals)
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
#print layer.type, layer.name, net_output_IL[layer.name],net_input_IL[layer.name]
return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
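# Note: this helper (and the multiplier variant below) appears to rely on the
# ConvolutionIVS/FcIVS layers, when run with analyze_mode = 3, writing the
# observed adder/multiplier maxima into the first four elements of their top
# blob; only elements [0..3] of the flattened output are read here.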
def calc_layer_adder_IL(net, layer_top_name):
layer_adder_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[0],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[1],
key=abs))
return math.ceil(math.log(layer_adder_max, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
layer_multiplier_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[2],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[3],
key=abs))
return math.ceil(math.log(layer_multiplier_max, 2)) + 1
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
num_images = len(imdb.image_index)
_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
net_adder_IL = dict()
net_multiplier_IL = dict()
for layer in net_proto.layer:
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' :
net_adder_IL[layer.name] = -sys.maxint - 1
net_multiplier_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_images):
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
scores, boxes = im_detect(net, im, _t, box_proposals)
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
net.params[layer.name][0].data[0][0][0][0]=2610214
elif layer.type == 'FcIVS':
net.params[layer.name][0].data[0][0]=2610214
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
net_adder_IL[layer.name])
net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
net_multiplier_IL[layer.name])
return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
adder_IL = net_adder_IL[layer.name] + extra_IL
adder_FL = adder_bw - adder_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
adder_bw, adder_FL, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
multiplier_IL = net_multiplier_IL[layer.name] + extra_IL
multiplier_FL = multiplier_bw - multiplier_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
multiplier_bw, multiplier_FL, \
)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
input_bw = output_bw;
#input_FL = 0;
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name] + extra_IL
input_FL = input_bw - input_IL
#if layer.name=='conv1_1/conv':
# print input_IL,output_IL
#print layer.name
#if layer.name == 'conv1_1/conv':
# print output_IL
# continue
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#input_FL = output_FL
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'FcIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'DeconvolutionRistretto':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
net_proto = caffe_pb2.NetParameter()
fn = ori_net_path;
with open(fn) as f:
s = f.read()
txtf.Merge(s, net_proto)
return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
outf = out_net_path
#print 'writing', outf
with open(outf, 'w') as f:
f.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
ap = test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
return ap
#print each layer name and spec
def print_net_layer_names(net):
print("Network layers:")
for name, layer in zip(net._layer_names, net.layers):
if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
print dir(layer)
print layer.reshape
print layer.convolution_param
print net.layer[1].name
def mAP_worker(i, net_path, shared_dict, GPU_ID):
#caffe.set_mode_cpu()
#GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
ap = test_qnet(net_path, args.caffemodel, imdb)
shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
#caffe.set_mode_cpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_output_IL_, net_input_IL_ = analyze_net_output_IL(net, net_proto, imdb, max_per_image=args.max_per_image, vis=args.vis)
for t in net_output_IL_.keys():
net_output_IL[t] = net_output_IL_[t]
for t in net_input_IL_.keys():
net_input_IL[t] = net_input_IL_[t]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
#caffe.set_mode_cpu()
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_adder_IL_, net_multiplier_IL_ = analyze_net_adder_multiplier_IL(net_BAC, net_proto_BAC, imdb,
max_per_image=args.max_per_image, vis=args.vis)
for t in net_adder_IL_.keys():
net_adder_IL[t] = net_adder_IL_[t]
for t in net_multiplier_IL_.keys():
net_multiplier_IL[t] = net_multiplier_IL_[t]
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
net_param_IL_ = analyze_net_param_IL(net, net_proto)
for t in net_param_IL_.keys():
net_param_IL[t] = net_param_IL_[t]
if __name__ == '__main__':
args = parse_args()
GPU1 = 1
GPU2 = 1
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
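    #
    # Overall flow: (1) rewrite the prototxt with quantized layer types at
    # 32-bit, (2) measure the integer length (IL) needed by the weights and
    # activations, (3) search per-group bit widths (CONV/DECONV, FC, layer
    # activations) that stay within error_margin of full precision, and
    # (4) repeat the analysis for adders and multipliers to produce the
    # bit-accurate (BAC) prototxt.  Note that the sys.exit(0) after the final
    # 8-bit test below currently stops the script before the BAC stage runs.
    #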
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
print 'Create quantized prototxt'
print 'Testing Full Precision Accuracy'
manager = multiprocessing.Manager()
shared_dict = manager.dict()
p = multiprocessing.Process(target=mAP_worker, args=('FP-FP-FP-FP-FP', args.prototxt, shared_dict, GPU1))
timer = Timer()
timer.tic()
p.start()
p.join()
timer.toc()
print ('Took {:.3f}s').format(timer.total_time)
full_ap = shared_dict['FP-FP-FP-FP-FP']
#full_ap = 0.540425
print 'Full precision accuracy : {}'.format(full_ap)
# Bit Width for Analyze
bw_range_conv = [8, 4] #bit width for convolution layers
bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
bw_conv = 0 #just initial
bw_deconv = 0 #just initial
bw_fc = 0 #just initial
bw_output = 0 #just initial
bw_adder = 12 #just initial
bw_multiplier = 12 #just initial
convIL_reduction = 0
deconvIL_reduction = 0
fcIL_reduction = 0
actIL_reduction = 0
adderIL_reduction = 0
multIL_reduction = 0
print 'Analyzing network'
net_proto = read_from_prototxt(args.prototxt)
has_conv, has_deconv, has_fc = analyze_network(net_proto)
print 'Network Structure'
print 'CONV:{}, DECONV:{}, FC:{}'.format(has_conv, has_deconv, has_fc)
print '-----------------------------------'
net_proto = read_from_prototxt(args.prototxt_quantized)
print 'Analyzing network parameter IL'
net_param_IL = manager.dict()
p = multiprocessing.Process(target=analyze_net_param_IL_worker,
args=(net_param_IL, GPU1, ))
p.start()
p.join()
# Analyze Convolution and DeConvolution Layers
if has_conv:
print 'Analyzing CONV and DECONV'
print '\tbit width\t accuracy'
i = 3
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, i, convIL_reduction)
quantize_net_deconv(net_proto, net_param_IL, i, deconvIL_reduction)
write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=(str(i)+'-32-32-32-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, i+1, convIL_reduction)
quantize_net_deconv(net_proto, net_param_IL, i+1, deconvIL_reduction)
write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=(str(i+1)+'-32-32-32-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict[str(j)+'-32-32-32-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict[str(j)+'-32-32-32-32'] > (full_ap - args.error_margin):
bw_conv = j
not_found = False
break;
i = i + 2
# Analyze Convolution and DeConvolution Layers
#if has_conv:
# print 'Analyzing CONV and DECONV'
# print '\tbit width\t accuracy'
# bw_h = 16
# bw_l = 0
# bw = 16
# while True:
# net_proto = read_from_prototxt(args.prototxt_quantized)
# quantize_net_conv(net, net_proto, net_param_IL, bw)
# quantize_net_deconv(net, net_proto, net_param_IL, bw)
# write_to_prototxt(net_proto, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_conv = bw
# if bw_h - bw_l <= 1:
# break
# bw = bw_l + (bw_h-bw_l)/2
# Analyze Fully Connected Layers
if has_fc:
print 'Analyzing FC'
print '\tbit width\t accuracy'
i = 3
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_fc(net_proto, net_param_IL, i, fcIL_reduction)
write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-'+str(i)+'-32-32-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_fc(net_proto, net_param_IL, i+1, fcIL_reduction)
write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-'+str(i+1)+'-32-32-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-'+str(j)+'-32-32-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-'+str(j)+'-32-32-32'] > (full_ap - args.error_margin):
bw_fc = j
not_found = False
break;
i = i + 2
# Analyze Fully Connected Layers
#if has_fc:
# print 'Analyzing FC'
# print '\tbit width\t accuracy'
# bw_h = 16
# bw_l = 0
# bw = 16
# while True:
# net_proto = read_from_prototxt(args.prototxt_quantized)
# quantize_net_fc(net, net_proto, net_param_IL, bw)
# write_to_prototxt(net_proto, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_fc = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
# Analyze input and output of layers
#net_proto = read_from_prototxt(args.prototxt_quantized)
#quantize_net_conv(net, net_proto, net_param_IL, bw_conv, -1)
#quantize_net_deconv(net, net_proto, net_param_IL, bw_conv, -1)
#quantize_net_fc(net, net_proto, net_param_IL, bw_fc, -1)
#write_to_prototxt(net_proto, args.prototxt_quantized)
net_output_IL = manager.dict()
net_input_IL = manager.dict()
if args.act_analysis == None:
print 'Analyzing network output IL'
p = multiprocessing.Process(target=analyze_net_output_IL_worker,
args=(net_output_IL, net_input_IL, GPU1))
p.start()
p.join()
with open('act_analysis.json', 'w') as outfile:
act_analysis = dict()
act_analysis['net_output_IL'] = dict()
act_analysis['net_input_IL'] = dict()
for t in net_output_IL.keys():
act_analysis['net_output_IL'][t] = net_output_IL[t]
for t in net_input_IL.keys():
act_analysis['net_input_IL'][t] = net_input_IL[t]
json.dump(act_analysis, outfile)
else:
print 'Loading network output IL'
with open(args.act_analysis) as json_data:
act_analysis = json.load(json_data)
for t in act_analysis['net_output_IL'].keys():
net_output_IL[t] = act_analysis['net_output_IL'][t]
for t in act_analysis['net_input_IL'].keys():
net_input_IL[t] = act_analysis['net_input_IL'][t]
print 'Analyzing layer output'
print '\tbit width\t accuracy'
i = 5
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_output(net_proto, net_output_IL, net_input_IL, i, actIL_reduction)
write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-'+str(i)+'-32-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_output(net_proto, net_output_IL, net_input_IL, i+1, actIL_reduction)
write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-'+str(i+1)+'-32-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-'+str(j)+'-32-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-'+str(j)+'-32-32'] > (full_ap - args.error_margin):
bw_output = j
not_found = False
break;
i = i + 2
# Analyze input and output of layers
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer output'
#print '\tbit width\t accuracy'
#while True:
# net_proto = read_from_prototxt(args.prototxt_quantized)
# quantize_net_output(net, net_proto, net_output_IL, net_input_IL, bw)
# write_to_prototxt(net_proto, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_output = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
#Create 8-bit quantization model
#if bw_conv < 8:
# bw_conv = 8
#if bw_fc < 8:
# bw_fc = 8
#if bw_output < 8:
# bw_output = 8
#Make Final Quantized Prototxt
print 'Final Quantization Testing'
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, 8, convIL_reduction)
quantize_net_deconv(net_proto, net_param_IL, 8, deconvIL_reduction)
quantize_net_fc(net_proto, net_param_IL, 8, fcIL_reduction)
quantize_net_output(net_proto, net_output_IL, net_input_IL, 8, actIL_reduction)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-32-32', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-32-32']
layer_ap = ap
#ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit CONV, {}bit FC, {}bit layer output'.format(8, 8, 8)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(8)
print '{}bit FC weights'.format(8)
print '{}bit layer activations'.format(8)
print 'Please fine-tune'
write_to_prototxt(net_proto, args.prototxt_quantized)
print 'Quantized Model saved to', args.prototxt_quantized
sys.exit(0)
print 'Create Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Bit-Accurate quantized prototxt'
#print 'Analyzing network adder and multiplier'
net_adder_IL = manager.dict()
net_multiplier_IL = manager.dict()
if args.accumulator_analysis == None:
print 'Analyzing network adder and multiplier'
p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
args=(net_adder_IL, net_multiplier_IL, GPU1))
p.start()
p.join()
with open('accumulator_analysis.json', 'w') as outfile:
accumulator_analysis = dict()
accumulator_analysis['net_adder_IL'] = dict()
accumulator_analysis['net_multiplier_IL'] = dict()
for t in net_adder_IL.keys():
accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
for t in net_multiplier_IL.keys():
accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
json.dump(accumulator_analysis, outfile)
else:
print 'Loading network adder and multiplier analysis file'
with open(args.accumulator_analysis) as json_data:
accumulator_analysis = json.load(json_data)
for t in accumulator_analysis['net_adder_IL'].keys():
net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
for t in accumulator_analysis['net_multiplier_IL'].keys():
net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
print 'Analyzing layer multiplier'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i+1, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i+1),
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-32-'+str(j)],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-32-'+str(j)] > (layer_ap - 0.005):
bw_multiplier = j
not_found = False
break;
i = i + 2
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, multIL_reduction)
write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer multiplier'
#print '\tbit width\t accuracy'
#while True:
# net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
# quantize_net_multiplier(net_BAC, net_proto_BAC, net_multiplier_IL, bw)
# write_to_prototxt(net_proto_BAC, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_multiplier = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
print 'Analyzing layer adder'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i+1, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i+1)+'-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-'+str(j)+'-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-'+str(j)+'-32'] > (layer_ap - 0.005):
bw_adder = j
not_found = False
break;
i = i + 2
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer adder'
#print '\tbit width\t accuracy'
#while True:
# net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
# quantize_net_adder(net_BAC, net_proto_BAC, net_adder_IL, bw)
# write_to_prototxt(net_proto_BAC, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_adder = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
print 'Create Final Bit-Accurate quantized prototxt'
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Final Bit-Accurate quantized prototxt'
quantize_net_conv(net_proto_final, net_param_IL, bw_conv, convIL_reduction)
quantize_net_deconv(net_proto_final, net_param_IL, bw_conv, deconvIL_reduction)
quantize_net_fc(net_proto_final, net_param_IL, bw_fc, fcIL_reduction)
quantize_net_output(net_proto_final, net_output_IL, net_input_IL, bw_output, actIL_reduction)
quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
write_to_prototxt(net_proto_final, './temp_f.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
shared_dict,GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
#ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print '{}bit adder'.format(bw_adder)
print '{}bit multiplier'.format(bw_multiplier)
print 'Please fine-tune'
write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
|
load.py
|
# -*- coding: utf-8 -*-
#
# Display the "habitable-zone" (i.e. the range of distances in which you might find an Earth-Like World)
#
from __future__ import print_function
from collections import defaultdict
import requests
import sys
import threading
# Python 3 only (Python 2 support is deprecated)
from urllib.parse import quote
import tkinter as tk
from ttkHyperlinkLabel import HyperlinkLabel
import myNotebook as nb
if __debug__:
from traceback import print_exc
from config import config
from l10n import Locale
VERSION = '1.20'
SETTING_DEFAULT = 0x0002 # Earth-like
SETTING_EDSM = 0x1000
SETTING_NONE = 0xffff
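# Each entry of WORLDS below occupies one bit of the stored 'habzone' setting
# (1, 2, 4, ...); SETTING_EDSM (0x1000) enables the EDSM lookup, and
# SETTING_NONE is a sentinel for "display nothing", since a stored value of 0
# is treated as "not configured yet" and falls back to SETTING_DEFAULT.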
WORLDS = [
# Type Black-body temp range EDSM description
('Metal-Rich', 0, 1103.0, 'Metal-rich body'),
('Earth-Like', 278.0, 227.0, 'Earth-like world'),
('Water', 307.0, 156.0, 'Water world'),
('Ammonia', 193.0, 117.0, 'Ammonia world'),
('Class II Giant', 250.0, 150.0, 'Class II gas giant'),
('Terraformable', 315.0, 223.0, 'terraformable'),
]
# Journal planet type to EDSM planet type
JRNL2TYPE = {
'Earthlike body':'Earth-like world',
'Water world':'Water world',
'Ammonia world':'Ammonia world',
'Metal rich body':'Metal-rich body',
'Sudarsky class II gas giant':'Class II gas giant'
}
LS = 300000000.0 # 1 ls in m (approx)
this = sys.modules[__name__] # For holding module globals
this.frame = None
this.worlds = []
this.scanned_worlds = {'system': None, 'bodies': {}}
this.edsm_session = None
this.edsm_data = None
# Used during preferences
this.settings = None
this.edsm_setting = None
def plugin_start3(plugin_dir):
return plugin_start()
def plugin_start():
# App isn't initialised at this point so can't do anything interesting
return 'HabZone'
def plugin_app(parent):
# Create and display widgets
this.frame = tk.Frame(parent)
this.frame.columnconfigure(3, weight=1)
this.frame.bind('<<HabZoneData>>', edsm_data) # callback when EDSM data received
for (name, high, low, subType) in WORLDS:
this.worlds.append((tk.Label(this.frame, text = name + ':'),
HyperlinkLabel(this.frame, wraplength=100), # edsm
tk.Label(this.frame), # near
tk.Label(this.frame), # dash
tk.Label(this.frame), # far
tk.Label(this.frame), # ls
))
this.spacer = tk.Frame(this.frame) # Main frame can't be empty or it doesn't resize
update_visibility()
return this.frame
def plugin_prefs(parent, cmdr, is_beta):
frame = nb.Frame(parent)
nb.Label(frame, text = 'Display:').grid(row = 0, padx = 10, pady = (10,0), sticky=tk.W)
setting = get_setting()
this.settings = []
row = 1
for (name, high, low, subType) in WORLDS:
var = tk.IntVar(value = (setting & row) and 1)
nb.Checkbutton(frame, text = name, variable = var).grid(row = row, padx = 10, pady = 2, sticky=tk.W)
this.settings.append(var)
row *= 2
nb.Label(frame, text = 'Elite Dangerous Star Map:').grid(padx = 10, pady = (10,0), sticky=tk.W)
this.edsm_setting = tk.IntVar(value = (setting & SETTING_EDSM) and 1)
nb.Checkbutton(frame, text = 'Look up system in EDSM database', variable = this.edsm_setting).grid(padx = 10, pady = 2, sticky=tk.W)
nb.Label(frame, text = 'Version %s' % VERSION).grid(padx = 10, pady = 10, sticky=tk.W)
return frame
def prefs_changed(cmdr, is_beta):
row = 1
setting = 0
for var in this.settings:
setting += var.get() and row
row *= 2
setting += this.edsm_setting.get() and SETTING_EDSM
config.set('habzone', setting or SETTING_NONE)
this.settings = None
this.edsm_setting = None
update_visibility()
def journal_entry(cmdr, is_beta, system, station, entry, state):
if entry['event'] == 'Scan':
try:
if not float(entry['DistanceFromArrivalLS']): # Only calculate for arrival star
r = float(entry['Radius'])
t = float(entry['SurfaceTemperature'])
for i in range(len(WORLDS)):
(name, high, low, subType) = WORLDS[i]
(label, edsm, near, dash, far, ls) = this.worlds[i]
far_dist = int(0.5 + dfort(r, t, low))
radius = int(0.5 + r / LS)
if far_dist <= radius:
near['text'] = ''
dash['text'] = u'×'
far['text'] = ''
ls['text'] = ''
else:
if not high:
near['text'] = Locale.stringFromNumber(radius)
else:
near['text'] = Locale.stringFromNumber(int(0.5 + dfort(r, t, high)))
dash['text'] = '-'
far['text'] = Locale.stringFromNumber(far_dist)
ls['text'] = 'ls'
if entry.get('TerraformState', False) or (entry.get('PlanetClass', False)):
mapped = entry.get('WasMapped')
# TODO: Clean up repetitive code - perhaps integrate Journal types into WORLDS constant?
body_type = None
if entry.get('TerraformState') == 'Terraformable':
body_type = 'terraformable'
else:
try:
body_type = JRNL2TYPE[entry.get('PlanetClass')]
except:
pass
if body_type:
data = this.scanned_worlds['bodies'].get(entry.get('BodyName'), {})
data.update({'type': body_type, 'was_mapped': mapped})
this.scanned_worlds['bodies'][entry.get('BodyName')] = data
list_bodies(system)
except (RuntimeError, TypeError) as err:
for (label, edsm, near, dash, far, ls) in this.worlds:
near['text'] = ''
dash['text'] = ''
far['text'] = ''
ls['text'] = '?'
                edsm['text'] = str(err)
elif entry['event'] == 'FSDJump':
for (label, edsm, near, dash, far, ls) in this.worlds:
edsm['text'] = ''
edsm['url'] = ''
near['text'] = ''
dash['text'] = ''
far['text'] = ''
ls['text'] = ''
this.scanned_worlds['system'] = entry['StarSystem']
this.scanned_worlds['bodies'].clear()
if entry['event'] == 'SAAScanComplete':
for name in this.scanned_worlds['bodies']:
print('Scan Name: ' + name + ' | ' + entry['BodyName'])
if name == entry['BodyName']:
this.scanned_worlds['bodies'][name].update({'mapped': True})
list_bodies(system)
if entry['event'] in ['Location', 'FSDJump'] and get_setting() & SETTING_EDSM:
thread = threading.Thread(target = edsm_worker, name = 'EDSM worker', args = (entry['StarSystem'],))
thread.daemon = True
thread.start()
def cmdr_data(data, is_beta):
# Manual Update
if get_setting() & SETTING_EDSM and not data['commander']['docked']:
thread = threading.Thread(target = edsm_worker, name = 'EDSM worker', args = (data['lastSystem']['name'],))
thread.daemon = True
thread.start()
# Distance for target black-body temperature
# From Jackie Silver's Hab-Zone Calculator https://forums.frontier.co.uk/showthread.php?p=5452081
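# dfort(r, t, target) = sqrt(r^2 * t^4 / (4 * target^4)) / LS: the distance
# (in light-seconds) at which a body around a star of radius r (m) and
# surface temperature t (K) reaches black-body temperature `target`.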
def dfort(r, t, target):
return (((r ** 2) * (t ** 4) / (4 * (target ** 4))) ** 0.5) / LS
def list_bodies(system):
body_data = {}
for name in this.scanned_worlds['bodies']:
if this.scanned_worlds['bodies'][name].get('type', False):
final_name = name
if this.scanned_worlds['bodies'][name].get('was_mapped', False):
final_name += u'⍻'
elif this.scanned_worlds['bodies'][name].get('mapped', False):
final_name += u'🗸'
data = body_data.get(this.scanned_worlds['bodies'][name]['type'], [])
data.append(final_name)
body_data[this.scanned_worlds['bodies'][name]['type']] = data
for i in range(len(WORLDS)):
(name, high, low, subType) = WORLDS[i]
(label, edsm, near, dash, far, ls) = this.worlds[i]
edsm['text'] = ' '.join([x[len(system):].replace(' ', '') if x.startswith(system) else '' for x in
sorted(body_data.get(subType, []))])
# EDSM lookup
def edsm_worker(systemName):
if not this.edsm_session:
this.edsm_session = requests.Session()
try:
r = this.edsm_session.get('https://www.edsm.net/api-system-v1/bodies?systemName=%s' % quote(systemName), timeout=10)
r.raise_for_status()
this.edsm_data = r.json() or {} # Unknown system represented as empty list
except:
this.edsm_data = None
# Tk is not thread-safe, so can't access widgets in this thread.
# event_generate() is the only safe way to poke the main thread from this thread.
this.frame.event_generate('<<HabZoneData>>', when='tail')
# EDSM data received
def edsm_data(event):
if this.edsm_data is None:
# error
for (label, edsm, near, dash, far, ls) in this.worlds:
edsm['text'] = '?'
edsm['url'] = None
return
# Collate
for body in this.edsm_data.get('bodies', []):
data = this.scanned_worlds['bodies'].get(body['name'], {})
data.update({'type': ('terraformable' if (body.get('terraformingState') == 'Candidate for terraforming') else body['subType'])})
this.scanned_worlds['bodies'][body['name']] = data
# Display
systemName = this.edsm_data.get('name', '')
url = 'https://www.edsm.net/show-system?systemName=%s&bodyName=ALL' % quote(systemName)
for i in range(len(WORLDS)):
(name, high, low, subType) = WORLDS[i]
(label, edsm, near, dash, far, ls) = this.worlds[i]
list_bodies(systemName)
edsm['url'] = url
#edsm['url'] = len(this.scanned_worlds[subType]) == 1 and 'https://www.edsm.net/show-system?systemName=%s&bodyName=%s' % (quote(systemName), quote(this.scanned_worlds[subType][0])) or url
def get_setting():
setting = config.getint('habzone')
if setting == 0:
return SETTING_DEFAULT # Default to Earth-Like
elif setting == SETTING_NONE:
return 0 # Explicitly set by the user to display nothing
else:
return setting
def update_visibility():
setting = get_setting()
row = 1
for (label, edsm, near, dash, far, ls) in this.worlds:
if setting & row:
label.grid(row = row, column = 0, sticky=tk.W)
edsm.grid(row = row, column = 1, sticky=tk.W, padx = (0,10))
near.grid(row = row, column = 2, sticky=tk.E)
dash.grid(row = row, column = 3, sticky=tk.E)
far.grid(row = row, column = 4, sticky=tk.E)
ls.grid(row = row, column = 5, sticky=tk.W)
else:
for elem in (label, edsm, near, dash, far, ls):
elem.grid_remove()
row *= 2
if setting:
this.spacer.grid_remove()
else:
this.spacer.grid(row = 0)
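# Illustrative note (not part of the plugin): the stored 'habzone' setting is a
# bitmask whose low bits select which world rows to show, which is why `row`
# doubles on every pass above. A value of 0b101 (5), for example, would display
# the first and third rows of this.worlds and hide the rest, while an effective
# setting of 0 grids the spacer instead so the plugin still occupies a frame.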
|
installwizard.py
|
import sys
import os
import threading
import traceback
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard
from electrum.i18n import _
from seed_dialog import SeedLayout, KeysLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"ION addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
        for i in range(self.n):
            # drawPie() takes angles in 1/16ths of a degree, hence the 16 * 360
            alpha = int(16 * 360 * i / self.n)
            alpha2 = int(16 * 360 * 1 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
apply(run_next, out)
return func_wrapper
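# Illustrative usage sketch (the callback name here is an assumption, not
# Electrum code): methods wrapped with @wizard_dialog must be called with a
# run_next= keyword, and whatever the wrapped method returns is forwarded to
# that callback, e.g.
#
#     wizard.line_dialog(run_next=on_wallet_name, title=_('Name'),
#                        message=_('Choose a name'), default='', test=bool)
#
# where on_wallet_name(name) then receives the text the user entered.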
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum ION - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self):
def on_filename():
wallet_folder = os.path.dirname(self.storage.path)
path = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if path:
self.name_e.setText(path)
self.storage = WalletStorage(path)
update_layout()
def update_layout():
name = os.path.basename(self.storage.path)
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit(text=name)
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
button.clicked.connect(on_filename)
hbox.addWidget(button)
vbox.addLayout(hbox)
self.pw_e = None
if not self.storage.file_exists():
msg = _("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or chose another file.")
vbox.addWidget(QLabel(msg))
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
vbox.addWidget(QLabel(msg))
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
                self.pw_e.setEchoMode(QLineEdit.Password)
hbox2.addWidget(QLabel(_('Password') + ':'))
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
else:
msg = _("Press 'Next' to open this wallet.")
vbox.addWidget(QLabel(msg))
self.set_layout(vbox, title=_('Electrum wallet'))
if self.pw_e:
self.pw_e.show()
self.pw_e.setFocus()
while True:
update_layout()
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if not self.loop.exec_():
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = unicode(self.pw_e.text())
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.9.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
self.terminate()
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
self.terminate()
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.emit(QtCore.SIGNAL('accept'))
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = map(lambda x: x[0], choices)
c_titles = map(lambda x: x[1], choices)
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(unicode(line.text()).split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
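        # Illustrative note: the (m, n) pair returned above is handed to the
        # run_next callback supplied by the wizard flow; (2, 3), for example,
        # means 2 signatures required out of 3 cosigner keys. The sliders cap
        # m at n, so an impossible combination such as (4, 3) cannot be chosen.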
|
server.py
|
#!/usr/bin/env python3
# python 3.6+
# Server for web component and state machine for expressions.
import getopt
import io
import json
import os
import re
import subprocess
import sys
import time
import threading
import traceback
import webbrowser
from collections import OrderedDict
from http.server import HTTPServer, SimpleHTTPRequestHandler
from socketserver import ThreadingMixIn
from urllib.parse import parse_qs
_RUNNING_AS_SINGLE_SCRIPT = False # AUTO_REMOVE_IN_SINGLE_SCRIPT
_VERSION = '1.1.6'
_ONE_FUNCS = {'N', 'O', 'S', 'beta', 'gamma', 'Gamma', 'Lambda', 'zeta'}
_ENV_OPTS = {'EI', 'quick', 'pyS', 'simplify', 'matsimp', 'ufuncmap', 'prodrat', 'doit', 'strict', *_ONE_FUNCS}
_ENV_OPTS_ALL = _ENV_OPTS.union (f'no{opt}' for opt in _ENV_OPTS)
__OPTS, __ARGV = getopt.getopt (sys.argv [1:], 'hvnudr', ['child', 'firstrun', 'help', 'version', 'nobrowser', 'ugly', 'debug', 'restart', *_ENV_OPTS_ALL])
__IS_MAIN = __name__ == '__main__'
__IS_MODULE_RUN = sys.argv [0] == '-m'
_SERVER_DEBUG = __IS_MAIN and __ARGV and __ARGV [0] == 'server-debug'
_SYMPAD_PATH = os.path.dirname (sys.argv [0])
_SYMPAD_NAME = os.path.basename (sys.argv [0])
_SYMPAD_RESTART = not __IS_MODULE_RUN and (('-r', '') in __OPTS or ('--restart', '') in __OPTS)
_SYMPAD_CHILD = not _SYMPAD_RESTART or ('--child', '') in __OPTS
_SYMPAD_FIRSTRUN = not _SYMPAD_RESTART or ('--firstrun', '') in __OPTS
_SYMPAD_DEBUG = os.environ.get ('SYMPAD_DEBUG')
_DEFAULT_ADDRESS = ('localhost', 9000)
_FILES = {} # pylint food # AUTO_REMOVE_IN_SINGLE_SCRIPT
_STATIC_FILES = {'/style.css': 'text/css', '/script.js': 'text/javascript', '/index.html': 'text/html',
'/help.html': 'text/html', '/bg.png': 'image/png', '/wait.webp': 'image/webp'}
_HELP = f'usage: sympad [options] [host:port | host | :port]' '''
-h, --help - Show help information
-v, --version - Show version string
-n, --nobrowser - Don't start system browser to SymPad page
-u, --ugly - Start in draft display style (only on command line)
-d, --debug - Dump debug info to server log
-r, --restart - Restart server on source file changes (for development)
--EI, --noEI - Start with SymPy constants 'E' and 'I' or regular 'e' and 'i'
--quick, --noquick - Start in/not quick input mode
--pyS, --nopyS - Start with/out Python S escaping
--simplify, --nosimplify - Start with/out post-evaluation simplification
--matsimp, --nomatsimp - Start with/out matrix simplification
--ufuncmap, --noufuncmap - Start with/out undefined function mapping back to variables
--prodrat, --noprodrat - Start with/out separate product leading rational
--doit, --nodoit - Start with/out automatic expression doit()
--strict, --nostrict - Start with/out strict LaTeX formatting
--N, --noN - Start with/out N function
--S, --noS - Start with/out S function
--O, --noO - Start with/out O function
--beta, --nobeta - Start with/out beta function
--gamma, --nogamma - Start with/out gamma function
--Gamma, --noGamma - Start with/out Gamma function
--Lambda, --noLambda - Start with/out Lambda function
--zeta, --nozeta - Start with/out zeta function
'''.lstrip ()
if _SYMPAD_CHILD: # sympy slow to import so don't do it for watcher process as is unnecessary there
sys.path.insert (0, '') # allow importing from current directory first (for SymPy development version) # AUTO_REMOVE_IN_SINGLE_SCRIPT
import sympy as sp
import lalr1 # AUTO_REMOVE_IN_SINGLE_SCRIPT
from sast import AST # AUTO_REMOVE_IN_SINGLE_SCRIPT
import sym # AUTO_REMOVE_IN_SINGLE_SCRIPT
import sparser # AUTO_REMOVE_IN_SINGLE_SCRIPT
import spatch # AUTO_REMOVE_IN_SINGLE_SCRIPT
import splot # AUTO_REMOVE_IN_SINGLE_SCRIPT
_SYS_STDOUT = sys.stdout
_DISPLAYSTYLE = [1] # use "\displaystyle{}" formatting in MathJax
_HISTORY = [] # persistent history across browser closings
_UFUNC_MAPBACK = True # map undefined functions from SymPy back to variables if possible
_UFUNC_MAP = {} # map of ufunc asts to ordered sequence of variable names
_SYM_MAP = {} # map of sym asts to ordered sequence of variable names
_SYM_VARS = set () # set of all variables mapped to symbols
_PARSER = sparser.Parser ()
_START_ENV = OrderedDict ([
('EI', False), ('quick', False), ('pyS', True), ('simplify', False), ('matsimp', True), ('ufuncmap', True), ('prodrat', False), ('doit', True), ('strict', False),
('N', True), ('O', True), ('S', True), ('beta', True), ('gamma', True), ('Gamma', True), ('Lambda', True), ('zeta', True)])
_ENV = _START_ENV.copy () # This is individual session STATE! Threading can corrupt this! It is GLOBAL to survive multiple Handlers.
_VARS = {'_': AST.Zero} # This also!
_VARS_FLAT = _VARS.copy () # Flattened vars.
#...............................................................................................
def _admin_vars (*args):
asts = _sorted_vars ()
if not asts:
return 'No variables defined.'
return asts
def _admin_del (*args):
vars = OrderedDict ()
msgs = []
for arg in args:
var = arg.as_identifier
if var is None or var == '_':
raise TypeError (f'invalid argument {sym.ast2nat (arg)!r}')
vars [var] = _VARS.get (var)
if vars [var] is None:
raise AE35UnitError (f'Variable {var!r} is not defined, it can only be attributable to human error.')
for var, ast in vars.items ():
msgs.append (f'{"Lambda function" if ast.is_lamb else "Undefined function" if ast.is_ufunc else "Variable"} {var!r} deleted.')
del _VARS [var]
_vars_updated ()
if not msgs:
msgs.append ('No variables specified!')
return msgs
def _admin_delall (*args):
last_var = _VARS ['_']
_VARS.clear ()
_VARS ['_'] = last_var
_vars_updated ()
return 'All variables deleted.'
def _admin_env (*args):
vars_updated = False
def _envop (env, apply):
nonlocal vars_updated
msgs = []
for var, state in env.items ():
if apply:
_ENV [var] = state
if var == 'EI':
msgs.append (f'Uppercase E and I is {"on" if state else "off"}.')
if apply:
AST.EI (state)
for var in (AST.E.var, AST.I.var):
if var in _VARS:
del _VARS [var]
elif var == 'quick':
msgs.append (f'Quick input mode is {"on" if state else "off"}.')
if apply:
sym.set_quick (state)
_PARSER.set_quick (state)
vars_updated = True
elif var == 'pyS':
msgs.append (f'Python S escaping is {"on" if state else "off"}.')
if apply:
sym.set_pyS (state)
elif var == 'simplify':
msgs.append (f'Post-evaluation simplify is {"on" if state else "off"}.')
if apply:
sym.set_simplify (state)
elif var == 'matsimp':
msgs.append (f'Matrix simplify is {"broken" if not spatch.SPATCHED else "on" if state else "off"}.')
if apply:
spatch.set_matmulsimp (state)
elif var == 'ufuncmap':
msgs.append (f'Undefined function map to variable is {"on" if state else "off"}.')
if apply:
global _UFUNC_MAPBACK
_UFUNC_MAPBACK = state
elif var == 'prodrat':
msgs.append (f'Leading product rational is {"on" if state else "off"}.')
if apply:
sym.set_prodrat (state)
elif var == 'doit':
msgs.append (f'Expression doit is {"on" if state else "off"}.')
if apply:
sym.set_doit (state)
elif var == 'strict':
msgs.append (f'Strict LaTeX formatting is {"on" if state else "off"}.')
if apply:
sym.set_strict (state)
elif var in _ONE_FUNCS:
msgs.append (f'Function {var} is {"on" if state else "off"}.')
if apply:
vars_updated = True
return msgs
# start here
if not args:
return _envop (_ENV, False)
env = OrderedDict ()
for arg in args:
if arg.is_ass:
var = arg.lhs.as_identifier
if var:
state = bool (sym.ast2spt (arg.rhs))
else:
var = arg.as_identifier
if var:
if var [:2] == 'no':
var, state = var [2:], False
else:
state = True
if var is None:
raise TypeError (f'invalid argument {sym.ast2nat (arg)!r}')
elif var not in _ENV_OPTS:
raise NameError (f'invalid environment setting {var!r}')
env [var] = state
ret = _envop (env, True)
if vars_updated:
_vars_updated ()
return ret
def _admin_envreset (*args):
return ['Environment has been reset.'] + _admin_env (*(AST ('@', var if state else f'no{var}') for var, state in _START_ENV.items ()))
#...............................................................................................
class RealityRedefinitionError (NameError): pass
class CircularReferenceError (RecursionError): pass
class AE35UnitError (Exception): pass
def _mapback (ast, assvar = None, exclude = set ()): # map back ufuncs and symbols to the variables they are assigned to if possible
if not isinstance (ast, AST):
return ast
if ast.is_var:
if ast.var not in _SYM_VARS:
return ast
if ast.var == assvar:
raise CircularReferenceError ('trying to assign unqualified symbol to variable of the same name')
return AST ('-sym', ast.var)
if ast.is_sym:
vars = _SYM_MAP.get (ast)
if not vars:
return ast
if ast.sym in vars:
return AST ('@', ast.sym)
return AST ('@', next (iter (vars)))
if _UFUNC_MAPBACK:
if ast.is_ass and ast.lhs.is_ufunc:
return AST ('=', ast.lhs, _mapback (ast.rhs, assvar, exclude))
elif not ast.is_ufunc:
return AST (*(_mapback (a, assvar, exclude) for a in ast))
vars = _UFUNC_MAP.get (ast)
if vars: # prevent mapping to self on assignment
if ast.ufunc in vars and ast.ufunc not in exclude:
return AST ('@', ast.ufunc)
for var in vars:
if var not in exclude:
return AST ('@', var)
return AST (*(_mapback (a, assvar, exclude) for a in ast))
def _present_vars (vars):
asts = []
for v, e in vars:
if v != '_':
if e.is_lamb:
asts.append (AST ('=', ('-ufunc', v, tuple (('@', vv) for vv in e.vars)), e.lamb))
else:
asts.append (AST ('=', ('@', v), e))
return asts
def _sorted_vars ():
return _present_vars (sorted (_VARS.items (), key = lambda kv: (kv [1].op not in {'-lamb', '-ufunc'}, kv [0])))
def _vars_updated ():
global _VARS_FLAT
vars = {v: a if a.is_lamb else AST.apply_vars (a, _VARS, mode = False) for v, a in _VARS.items ()} # flattened vars so sym and sparser don't need to do apply_vars()
one = (f for f in filter (lambda f: _ENV.get (f), _ONE_FUNCS)) # hidden functions for stuff like Gamma
lamb = (va [0] for va in filter (lambda va: va [1].is_lamb, vars.items ())) # user lambda functions
assfunc = (va [0] for va in filter (lambda va: va [1].is_var and va [1].var in AST.Func.PYBASE, vars.items ())) # user variables assigned to concrete functions
funcs = {*one, *lamb, *assfunc}
sym.set_sym_user_vars (vars)
sym.set_sym_user_funcs (funcs)
sparser.set_sp_user_vars (vars)
sparser.set_sp_user_funcs (funcs)
_UFUNC_MAP.clear ()
_SYM_MAP.clear ()
_SYM_VARS.clear ()
_VARS_FLAT = vars
for v, a in vars.items (): # build ufunc and sym mapback dict
if v != '_':
if a.is_ufunc:
_UFUNC_MAP.setdefault (a, set ()).add (v)
elif a.is_sym:
_SYM_MAP.setdefault (a, set ()).add (v)
_SYM_VARS.add (v)
def _prepare_ass (ast): # check and prepare for simple or tuple assignment
if not ast.ass_valid:
vars = None
elif ast.ass_valid.error:
raise RealityRedefinitionError (ast.ass_valid.error)
else:
vars, ast = ast.ass_valid.lhs, ast.ass_valid.rhs
vars = list (vars.comma) if vars.is_comma else [vars]
return AST.apply_vars (ast, _VARS_FLAT), vars
def _execute_ass (ast, vars): # execute assignment if it was detected
def set_vars (vars):
nvars = {}
for v, a in vars.items ():
v = v.var
if a.is_ufunc:
if v in sparser.RESERVED_FUNCS:
raise NameError (f'cannot assign undefined function to concrete function name {v!r}')
if a.is_ufunc_anonymous:
a = AST (a.op, v, *a [2:])
elif a.is_sym_anonymous:
if a.is_sym_unqualified:
					raise CircularReferenceError ('cannot assign unqualified anonymous symbol')
a = AST (a.op, v, *a [2:])
nvars [v] = a
try: # check for circular references
AST.apply_vars (AST (',', tuple (('@', v) for v in nvars)), {**_VARS, **nvars})
except RecursionError:
raise CircularReferenceError ("I'm sorry, Dave. I'm afraid I can't do that.") from None
_VARS.update (nvars)
return list (nvars.items ())
# start here
if not vars: # no assignment
if not ast.is_ufunc:
ast = _mapback (ast)
_VARS ['_'] = ast
_vars_updated ()
return [ast]
if len (vars) == 1: # simple assignment
if ast.op not in {'-ufunc', '-sym'}:
ast = _mapback (ast, vars [0].var, {vars [0].var})
vars = set_vars ({vars [0]: ast})
else: # tuple assignment
ast = ast.strip_paren
if ast.op in {',', '[', '-set'}:
asts = ast [1]
else:
asts = []
itr = iter (sym.ast2spt (ast))
for i in range (len (vars) + 1):
try:
ast = sym.spt2ast (next (itr))
except StopIteration:
break
if vars [i].is_ufunc_named:
asts.append (AST.Ass.ufunc2lamb (vars [i], ast))
vars [i] = AST ('@', vars [i].ufunc)
else:
asts.append (ast)
if len (vars) < len (asts):
raise ValueError (f'too many values to unpack (expected {len (vars)})')
elif len (vars) > len (asts):
raise ValueError (f'not enough values to unpack (expected {len (vars)}, got {len (asts)})')
vasts = list (zip (vars, asts))
exclude = set (va [0].var for va in filter (lambda va: va [1].is_ufunc, vasts))
asts = [a if a.op in {'-ufunc', '-sym'} else _mapback (a, v.var, exclude) for v, a in vasts]
vars = set_vars (dict (zip (vars, asts)))
_vars_updated ()
return _present_vars (vars)
#...............................................................................................
class Handler (SimpleHTTPRequestHandler):
def vars (self, request):
asts = _sorted_vars ()
return {'vars': [{
'tex': sym.ast2tex (ast),
'nat': sym.ast2nat (ast),
'py' : sym.ast2py (ast),
} for ast in asts]}
def validate (self, request):
ast, erridx, autocomplete, error = _PARSER.parse (request ['text'])
tex = nat = py = None
if ast is not None:
tex, xlattex = sym.ast2tex (ast, retxlat = True)
nat, xlatnat = sym.ast2nat (ast, retxlat = True)
py, xlatpy = sym.ast2py (ast, retxlat = True)
if _SYMPAD_DEBUG:
print ('free:', list (v.var for v in ast.free_vars), file = sys.stderr)
print ('ast: ', ast, file = sys.stderr)
if xlattex:
print ('astt:', repr (xlattex), file = sys.stderr)
if xlatnat:
print ('astn:', repr (xlatnat), file = sys.stderr)
if xlatpy:
print ('astp:', repr (xlatpy), file = sys.stderr)
print ('tex: ', tex, file = sys.stderr)
print ('nat: ', nat, file = sys.stderr)
print ('py: ', py, file = sys.stderr)
print (file = sys.stderr)
if isinstance (error, Exception):
error = (f'{error.__class__.__name__}: ' if not isinstance (error, SyntaxError) else '') + error.args [0].replace ('\n', ' ').strip ()
return {
'tex' : tex,
'nat' : nat,
'py' : py,
'erridx' : erridx,
'autocomplete': autocomplete,
'error' : error,
}
def evaluate (self, request):
def evalexpr (ast):
sym.ast2spt.set_precision (ast)
if ast.is_func and ast.func in AST.Func.PLOT: # plotting?
args, kw = AST.args2kwargs (AST.apply_vars (ast.args, _VARS), sym.ast2spt)
ret = getattr (splot, ast.func) (*args, **kw)
return {'msg': ['Plotting not available because matplotlib is not installed.']} if ret is None else {'img': ret}
elif ast.op in {'@', '-func'} and ast [1] in AST.Func.ADMIN: # special admin function?
asts = globals () [f'_admin_{ast [1]}'] (*(ast.args if ast.is_func else ()))
if isinstance (asts, str):
return {'msg': [asts]}
elif isinstance (asts, list) and isinstance (asts [0], str):
return {'msg': asts}
else: # not admin function, normal evaluation
ast, vars = _prepare_ass (ast)
if _SYMPAD_DEBUG:
print ('ast: ', ast, file = sys.stderr)
try:
spt, xlat = sym.ast2spt (ast, retxlat = True) # , _VARS)
if _SYMPAD_DEBUG and xlat:
print ('xlat: ', xlat, file = sys.stderr)
sptast = sym.spt2ast (spt)
except:
if _SYMPAD_DEBUG:
print (file = sys.stderr)
raise
if _SYMPAD_DEBUG:
try:
print ('spt: ', repr (spt), file = sys.stderr)
except:
pass
print ('spt type: ', type (spt), file = sys.stderr)
try:
print ('spt args: ', repr (spt.args), file = sys.stderr)
except:
pass
print ('spt latex: ', sp.latex (spt), file = sys.stderr)
print ('spt ast: ', sptast, file = sys.stderr)
print ('spt tex: ', sym.ast2tex (sptast), file = sys.stderr)
print ('spt nat: ', sym.ast2nat (sptast), file = sys.stderr)
print ('spt py: ', sym.ast2py (sptast), file = sys.stderr)
print (file = sys.stderr)
asts = _execute_ass (sptast, vars)
response = {}
if asts and asts [0] != AST.None_:
response.update ({'math': [{
'tex': sym.ast2tex (ast),
'nat': sym.ast2nat (ast),
'py' : sym.ast2py (ast),
} for ast in asts]})
return response
# start here
responses = []
try:
_HISTORY.append (request ['text'])
ast, _, _, _ = _PARSER.parse (request ['text'])
if ast:
for ast in (ast.scolon if ast.is_scolon else (ast,)):
sys.stdout = _SYS_STDOUT if _SERVER_DEBUG else io.StringIO ()
response = evalexpr (ast)
if sys.stdout.tell ():
responses.append ({'msg': sys.stdout.getvalue ().strip ().split ('\n')})
responses.append (response)
except Exception:
if sys.stdout is not _SYS_STDOUT and sys.stdout.tell (): # flush any printed messages before exception
responses.append ({'msg': sys.stdout.getvalue ().strip ().split ('\n')})
etype, exc, tb = sys.exc_info ()
if exc.args and isinstance (exc.args [0], str):
exc = etype (exc.args [0].replace ('\n', ' ').strip (), *exc.args [1:]).with_traceback (tb) # reformat text to remove newlines
responses.append ({'err': ''.join (traceback.format_exception (etype, exc, tb)).strip ().split ('\n')})
finally:
sys.stdout = _SYS_STDOUT
return {'data': responses} if responses else {}
def do_GET (self):
if self.path == '/':
self.path = '/index.html'
fnm = os.path.join (_SYMPAD_PATH, self.path.lstrip ('/'))
if self.path != '/env.js' and (self.path not in _STATIC_FILES or (not _RUNNING_AS_SINGLE_SCRIPT and not os.path.isfile (fnm))):
self.send_error (404, f'Invalid path {self.path!r}')
else:
self.send_response (200)
if self.path == '/env.js':
content = 'text/javascript'
data = f'History = {_HISTORY}\nHistIdx = {len (_HISTORY)}\nVersion = {_VERSION!r}\nSymPyVersion = {sp.__version__!r}\nDisplayStyle = {_DISPLAYSTYLE [0]}'.encode ('utf8')
self.send_header ('Cache-Control', 'no-store')
else:
content = _STATIC_FILES [self.path]
if _RUNNING_AS_SINGLE_SCRIPT:
data = _FILES [self.path [1:]]
else:
data = open (fnm, 'rb').read ()
self.send_header ('Content-type', f'{content}')
self.end_headers ()
self.wfile.write (data)
def do_POST (self):
request = parse_qs (self.rfile.read (int (self.headers ['Content-Length'])).decode ('utf8'), keep_blank_values = True)
for key, val in list (request.items ()):
if isinstance (val, list) and len (val) == 1:
request [key] = val [0]
if request ['mode'] == 'vars':
response = self.vars (request)
else:
if request ['mode'] == 'validate':
response = self.validate (request)
else: # if request ['mode'] == 'evaluate':
response = {**self.evaluate (request), **self.vars (request)}
response ['idx'] = request ['idx']
response ['text'] = request ['text']
response ['mode'] = request ['mode']
self.send_response (200)
self.send_header ('Content-type', 'application/json')
self.send_header ('Cache-Control', 'no-store')
self.end_headers ()
self.wfile.write (json.dumps (response).encode ('utf8'))
# self.wfile.write (json.dumps ({**request, **response}).encode ('utf8'))
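		# Illustrative request/response sketch (the values are made up): a POST body of
		#     mode=evaluate&idx=1&text=1%2B1
		# comes back as JSON along the lines of
		#     {"data": [{"math": [{"tex": "2", "nat": "2", "py": "2"}]}],
		#      "vars": [...], "idx": "1", "text": "1+1", "mode": "evaluate"}
		# i.e. the evaluation result merged with the current variable listing, plus
		# the echoed request fields added just above.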
#...............................................................................................
def start_server (logging = True):
if not logging:
Handler.log_message = lambda *args, **kwargs: None
if ('--ugly', '') in __OPTS or ('-u', '') in __OPTS:
_DISPLAYSTYLE [0] = 0
for opt, _ in __OPTS:
opt = opt.lstrip ('-')
if opt in _ENV_OPTS_ALL:
_admin_env (AST ('@', opt))
_START_ENV.update (_ENV)
_vars_updated ()
if not __ARGV:
host, port = _DEFAULT_ADDRESS
else:
host, port = (re.split (r'(?<=\]):' if __ARGV [0].startswith ('[') else ':', __ARGV [0]) + [_DEFAULT_ADDRESS [1]]) [:2]
host, port = host.strip ('[]'), int (port)
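		# For example: '[::1]:9001' splits on the colon that follows ']' to give host
		# '::1' and port 9001, 'example.com:8080' gives ('example.com', 8080), and a
		# bare 'localhost' falls back to the default port above.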
try:
httpd = HTTPServer ((host, port), Handler)
thread = threading.Thread (target = httpd.serve_forever, daemon = True)
thread.start ()
return httpd
except OSError as e:
if e.errno != 98:
raise
print (f'Port {port} seems to be in use, try specifying different port as a command line parameter, e.g. localhost:9001')
sys.exit (-1)
_MONTH_NAME = (None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
def child ():
def log_message (msg):
y, m, d, hh, mm, ss, _, _, _ = time.localtime (time.time ())
sys.stderr.write (f'{httpd.server_address [0]} - - ' \
f'[{"%02d/%3s/%04d %02d:%02d:%02d" % (d, _MONTH_NAME [m], y, hh, mm, ss)}] {msg}\n')
# start here
httpd = start_server ()
if _SYMPAD_FIRSTRUN and ('--nobrowser', '') not in __OPTS and ('-n', '') not in __OPTS:
webbrowser.open (f'http://{httpd.server_address [0] if httpd.server_address [0] != "0.0.0.0" else "127.0.0.1"}:{httpd.server_address [1]}')
if _SYMPAD_FIRSTRUN:
print (f'SymPad v{_VERSION} server running. If a browser window does not automatically open to the address below then try navigating to that URL manually.\n')
log_message (f'Serving at http://{httpd.server_address [0]}:{httpd.server_address [1]}/')
if not _SYMPAD_RESTART:
try:
while 1:
				time.sleep (0.5) # thread.join () doesn't catch KeyboardInterrupt on Windows
except KeyboardInterrupt:
sys.exit (0)
else:
fnms = (_SYMPAD_NAME,) if _RUNNING_AS_SINGLE_SCRIPT else (_SYMPAD_NAME, 'splot.py', 'spatch.py', 'sparser.py', 'sym.py', 'sxlat.py', 'sast.py', 'lalr1.py')
watch = [os.path.join (_SYMPAD_PATH, fnm) for fnm in fnms]
tstamps = [os.stat (fnm).st_mtime for fnm in watch]
try:
while 1:
time.sleep (0.5)
if [os.stat (fnm).st_mtime for fnm in watch] != tstamps:
log_message ('Files changed, restarting...')
sys.exit (0)
except KeyboardInterrupt:
sys.exit (0)
sys.exit (-1)
def parent ():
if not _SYMPAD_RESTART or __IS_MODULE_RUN:
child () # does not return
# continue as parent process and wait for child process to return due to file changes and restart it
base = [sys.executable] + sys.argv [:1] + ['--child'] # (['--child'] if __IS_MAIN else ['sympad', '--child'])
opts = [o [0] for o in __OPTS]
first_run = ['--firstrun']
try:
while 1:
ret = subprocess.run (base + opts + first_run + __ARGV)
first_run = []
if ret.returncode != 0 and not _SYMPAD_DEBUG:
sys.exit (0)
except KeyboardInterrupt:
sys.exit (0)
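# Process layout sketch (the command shown is illustrative): when started with
# -r/--restart, the watcher runs parent(), which repeatedly spawns
#     python server.py --child -r --firstrun [host:port]
# dropping --firstrun after the first launch. The child serves requests and
# watches its own source files; when one changes it exits with status 0 and the
# parent immediately relaunches it, giving a simple auto-reload loop.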
#...............................................................................................
# AUTO_REMOVE_IN_SINGLE_SCRIPT_BLOCK_START
if _SERVER_DEBUG: # DEBUG!
Handler.__init__ = lambda self: None
h = Handler ()
# _VARS ['_'] = AST ('[', (('=', ('-ufunc', 'x', (('@', 't'),)), ('*', (('+', (('@', 'C1'), ('*', (('#', '8'), ('@', 'C2'), ('-intg', ('/', ('^', ('@', 'e'), ('/', ('*', (('#', '19'), ('^', ('@', 't'), ('#', '2')))), ('#', '2'))), ('^', ('-ufunc', 'x0', (('@', 't'),)), ('#', '2'))), ('@', 'dt')))))), ('-ufunc', 'x0', (('@', 't'),))))), ('=', ('-ufunc', 'y', (('@', 't'),)), ('+', (('*', (('@', 'C1'), ('-ufunc', 'y0', (('@', 't'),)))), ('*', (('@', 'C2'), ('+', (('/', ('^', ('@', 'e'), ('/', ('*', (('#', '19'), ('^', ('@', 't'), ('#', '2')))), ('#', '2'))), ('-ufunc', 'x0', (('@', 't'),))), ('*', (('#', '8'), ('-intg', ('/', ('^', ('@', 'e'), ('/', ('*', (('#', '19'), ('^', ('@', 't'), ('#', '2')))), ('#', '2'))), ('^', ('-ufunc', 'x0', (('@', 't'),)), ('#', '2'))), ('@', 'dt')), ('-ufunc', 'y0', (('@', 't'),))), {2}))))))))))
_VARS ['_'] = AST.Zero
# print (h.validate ({'text': r'f = g'}))
print (h.evaluate ({'text': r'sin = ?(x)'}))
print (h.evaluate ({'text': r'sin (2)'}))
sys.exit (0)
# AUTO_REMOVE_IN_SINGLE_SCRIPT_BLOCK_END
def main ():
if ('--help', '') in __OPTS or ('-h', '') in __OPTS:
print (_HELP)
sys.exit (0)
if ('--version', '') in __OPTS or ('-v', '') in __OPTS:
print (_VERSION)
sys.exit (0)
if ('--debug', '') in __OPTS or ('-d', '') in __OPTS:
_SYMPAD_DEBUG = os.environ ['SYMPAD_DEBUG'] = '1'
if _SYMPAD_CHILD:
child ()
else:
parent ()
if __IS_MAIN:
main ()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import threading_helper
from test.support import verbose, cpython_only, os_helper
from test.support.import_helper import import_module
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
import traceback
from unittest import mock
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# Is Python built with Py_DEBUG macro defined?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def restore_default_excepthook(testcase):
testcase.addCleanup(setattr, threading, 'excepthook', threading.excepthook)
threading.excepthook = threading.__excepthook__
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = threading_helper.threading_setup()
def tearDown(self):
threading_helper.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
@cpython_only
def test_name(self):
def func(): pass
thread = threading.Thread(name="myname1")
self.assertEqual(thread.name, "myname1")
# Convert int name to str
thread = threading.Thread(name=123)
self.assertEqual(thread.name, "123")
# target name is ignored if name is specified
thread = threading.Thread(target=func, name="myname2")
self.assertEqual(thread.name, "myname2")
with mock.patch.object(threading, '_counter', return_value=2):
thread = threading.Thread(name="")
self.assertEqual(thread.name, "Thread-2")
with mock.patch.object(threading, '_counter', return_value=3):
thread = threading.Thread()
self.assertEqual(thread.name, "Thread-3")
with mock.patch.object(threading, '_counter', return_value=5):
thread = threading.Thread(target=func)
self.assertEqual(thread.name, "Thread-5 (func)")
@cpython_only
def test_disallow_instantiation(self):
# Ensure that the type disallows instantiation (bpo-43916)
lock = threading.Lock()
test.support.check_disallow_instantiation(self, type(lock))
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.current_thread().ident)
def f():
ident.append(threading.current_thread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
restore_default_excepthook(self)
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
with self.assertWarnsRegex(DeprecationWarning,
r'get the daemon attribute'):
t.isDaemon()
with self.assertWarnsRegex(DeprecationWarning,
r'set the daemon attribute'):
t.setDaemon(True)
with self.assertWarnsRegex(DeprecationWarning,
r'get the name attribute'):
t.getName()
with self.assertWarnsRegex(DeprecationWarning,
r'set the name attribute'):
t.setName("name")
e = threading.Event()
with self.assertWarnsRegex(DeprecationWarning, 'use is_set()'):
e.isSet()
cond = threading.Condition()
cond.acquire()
with self.assertWarnsRegex(DeprecationWarning, 'use notify_all()'):
cond.notifyAll()
with self.assertWarnsRegex(DeprecationWarning, 'use active_count()'):
threading.activeCount()
with self.assertWarnsRegex(DeprecationWarning, 'use current_thread()'):
threading.currentThread()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
def test_fork_at_exit(self):
# bpo-42350: Calling os.fork() after threading._shutdown() must
# not log an error.
code = textwrap.dedent("""
import atexit
import os
import sys
from test.support import wait_process
# Import the threading module to register its "at fork" callback
import threading
def exit_handler():
pid = os.fork()
if not pid:
print("child process ok", file=sys.stderr, flush=True)
# child process
else:
wait_process(pid, exitcode=0)
# exit_handler() will be called after threading._shutdown()
atexit.register(exit_handler)
""")
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err.rstrip(), b'child process ok')
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def func():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=func)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1 (func)\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
def test_gettrace(self):
def noop_trace(frame, event, arg):
# no operation
return noop_trace
old_trace = threading.gettrace()
try:
threading.settrace(noop_trace)
trace_func = threading.gettrace()
self.assertEqual(noop_trace,trace_func)
finally:
threading.settrace(old_trace)
def test_getprofile(self):
def fn(*args): pass
old_profile = threading.getprofile()
try:
threading.setprofile(fn)
self.assertEqual(fn, threading.getprofile())
finally:
threading.setprofile(old_profile)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_boolean_target(self):
# bpo-41149: A thread that had a boolean value of False would not
# run, regardless of whether it was callable. The correct behaviour
# is for a thread to do nothing if its target is None, and to call
# the target otherwise.
class BooleanTarget(object):
def __init__(self):
self.ran = False
def __bool__(self):
return False
def __call__(self):
self.ran = True
target = BooleanTarget()
thread = threading.Thread(target=target)
thread.start()
thread.join()
self.assertTrue(target.ran)
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with threading_helper.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
@unittest.skipUnless(Py_DEBUG, 'need debug build (Py_DEBUG)')
def test_debug_deprecation(self):
# bpo-44584: The PYTHONTHREADDEBUG environment variable is deprecated
rc, out, err = assert_python_ok("-Wdefault", "-c", "pass",
PYTHONTHREADDEBUG="1")
msg = (b'DeprecationWarning: The threading debug '
b'(PYTHONTHREADDEBUG environment variable) '
b'is deprecated and will be removed in Python 3.12')
self.assertIn(msg, err)
def test_import_from_another_thread(self):
        # bpo-1596321: If the threading module is first imported from a thread
# different than the main thread, threading._shutdown() must handle
# this case without logging an error at Python exit.
code = textwrap.dedent('''
import _thread
import sys
event = _thread.allocate_lock()
event.acquire()
def import_threading():
import threading
event.release()
if 'threading' in sys.modules:
raise Exception('threading is already imported')
_thread.start_new_thread(import_threading, ())
# wait until the threading module is imported
event.acquire()
event.release()
if 'threading' not in sys.modules:
raise Exception('threading is not imported')
# don't wait until the thread completes
''')
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
def test_multithread_modify_file_noerror(self):
# See issue25872
def modify_file():
with open(os_helper.TESTFN, 'w', encoding='utf-8') as fp:
fp.write(' ')
traceback.format_stack()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
threads = [
threading.Thread(target=modify_file)
for i in range(100)
]
for t in threads:
t.start()
t.join()
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def setUp(self):
restore_default_excepthook(self)
super().setUp()
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
def test_original_excepthook(self):
def run_thread():
with support.captured_output("stderr") as output:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
return output.getvalue()
def threading_hook(args):
print("Running a thread failed", file=sys.stderr)
default_output = run_thread()
with support.swap_attr(threading, 'excepthook', threading_hook):
custom_hook_output = run_thread()
threading.excepthook = threading.__excepthook__
recovered_output = run_thread()
self.assertEqual(default_output, recovered_output)
self.assertNotEqual(default_output, custom_hook_output)
self.assertEqual(custom_hook_output, "Running a thread failed\n")
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
restore_default_excepthook(self)
extra = {"ThreadError"}
not_exported = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, not_exported=not_exported)
class InterruptMainTests(unittest.TestCase):
def check_interrupt_main_with_signal_handler(self, signum):
def handler(signum, frame):
1/0
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
with self.assertRaises(ZeroDivisionError):
_thread.interrupt_main()
def check_interrupt_main_noerror(self, signum):
handler = signal.getsignal(signum)
try:
# No exception should arise.
signal.signal(signum, signal.SIG_IGN)
_thread.interrupt_main(signum)
signal.signal(signum, signal.SIG_DFL)
_thread.interrupt_main(signum)
finally:
# Restore original handler
signal.signal(signum, handler)
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_with_signal_handler(self):
self.check_interrupt_main_with_signal_handler(signal.SIGINT)
self.check_interrupt_main_with_signal_handler(signal.SIGTERM)
def test_interrupt_main_noerror(self):
self.check_interrupt_main_noerror(signal.SIGINT)
self.check_interrupt_main_noerror(signal.SIGTERM)
def test_interrupt_main_invalid_signal(self):
self.assertRaises(ValueError, _thread.interrupt_main, -1)
self.assertRaises(ValueError, _thread.interrupt_main, signal.NSIG)
self.assertRaises(ValueError, _thread.interrupt_main, 1000000)
@threading_helper.reap_threads
def test_can_interrupt_tight_loops(self):
cont = [True]
started = [False]
interrupted = [False]
def worker(started, cont, interrupted):
iterations = 100_000_000
started[0] = True
while cont[0]:
if iterations:
iterations -= 1
else:
return
pass
interrupted[0] = True
t = threading.Thread(target=worker,args=(started, cont, interrupted))
t.start()
while not started[0]:
pass
cont[0] = False
t.join()
self.assertTrue(interrupted[0])
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
remindmebot_search.py
|
#!/usr/bin/env python2.7
# =============================================================================
# IMPORTS
# =============================================================================
import praw
import re
import MySQLdb
import ConfigParser
import time
import urllib
import parsedatetime.parsedatetime as pdt
from datetime import datetime, timedelta
from requests.exceptions import HTTPError, ConnectionError, Timeout
from praw.errors import ExceptionList, APIException, InvalidCaptcha, InvalidUser, RateLimitExceeded
from socket import timeout
from pytz import timezone
from threading import Thread
# =============================================================================
# GLOBALS
# =============================================================================
# Reads the config file
config = ConfigParser.ConfigParser()
config.read("shittyremindmebot.cfg")
#Reddit info
user_agent = ("ShittyRemindMeBot v1.0 by anirudhr (Forked from RemindMeBot by /u/RemindMeBotWrangler)")
reddit = praw.Reddit(user_agent = user_agent)
USER = config.get("Reddit", "username")
PASS = config.get("Reddit", "password")
DB_USER = config.get("SQL", "user")
DB_PASS = config.get("SQL", "passwd")
# =============================================================================
# CLASSES
# =============================================================================
class Connect(object):
"""
DB connection class
"""
connection = None
cursor = None
def __init__(self):
self.connection = MySQLdb.connect(
host="localhost", user=DB_USER, passwd=DB_PASS, db="ShittyRemindMeBotDB"
)
self.cursor = self.connection.cursor()
class Search(object):
commented = [] # comments already replied to
subId = [] # reddit threads already replied in
def __init__(self, comment):
self._addToDB = Connect()
self.comment = comment # Reddit comment Object
self._messageInput = '"Umm, okay. I don\'t know if I can do a better job than that guy, but I will try."' #Shittiness must go here
self._storeTime = None
self._replyMessage = ""
self._replyDate = None
self._privateMessage = False
def run(self, privateMessage=False):
if privateMessage == True:
self._privateMessage = True
self.parse_comment()
self.save_to_db()
self.build_message()
self.reply()
def parse_comment(self):
"""
Parse comment looking for the message and time
"""
if self._privateMessage == True:
permalinkTemp = re.search('\[(.*?)\]', self.comment.body)
if permalinkTemp:
self.comment.permalink = permalinkTemp.group()[1:-1]
# Makes sure the URL is real
try:
urllib.urlopen(self.comment.permalink)
except IOError:
self.comment.permalink = "http://www.reddit.com/r/ShittyRemindMeBot/"
else:
# Defaults when the user doesn't provide a link
self.comment.permalink = "http://www.reddit.com/r/ShittyRemindMeBot/"
        # remove the trigger phrase and everything before it
        match = re.search(r'HeyDudeRemindMeOfThis!', self.comment.body)
        tempString = self.comment.body[match.start():]
# Use message default if not found
messageInputTemp = re.search('(["].{0,9000}["])', tempString)
if messageInputTemp:
self._messageInput = messageInputTemp.group()
        # Remove the trigger phrase ("HeyDudeRemindMeOfThis!" is 22 characters long)
        self._storeTime = re.sub('(["].{0,9000}["])', '', tempString)[22:]
def save_to_db(self):
"""
Saves the permalink comment, the time, and the message to the DB
"""
cal = pdt.Calendar()
if cal.parse(self._storeTime)[1] == 0:
# default time
holdTime = cal.parse("1 day", datetime.now(timezone('UTC')))
else:
holdTime = cal.parse(self._storeTime, datetime.now(timezone('UTC')))
# Converting time
#9999/12/31 HH/MM/SS
self._replyDate = time.strftime('%Y-%m-%d %H:%M:%S', holdTime[0]) #Shittiness must go here
cmd = "INSERT INTO message_data (permalink, message, new_date, userID) VALUES (%s, %s, %s, %s)"
self._addToDB.cursor.execute(cmd, (
self.comment.permalink,
self._messageInput,
self._replyDate,
self.comment.author))
self._addToDB.connection.commit()
self._addToDB.connection.close()
# Info is added to DB, user won't be bothered a second time
self.commented.append(self.comment.id)
def build_message(self):
"""
        Building the message for the user
"""
permalink = self.comment.permalink
self._replyMessage =(
"I'll try to message you on [**{0} UTC**](http://www.wolframalpha.com/input/?i={0} UTC To Local Time)"
" to remind you of [**this comment.**]({commentPermalink}). Let's see how this goes. I'm not making any promises."
"{remindMeMessage}"
"\n\n_____\n\n"
#"[^([FAQs])](http://www.reddit.com/r/ShittyRemindMeBot/) ^| "
"[^([Custom Reminder])](http://www.reddit.com/message/compose/?to=ShittyRemindMeBot&subject=Reminder&message="
"[LINK INSIDE SQUARE BRACKETS else default to FAQs]%0A%0ANOTE: Don't forget to add the time options after the command."
"%0A%0AHeyDudeRemindMeOfThis!) ^| "
#"[^([Feedback])](http://www.reddit.com/message/compose/?to=peatbull&subject=Feedback) ^| "
"[^([Code])](https://github.com/anirudhr/shittyremindmebot-reddit)"
)
if self._privateMessage == False:
remindMeMessage = (
"\n\n[**CLICK THIS LINK**](http://www.reddit.com/message/compose/?to=ShittyRemindMeBot&subject=Reminder&message="
"[{permalink}]%0A%0AHeyDudeRemindMeOfThis! {time}) to send a PM to also be reminded and to reduce spam.").format(
permalink=permalink,
time=self._storeTime.replace('\n', '')
)
else:
remindMeMessage = ""
self._replyMessage = self._replyMessage.format(
self._replyDate,
remindMeMessage=remindMeMessage,
commentPermalink=permalink)
def reply(self):
"""
        Messages the user as a confirmation
"""
author = self.comment.author
try:
if self._privateMessage == False:
sub = reddit.get_submission(self.comment.permalink)
# First message will be a reply in a thread
# afterwards are PM in the same thread
if (sub.id not in self.subId):
self.comment.reply(self._replyMessage)
self.subId.append(sub.id)
else:
reddit.send_message(author, 'Hello, ' + str(author) + ' ShittyRemindMeBot Confirmation Sent', self._replyMessage)
else:
reddit.send_message(author, 'Hello, ' + str(author) + ' ShittyRemindMeBot Confirmation Sent', self._replyMessage)
except (HTTPError, ConnectionError, Timeout, timeout) as err:
print err
            # PM instead if banned from the subreddit
if str(err) == "403 Client Error: Forbidden":
reddit.send_message(author, 'Hello, ' + str(author) + ' ShittyRemindMeBot Confirmation Sent', self._replyMessage)
except RateLimitExceeded as err:
print err
# PM when I message too much
reddit.send_message(author, 'Hello, ' + str(author) + ' ShittyRemindMeBot Confirmation Sent', self._replyMessage)
time.sleep(10)
except APIException as err: # Catch any less specific API errors
print err
#else:
#print self._replyMessage
class ReadPM(Thread):
"""
Allows PMs to also work
"""
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.start()
def run(self):
while True:
try:
for comment in reddit.get_unread(unset_has_mail=True, update_user=True):
redditPM = Search(comment)
if "HeyDudeRemindMeOfThis!" in comment.body and str(type(comment)) == "<class 'praw.objects.Message'>":
redditPM.run(privateMessage=True)
comment.mark_as_read()
time.sleep(30)
except Exception as err:
print "THREAD ERROR", err
# =============================================================================
# MAIN
# =============================================================================
def main():
reddit.login(USER, PASS)
print "start"
ReadPM()
while True:
try:
# loop through each comment
for comment in praw.helpers.comment_stream(reddit, 'all', limit=None, verbosity=0):
redditCall = Search(comment)
if ("HeyDudeRemindMeOfThis!" in comment.body and
redditCall.comment.id not in redditCall.commented and
'ShittyRemindMeBot' != str(comment.author)):
print "in"
                    t = Thread(target=redditCall.run)
t.start()
except Exception as err:
print err
# =============================================================================
# RUNNER
# =============================================================================
if __name__ == '__main__':
main()
|
__main__.py
|
# -*- coding: utf-8 -*-
import multiprocessing as mp
import sys
import os
import threading
import cv2
import pims
import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QWidget, QFileDialog
from video import VideoReader, linkTrajectories
from interface import Ui_ventanaPrincipal
# This line makes the app look good on hiDPI screens
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
class GuiEvents(Ui_ventanaPrincipal):
""" In this class we load the interface stored at interface.py (it
inherits from the interface class) and define the events and
connections linked to the actions performed to the different widgets
(ex: click a button, etc) """
def __init__(self, dialog):
""" This method is executed when creating an instance of the class """
# We create an instance of the QT GUI Interface (so that is easy
# to access from the other methods of this class)
Ui_ventanaPrincipal.__init__(self)
# We pass the dialog object to the setupUi method of the GUI, it fills
# the dialog with the widgets and other elements of the interface
self.setupUi(dialog)
        # Connect the saveAsImages_button "clicked" signal to a custom slot (saveAsImg)
self.saveAsImages_button.clicked.connect(self.saveAsImg)
self.play_button.clicked.connect(self.play)
self.close_button.clicked.connect(self.closeWindow)
self.selectFile_button.clicked.connect(self.selectFile)
self.refreshData_button.clicked.connect(self.refreshInfo)
self.playFps_slider.valueChanged.connect(self.updatePlayLength)
self.selectFolder_button.clicked.connect(self.selectFolder)
self.track_button.clicked.connect(self.trackParticles)
self.trajectories_button.clicked.connect(self.findTrajectories)
self.folderPath_edit.textChanged.connect(self.enableButtons)
self.filePath_edit.textChanged.connect(self.enableButtons)
self.initialFrame_spinBox.valueChanged.connect(self.updateTrackButton)
self.finalFrame_spinBox.valueChanged.connect(self.updateTrackButton)
self.filePath_edit.textChanged.connect(self.updateTrackButton)
        # Initialize a shared value for communicating with the saveAsImages
        # process; it stores the percentage of the video saved to images so far
self.percentSaved = mp.Value('i', 0)
# We create a timer that will tick every 1000 ms to update the progress
# bar (and possibly other tasks)
self.timer = QtCore.QTimer()
self.timer.setInterval(1000)
self.timer.start()
self.timer.timeout.connect(self.updateProgressBar)
def updateTrackButton(self):
initialFrame = self.initialFrame_spinBox.value()
finalFrame = self.finalFrame_spinBox.value()
n_frames = finalFrame - initialFrame
self.framesFinalminusInitial_label.setText(str(n_frames+1)+' frames')
if os.path.isfile(self.filePath_edit.text()) and n_frames>=0:
self.track_button.setDisabled(False)
else:
self.track_button.setDisabled(True)
def enableButtons(self):
if os.path.isfile(self.filePath_edit.text()):
self.play_button.setDisabled(False)
else:
self.play_button.setDisabled(True)
if os.path.isdir(self.folderPath_edit.text()) and os.path.isfile(self.filePath_edit.text()):
self.saveAsImages_button.setDisabled(False)
else:
self.saveAsImages_button.setDisabled(True)
def saveAsImg(self):
""" Here we define what happens when pressing the save button """
        if os.path.isdir(self.folderPath_edit.text()) and os.path.isfile(self.filePath_edit.text()): # If the directory and file exist:
img_folder = self.folderPath_edit.text() + '/' # 'D:/imagenes2/'
verbose = False
videoPath = self.filePath_edit.text()
crop = (self.minHeight_slider.value(),
self.maxHeight_slider.value(),
self.minWidth_slider.value(),
self.maxWidth_slider.value())
            # A new process is created to save the video to images; it runs as
            # a daemon so the GUI does not hang in the meantime
p = mp.Process(target=saveAsImg_Process, args=(videoPath, img_folder, crop, verbose, self.percentSaved,))
p.daemon = True
p.start()
#p.join()
#p.terminate()
            print('New process started, saving images to: ' + img_folder)
print('Process alive: ' + str(p.is_alive()))
else:
print('Incorrect or inexistent folder selected')
print(self.folderPath_edit.text()+'/')
def updateProgressBar(self):
self.saveAsImages_progressBar.setValue(self.percentSaved.value)
def refreshInfo(self):
try:
video = VideoReader(self.filePath_edit.text())
self.recSpeed_label.setText('Recording Speed: '+str(video.recordingSpeed)+' fps')
self.width_label.setText('Width: '+str(video.width)+' px')
self.height_label.setText('Height: '+str(video.height)+' px')
self.frameCount_label.setText('Frame Count: '+str(video.frameCount))
self.realTime_label.setText('Recorded Time: '+'{:.2f}'.format(video.realRecordedTime)+' s')
self.recDate_label.setText('Recording Date: '+video.recordingDate.strftime("%d %b %Y %H:%M"))
self.updatePlayLength()
self.maxHeight_slider.setMaximum(video.height)
self.maxWidth_slider.setMaximum(video.width)
self.initialFrame_spinBox.setMaximum(video.frameCount)
self.finalFrame_spinBox.setMaximum(video.frameCount)
#self.playFps_slider.setMaximum(video.recordingSpeed)
        except Exception:
print('Incorrect or empty file selected')
print(self.filePath_edit.text())
# Return values to default
self.recSpeed_label.setText('Recording Speed: ')
self.width_label.setText('Width: ')
self.height_label.setText('Height: ')
self.frameCount_label.setText('Frame Count: ')
self.realTime_label.setText('Recorded Time: ')
self.recDate_label.setText('Recording Date: ')
self.playLength_label.setText('Video Length: ')
def updatePlayLength(self):
""" This function updates te labels indicating video length when played """
try:
video = VideoReader(self.filePath_edit.text())
t = video.frameCount/self.playFps_slider.value()
multiplier = '{:.1f}'.format(video.recordingSpeed/self.playFps_slider.value())
self.playLength_label.setText('Video Length: '+ '{:.2f}'.format(t)+' s ('+multiplier+'X)')
        except Exception:
print('Incorrect or empty file selected')
print(self.filePath_edit.text())
def play(self):
try:
fps = self.playFps_slider.value()
video = VideoReader(self.filePath_edit.text())#'D:/aire2.cine'
video.cropVideo(self.minHeight_slider.value(),
self.maxHeight_slider.value(),
self.minWidth_slider.value(),
self.maxWidth_slider.value()) #0,790,300,1190
video.playVideo(fps)
        except Exception:
print('Incorrect or empty file selected')
print(self.filePath_edit.text())
def selectFile(self):
fileDialog = SelectFileDialog()
filePath = fileDialog.initUI()
self.filePath_edit.setText(filePath)
def selectFolder(self):
folderDialog = SelectFolderDialog()
folderPath = folderDialog.initUI()
self.folderPath_edit.setText(folderPath)
def closeWindow(self):
pass
def trackParticles(self):
video = VideoReader(self.filePath_edit.text())
video.cropVideo(self.minHeight_slider.value(),
self.maxHeight_slider.value(),
self.minWidth_slider.value(),
self.maxWidth_slider.value()) #0,790,300,1190
initialFrame = self.initialFrame_spinBox.value()
finalFrame = self.finalFrame_spinBox.value()
self.particles = video.detectCircles(initialFrame,finalFrame)
model = PandasModel(self.particles)
self.tracking_tableView.setModel(model)
self.trajectories_button.setDisabled(False)
print(self.particles)
def findTrajectories(self):
removeDrift = self.removeDrift_checkBox.checkState()
self.trajectories = linkTrajectories(self.particles, removeDrift)
model = PandasModel(self.trajectories)
self.tracking_tableView.setModel(model)
def saveAsImg_Process(videoPath, img_folder, crop, verbose, percentSaved):
""" Here we define what happens when pressing the save button.
    This method executes in a new process """
#verbose=False
#img_folder = 'D:/imagenes2/'
video = VideoReader(videoPath)
frameCount = video.frameCount
video = cv2.VideoCapture(videoPath)
i = 0
while(video.isOpened()):
        # Read the current frame; stop when the stream is exhausted
        ret, frame = video.read()
        if not ret:
            break
        # Crop the frame to the region of interest (plain numpy array slicing)
        frame_crop = frame[crop[0]:crop[1], crop[2]:crop[3]]
        # Save the cropped frame to an image file
path = img_folder + 'img' + "{:06d}".format(i) + '.png'
cv2.imwrite(path, frame_crop)
i+=1
        # Store the completed percentage in a value shared between this
        # process and the main one
percentSaved.value = int(100*i/frameCount)
if verbose == True:
percent = " - " + "{:.2f}".format(100*i/frameCount) + " %"
print("Frame nº: " + str(i)+" / "+str(frameCount) + percent)
    # Close the video stream
video.release()
class SelectFolderDialog(QWidget):
def __init__(self):
super().__init__()
self.title = 'Seleccionar Carpeta'
self.left = 10
self.top = 10
self.width = 800
self.height = 600
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
result = self.openFolderDialog()
self.show()
return result
def openFolderDialog(self):
options = QFileDialog.Options()
#options |= QFileDialog.DontUseNativeDialog
folder = QFileDialog.getExistingDirectory(self, "Seleccionar Carpeta", "", options=options)
if folder:
return folder
class SelectFileDialog(QWidget):
def __init__(self):
super().__init__()
self.title = 'Seleccionar archivo de Video'
self.left = 10
self.top = 10
self.width = 800
self.height = 600
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
result = self.openFileNameDialog()
#self.openFileNamesDialog()
#self.saveFileDialog()
self.show()
return result
def openFileNameDialog(self):
options = QFileDialog.Options()
#options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"Selecciona un archivo de video", "","Cine Files (*.cine);;All Files (*)", options=options)
#folder, _ = QFileDialog.getExistingDirectory(self, " kkk", "",options=options)
if fileName:
return fileName
def openFileNamesDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
files, _ = QFileDialog.getOpenFileNames(self,"QFileDialog.getOpenFileNames()", "","All Files (*);;Python Files (*.py)", options=options)
if files:
print(files)
def saveFileDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()","","All Files (*);;Text Files (*.txt)", options=options)
if fileName:
print(fileName)
class PandasModel(QtCore.QAbstractTableModel):
"""
Class to populate a table view with a pandas dataframe
https://stackoverflow.com/questions/31475965/fastest-way-to-populate-qtableview-from-pandas-data-frame
"""
def __init__(self, data, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
def rowCount(self, parent=None):
return len(self._data.values)
def columnCount(self, parent=None):
return self._data.columns.size
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
return str(self._data.values[index.row()][index.column()])
return None
def headerData(self, col, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self._data.columns[col]
return None
if __name__ == '__main__':
    # This script needs to be executed from the command line (it errors when run from an IDE)
# A PyQt5 application is created
app = QtWidgets.QApplication(sys.argv)
# app.setStyleSheet(open('C:/Users/malopez/Desktop/videoToolsGUI/theme.stylesheet').read())
# We create a QDialog object to show our GUI interface
dialog = QtWidgets.QDialog()
# We pass this dialog object as an argument to the main class
program = GuiEvents(dialog)
# Showing the main window until an exec condition is met
dialog.show()
sys.exit(app.exec_())
|
train.py
|
# --------------------------------------------------------
# FCN
# Copyright (c) 2016 RSE at UW
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------
"""Train a FCN"""
from fcn.config import cfg
from gt_data_layer.layer import GtDataLayer
from gt_single_data_layer.layer import GtSingleDataLayer
from gt_synthesize_layer.layer import GtSynthesizeLayer
from utils.timer import Timer
import numpy as np
import os
import tensorflow as tf
import sys
import threading
import math
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
    This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, sess, network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None):
"""Initialize the SolverWrapper."""
self.net = network
self.imdb = imdb
self.roidb = roidb
self.output_dir = output_dir
self.pretrained_model = pretrained_model
self.pretrained_ckpt = pretrained_ckpt
# For checkpoint
self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=12)
def snapshot(self, sess, iter):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.net
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_iter_{:d}'.format(iter+1) + '.ckpt')
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename, write_meta_graph=False)
print 'Wrote snapshot to: {:s}'.format(filename)
def restore(self, session, save_file):
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
var_name_to_var = {var.name : var for var in tf.global_variables()}
restore_vars = []
restored_var_names = set()
print('Restoring:')
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
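            # Restore only variables whose shapes match the checkpoint; names
            # containing 'global_step' or 'Variable' are skipped.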
for var_name, saved_var_name in var_names:
if 'global_step' in var_name:
continue
if 'Variable' in var_name:
continue
curr_var = var_name_to_var[var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
print(str(saved_var_name))
restored_var_names.add(saved_var_name)
else:
print('Shape mismatch for var', saved_var_name, 'expected', var_shape, 'got', saved_shapes[saved_var_name])
ignored_var_names = sorted(list(set(saved_shapes.keys()) - restored_var_names))
if len(ignored_var_names) == 0:
print('Restored all variables')
else:
print('Did not restore:' + '\n\t'.join(ignored_var_names))
if len(restore_vars) > 0:
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
print('Restored %s' % save_file)
def train_model(self, sess, train_op, loss, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
print self.pretrained_ckpt
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
summary, loss_value, lr, _ = sess.run([merged, loss, learning_rate, train_op])
train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex(self, sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
# tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_regu, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_regu: %.12f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex_pose(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
coord = tf.train.Coordinator()
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
elif self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
tf.get_default_graph().finalize()
# if cfg.TRAIN.VISUALIZE:
# load_and_enqueue(sess, self.net, data_layer, coord)
# else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
# tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
print(iter)
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_pose, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex_pose_adapt(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer):
"""Network training loop."""
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, label_domain_value, domain_label_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_pose, loss_domain, label_domain, domain_label, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, loss_domain: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, lr, timer.diff)
print label_domain_value
print domain_label_value
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_det(self, sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
        # initialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, _ \
= sess.run([loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_rpn_cls: %.4f, loss_rpn_box: %.4f, loss_cls: %.4f, loss_box: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' %\
(iter+1, max_iters, loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, timer.diff)
if (iter+1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
return imdb.roidb
def load_and_enqueue(sess, net, data_layer, coord):
iter = 0
while not coord.should_stop():
blobs = data_layer.forward(iter)
iter += 1
print(iter)
if cfg.INPUT == 'RGBD':
data_blob = blobs['data_image_color']
data_p_blob = blobs['data_image_depth']
elif cfg.INPUT == 'COLOR':
data_blob = blobs['data_image_color']
elif cfg.INPUT == 'DEPTH':
data_blob = blobs['data_image_depth']
elif cfg.INPUT == 'NORMAL':
data_blob = blobs['data_image_normal']
if cfg.TRAIN.SINGLE_FRAME:
if cfg.TRAIN.SEGMENTATION:
if cfg.INPUT == 'RGBD':
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5, \
net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data']}
else:
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5}
else:
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5, \
net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
else:
feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: 0.5}
else:
if cfg.INPUT == 'RGBD':
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.im_info: blobs['data_im_info'], \
net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: 0.5}
else:
feed_dict={net.data: data_blob, net.im_info: blobs['data_im_info'], \
net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: 0.5}
else:
if cfg.INPUT == 'RGBD':
feed_dict={net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], \
net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: 0.5}
else:
feed_dict={net.data: data_blob, net.gt_label_2d: blobs['data_label'], \
net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: 0.5}
sess.run(net.enqueue_op, feed_dict=feed_dict)
def loss_cross_entropy(scores, labels):
"""
scores: a list of tensors [batch_size, height, width, num_classes]
labels: a list of tensors [batch_size, height, width, num_classes]
"""
with tf.name_scope('loss'):
loss = 0
for i in range(cfg.TRAIN.NUM_STEPS):
score = scores[i]
label = labels[i]
cross_entropy = -tf.reduce_sum(label * score, reduction_indices=[3])
loss += tf.div(tf.reduce_sum(cross_entropy), tf.reduce_sum(label))
loss /= cfg.TRAIN.NUM_STEPS
return loss
def loss_cross_entropy_single_frame(scores, labels):
"""
scores: a tensor [batch_size, height, width, num_classes]
labels: a tensor [batch_size, height, width, num_classes]
"""
with tf.name_scope('loss'):
cross_entropy = -tf.reduce_sum(labels * scores, reduction_indices=[3])
loss = tf.div(tf.reduce_sum(cross_entropy), tf.reduce_sum(labels)+1e-10)
return loss
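# A minimal NumPy sketch of the normalization used above, outside the TF graph: the summed
# cross entropy is divided by the number of labeled pixels, assuming `scores` hold per-pixel
# log-probabilities and `labels` are one-hot with zeros at unlabeled pixels. The helper name
# and shapes are illustrative only.
def _np_cross_entropy_single_frame(scores, labels):
    import numpy as np
    cross_entropy = -np.sum(labels * scores, axis=3)  # [batch, height, width]
    return np.sum(cross_entropy) / (np.sum(labels) + 1e-10)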
def loss_quaternion(pose_pred, pose_targets, pose_weights):
with tf.name_scope('loss'):
distances = 1 - tf.square( tf.reduce_sum(tf.multiply(pose_pred, pose_targets), reduction_indices=[1]) )
weights = tf.reduce_mean(pose_weights, reduction_indices=[1])
loss = tf.div( tf.reduce_sum(tf.multiply(weights, distances)), tf.reduce_sum(weights)+1e-10 )
return loss
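# Minimal NumPy sketch of the quaternion distance above, outside the TF graph: for unit
# quaternions q and q*, 1 - (q . q*)^2 is 0 when they encode the same rotation (covering
# the q / -q ambiguity) and approaches 1 as the rotations diverge. Helper name is
# illustrative only.
def _np_loss_quaternion(pose_pred, pose_targets, pose_weights):
    import numpy as np
    distances = 1.0 - np.square(np.sum(pose_pred * pose_targets, axis=1))
    weights = np.mean(pose_weights, axis=1)
    return np.sum(weights * distances) / (np.sum(weights) + 1e-10)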
def train_net(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
"""Train a Fast R-CNN network."""
loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
if cfg.TRAIN.SINGLE_FRAME:
# classification loss
if cfg.NETWORK == 'FCN8VGG':
scores = network.prob
labels = network.gt_label_2d_queue
loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
else:
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
scores = network.get_output('prob')
labels = network.get_output('gt_label_weight')
loss_cls = loss_cross_entropy_single_frame(scores, labels)
vertex_pred = network.get_output('vertex_pred')
vertex_targets = network.get_output('vertex_targets')
vertex_weights = network.get_output('vertex_weights')
# loss_vertex = tf.div( tf.reduce_sum(tf.multiply(vertex_weights, tf.abs(tf.subtract(vertex_pred, vertex_targets)))), tf.reduce_sum(vertex_weights) + 1e-10 )
loss_vertex = cfg.TRAIN.VERTEX_W * smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights)
if cfg.TRAIN.POSE_REG:
# pose_pred = network.get_output('poses_pred')
# pose_targets = network.get_output('poses_target')
# pose_weights = network.get_output('poses_weight')
# loss_pose = cfg.TRAIN.POSE_W * tf.div( tf.reduce_sum(tf.multiply(pose_weights, tf.abs(tf.subtract(pose_pred, pose_targets)))), tf.reduce_sum(pose_weights) )
# loss_pose = cfg.TRAIN.POSE_W * loss_quaternion(pose_pred, pose_targets, pose_weights)
loss_pose = cfg.TRAIN.POSE_W * network.get_output('loss_pose')[0]
if cfg.TRAIN.ADAPT:
domain_score = network.get_output("domain_score")
domain_label = network.get_output("domain_label")
label_domain = network.get_output("label_domain")
loss_domain = cfg.TRAIN.ADAPT_WEIGHT * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=domain_score, labels=label_domain))
loss = loss_cls + loss_vertex + loss_pose + loss_domain + loss_regu
else:
loss = loss_cls + loss_vertex + loss_pose + loss_regu
else:
loss = loss_cls + loss_vertex + loss_regu
else:
scores = network.get_output('prob')
labels = network.get_output('gt_label_weight')
loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
else:
# classification loss
scores = network.get_output('outputs')
labels = network.get_output('labels_gt_2d')
loss = loss_cross_entropy(scores, labels) + loss_regu
# optimizer
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = cfg.TRAIN.LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
momentum = cfg.TRAIN.MOMENTUM
train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.85
#config.gpu_options.allow_growth = True
#with tf.Session(config=config) as sess:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# data layer
if cfg.TRAIN.SINGLE_FRAME:
print(imdb.data_queue)
data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, imdb.data_queue, cfg.CAD, cfg.POSE)
else:
data_layer = GtDataLayer(roidb, imdb.num_classes)
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
print 'Solving...'
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
if cfg.TRAIN.POSE_REG:
if cfg.TRAIN.ADAPT:
sw.train_model_vertex_pose_adapt(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer)
else:
sw.train_model_vertex_pose(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, learning_rate, max_iters, data_layer)
else:
sw.train_model_vertex(sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer)
else:
sw.train_model(sess, train_op, loss, learning_rate, max_iters, data_layer)
print 'done solving'
def smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights, sigma=1.0):
sigma_2 = sigma ** 2
vertex_diff = vertex_pred - vertex_targets
diff = tf.multiply(vertex_weights, vertex_diff)
abs_diff = tf.abs(diff)
smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_diff, 1. / sigma_2)))
in_loss = tf.pow(diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
loss = tf.div( tf.reduce_sum(in_loss), tf.reduce_sum(vertex_weights) + 1e-10 )
return loss
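# NumPy sketch of the piecewise (Huber-style) term above, outside the TF graph: for
# |d| < 1/sigma^2 the loss is quadratic, 0.5 * sigma^2 * d^2, and beyond that it grows
# linearly as |d| - 0.5/sigma^2, so large vertex residuals are not squared. Helper name
# is illustrative only.
def _np_smooth_l1(d, sigma=1.0):
    import numpy as np
    sigma_2 = sigma ** 2
    quadratic = np.abs(d) < 1.0 / sigma_2
    return np.where(quadratic, 0.5 * sigma_2 * np.square(d), np.abs(d) - 0.5 / sigma_2)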
def smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = tf.abs(in_box_diff)
smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = tf.reduce_mean(tf.reduce_sum(
out_loss_box,
axis=dim
))
return loss_box
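# Quick check of the two regimes for the RPN box loss below, which calls smooth_l1_loss with
# sigma=3.0 and therefore moves the quadratic/linear switch point from |d| = 1 down to
# |d| = 1/sigma^2 = 1/9. Reuses the illustrative _np_smooth_l1 helper defined above; the
# residual values are arbitrary.
def _check_smooth_l1_regimes():
    assert abs(float(_np_smooth_l1(0.05, sigma=3.0)) - 0.01125) < 1e-9            # 0.5 * 9 * 0.05^2
    assert abs(float(_np_smooth_l1(0.50, sigma=3.0)) - (0.5 - 0.5 / 9.0)) < 1e-9  # |d| - 0.5/sigma^2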
def train_net_det(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
"""Train a Fast R-CNN network."""
loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
# RPN, class loss
rpn_cls_score = tf.reshape(network.get_output('rpn_cls_score_reshape'), [-1, 2])
rpn_label = tf.reshape(network.get_output('rpn_labels'), [-1])
rpn_select = tf.where(tf.not_equal(rpn_label, -1))
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
loss_rpn_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
# RPN, bbox loss
rpn_bbox_pred = network.get_output('rpn_bbox_pred')
rpn_bbox_targets = network.get_output('rpn_bbox_targets')
rpn_bbox_inside_weights = network.get_output('rpn_bbox_inside_weights')
rpn_bbox_outside_weights = network.get_output('rpn_bbox_outside_weights')
loss_rpn_box = smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
rpn_bbox_outside_weights, sigma=3.0, dim=[1, 2, 3])
# RCNN, class loss
cls_score = network.get_output("cls_score")
label = tf.reshape(network.get_output("labels"), [-1])
loss_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))
# RCNN, bbox loss
bbox_pred = network.get_output('bbox_pred')
bbox_targets = network.get_output('bbox_targets')
bbox_inside_weights = network.get_output('bbox_inside_weights')
bbox_outside_weights = network.get_output('bbox_outside_weights')
loss_box = smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
# pose regression loss
loss_pose = network.get_output('loss_pose')[0]
# add losses
loss = loss_rpn_cls + loss_rpn_box + loss_cls + loss_box + loss_pose + loss_regu
# optimizer
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = cfg.TRAIN.LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
momentum = cfg.TRAIN.MOMENTUM
train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.85
#config.gpu_options.allow_growth = True
#with tf.Session(config=config) as sess:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
# thread to load data
data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, cfg.CAD, cfg.POSE)
print 'Solving...'
sw.train_model_det(sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer)
print 'done solving'
|
audioeffects.py
|
# PyAudio - get volume off of the microphone.
# We'll figure out what to do with it later...
# This is the equivalent of recordTest.py with the ALSA driver, only with PyAudio so I can
# run it on the Mac.
import pyaudio
import sys
import struct
import math
from threading import Thread
from threading import Lock
import threading
from effectlayer import *
def get_rms( block ):
# RMS amplitude is defined as the square root of the
# mean over time of the square of the amplitude.
# so we need to convert this string of bytes into
# a string of 16-bit samples...
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * SHORT_NORMALIZE
sum_squares += n*n
return math.sqrt( sum_squares / count )
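# Small sanity check for get_rms, illustrative only and not used by the effect layer below:
# a full-scale 16-bit sine block should come out close to 1/sqrt(2) ~ 0.707 after the
# SHORT_NORMALIZE scaling applied above.
def _check_get_rms():
    n = 1024
    samples = [int(32767 * math.sin(2 * math.pi * i / n)) for i in range(n)]
    block = struct.pack("%dh" % n, *samples)
    return get_rms(block)  # ~0.707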
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.05
#INPUT_BLOCK_TIME = 0.5
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME)
SHORT_NORMALIZE = (1.0/32768.0)
# open the sucker...
#p = pyaudio.PyAudio()
#print pyaudio.get_portaudio_version()
#print pyaudio.get_portaudio_version_text()
#print p.get_device_count()
#print p.get_default_input_device_info()
#print p.get_host_api_count()
#print p. get_host_api_info_by_index(0)
#print p.get_device_info_by_index(0)['name']
#print p.get_device_info_by_index(1)['name']
#print p.get_device_info_by_index(2)['name']
#device_index = 0 # built in microphone, not default device
#stream = p.open(format = FORMAT,
# channels = CHANNELS,
# rate = RATE,
# input = True,
# input_device_index = device_index,
# frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
# now let's read some frames!!!
#try:
# for idx in range(0,60):
# block = stream.read(INPUT_FRAMES_PER_BLOCK)
# rms = get_rms(block)
# print rms
#except IOError, e:
# print( "Error recording: %s"%(e) )
# this is a filter based on a 10 bit, rather than 16 bit, sample
DCFILTER = 5
BASSFILTER = 3
TRIG1_STEP = 1
TRIG1_CLAMP = 70
BRIGHT1_UP = (65535 / 30)
BRIGHT1_DOWN = (65535 / 300)
BRIGHT1_MAX = 64
TRIG2_STEP = 1
TRIG2_CLAMP = 40
BRIGHT2_UP = (65535 / 40)
BRIGHT2_DOWN = (65535 / 700)
BRIGHT2_MAX = 64
class AudioEffectLayer(EffectLayer):
rms = 0
rms_lock = threading.Lock()
device_index = 0
running = True
tickcount = 0
accumDC = 0
accumN = 0
accumBass = 0
tmax1 = 0
tmax2 = 0
ibright1 = 0
ibright2 = 0
brightaccum1 = 0
brightaccum2 = 0
maxbright = 0
samplemultiplier = 20 # converting between 4KHz mono and 44KHz stereo...
    def __init__(self):
        self.myThread = Thread(target=self.runAudio)
        # self.myThread = Thread(target=self.blipAudio)
        self.myThread.daemon = True
        self.myThread.start()
# runAudio()
def render(self, model, params, frame):
localRms = self.lockAndGetRms()
localRms = (localRms*5) if localRms < 0.1 else 1.0
frame[:] = localRms
print localRms
#frame[:] = 1.0
def runAudio(self):
print ("attempting to run audio");
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
input_device_index = self.device_index,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
while self.running:
try:
block = stream.read(INPUT_FRAMES_PER_BLOCK)
newRms = get_rms(block)
self.lockAndSetRms(newRms);
except IOError, e:
print( "Error recording: %s"%(e) )
def lockAndSetRms(self, new_rms):
self.rms_lock.acquire()
self.rms = new_rms
self.rms_lock.release()
def lockAndGetRms(self):
self.rms_lock.acquire()
newRMS = self.rms
self.rms_lock.release()
return newRMS
    def kill(self):
        self.running = False
        self.myThread.join()
# Adapted from Austin Appleby, Bliplace project
# We note that this is based on a 10-bit sample, rather than the 16-bit samples that we have
# And we also note that they're sampling at 4KHz, and we're sampling at 44KHz. And they're
# probably sampling mono, whereas we're sampling stereo.
# May work anyway, though.
# and it may or may not have been signed in the original...
def processSample(self, sample) :
# remove rumble + residual DC bias
sample = sample - self.accumDC
self.accumDC = self.accumDC + (sample >> DCFILTER) # this seems to imply that sample is signed. ibright is uint16
# de-noise sample
self.accumN = (self.accumN + sample) >> 1
sample = self.accumN
# split into bass & treble
sample = sample - self.accumBass
self.accumBass = self.accumBass + (sample >> BASSFILTER)
bass = self.accumBass
treble = sample
# Every 64 * samplemultiplier samples, adapt triggers to volume
self.tickcount = self.tickcount + 1 if self.tickcount < ((64*self.samplemultiplier)-1) else 0
if (self.tickcount == 0):
if self.brightaccum1 > BRIGHT1_MAX*64*self.samplemultiplier: # too bright. move trigger up if not at max
if self.tmax1 < 32000:
self.tmax1 = self.tmax1 + TRIG1_STEP
self.tmax1 = self.tmax1 + (self.tmax1 >> 10)
else: # move trigger down if not at min
if self.tmax1 > TRIG1_CLAMP:
self.tmax1 = self.tmax1 - (self.tmax1 >> 10)
self.tmax1 = self.tmax1 - TRIG1_STEP
if self.brightaccum2 > BRIGHT2_MAX*64*self.samplemultiplier:
if self.tmax2 < 32000:
self.tmax2 = self.tmax2 + TRIG2_STEP
self.tmax2 = self.tmax2 + (self.tmax2 >> 10)
else:
if self.tmax2 > TRIG2_CLAMP:
self.tmax2 = self.tmax2 - (self.tmax2 >> 10)
self.tmax2 = self.tmax2 - TRIG2_STEP
self.brightaccum1 = 0
self.brightaccum2 = 0
# Ramp our brightness up if we hit a trigger, down otherwise
# note that ibright is an uint16 integer brightness
if (bass > self.tmax2):
if (self.ibright2 <= (65535-BRIGHT2_UP)): ## another assumption of 16 bits!
self.ibright2 = self.ibright2 + BRIGHT2_UP
else:
if (self.ibright2 >= BRIGHT2_DOWN):
self.ibright2 = self.ibright2 - BRIGHT2_DOWN
if (treble > self.tmax1):
if (self.ibright1 <= (65535-BRIGHT1_UP)): ## and another
self.ibright1 = self.ibright1 + BRIGHT1_UP
else:
if (self.ibright1 >= BRIGHT1_DOWN):
self.ibright1 = self.ibright1 - BRIGHT1_DOWN
# accumulate brightness...
self.brightaccum1 = self.brightaccum1 + (self.ibright1 >> 8)
self.brightaccum2 = self.brightaccum2 + (self.ibright2 >> 8)
if (self.brightaccum1 > self.maxbright) :
self.maxbright = self.brightaccum1
if self.brightaccum1 > 0:
# print "bright"
# print self.brightaccum1
# print self.maxbright
pass
def blipAudio(self):
# process sample
# read block
# for each sample, do update...
print ("attempting to run weird audio");
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
input_device_index = self.device_index,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
while self.running:
try:
block = stream.read(INPUT_FRAMES_PER_BLOCK)
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
self.processSample(sample)
# newRms = float(self.brightaccum1)/65535
newRms = float(self.ibright1)/65535
print self.brightaccum1
print newRms
self.lockAndSetRms(newRms);
except IOError, e:
print( "Error recording: %s"%(e) )
#Update()
#uint8_t bright1 = pgm_read_byte(exptab+(ibright1 >> 8));
#uint8_t bright2 = pgm_read_byte(exptab+(ibright2 >> 8));
#brightaccum1 += pgm_read_byte(gammatab+bright1);
#brightaccum2 += pgm_read_byte(gammatab+bright2);
#setbright(bright1,bright2,bright1);
|
bpytop.py
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member, unsubscriptable-object
# indent = tab
# tab-size = 4
# Copyright 2021 Aristocratos (jakob@qvantnet.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, io, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, tzset
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Dict, Tuple, Union, Any, Iterable
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print("ERROR!")
print("\n".join(errors))
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.66"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-b", "--boxes", action="store", dest="boxes", help = "which boxes to show at start, example: -b \"cpu mem net proc\"")
args.add_argument("-lc", "--low-color", action="store_true", help = "disable truecolor, converts 24-bit colors to 256-color")
args.add_argument("-v", "--version", action="store_true", help = "show version info and exit")
args.add_argument("--debug", action="store_true", help = "start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_BOXES: str = stdargs.boxes
LOW_COLOR: bool = stdargs.low_color
DEBUG: bool = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Sets if 24-bit truecolor should be used, will convert 24-bit colors to 256 color (6x6x6 color cube) if false.
truecolor=$truecolor
#* Manually set which boxes to show. Available values are "cpu mem net proc", separate values with whitespace.
shown_boxes="$shown_boxes"
#* Update time in milliseconds, increases automatically if set below the internal loop's processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms".
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers)
proc_update_mult=$proc_update_mult
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Sets the CPU stat shown in upper half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_upper="$cpu_graph_upper"
#* Sets the CPU stat shown in lower half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_lower="$cpu_graph_lower"
#* Toggles if the lower CPU graph should be inverted.
cpu_invert_lower=$cpu_invert_lower
#* Set to True to completely disable the lower CPU graph.
cpu_single_graph=$cpu_single_graph
#* Shows the system uptime in the CPU box.
show_uptime=$show_uptime
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors has been found
show_coretemp=$show_coretemp
#* Which temperature scale to use, available values: "celsius", "fahrenheit", "kelvin" and "rankine"
temp_scale="$temp_scale"
#* Show CPU frequency, can cause slowdowns on certain systems with some versions of psutil
show_cpu_freq=$show_cpu_freq
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus are flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",".
#* Begin line with "exclude=" to change to exclude filter, otherwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar.
only_physical=$only_physical
#* Read disks list from /etc/fstab. This also disables only_physical.
use_fstab=$use_fstab
#* Toggles if io stats should be shown in regular disk usage view
show_io_stat=$show_io_stat
#* Toggles io mode for disks, showing only big graphs for disk read/write speeds.
io_mode=$io_mode
#* Set to True to show combined read/write io graphs in io mode.
io_graph_combined=$io_graph_combined
#* Set the top speed for the io graphs in MiB/s (10 by default), use format "device:speed" separate disks with a comma ",".
#* Example: "/dev/sda:100, /dev/sdb:20"
io_graph_speeds="$io_graph_speeds"
#* Set fixed values for network graphs, default "10M" = 10 Mebibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e. "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwidth usage or auto scale, bandwidth usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Starts with the Network Interface specified here.
net_iface="$net_iface"
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetic
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
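#* A minimal sketch of how the template above is rendered, not called anywhere: each "$name"
#* placeholder is a string.Template field filled from Config.conf_dict when save_config()
#* below writes the file; Template.substitute() raises KeyError if any key is missing.
def _render_default_conf(values: Dict[str, Union[str, int, bool]]) -> str:
	return DEFAULT_CONF.substitute(values)  #* e.g. values=CONFIG.conf_dict once CONFIG is created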
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "#00",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
SUBSCRIPT: Tuple[str, ...] = ("₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
SUPERSCRIPT: Tuple[str, ...] = ("⁰", "¹", "²", "³", "⁴", "⁵", "⁶", "⁷", "⁸", "⁹")
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
timers: Dict[str, float] = {}
paused: Dict[str, float] = {}
@classmethod
def start(cls, name):
cls.timers[name] = time()
@classmethod
def pause(cls, name):
if name in cls.timers:
cls.paused[name] = time() - cls.timers[name]
del cls.timers[name]
@classmethod
def stop(cls, name):
if name in cls.timers:
total: float = time() - cls.timers[name]
del cls.timers[name]
if name in cls.paused:
total += cls.paused[name]
del cls.paused[name]
errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
def timed(*args, **kw):
ts = time()
out = func(*args, **kw)
errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
return out
return timed
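#* Illustrative use of the timers above, not called at import: TimeIt.start()/stop() time a
#* named section and log it to error.log at DEBUG level, while @timeit_decorator does the
#* same for a whole function call. The function and section names below are examples only.
@timeit_decorator
def _timed_example() -> None:
	TimeIt.start("sleep_section")
	sleep(0.01)
	TimeIt.stop("sleep_section")  #* TimeIt.stop() logs the elapsed time for "sleep_section" at DEBUG level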
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "use_fstab", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp", "proc_update_mult", "shown_boxes", "net_iface", "only_physical",
"truecolor", "io_mode", "io_graph_combined", "io_graph_speeds", "show_io_stat", "cpu_graph_upper", "cpu_graph_lower", "cpu_invert_lower",
"cpu_single_graph", "show_uptime", "temp_scale", "show_cpu_freq"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
truecolor: bool = True
shown_boxes: str = "cpu mem net proc"
update_ms: int = 2000
proc_update_mult: int = 2
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
cpu_graph_upper: str = "total"
cpu_graph_lower: str = "total"
cpu_invert_lower: bool = True
cpu_single_graph: bool = False
show_uptime: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
temp_scale: str = "celsius"
show_cpu_freq: bool = True
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
only_physical: bool = True
use_fstab: bool = False
show_io_stat: bool = True
io_mode: bool = False
io_graph_combined: bool = False
io_graph_speeds: str = ""
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
net_iface: str = ""
show_battery: bool = True
show_init: bool = False
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
cpu_percent_fields: List = ["total"]
cpu_percent_fields.extend(getattr(psutil.cpu_times_percent(), "_fields", []))
temp_scales: List[str] = ["celsius", "fahrenheit", "kelvin", "rankine"]
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
def __init__(self, path: str):
self.config_file = path
conf: Dict[str, Union[str, int, bool]] = self.load_config()
if not "version" in conf.keys():
self.recreate = True
			self.info.append(f'Config file malformed or missing, will be recreated on exit!')
elif conf["version"] != VERSION:
self.recreate = True
			self.info.append(f'Config file version and bpytop version mismatch, will be recreated on exit!')
for key in self.keys:
if key in conf.keys() and conf[key] != "_error_":
setattr(self, key, conf[key])
else:
self.recreate = True
self.conf_dict[key] = getattr(self, key)
self._initialized = True
def __setattr__(self, name, value):
if self._initialized:
object.__setattr__(self, "changed", True)
object.__setattr__(self, name, value)
if name not in ["_initialized", "recreate", "changed"]:
self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif SYSTEM == "BSD" and os.path.isfile("/usr/local/etc/bpytop.conf"):
conf_file = "/usr/local/etc/bpytop.conf"
elif SYSTEM != "BSD" and os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
continue
if not '=' in line:
continue
key, line = line.split('=', maxsplit=1)
if not key in self.keys:
continue
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "update_ms" in new_config and int(new_config["update_ms"]) < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
if "shown_boxes" in new_config and not new_config["shown_boxes"] == "":
for box in new_config["shown_boxes"].split(): #type: ignore
if not box in ["cpu", "mem", "net", "proc"]:
new_config["shown_boxes"] = "_error_"
self.warnings.append(f'Config key "shown_boxes" contains invalid box names!')
break
for cpu_graph in ["cpu_graph_upper", "cpu_graph_lower"]:
if cpu_graph in new_config and not new_config[cpu_graph] in self.cpu_percent_fields:
new_config[cpu_graph] = "_error_"
self.warnings.append(f'Config key "{cpu_graph}" does not contain an available cpu stat attribute!')
if "temp_scale" in new_config and not new_config["temp_scale"] in self.temp_scales:
new_config["temp_scale"] = "_error_"
self.warnings.append(f'Config key "temp_scale" does not contain a recognized temperature scale!')
return new_config
def save_config(self):
'''Save current config to config file if difference in values or version, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
try:
CONFIG: Config = Config(CONFIG_FILE)
if DEBUG:
errlog.setLevel(logging.DEBUG)
else:
errlog.setLevel(getattr(logging, CONFIG.log_level))
DEBUG = CONFIG.log_level == "DEBUG"
errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
errlog.debug(f'CMD: {" ".join(sys.argv)}')
if CONFIG.info:
for info in CONFIG.info:
errlog.info(info)
CONFIG.info = []
if CONFIG.warnings:
for warning in CONFIG.warnings:
errlog.warning(warning)
CONFIG.warnings = []
except Exception as e:
errlog.exception(f'{e}')
raise SystemExit(1)
if ARG_BOXES:
_new_boxes: List = []
for _box in ARG_BOXES.split():
if _box in ["cpu", "mem", "net", "proc"]:
_new_boxes.append(_box)
CONFIG.shown_boxes = " ".join(_new_boxes)
del _box, _new_boxes
if SYSTEM == "Linux" and not os.path.isdir("/sys/class/power_supply"):
CONFIG.show_battery = False
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
print("WARNING!", warn)
errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
"""Terminal info and commands"""
width: int = 0
height: int = 0
resized: bool = False
_w : int = 0
_h : int = 0
fg: str = "" #* Default foreground color
bg: str = "" #* Default background color
hide_cursor = "\033[?25l" #* Hide terminal cursor
show_cursor = "\033[?25h" #* Show terminal cursor
alt_screen = "\033[?1049h" #* Switch to alternate screen
normal_screen = "\033[?1049l" #* Switch to normal screen
clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
mouse_off = "\033[?1002l" #* Disable mouse reporting
mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
winch = threading.Event()
old_boxes: List = []
min_width: int = 0
min_height: int = 0
@classmethod
def refresh(cls, *args, force: bool = False):
"""Update width, height and set resized flag if terminal has been resized"""
if Init.running: cls.resized = False; return
if cls.resized: cls.winch.set(); return
cls._w, cls._h = os.get_terminal_size()
if (cls._w, cls._h) == (cls.width, cls.height) and cls.old_boxes == Box.boxes and not force: return
if force: Collector.collect_interrupt = True
if cls.old_boxes != Box.boxes:
w_p = h_p = 0
cls.min_width = cls.min_height = 0
cls.old_boxes = Box.boxes.copy()
for box_class in Box.__subclasses__():
for box_name in Box.boxes:
if box_name in str(box_class).capitalize():
if not (box_name == "cpu" and "proc" in Box.boxes) and not (box_name == "net" and "mem" in Box.boxes) and w_p + box_class.width_p <= 100:
w_p += box_class.width_p
cls.min_width += getattr(box_class, "min_w", 0)
if not (box_name in ["mem", "net"] and "proc" in Box.boxes) and h_p + box_class.height_p <= 100:
h_p += box_class.height_p
cls.min_height += getattr(box_class, "min_h", 0)
while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < cls.min_width or cls._h < cls.min_height):
if Init.running: Init.resized = True
CpuBox.clock_block = True
cls.resized = True
Collector.collect_interrupt = True
cls.width, cls.height = cls._w, cls._h
Draw.now(Term.clear)
box_width = min(50, cls._w - 2)
Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
if cls._w < 80 or cls._h < 24:
while cls._w < cls.min_width or cls._h < cls.min_height:
Draw.now(Term.clear)
box_width = min(50, cls._w - 2)
Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, box_width, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < cls.min_width else Colors.green}{cls._w} ',
f'{Colors.default}Height: {Colors.red if cls._h < cls.min_height else Colors.green}{cls._h}{Term.bg}{Term.fg}',
f'{Mv.d(1)}{Mv.l(25)}{Colors.default}{Colors.black_bg}Current config need: {cls.min_width} x {cls.min_height}{Fx.ub}{Term.bg}{Term.fg}')
cls.winch.wait(0.3)
while Key.has_key():
if Key.last() == "q": clean_quit()
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
else:
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
Key.mouse = {}
Box.calc_sizes()
Collector.proc_counter = 1
if Menu.active: Menu.resized = True
Box.draw_bg(now=False)
cls.resized = False
Timer.finish()
@staticmethod
def echo(on: bool):
"""Toggle input echo"""
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
if on:
lflag |= termios.ECHO # type: ignore
else:
lflag &= ~termios.ECHO # type: ignore
new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
@staticmethod
def title(text: str = "") -> str:
out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
if out and text: out += " "
if text: out += f'{text}'
return f'\033]0;{out}\a'
class Fx:
"""Text effects
* trans(string: str): Replace whitespace with escape move right to not overwrite background behind whitespace.
	* uncolor(string: str) : Removes all 24-bit color and returns the string."""
start = "\033[" #* Escape sequence start
sep = ";" #* Escape sequence separator
end = "m" #* Escape sequence end
reset = rs = "\033[0m" #* Reset foreground/background color and text effects
bold = b = "\033[1m" #* Bold on
unbold = ub = "\033[22m" #* Bold off
dark = d = "\033[2m" #* Dark on
undark = ud = "\033[22m" #* Dark off
italic = i = "\033[3m" #* Italic on
unitalic = ui = "\033[23m" #* Italic off
underline = u = "\033[4m" #* Underline on
ununderline = uu = "\033[24m" #* Underline off
blink = bl = "\033[5m" #* Blink on
unblink = ubl = "\033[25m" #* Blink off
strike = s = "\033[9m" #* Strike / crossed-out on
unstrike = us = "\033[29m" #* Strike / crossed-out off
#* Precompiled regex for finding a 24-bit color escape sequence in a string
color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
@staticmethod
def trans(string: str):
return string.replace(" ", "\033[1C")
@classmethod
def uncolor(cls, string: str) -> str:
return f'{cls.color_re.sub("", string)}'
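#* Illustrative check of the two helpers documented above, not called at import: trans()
#* swaps spaces for cursor-right moves so background cells behind them are left untouched,
#* and uncolor() strips the 24-bit SGR color sequences matched by color_re.
def _fx_example() -> Tuple[str, str]:
	colored = "\033[38;2;255;0;0mred\033[0m text"
	return Fx.trans("a b"), Fx.uncolor(colored)  #* ('a\033[1Cb', 'red\033[0m text')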
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
return f'\033[{line};{col}f' #* Move cursor to line, column
@staticmethod
def right(x: int) -> str: #* Move cursor right x columns
return f'\033[{x}C'
@staticmethod
def left(x: int) -> str: #* Move cursor left x columns
return f'\033[{x}D'
@staticmethod
def up(x: int) -> str: #* Move cursor up x lines
return f'\033[{x}A'
@staticmethod
def down(x: int) -> str: #* Move cursor down x lines
return f'\033[{x}B'
save: str = "\033[s" #* Save cursor position
	restore: str = "\033[u" #* Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
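#* Illustrative composition of the movement helpers above, not called at import: the escape
#* strings simply concatenate, so one Draw call can save the cursor, position text and restore.
def _mv_example() -> str:
	return f'{Mv.save}{Mv.to(2, 4)}hello{Mv.d(1)}{Mv.l(5)}world{Mv.restore}'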
class Key:
"""Handles the threaded input reader for keypresses and mouse events"""
list: List[str] = []
mouse: Dict[str, List[List[int]]] = {}
mouse_pos: Tuple[int, int] = (0, 0)
escape: Dict[Union[str, Tuple[str, str]], str] = {
"\n" : "enter",
("\x7f", "\x08") : "backspace",
("k", "OA") : "up",
("j", "OB") : "down",
("h", "OD") : "left",
("l", "OC") : "right",
"[2~" : "insert",
"[3~" : "delete",
"[H" : "home",
"[F" : "end",
"[5~" : "page_up",
"[6~" : "page_down",
"\t" : "tab",
"[Z" : "shift_tab",
"OP" : "f1",
"OQ" : "f2",
"OR" : "f3",
"OS" : "f4",
"[15" : "f5",
"[17" : "f6",
"[18" : "f7",
"[19" : "f8",
"[20" : "f9",
"[21" : "f10",
"[23" : "f11",
"[24" : "f12"
}
new = threading.Event()
idle = threading.Event()
mouse_move = threading.Event()
mouse_report: bool = False
idle.set()
stopping: bool = False
started: bool = False
reader: threading.Thread
@classmethod
def start(cls):
cls.stopping = False
cls.reader = threading.Thread(target=cls._get_key)
cls.reader.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.reader.is_alive():
cls.stopping = True
try:
cls.reader.join()
except:
pass
@classmethod
def last(cls) -> str:
if cls.list: return cls.list.pop()
else: return ""
@classmethod
def get(cls) -> str:
if cls.list: return cls.list.pop(0)
else: return ""
@classmethod
def get_mouse(cls) -> Tuple[int, int]:
if cls.new.is_set():
cls.new.clear()
return cls.mouse_pos
@classmethod
def mouse_moved(cls) -> bool:
if cls.mouse_move.is_set():
cls.mouse_move.clear()
return True
else:
return False
@classmethod
def has_key(cls) -> bool:
return bool(cls.list)
@classmethod
def clear(cls):
cls.list = []
@classmethod
def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
'''Returns True if key is detected else waits out timer and returns False'''
if cls.list: return True
if mouse: Draw.now(Term.mouse_direct_on)
cls.new.wait(sec if sec > 0 else 0.0)
if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
if cls.new.is_set():
cls.new.clear()
return True
else:
return False
@classmethod
def break_wait(cls):
cls.list.append("_null")
cls.new.set()
sleep(0.01)
cls.new.clear()
@classmethod
def _get_key(cls):
"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
input_key: str = ""
clean_key: str = ""
try:
while not cls.stopping:
with Raw(sys.stdin):
if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
continue
input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
if input_key == "\033": #* If first character is a escape sequence keep reading
						cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting an IO Block error
Draw.idle.wait() #* Wait for Draw function to finish if busy
with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
input_key += sys.stdin.read(20)
if input_key.startswith("\033[<"):
_ = sys.stdin.read(1000)
cls.idle.set() #* Report IO blocking done
#errlog.debug(f'{repr(input_key)}')
if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
try:
cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
except:
pass
else:
if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
cls.mouse_move.set()
cls.new.set()
elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
clean_key = "mouse_scroll_up"
elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
clean_key = "mouse_scroll_down"
elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
if Menu.active:
clean_key = "mouse_click"
else:
for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
if list(cls.mouse_pos) in positions:
clean_key = key_name
break
else:
clean_key = "mouse_click"
elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
else:
						for code in cls.escape.keys(): #* Go through dict of escape codes to get the cleaned key name
if input_key.lstrip("\033").startswith(code):
clean_key = cls.escape[code]
break
else: #* If not found in escape dict and length of key is 1, assume regular character
if len(input_key) == 1:
clean_key = input_key
if clean_key:
cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
if len(cls.list) > 10: del cls.list[0]
clean_key = ""
cls.new.set() #* Set threading event to interrupt main thread sleep
input_key = ""
except Exception as e:
errlog.exception(f'Input thread failed with exception: {e}')
cls.idle.set()
cls.list.clear()
clean_quit(1, thread=True)
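#* Illustrative parse of an SGR mouse release event, mirroring the branch in _get_key() above
#* and not called at import: a sequence like "\033[<0;23;10m" carries the 1-based column and
#* row of the click.
def _parse_mouse_release(seq: str) -> Tuple[int, int]:
	parts = seq.split(";")  #* "\033[<0;23;10m" -> ["\033[<0", "23", "10m"]
	return int(parts[1]), int(parts[2].rstrip("mM"))  #* -> (23, 10)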
class Draw:
'''Holds the draw buffer and manages IO blocking queue
* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
* - Adding "+" prefix to name sets append to True and appends to name's current string
* - Adding "!" suffix to name sets now to True and print name's current string
* .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after
* .now(*args) : Prints all arguments as a string
* .clear(*names) : Clear named buffers, all if no argument
* .last_screen() : Prints all saved buffers
'''
strings: Dict[str, str] = {}
z_order: Dict[str, int] = {}
saved: Dict[str, str] = {}
save: Dict[str, bool] = {}
once: Dict[str, bool] = {}
idle = threading.Event()
idle.set()
@classmethod
def now(cls, *args):
'''Wait for input reader and self to be idle then print to screen'''
Key.idle.wait()
cls.idle.wait()
cls.idle.clear()
try:
print(*args, sep="", end="", flush=True)
except BlockingIOError:
pass
Key.idle.wait()
print(*args, sep="", end="", flush=True)
cls.idle.set()
@classmethod
def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
string: str = ""
if name.startswith("+"):
name = name.lstrip("+")
append = True
if name.endswith("!"):
name = name.rstrip("!")
now = True
cls.save[name] = not no_save
cls.once[name] = once
if not name in cls.z_order or z != 100: cls.z_order[name] = z
if args: string = "".join(args)
if only_save:
if name not in cls.saved or not append: cls.saved[name] = ""
cls.saved[name] += string
else:
if name not in cls.strings or not append: cls.strings[name] = ""
cls.strings[name] += string
if now:
cls.out(name)
@classmethod
def out(cls, *names: str, clear = False):
out: str = ""
if not cls.strings: return
if names:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in names and name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if clear or cls.once[name]:
cls.clear(name)
cls.now(out)
else:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if cls.once[name] and not clear:
cls.clear(name)
if clear:
cls.clear()
cls.now(out)
@classmethod
def saved_buffer(cls) -> str:
out: str = ""
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.saved:
out += cls.saved[name]
return out
@classmethod
def clear(cls, *names, saved: bool = False):
if names:
for name in names:
if name in cls.strings:
del cls.strings[name]
if name in cls.save:
del cls.save[name]
if name in cls.once:
del cls.once[name]
if saved:
if name in cls.saved:
del cls.saved[name]
if name in cls.z_order:
del cls.z_order[name]
else:
cls.strings = {}
cls.save = {}
cls.once = {}
if saved:
cls.saved = {}
cls.z_order = {}
class Color:
'''Holds representations for a 24-bit color value
__init__(color, depth="fg", default=False)
-- color accepts a 6 digit hexadecimal string "#RRGGBB", a 2 digit hexadecimal greyscale string "#FF" or a decimal RGB string "255 255 255".
-- depth accepts "fg" or "bg"
__call__(*args) joins str arguments to a string and apply color
__str__ returns escape sequence to set color
__iter__ returns iteration over red, green and blue in integer values of 0-255.
* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
'''
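#* Example (illustrative only): with truecolor enabled, Color("#ff0000")("text") wraps "text" in a 24-bit red
#* foreground escape and resets to Term.fg afterwards, while Color("#1a", depth="bg") builds a greyscale
#* background from the 2 digit hex value (decimal 26, 26, 26).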
hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
def __init__(self, color: str, depth: str = "fg", default: bool = False):
self.depth = depth
self.default = default
try:
if not color:
self.dec = (-1, -1, -1)
self.hexa = ""
self.red = self.green = self.blue = -1
self.escape = "\033[49m" if depth == "bg" and default else ""
return
elif color.startswith("#"):
self.hexa = color
if len(self.hexa) == 3:
self.hexa += self.hexa[1:3] + self.hexa[1:3]
c = int(self.hexa[1:3], base=16)
self.dec = (c, c, c)
elif len(self.hexa) == 7:
self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
else:
raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
else:
c_t = tuple(map(int, color.split(" ")))
if len(c_t) == 3:
self.dec = c_t #type: ignore
else:
raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
ct = self.dec[0] + self.dec[1] + self.dec[2]
if ct > 255*3 or ct < 0:
raise ValueError(f'RGB values out of range: {color}')
except Exception as e:
errlog.exception(str(e))
self.escape = ""
return
if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
if self.dec and self.hexa:
self.red, self.green, self.blue = self.dec
self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
if not CONFIG.truecolor or LOW_COLOR:
self.escape = f'{self.truecolor_to_256(rgb=self.dec, depth=self.depth)}'
def __str__(self) -> str:
return self.escape
def __repr__(self) -> str:
return repr(self.escape)
def __iter__(self) -> Iterable:
for c in self.dec: yield c
def __call__(self, *args: str) -> str:
if len(args) < 1: return ""
return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
@staticmethod
def truecolor_to_256(rgb: Tuple[int, int, int], depth: str="fg") -> str:
out: str = ""
pre: str = f'\033[{"38" if depth == "fg" else "48"};5;'
greyscale: Tuple[int, int, int] = ( rgb[0] // 11, rgb[1] // 11, rgb[2] // 11 )
if greyscale[0] == greyscale[1] == greyscale[2]:
out = f'{pre}{232 + greyscale[0]}m'
else:
out = f'{pre}{round(rgb[0] / 51) * 36 + round(rgb[1] / 51) * 6 + round(rgb[2] / 51) + 16}m'
return out
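#* Worked example (assuming the 256-color fallback is active): rgb=(255, 100, 0) is not greyscale, so the
#* color cube index is round(255/51)*36 + round(100/51)*6 + round(0/51) + 16 = 180 + 12 + 0 + 16 = 208,
#* which yields the escape "\033[38;5;208m" for depth="fg".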
@staticmethod
def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
"""Returns escape sequence to set color
* accepts either a 6 digit hexadecimal hexa="#RRGGBB", a 2 digit hexadecimal greyscale hexa="#FF",
* or decimal RGB values: r=0-255, g=0-255, b=0-255
* depth="fg" or "bg"
"""
dint: int = 38 if depth == "fg" else 48
color: str = ""
if hexa:
try:
if len(hexa) == 3:
c = int(hexa[1:], base=16)
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{c};{c};{c}m'
else:
color = f'{Color.truecolor_to_256(rgb=(c, c, c), depth=depth)}'
elif len(hexa) == 7:
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
else:
color = f'{Color.truecolor_to_256(rgb=(int(hexa[1:3], base=16), int(hexa[3:5], base=16), int(hexa[5:7], base=16)), depth=depth)}'
except ValueError as e:
errlog.exception(f'{e}')
else:
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{r};{g};{b}m'
else:
color = f'{Color.truecolor_to_256(rgb=(r, g, b), depth=depth)}'
return color
@classmethod
def fg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
else: return cls.escape_color(hexa=args[0], depth="fg")
@classmethod
def bg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
'''Standard colors for menus and dialogs'''
default = Color("#cc")
white = Color("#ff")
red = Color("#bf3636")
green = Color("#68bf36")
blue = Color("#0fd7ff")
yellow = Color("#db8b00")
black_bg = Color("#00", depth="bg")
null = Color("")
class Theme:
'''__init__ accepts a dict containing { "color_element" : "color" }'''
themes: Dict[str, str] = {}
cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }
current: str = ""
main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
gradient: Dict[str, List[str]] = {
"temp" : [],
"cpu" : [],
"free" : [],
"cached" : [],
"available" : [],
"used" : [],
"download" : [],
"upload" : [],
"proc" : [],
"proc_color" : [],
"process" : [],
}
def __init__(self, theme: str):
self.refresh()
self._load_theme(theme)
def __call__(self, theme: str):
for k in self.gradient.keys(): self.gradient[k] = []
self._load_theme(theme)
def _load_theme(self, theme: str):
tdict: Dict[str, str]
if theme in self.cached:
tdict = self.cached[theme]
elif theme in self.themes:
tdict = self._load_file(self.themes[theme])
self.cached[theme] = tdict
else:
errlog.warning(f'No theme named "{theme}" found!')
theme = "Default"
CONFIG.color_theme = theme
tdict = DEFAULT_THEME
self.current = theme
#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
if not "graph_text" in tdict and "inactive_fg" in tdict:
tdict["graph_text"] = tdict["inactive_fg"]
if not "meter_bg" in tdict and "inactive_fg" in tdict:
tdict["meter_bg"] = tdict["inactive_fg"]
if not "process_start" in tdict and "cpu_start" in tdict:
tdict["process_start"] = tdict["cpu_start"]
tdict["process_mid"] = tdict.get("cpu_mid", "")
tdict["process_end"] = tdict.get("cpu_end", "")
#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
for item, value in DEFAULT_THEME.items():
default = item in ["main_fg", "main_bg"]
depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
if item in tdict:
setattr(self, item, Color(tdict[item], depth=depth, default=default))
else:
setattr(self, item, Color(value, depth=depth, default=default))
#* Create color gradients from one, two or three colors, 101 values indexed 0-100
self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
rgb: Dict[str, Tuple[int, int, int]]
colors: List[List[int]] = []
for name in self.gradient:
rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
colors = [ list(getattr(self, f'{name}_start')) ]
if rgb["end"][0] >= 0:
r = 50 if rgb["mid"][0] >= 0 else 100
for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
for i in range(r):
colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
if r == 100:
break
self.gradient[name] += [ Color.fg(*color) for color in colors ]
else:
c = Color.fg(*rgb["start"])
self.gradient[name] += [c] * 101
#* Set terminal colors
Term.fg = f'{self.main_fg}'
Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(self.main_fg, self.main_bg)
@classmethod
def refresh(cls):
'''Sets themes dict with names and paths to all found themes'''
cls.themes = { "Default" : "Default" }
try:
for d in (THEME_DIR, USER_THEME_DIR):
if not d: continue
for f in os.listdir(d):
if f.endswith(".theme"):
cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
except Exception as e:
errlog.exception(str(e))
@staticmethod
def _load_file(path: str) -> Dict[str, str]:
'''Load a bashtop formatted theme file and return a dict'''
new_theme: Dict[str, str] = {}
try:
with open(path, "r") as f:
for line in f:
if not line.startswith("theme["): continue
key = line[6:line.find("]")]
s = line.find('"')
value = line[s + 1:line.find('"', s + 1)]
new_theme[key] = value
except Exception as e:
errlog.exception(str(e))
return new_theme
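#* Expected theme file format (illustrative): one assignment per line, for example
#*   theme[main_bg]="#00"
#*   theme[main_fg]="#cc"
#* Lines not starting with "theme[" are skipped and the value is taken from between the first pair of double quotes.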
class Banner:
'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
out: List[str] = []
c_color: str = ""
length: int = 0
if not out:
for num, (color, color2, line) in enumerate(BANNER_SRC):
if len(line) > length: length = len(line)
out_var = ""
line_color = Color.fg(color)
line_color2 = Color.fg(color2)
line_dark = Color.fg(f'#{80 - num * 6}')
for n, letter in enumerate(line):
if letter == "█" and c_color != line_color:
if 5 < n < 25: c_color = line_color2
else: c_color = line_color
out_var += c_color
elif letter == " ":
letter = f'{Mv.r(1)}'
c_color = ""
elif letter != "█" and c_color != line_dark:
c_color = line_dark
out_var += line_dark
out_var += letter
out.append(out_var)
@classmethod
def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
out: str = ""
if center: col = Term.width // 2 - cls.length // 2
for n, o in enumerate(cls.out):
out += f'{Mv.to(line + n, col)}{o}'
out += f'{Term.fg}'
if now: Draw.out(out)
else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
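#* Key encoding (illustrative): each key is left_level + right_level / 10 where the levels 0-4 set the braille dot
#* height of the left and right half of a terminal cell, e.g. graph_up[2.3] ("⣴") draws 2 dots left and 3 dots right.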
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C" #* Move the cursor one column right instead of printing a blank for empty values
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
'''Class for creating and adding to graphs
* __str__ : returns graph as a string
* add(value: int) : adds a value to graph and returns it as a string
* __call__ : same as add
'''
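#* Usage sketch (illustrative, assumes THEME has been loaded): g = Graph(width=40, height=4, color=THEME.gradient["cpu"],
#* data=[0, 25, 50, 75, 100]) builds the initial graph, print(g) outputs it and g.add(42) scrolls a new value in;
#* the two alternating buffers in .graphs let consecutive values share the left and right half of one braille cell.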
out: str
width: int
height: int
graphs: Dict[bool, List[str]]
colors: List[str]
invert: bool
max_value: int
color_max_value: int
offset: int
no_zero: bool
round_up_low: bool
current: bool
last: int
lowest: int = 0
symbol: Dict[float, str]
def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None, no_zero: bool = False, round_up_low: bool = False):
self.graphs: Dict[bool, List[str]] = {False : [], True : []}
self.current: bool = True
self.width = width
self.height = height
self.invert = invert
self.offset = offset
self.round_up_low = round_up_low
self.no_zero = no_zero or round_up_low
if not data: data = [0]
if max_value:
self.lowest = 1 if self.round_up_low else 0
self.max_value = max_value
data = [ min_max((v + offset) * 100 // (max_value + offset), min_max(v + offset, 0, self.lowest), 100) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
else:
self.max_value = 0
if color_max_value:
self.color_max_value = color_max_value
else:
self.color_max_value = self.max_value
if self.color_max_value and self.max_value:
color_scale = int(100.0 * self.max_value / self.color_max_value)
else:
color_scale = 100
self.colors: List[str] = []
if isinstance(color, list) and height > 1:
for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
if invert: self.colors.reverse()
elif isinstance(color, Color) and height > 1:
self.colors = [ f'{color}' for _ in range(height) ]
else:
if isinstance(color, list): self.colors = color
elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
if self.height == 1:
self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
else:
self.symbol = Symbol.graph_down if invert else Symbol.graph_up
value_width: int = ceil(len(data) / 2)
filler: str = ""
if value_width > width: #* If the given data set is wider than the graph, shrink the data set
data = data[-(width*2):]
value_width = ceil(len(data) / 2)
elif value_width < width: #* If the given data set is narrower than the graph, pad the graph with whitespace
filler = self.symbol[0.0] * (width - value_width)
if len(data) % 2: data.insert(0, 0)
for _ in range(height):
for b in [True, False]:
self.graphs[b].append(filler)
self._create(data, new=True)
def _create(self, data: List[int], new: bool = False):
h_high: int
h_low: int
value: Dict[str, int] = { "left" : 0, "right" : 0 }
val: int
side: str
#* Create the graph
for h in range(self.height):
h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
for v in range(len(data)):
if new: self.current = bool(v % 2) #* Switch between True and False graphs
if new and v == 0: self.last = 0
for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
if val >= h_high:
value[side] = 4
elif val <= h_low:
value[side] = 0
else:
if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
if self.no_zero and not (new and v == 0 and side == "left") and h == self.height - 1 and value[side] < 1 and not (self.round_up_low and val == 0): value[side] = 1
if new: self.last = data[v]
self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
if data: self.last = data[-1]
self.out = ""
if self.height == 1:
self.out += f'{"" if not self.colors else (THEME.inactive_fg if self.last < 5 else self.colors[self.last])}{self.graphs[self.current][0]}'
elif self.height > 1:
for h in range(self.height):
if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
if self.colors: self.out += f'{Term.fg}'
def __call__(self, value: Union[int, None] = None) -> str:
if not isinstance(value, int): return self.out
self.current = not self.current
if self.height == 1:
if self.graphs[self.current][0].startswith(self.symbol[0.0]):
self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
else:
self.graphs[self.current][0] = self.graphs[self.current][0][1:]
else:
for n in range(self.height):
self.graphs[self.current][n] = self.graphs[self.current][n][1:]
if self.max_value: value = min_max((value + self.offset) * 100 // (self.max_value + self.offset), min_max(value + self.offset, 0, self.lowest), 100)
self._create([value])
return self.out
def add(self, value: Union[int, None] = None) -> str:
return self.__call__(value)
def __str__(self):
return self.out
def __repr__(self):
return repr(self.out)
class Graphs:
'''Holds all graphs and lists of graphs for dynamically created graphs'''
cpu: Dict[str, Graph] = {}
cores: List[Graph] = [NotImplemented] * THREADS
temps: List[Graph] = [NotImplemented] * (THREADS + 1)
net: Dict[str, Graph] = {}
detailed_cpu: Graph = NotImplemented
detailed_mem: Graph = NotImplemented
pid_cpu: Dict[int, Graph] = {}
disk_io: Dict[str, Dict[str, Graph]] = {}
class Meter:
'''Creates a percentage meter
__init__(value, width, gradient_name, invert=False) to create a new meter
__call__(value) to set value and return meter as a string
__str__ returns last set meter as a string
'''
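#* Usage sketch (illustrative, assumes THEME has been loaded): m = Meter(value=50, width=20, gradient_name="cpu");
#* m(75) returns the meter redrawn at 75% and caches the result in .saved so repeated values are not rebuilt.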
out: str
color_gradient: List[str]
color_inactive: Color
gradient_name: str
width: int
invert: bool
saved: Dict[int, str]
def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
self.gradient_name = gradient_name
self.color_gradient = THEME.gradient[gradient_name]
self.color_inactive = THEME.meter_bg
self.width = width
self.saved = {}
self.invert = invert
self.out = self._create(value)
def __call__(self, value: Union[int, None]) -> str:
if not isinstance(value, int): return self.out
if value > 100: value = 100 #* Clamp value to the 0-100 range
elif value < 0: value = 0
if value in self.saved:
self.out = self.saved[value]
else:
self.out = self._create(value)
return self.out
def __str__(self) -> str:
return self.out
def __repr__(self):
return repr(self.out)
def _create(self, value: int) -> str:
if value > 100: value = 100 #* Clamp value to the 0-100 range
elif value < 0: value = 0
out: str = ""
for i in range(1, self.width + 1):
if value >= round(i * 100 / self.width):
out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
else:
out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
break
else:
out += f'{Term.fg}'
if not value in self.saved:
self.saved[value] = out
return out
class Meters:
cpu: Meter
battery: Meter
mem: Dict[str, Union[Meter, Graph]] = {}
swap: Dict[str, Union[Meter, Graph]] = {}
disks_used: Dict[str, Meter] = {}
disks_free: Dict[str, Meter] = {}
class Box:
'''Box class with all needed attributes for create_box() function'''
name: str
num: int = 0
boxes: List = []
view_modes: Dict[str, List] = {"full" : ["cpu", "mem", "net", "proc"], "stat" : ["cpu", "mem", "net"], "proc" : ["cpu", "proc"]}
view_mode: str
for view_mode in view_modes:
if sorted(CONFIG.shown_boxes.split(), key=str.lower) == view_modes[view_mode]:
break
else:
view_mode = "user"
view_modes["user"] = CONFIG.shown_boxes.split()
height_p: int
width_p: int
x: int
y: int
width: int
height: int
out: str
bg: str
_b_cpu_h: int
_b_mem_h: int
redraw_all: bool
buffers: List[str] = []
c_counter: int = 0
clock_on: bool = False
clock: str = ""
clock_len: int = 0
resized: bool = False
clock_custom_format: Dict[str, Any] = {
"/host" : os.uname()[1],
"/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
"/uptime" : "",
}
if clock_custom_format["/host"].endswith(".local"):
clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
@classmethod
def calc_sizes(cls):
'''Calculate sizes of boxes'''
cls.boxes = CONFIG.shown_boxes.split()
for sub in cls.__subclasses__():
sub._calc_size() # type: ignore
sub.resized = True # type: ignore
@classmethod
def draw_update_ms(cls, now: bool = True):
if not "cpu" in cls.boxes: return
update_string: str = f'{CONFIG.update_ms}ms'
xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
if not "+" in Key.mouse:
Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
if now and not Menu.active:
Draw.clear("update_ms")
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def draw_clock(cls, force: bool = False):
if not "cpu" in cls.boxes or not cls.clock_on: return
cls.c_counter += 1
if cls.c_counter > 3600 / (CONFIG.update_ms / 1000): #* Re-read timezone info roughly once per hour
tzset()
cls.c_counter = 0
out: str = ""
if force: pass
elif Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
clock_string = cls.clock = strftime(CONFIG.draw_clock)
for custom in cls.clock_custom_format:
if custom in clock_string:
if custom == "/uptime": cls.clock_custom_format["/uptime"] = CpuCollector.uptime
clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
clock_len = len(clock_string[:(CpuBox.width-56)])
if cls.clock_len != clock_len and not CpuBox.resized:
out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
cls.clock_len = clock_len
now: bool = False if Menu.active else not force
out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
if now and not Menu.active:
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def empty_bg(cls) -> str:
return (f'{Term.clear}' +
(f'{Banner.draw(Term.height // 2 - 10, center=True)}'
f'{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}[esc] Menu'
f'{Mv.r(25)}{Fx.i}Version: {VERSION}{Fx.ui}' if Term.height > 22 else "") +
f'{Mv.d(1)}{Mv.l(34)}{Fx.b}All boxes hidden!'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[1] {Fx.ub}Toggle CPU box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[2] {Fx.ub}Toggle MEM box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[3] {Fx.ub}Toggle NET box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[4] {Fx.ub}Toggle PROC box'
f'{Mv.d(1)}{Mv.l(19)}{Fx.b}[m] {Fx.ub}Cycle presets'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[q] Quit {Fx.ub}{Term.bg}{Term.fg}')
@classmethod
def draw_bg(cls, now: bool = True):
'''Draw all boxes outlines and titles'''
out: str = ""
if not cls.boxes:
out = cls.empty_bg()
else:
out = "".join(sub._draw_bg() for sub in cls.__subclasses__()) # type: ignore
Draw.buffer("bg", out, now=now, z=1000, only_save=Menu.active, once=True)
cls.draw_update_ms(now=now)
if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
box_x: int = 0
box_y: int = 0
box_width: int = 0
box_height: int = 0
box_columns: int = 0
column_size: int = 0
class CpuBox(Box, SubBox):
name = "cpu"
num = 1
x = 1
y = 1
height_p = 32
width_p = 100
min_w: int = 60
min_h: int = 8
resized: bool = True
redraw: bool = False
buffer: str = "cpu"
battery_percent: int = 1000
battery_secs: int = 0
battery_status: str = "Unknown"
old_battery_pos = 0
old_battery_len = 0
battery_path: Union[str, None] = ""
battery_clear: bool = False
battery_symbols: Dict[str, str] = {"Charging": "▲",
"Discharging": "▼",
"Full": "■",
"Not charging": "■"}
clock_block: bool = True
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "cpu" in cls.boxes:
Box._b_cpu_h = 0
cls.width = Term.width
return
cpu = CpuCollector
height_p: int
if cls.boxes == ["cpu"]:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * cls.width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height < 8: cls.height = 8
Box._b_cpu_h = cls.height
#THREADS = 64
cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
cls.column_size = 2
cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
cls.column_size = 1
cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
cls.column_size = 0
else:
cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
cls.box_height = ceil(THREADS / cls.box_columns) + 4
if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
cls.box_x = (cls.width - 1) - cls.box_width
cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
@classmethod
def _draw_bg(cls) -> str:
if not "cpu" in cls.boxes: return ""
if not "M" in Key.mouse:
Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
@classmethod
def battery_activity(cls) -> bool:
if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
if cls.battery_percent != 1000:
cls.battery_clear = True
return False
if cls.battery_path == "":
cls.battery_path = None
if os.path.isdir("/sys/class/power_supply"):
for directory in sorted(os.listdir("/sys/class/power_supply")):
if directory.startswith('BAT') or 'battery' in directory.lower():
cls.battery_path = f'/sys/class/power_supply/{directory}/'
break
return_true: bool = False
percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
if percent != cls.battery_percent:
cls.battery_percent = percent
return_true = True
seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
if seconds != cls.battery_secs:
cls.battery_secs = seconds
return_true = True
status: str = "not_set"
if cls.battery_path:
status = readfile(cls.battery_path + "status", default="not_set")
if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
status = "Charging" if cls.battery_percent < 100 else "Full"
elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
status = "Discharging"
elif status == "not_set":
status = "Unknown"
if status != cls.battery_status:
cls.battery_status = status
return_true = True
return return_true or cls.resized or cls.redraw or Menu.active
@classmethod
def _draw_fg(cls):
if not "cpu" in cls.boxes: return
cpu = CpuCollector
if cpu.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
lavg: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
hh: int = ceil(h / 2)
hh2: int = h - hh
mid_line: bool = False
temp: int = 0
unit: str = ""
if not CONFIG.cpu_single_graph and CONFIG.cpu_graph_upper != CONFIG.cpu_graph_lower:
mid_line = True
if h % 2: hh = floor(h / 2)
else: hh2 -= 1
hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
if cls.resized or cls.redraw:
if not "m" in Key.mouse:
Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{Box.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
Graphs.cpu["up"] = Graph(w - bw - 3, (h if CONFIG.cpu_single_graph else hh), THEME.gradient["cpu"], cpu.cpu_upper, round_up_low=True)
if not CONFIG.cpu_single_graph:
Graphs.cpu["down"] = Graph(w - bw - 3, hh2, THEME.gradient["cpu"], cpu.cpu_lower, invert=CONFIG.cpu_invert_lower, round_up_low=True)
Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
if cls.column_size > 0 or ct_width > 0:
for n in range(THREADS):
Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
if cpu.got_sensors:
Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
if cls.column_size > 1:
for n in range(1, THREADS + 1):
if not cpu.cpu_temp[n]:
continue
Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
Draw.buffer("cpu_misc", out_misc, only_save=True)
if CONFIG.show_battery and cls.battery_activity():
bat_out: str = ""
if cls.battery_secs > 0:
battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
else:
battery_time = ""
if not hasattr(Meters, "battery") or cls.resized:
Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
battery_pos = cls.width - battery_len - 17
if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
elif cls.battery_clear:
out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.battery_clear = False
cls.battery_percent = 1000
cls.battery_secs = 0
cls.battery_status = "Unknown"
cls.old_battery_pos = 0
cls.old_battery_len = 0
cls.battery_path = ""
Draw.clear("battery", saved=True)
cx = cy = cc = 0
ccw = (bw + 1) // cls.box_columns
if cpu.cpu_freq:
freq: str = f'{cpu.cpu_freq} MHz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
out += f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_upper[-1])}'
if mid_line:
out += (f'{Mv.to(y+hh, x-1)}{THEME.cpu_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (w - bw - 3)}{THEME.div_line(Symbol.title_left)}'
f'{Mv.to(y+hh, x+((w-bw)//2)-((len(CONFIG.cpu_graph_upper)+len(CONFIG.cpu_graph_lower))//2)-4)}{THEME.main_fg}{CONFIG.cpu_graph_upper}{Mv.r(1)}▲▼{Mv.r(1)}{CONFIG.cpu_graph_lower}')
if not CONFIG.cpu_single_graph and Graphs.cpu.get("down"):
out += f'{Mv.to(y + hh + (1 * mid_line), x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_lower[-1])}'
out += (f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
if cpu.got_sensors:
try:
temp, unit = temperature(cpu.cpu_temp[0][-1], CONFIG.temp_scale)
out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
f'{temp:>4}{THEME.main_fg}{unit}')
except:
cpu.got_sensors = False
cy += 1
for n in range(1, THREADS + 1):
out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
if cls.column_size > 0 or ct_width > 0:
out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
else:
out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
try:
temp, unit = temperature(cpu.cpu_temp[n][-1], CONFIG.temp_scale)
if cls.column_size > 1:
out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[n][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
else:
out += f'{THEME.gradient["temp"][min_max(temp, 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}'
out += f'{temp:>4}{THEME.main_fg}{unit}'
except:
cpu.got_sensors = False
elif cpu.got_sensors and not hide_cores:
out += f'{Mv.r(max(6, 6 * cls.column_size))}'
out += f'{THEME.div_line(Symbol.v_line)}'
cy += 1
if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
cc += 1; cy = 1; cx = ccw * cc
if cc == cls.box_columns: break
if cy < bh - 1: cy = bh - 1
if cy < bh and cc < cls.box_columns:
if cls.column_size == 2 and cpu.got_sensors:
lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
else:
lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
if CONFIG.show_uptime:
out += f'{Mv.to(y + (0 if not CONFIG.cpu_invert_lower or CONFIG.cpu_single_graph else h - 1), x + 1)}{THEME.graph_text}{Fx.trans("up " + cpu.uptime)}'
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
name = "mem"
num = 2
height_p = 38
width_p = 45
min_w: int = 36
min_h: int = 10
x = 1
y = 1
mem_meter: int = 0
mem_size: int = 0
disk_meter: int = 0
divider: int = 0
mem_width: int = 0
disks_width: int = 0
disks_io_h: int = 0
disks_io_order: List[str] = []
graph_speeds: Dict[str, int] = {}
graph_height: int
resized: bool = True
redraw: bool = False
buffer: str = "mem"
swap_on: bool = CONFIG.show_swap
Box.buffers.append(buffer)
mem_names: List[str] = ["used", "available", "cached", "free"]
swap_names: List[str] = ["used", "free"]
@classmethod
def _calc_size(cls):
if not "mem" in cls.boxes:
Box._b_mem_h = 0
cls.width = Term.width
return
width_p: int; height_p: int
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 60 if "net" in cls.boxes else 98
elif not "net" in cls.boxes:
height_p = 98 - CpuBox.height_p
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100) + 1
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
Box._b_mem_h = cls.height
cls.y = Box._b_cpu_h + 1
if CONFIG.show_disks:
cls.mem_width = ceil((cls.width - 3) / 2)
cls.disks_width = cls.width - cls.mem_width - 3
if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
cls.divider = cls.x + cls.mem_width
else:
cls.mem_width = cls.width - 1
item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
elif cls.mem_width > 25: cls.mem_size = 2
else: cls.mem_size = 1
cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
if cls.mem_size == 1: cls.mem_meter += 6
if cls.mem_meter < 1: cls.mem_meter = 0
if CONFIG.mem_graphs:
cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
if cls.graph_height == 0: cls.graph_height = 1
if cls.graph_height > 1: cls.mem_meter += 6
else:
cls.graph_height = 0
if CONFIG.show_disks:
cls.disk_meter = cls.width - cls.mem_width - 23
if cls.disks_width < 25:
cls.disk_meter += 10
if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if not "mem" in cls.boxes: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("d")}{THEME.title("isks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
Key.mouse["d"] = [[cls.divider + 3 + i, cls.y] for i in range(5)]
else:
out += f'{Mv.to(cls.y, cls.x + cls.width - 9)}{THEME.mem_box(Symbol.title_left)}{THEME.hi_fg("d")}{THEME.title("isks")}{THEME.mem_box(Symbol.title_right)}'
Key.mouse["d"] = [[cls.x + cls.width - 8 + i, cls.y] for i in range(5)]
return out
@classmethod
def _draw_fg(cls):
if not "mem" in cls.boxes: return
mem = MemCollector
if mem.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
gbg: str = ""
gmv: str = ""
gli: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
if cls.resized or cls.redraw:
cls.redraw = True
cls._calc_size()
out_misc += cls._draw_bg()
Meters.mem = {}
Meters.swap = {}
Meters.disks_used = {}
Meters.disks_free = {}
if cls.mem_meter > 0:
for name in cls.mem_names:
if CONFIG.mem_graphs:
Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
else:
Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
if cls.swap_on:
for name in cls.swap_names:
if CONFIG.swap_disk and CONFIG.show_disks:
break
elif CONFIG.mem_graphs and not CONFIG.swap_disk:
Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
else:
Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
if CONFIG.show_disks and mem.disks:
if CONFIG.show_io_stat or CONFIG.io_mode:
d_graph: List[str] = []
d_no_graph: List[str] = []
l_vals: List[Tuple[str, int, str, bool]] = []
if CONFIG.io_mode:
cls.disks_io_h = (cls.height - 2 - len(mem.disks)) // max(1, len(mem.disks_io_dict))
if cls.disks_io_h < 2: cls.disks_io_h = 1 if CONFIG.io_graph_combined else 2
else:
cls.disks_io_h = 1
if CONFIG.io_graph_speeds and not cls.graph_speeds:
try:
cls.graph_speeds = { spds.split(":")[0] : int(spds.split(":")[1]) for spds in list(i.strip() for i in CONFIG.io_graph_speeds.split(","))}
except (KeyError, ValueError):
errlog.error("Wrong formatting in io_graph_speeds variable. Using defaults.")
for name in mem.disks.keys():
if name in mem.disks_io_dict:
d_graph.append(name)
else:
d_no_graph.append(name)
continue
if CONFIG.io_graph_combined or not CONFIG.io_mode:
l_vals = [("rw", cls.disks_io_h, "available", False)]
else:
l_vals = [("read", cls.disks_io_h // 2, "free", False), ("write", cls.disks_io_h // 2, "used", True)]
Graphs.disk_io[name] = {_name : Graph(width=cls.disks_width - (6 if not CONFIG.io_mode else 0), height=_height, color=THEME.gradient[_gradient],
data=mem.disks_io_dict[name][_name], invert=_invert, max_value=cls.graph_speeds.get(name, 10), no_zero=True)
for _name, _height, _gradient, _invert in l_vals}
cls.disks_io_order = d_graph + d_no_graph
if cls.disk_meter > 0:
for n, name in enumerate(mem.disks.keys()):
if n * 2 > h: break
Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
if not "g" in Key.mouse:
Key.mouse["g"] = [[x + 8 + i, y-1] for i in range(5)]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if CONFIG.show_disks:
if not "s" in Key.mouse:
Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if not "i" in Key.mouse:
Key.mouse["i"] = [[x + w - 10 + i, y-1] for i in range(2)]
out_misc += (f'{Mv.to(y-1, x + w - 11)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.io_mode else ""}'
f'{THEME.hi_fg("i")}{THEME.title("o")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if Collector.collect_interrupt: return
Draw.buffer("mem_misc", out_misc, only_save=True)
try:
#* Mem
cx = 1; cy = 1
out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
if cls.graph_height > 0:
gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
if cls.graph_height >= 2:
gbg = f'{Mv.l(1)}'
gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
big_mem: bool = cls.mem_width > 21
for name in cls.mem_names:
if cy > h - 1: break
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
cy += 1 if not cls.graph_height else cls.graph_height
#* Swap
if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
if h - cy > 5:
if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
cy += 1
out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
cy += 1
for name in cls.swap_names:
if cy > h - 1: break
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
#* Disks
if CONFIG.show_disks and mem.disks:
cx = x + cls.mem_width - 1; cy = 0
big_disk: bool = cls.disks_width >= 25
gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
if CONFIG.io_mode:
for name in cls.disks_io_order:
item = mem.disks[name]
io_item = mem.disks_io_dict.get(name, {})
if Collector.collect_interrupt: return
if cy > h - 1: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
if big_disk:
out += Fx.trans(f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(str(item["used_percent"])) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["used_percent"]}%')
cy += 1
if io_item:
if cy > h - 1: break
if CONFIG.io_graph_combined:
if cls.disks_io_h <= 1:
out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}'
out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io"] or "RW"}')
cy += cls.disks_io_h
else:
if cls.disks_io_h <= 3:
out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}{Mv.to(y+cy+1, x+cx-1)}{" " * 5}'
out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["read"](None if cls.redraw else mem.disks_io_dict[name]["read"][-1])}'
f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io_r"] or "R"}')
cy += cls.disks_io_h // 2
out += f'{Mv.to(y+cy, x+cx-1)}{Graphs.disk_io[name]["write"](None if cls.redraw else mem.disks_io_dict[name]["write"][-1])}'
cy += cls.disks_io_h // 2
out += f'{Mv.to(y+cy-1, x+cx-1)}{THEME.main_fg}{item["io_w"] or "W"}'
else:
for name, item in mem.disks.items():
if Collector.collect_interrupt: return
if not name in Meters.disks_used:
continue
if cy > h - 1: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
if big_disk:
out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{Fx.trans(item["io"])}'
cy += 1
if cy > h - 1: break
if CONFIG.show_io_stat and name in Graphs.disk_io:
out += f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{Fx.ub}{" IO: " if big_disk else " IO " + Mv.l(2)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
if not big_disk and item["io"]:
out += f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{THEME.main_fg}{item["io"]}'
cy += 1
if cy > h - 1: break
out += Mv.to(y+cy, x+cx) + (f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U ")
out += f'{Meters.disks_used[name](None if cls.resized else mem.disks[name]["used_percent"])}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 3 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1:
if cy > h - 1: break
out += Mv.to(y+cy, x+cx)
out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
out += f'{Meters.disks_free[name](None if cls.resized else mem.disks[name]["free_percent"])}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 4 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1: cy += 1
except (KeyError, TypeError):
return
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
name = "net"
num = 3
height_p = 30
width_p = 45
min_w: int = 36
min_h: int = 6
x = 1
y = 1
resized: bool = True
redraw: bool = True
graph_height: Dict[str, int] = {}
symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
buffer: str = "net"
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "net" in cls.boxes:
cls.width = Term.width
return
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if not "net" in cls.boxes: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
@classmethod
def _draw_fg(cls):
if not "net" in cls.boxes: return
net = NetCollector
if net.redraw: cls.redraw = True
if not net.nic: return
out: str = ""
out_misc: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
reset: bool = bool(net.stats[net.nic]["download"]["offset"])
if cls.resized or cls.redraw:
out_misc += cls._draw_bg()
Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 6:
Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 13:
Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if net.address and w - len(net.nic[:10]) - len(net.address) - 20 > 15:
out_misc += (f'{Mv.to(y-1, x+7)}{THEME.net_box(Symbol.title_left)}{Fx.b}{THEME.title(net.address)}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
Draw.buffer("net_misc", out_misc, only_save=True)
cy = 0
for direction in ["download", "upload"]:
strings = net.strings[net.nic][direction]
stats = net.stats[net.nic][direction]
if cls.redraw: stats["redraw"] = True
if stats["redraw"] or cls.resized:
Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None, round_up_low=True)
out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
cy += 1 if bh != 3 else 2
if bh >= 6:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
cy += 1
if bh >= 4:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
if bh > 2 and bh % 2: cy += 2
else: cy += 1
stats["redraw"] = False
out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = False
class ProcBox(Box):
name = "proc"
num = 4
height_p = 68
width_p = 55
min_w: int = 44
min_h: int = 16
x = 1
y = 1
current_y: int = 0
current_h: int = 0
select_max: int = 0
selected: int = 0
selected_pid: int = 0
last_selection: int = 0
filtering: bool = False
moved: bool = False
start: int = 1
count: int = 0
s_len: int = 0
detailed: bool = False
detailed_x: int = 0
detailed_y: int = 0
detailed_width: int = 0
detailed_height: int = 8
resized: bool = True
redraw: bool = True
buffer: str = "proc"
pid_counter: Dict[int, int] = {}
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "proc" in cls.boxes:
cls.width = Term.width
return
width_p: int; height_p: int
if not "net" in cls.boxes and not "mem" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if not "proc" in cls.boxes: return ""
return create_box(box=cls, line_color=THEME.proc_box)
@classmethod
def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
old: Tuple[int, int] = (cls.start, cls.selected)
new_sel: int
if key in ["up", "k"]:
if cls.selected == 1 and cls.start > 1:
cls.start -= 1
elif cls.selected == 1:
cls.selected = 0
elif cls.selected > 1:
cls.selected -= 1
elif key in ["down", "j"]:
if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
cls.selected = cls.last_selection
cls.last_selection = 0
if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 1
elif cls.selected < cls.select_max:
cls.selected += 1
elif key == "mouse_scroll_up" and cls.start > 1:
cls.start -= 5
elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 5
elif key == "page_up" and cls.start > 1:
cls.start -= cls.select_max
elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += cls.select_max
elif key == "home":
if cls.start > 1: cls.start = 1
elif cls.selected > 0: cls.selected = 0
elif key == "end":
if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.selected < cls.select_max: cls.selected = cls.select_max
elif key == "mouse_click":
if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
if mouse_pos[1] == cls.current_y + 2:
cls.start = 1
elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
cls.start = ProcCollector.num_procs - cls.select_max + 1
else:
cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
else:
new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
if new_sel > 0 and new_sel == cls.selected:
Key.list.insert(0, "enter")
return
elif new_sel > 0 and new_sel != cls.selected:
if cls.last_selection: cls.last_selection = 0
cls.selected = new_sel
elif key == "mouse_unselect":
cls.selected = 0
if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
if old != (cls.start, cls.selected):
cls.moved = True
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if not "proc" in cls.boxes: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "T", "K", "I", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details.get("killed", False)
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["T"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "K" in Key.mouse: Key.mouse["K"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "I" in Key.mouse: Key.mouse["I"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+6 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+12 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("F" if cls.filtering and proc.case_sensitive else "f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "T" in Key.mouse: Key.mouse["T"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "K" in Key.mouse: Key.mouse["K"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "I" in Key.mouse: Key.mouse["I"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
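			#* Keep a small 5x1 cpu graph per pid while its usage is above 1% and drop the graph again
			#* once the counter below registers more than 10 consecutive updates under 1%.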
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
if cls.selected > cy: calc = cls.selected - cy
elif 0 < cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "scroll_up" in Key.mouse:
del Key.mouse["scroll_up"], Key.mouse["scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
cls.count = 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
'''Data collector master class
* .start(): Starts collector thread
* .stop(): Stops collector thread
* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
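	#* Typical flow (illustrative): Collector.start() spawns the worker thread, the main loop then queues
	#* work with e.g. Collector.collect(CpuCollector, NetCollector) and waits on Collector.collect_done,
	#* and Collector.stop() joins the thread on shutdown.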
stopping: bool = False
started: bool = False
draw_now: bool = False
redraw: bool = False
only_draw: bool = False
thread: threading.Thread
collect_run = threading.Event()
collect_idle = threading.Event()
collect_idle.set()
collect_done = threading.Event()
collect_queue: List = []
collect_interrupt: bool = False
proc_interrupt: bool = False
use_draw_list: bool = False
proc_counter: int = 1
@classmethod
def start(cls):
cls.stopping = False
cls.thread = threading.Thread(target=cls._runner, args=())
cls.thread.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.thread.is_alive():
cls.stopping = True
cls.started = False
cls.collect_queue = []
cls.collect_idle.set()
cls.collect_done.set()
try:
cls.thread.join()
except:
pass
@classmethod
def _runner(cls):
		'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
draw_buffers: List[str] = []
debugged: bool = False
try:
while not cls.stopping:
if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
cls.collect_run.wait(0.1)
if not cls.collect_run.is_set():
continue
draw_buffers = []
cls.collect_interrupt = False
cls.collect_run.clear()
cls.collect_idle.clear()
cls.collect_done.clear()
if DEBUG and not debugged: TimeIt.start("Collect and draw")
while cls.collect_queue:
collector = cls.collect_queue.pop()
if not cls.only_draw:
collector._collect()
collector._draw()
if cls.use_draw_list: draw_buffers.append(collector.buffer)
if cls.collect_interrupt: break
if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
if cls.draw_now and not Menu.active and not cls.collect_interrupt:
if cls.use_draw_list: Draw.out(*draw_buffers)
else: Draw.out()
if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
cls.collect_idle.set()
cls.collect_done.set()
except Exception as e:
errlog.exception(f'Data collection thread failed with exception: {e}')
cls.collect_idle.set()
cls.collect_done.set()
clean_quit(1, thread=True)
@classmethod
def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
		'''Set up collect queue for _runner'''
cls.collect_interrupt = interrupt
cls.proc_interrupt = proc_interrupt
cls.collect_idle.wait()
cls.collect_interrupt = False
cls.proc_interrupt = False
cls.use_draw_list = False
cls.draw_now = draw_now
cls.redraw = redraw
cls.only_draw = only_draw
if collectors:
cls.collect_queue = [*collectors]
cls.use_draw_list = True
if ProcCollector in cls.collect_queue:
cls.proc_counter = 1
else:
cls.collect_queue = list(cls.__subclasses__())
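			#* When proc_update_mult is above 1, ProcCollector is kept in the queue only once every
			#* proc_update_mult rounds, so the process list is refreshed less often than the other boxes.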
if CONFIG.proc_update_mult > 1:
if cls.proc_counter > 1:
cls.collect_queue.remove(ProcCollector)
if cls.proc_counter == CONFIG.proc_update_mult:
cls.proc_counter = 0
cls.proc_counter += 1
cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
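	#* cpu_usage[0] holds the total cpu percent history and cpu_usage[1..THREADS] one history per thread;
	#* cpu_temp uses the same layout with index 0 as the package/average temperature.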
cpu_usage: List[List[int]] = []
cpu_upper: List[int] = []
cpu_lower: List[int] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
sensor_swap: bool = False
cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
cls.sensor_method = ""
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", universal_newlines=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
cls.sensor_method = "psutil"
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = bool(cls.sensor_method)
@classmethod
def _collect(cls):
cls.cpu_usage[0].append(ceil(psutil.cpu_percent(percpu=False)))
if len(cls.cpu_usage[0]) > Term.width * 4:
del cls.cpu_usage[0][0]
cpu_times_percent = psutil.cpu_times_percent()
for x in ["upper", "lower"]:
if getattr(CONFIG, "cpu_graph_" + x) == "total":
setattr(cls, "cpu_" + x, cls.cpu_usage[0])
else:
getattr(cls, "cpu_" + x).append(ceil(getattr(cpu_times_percent, getattr(CONFIG, "cpu_graph_" + x))))
if len(getattr(cls, "cpu_" + x)) > Term.width * 4:
del getattr(cls, "cpu_" + x)[0]
for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
cls.cpu_usage[n].append(ceil(thread))
if len(cls.cpu_usage[n]) > Term.width * 2:
del cls.cpu_usage[n][0]
try:
if CONFIG.show_cpu_freq and hasattr(psutil.cpu_freq(), "current"):
freq: float = psutil.cpu_freq().current
cls.cpu_freq = round(freq * (1 if freq > 10 else 1000))
elif cls.cpu_freq > 0:
cls.cpu_freq = 0
except Exception as e:
if not cls.freq_error:
cls.freq_error = True
errlog.error("Exception while getting cpu frequency!")
errlog.exception(f'{e}')
cls.load_avg = [round(lavg, 2) for lavg in psutil.getloadavg()]
cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3].replace(" days,", "d").replace(" day,", "d")
if CONFIG.check_temp and cls.got_sensors:
cls._collect_temps()
@classmethod
def _collect_temps(cls):
temp: int = 1000
cores: List[int] = []
core_dict: Dict[int, int] = {}
entry_int: int = 0
cpu_type: str = ""
c_max: int = 0
s_name: str = "_-_"
s_label: str = "_-_"
if cls.sensor_method == "psutil":
try:
if CONFIG.cpu_sensor != "Auto":
s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
for name, entries in psutil.sensors_temperatures().items():
for num, entry in enumerate(entries, 1):
if name == s_name and (entry.label == s_label or str(num) == s_label):
if entry.label.startswith("Package"):
cpu_type = "intel"
elif entry.label.startswith("Tdie"):
cpu_type = "ryzen"
else:
cpu_type = "other"
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
temp = round(entry.current)
elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current"):
if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
temp = round(entry.current)
elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current"):
if entry.label.startswith(("Core", "Tccd")):
entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
if entry_int in core_dict and cpu_type != "ryzen":
if c_max == 0:
c_max = max(core_dict) + 1
if c_max < THREADS // 2 and (entry_int + c_max) not in core_dict:
core_dict[(entry_int + c_max)] = round(entry.current)
continue
elif entry_int in core_dict:
continue
core_dict[entry_int] = round(entry.current)
continue
elif cpu_type in ["intel", "ryzen"]:
continue
if not cpu_type:
cpu_type = "other"
if not cls.cpu_temp_high or cls.sensor_swap:
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
temp = round(entry.current)
cores.append(round(entry.current))
if core_dict:
if not temp or temp == 1000:
temp = sum(core_dict.values()) // len(core_dict)
if not cls.cpu_temp_high or not cls.cpu_temp_crit:
cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
cls.cpu_temp[0].append(temp)
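					#* Spread the collected readings over all threads: Ryzen exposes one sensor per CCD, so each
					#* CCD value is reused for the cores it covers; other cpus map core sensors through CORE_MAP.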
if cpu_type == "ryzen":
ccds: int = len(core_dict)
cores_per_ccd: int = CORES // ccds
z: int = 1
for x in range(THREADS):
if x == CORES:
z = 1
if CORE_MAP[x] + 1 > cores_per_ccd * z:
z += 1
if z in core_dict:
cls.cpu_temp[x+1].append(core_dict[z])
else:
for x in range(THREADS):
if CORE_MAP[x] in core_dict:
cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
elif len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cls.cpu_temp[0].append(temp)
if len(cores) > 1:
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
try:
if cls.sensor_method == "coretemp":
temp = max(0, int(subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip()))
cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", universal_newlines=True).split()]
if len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "osx-cpu-temp":
temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", universal_newlines=True).strip()[:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "vcgencmd":
temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip()[5:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 60
cls.cpu_temp_crit = 80
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
if not cores:
cls.cpu_temp[0].append(temp)
if not core_dict and len(cores) <= 1:
cls.cpu_temp_only = True
if len(cls.cpu_temp[0]) > 5:
for n in range(len(cls.cpu_temp)):
if cls.cpu_temp[n]:
del cls.cpu_temp[n][0]
@classmethod
def _draw(cls):
CpuBox._draw_fg()
class MemCollector(Collector):
'''Collects memory and disks information'''
values: Dict[str, int] = {}
vlist: Dict[str, List[int]] = {}
percent: Dict[str, int] = {}
string: Dict[str, str] = {}
swap_values: Dict[str, int] = {}
swap_vlist: Dict[str, List[int]] = {}
swap_percent: Dict[str, int] = {}
swap_string: Dict[str, str] = {}
disks: Dict[str, Dict]
disk_hist: Dict[str, Tuple] = {}
timestamp: float = time()
disks_io_dict: Dict[str, Dict[str, List[int]]] = {}
recheck_diskutil: bool = True
diskutil_map: Dict[str, str] = {}
io_error: bool = False
old_disks: List[str] = []
old_io_disks: List[str] = []
fstab_filter: List[str] = []
excludes: List[str] = ["squashfs", "nullfs"]
if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
buffer: str = MemBox.buffer
@classmethod
def _collect(cls):
#* Collect memory
mem = psutil.virtual_memory()
if hasattr(mem, "cached"):
cls.values["cached"] = mem.cached
else:
cls.values["cached"] = mem.active
cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
cls.values["used"] = cls.values["total"] - cls.values["available"]
for key, value in cls.values.items():
cls.string[key] = floating_humanizer(value)
if key == "total": continue
cls.percent[key] = round(value * 100 / cls.values["total"])
if CONFIG.mem_graphs:
if not key in cls.vlist: cls.vlist[key] = []
cls.vlist[key].append(cls.percent[key])
if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
#* Collect swap
if CONFIG.show_swap or CONFIG.swap_disk:
swap = psutil.swap_memory()
cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
if swap.total:
if not MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = True
for key, value in cls.swap_values.items():
cls.swap_string[key] = floating_humanizer(value)
if key == "total": continue
cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
if CONFIG.mem_graphs:
if not key in cls.swap_vlist: cls.swap_vlist[key] = []
cls.swap_vlist[key].append(cls.swap_percent[key])
if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
if not CONFIG.show_disks: return
#* Collect disks usage
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
filtering: Tuple = ()
filter_exclude: bool = False
io_string_r: str
io_string_w: str
u_percent: int
cls.disks = {}
if CONFIG.disks_filter:
if CONFIG.disks_filter.startswith("exclude="):
filter_exclude = True
filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
else:
filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
try:
io_counters = psutil.disk_io_counters(perdisk=SYSTEM != "BSD", nowrap=True)
except ValueError as e:
if not cls.io_error:
cls.io_error = True
errlog.error(f'Non fatal error during disk io collection!')
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
errlog.error(f'Caused by outdated psutil version.')
errlog.exception(f'{e}')
io_counters = None
if SYSTEM == "MacOS" and cls.recheck_diskutil:
cls.recheck_diskutil = False
			try:
				xdisk = ydisk = ""  #* Must be initialized before the "if xdisk and ydisk" check below
				dutil_out = subprocess.check_output(["diskutil", "list", "physical"], universal_newlines=True)
for line in dutil_out.split("\n"):
line = line.replace("\u2068", "").replace("\u2069", "")
if line.startswith("/dev/"):
xdisk = line.split()[0].replace("/dev/", "")
elif "Container" in line:
ydisk = line.split()[3]
if xdisk and ydisk:
cls.diskutil_map[xdisk] = ydisk
xdisk = ydisk = ""
except:
pass
if CONFIG.use_fstab and SYSTEM != "MacOS" and not cls.fstab_filter:
try:
with open('/etc/fstab','r') as fstab:
for line in fstab:
line = line.strip()
if line and not line.startswith('#'):
mount_data = (line.split())
if mount_data[2].lower() != "swap":
cls.fstab_filter += [mount_data[1]]
errlog.debug(f'new fstab_filter set : {cls.fstab_filter}')
except IOError:
CONFIG.use_fstab = False
errlog.warning(f'Error reading fstab, use_fstab flag reset to {CONFIG.use_fstab}')
if not CONFIG.use_fstab and cls.fstab_filter:
cls.fstab_filter = []
errlog.debug(f'use_fstab flag has been turned to {CONFIG.use_fstab}, fstab_filter cleared')
for disk in psutil.disk_partitions(all=CONFIG.use_fstab or not CONFIG.only_physical):
disk_io = None
io_string_r = io_string_w = ""
if CONFIG.use_fstab and disk.mountpoint not in cls.fstab_filter:
continue
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
if cls.excludes and disk.fstype in cls.excludes:
continue
if filtering and ((not filter_exclude and not disk.mountpoint in filtering) or (filter_exclude and disk.mountpoint in filtering)):
continue
if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
				disk_u = None  #* Avoids NameError below if disk_usage() failed
u_percent = round(getattr(disk_u, "percent", 0))
cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
for name in ["total", "used", "free"]:
cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
#* Collect disk io
if io_counters:
try:
if SYSTEM != "BSD":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if not dev_name in io_counters:
for names in io_counters:
if names in dev_name:
disk_io = io_counters[names]
break
else:
if cls.diskutil_map:
for names, items in cls.diskutil_map.items():
if items in dev_name and names in io_counters:
disk_io = io_counters[names]
else:
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp)) #type: ignore
disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp)) #type: ignore
if not disk.device in cls.disks_io_dict:
cls.disks_io_dict[disk.device] = {"read" : [], "write" : [], "rw" : []}
cls.disks_io_dict[disk.device]["read"].append(disk_read >> 20)
cls.disks_io_dict[disk.device]["write"].append(disk_write >> 20)
cls.disks_io_dict[disk.device]["rw"].append((disk_read + disk_write) >> 20)
if len(cls.disks_io_dict[disk.device]["read"]) > MemBox.width:
del cls.disks_io_dict[disk.device]["read"][0], cls.disks_io_dict[disk.device]["write"][0], cls.disks_io_dict[disk.device]["rw"][0]
except:
disk_read = disk_write = 0
else:
disk_read = disk_write = 0
if disk_io:
cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
if CONFIG.io_mode or MemBox.disks_width > 30:
if disk_read > 0:
io_string_r = f'▲{floating_humanizer(disk_read, short=True)}'
if disk_write > 0:
io_string_w = f'▼{floating_humanizer(disk_write, short=True)}'
if CONFIG.io_mode:
cls.disks[disk.device]["io_r"] = io_string_r
cls.disks[disk.device]["io_w"] = io_string_w
elif disk_read + disk_write > 0:
io_string_r += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
cls.disks[disk.device]["io"] = io_string_r + (" " if io_string_w and io_string_r else "") + io_string_w
if CONFIG.swap_disk and MemBox.swap_on:
cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
for name in ["total", "used", "free"]:
cls.disks["__swap"][name] = cls.swap_string[name]
if len(cls.disks) > 2:
try:
new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
new["__swap"] = cls.disks.pop("__swap")
new.update(cls.disks)
cls.disks = new
except:
pass
if cls.old_disks != list(cls.disks) or cls.old_io_disks != list(cls.disks_io_dict):
MemBox.redraw = True
cls.recheck_diskutil = True
cls.old_disks = list(cls.disks)
cls.old_io_disks = list(cls.disks_io_dict)
cls.timestamp = time()
@classmethod
def _draw(cls):
MemBox._draw_fg()
class NetCollector(Collector):
'''Collects network stats'''
buffer: str = NetBox.buffer
nics: List[str] = []
nic_i: int = 0
nic: str = ""
new_nic: str = ""
nic_error: bool = False
reset: bool = False
graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
#min_top: int = 10<<10
	#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool
stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
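	#* e.g. stats["eth0"]["download"]["speed"] is the list of sampled byte rates and
	#* strings["eth0"]["download"]["byte_ps"] its humanized last value ("eth0" standing in for whatever nic is active).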
strings: Dict[str, Dict[str, Dict[str, str]]] = {}
switched: bool = False
timestamp: float = time()
net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
auto_min: bool = CONFIG.net_auto
net_iface: str = CONFIG.net_iface
sync_top: int = 0
sync_string: str = ""
address: str = ""
@classmethod
def _get_nics(cls):
'''Get a list of all network devices sorted by highest throughput'''
cls.nic_i = 0
cls.nics = []
cls.nic = ""
try:
io_all = psutil.net_io_counters(pernic=True)
except Exception as e:
if not cls.nic_error:
cls.nic_error = True
errlog.exception(f'{e}')
if not io_all: return
up_stat = psutil.net_if_stats()
for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
if nic not in up_stat or not up_stat[nic].isup:
continue
cls.nics.append(nic)
if not cls.nics: cls.nics = [""]
cls.nic = cls.nics[cls.nic_i]
if cls.net_iface and cls.net_iface in cls.nics:
cls.nic = cls.net_iface
cls.nic_i = cls.nics.index(cls.nic)
@classmethod
def switch(cls, key: str):
if cls.net_iface: cls.net_iface = ""
if len(cls.nics) < 2 and cls.nic in cls.nics:
return
if cls.nic_i == -1:
cls.nic_i = 0 if key == "n" else -1
else:
cls.nic_i += +1 if key == "n" else -1
cls.nic_i %= len(cls.nics)
cls.new_nic = cls.nics[cls.nic_i]
cls.switched = True
Collector.collect(NetCollector, redraw=True)
@classmethod
def _collect(cls):
speed: int
stat: Dict
up_stat = psutil.net_if_stats()
if sorted(cls.nics) != sorted(nic for nic in up_stat if up_stat[nic].isup):
old_nic = cls.nic
cls._get_nics()
cls.nic = old_nic
if cls.nic not in cls.nics:
cls.nic_i = -1
else:
cls.nic_i = cls.nics.index(cls.nic)
if cls.switched:
cls.nic = cls.new_nic
cls.switched = False
if not cls.nic or cls.nic not in up_stat:
cls._get_nics()
if not cls.nic: return
NetBox.redraw = True
try:
io_all = psutil.net_io_counters(pernic=True)[cls.nic]
		except KeyError:
			return
if not cls.nic in cls.stats:
cls.stats[cls.nic] = {}
cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
cls.strings[cls.nic][direction][v] = ""
cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
if cls.nic in psutil.net_if_addrs():
cls.address = getattr(psutil.net_if_addrs()[cls.nic][0], "address", "")
for direction in ["download", "upload"]:
stat = cls.stats[cls.nic][direction]
strings = cls.strings[cls.nic][direction]
#* Calculate current speed
stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
stat["last"] = stat["total"]
speed = stat["speed"][-1]
if cls.net_min[direction] == -1:
cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
stat["graph_top"] = cls.net_min[direction]
stat["graph_lower"] = 7
if not cls.auto_min:
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
if stat["offset"] and stat["offset"] > stat["total"]:
cls.reset = True
if cls.reset:
if not stat["offset"]:
stat["offset"] = stat["total"]
else:
stat["offset"] = 0
if direction == "upload":
cls.reset = False
NetBox.redraw = True
if len(stat["speed"]) > NetBox.width * 2:
del stat["speed"][0]
strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
if speed > stat["top"] or not stat["top"]:
stat["top"] = speed
strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
if cls.auto_min:
if speed > stat["graph_top"]:
stat["graph_raise"] += 1
if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
elif speed < stat["graph_top"] // 10:
stat["graph_lower"] += 1
if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
if stat["graph_raise"] >= 5:
stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
elif stat["graph_lower"] >= 5:
stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
stat["graph_raise"] = 0
stat["graph_lower"] = 0
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
cls.timestamp = time()
if CONFIG.net_sync:
c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
if c_max != cls.sync_top:
cls.sync_top = c_max
cls.sync_string = floating_humanizer(cls.sync_top, short=True)
NetBox.redraw = True
@classmethod
def _draw(cls):
NetBox._draw_fg()
class ProcCollector(Collector):
'''Collects process stats'''
buffer: str = ProcBox.buffer
search_filter: str = ""
case_sensitive: bool = False
processes: Dict = {}
num_procs: int = 0
det_cpu: float = 0.0
detailed: bool = False
detailed_pid: Union[int, None] = None
details: Dict[str, Any] = {}
details_cpu: List[int] = []
details_mem: List[int] = []
expand: int = 0
collapsed: Dict = {}
tree_counter: int = 0
p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
sort_expr: Dict = {}
sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
@classmethod
def _collect(cls):
		'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
if not "proc" in Box.boxes: return
out: Dict = {}
cls.det_cpu = 0.0
sorting: str = CONFIG.proc_sorting
reverse: bool = not CONFIG.proc_reversed
proc_per_cpu: bool = CONFIG.proc_per_core
search: List[str] = []
if cls.search_filter:
if cls.case_sensitive:
search = [i.strip() for i in cls.search_filter.split(",")]
else:
search = [i.strip() for i in cls.search_filter.lower().split(",")]
err: float = 0.0
n: int = 0
if CONFIG.proc_tree and sorting == "arguments":
sorting = "program"
sort_cmd = cls.sort_expr[sorting]
if CONFIG.proc_tree:
cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
else:
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt or cls.proc_interrupt:
return
if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
continue
if p.info["cmdline"] == err:
p.info["cmdline"] = ""
if p.info["username"] == err:
p.info["username"] = ""
if p.info["num_threads"] == err:
p.info["num_threads"] = 0
if search:
if cls.detailed and p.info["pid"] == cls.detailed_pid:
cls.det_cpu = p.info["cpu_percent"]
for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
if not cls.case_sensitive:
value = value.lower()
for s in search:
if s in value:
break
else: continue
break
else: continue
cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
mem = p.info["memory_percent"]
if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
mem_b = p.info["memory_info"].rss
else:
mem_b = 0
cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
out[p.info["pid"]] = {
"name" : p.info["name"],
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : p.info["num_threads"],
"username" : p.info["username"],
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu }
n += 1
cls.num_procs = n
cls.processes = out.copy()
if cls.detailed:
cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
if cls.expand > 5: cls.expand = 5
if cls.detailed and not cls.details.get("killed", False):
try:
c_pid = cls.detailed_pid
det = psutil.Process(c_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
cls.details["killed"] = True
cls.details["status"] = psutil.STATUS_DEAD
ProcBox.redraw = True
else:
attrs: List[str] = ["status", "memory_info", "create_time"]
if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
if cls.expand:
attrs.extend(["nice", "terminal"])
if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
cls.details = det.as_dict(attrs=attrs, ad_value="")
if det.parent() != None: cls.details["parent_name"] = det.parent().name()
else: cls.details["parent_name"] = ""
cls.details["pid"] = c_pid
if c_pid in cls.processes:
cls.details["name"] = cls.processes[c_pid]["name"]
cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
cls.details["username"] = cls.processes[c_pid]["username"]
cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
else:
cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
cls.details["threads"] = f'{cls.details["num_threads"]}'
cls.details["cpu_percent"] = round(cls.det_cpu)
cls.details["killed"] = False
if SYSTEM == "MacOS":
cls.details["cpu_num"] = -1
cls.details["io_counters"] = ""
if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
else: cls.details["memory_bytes"] = "? Bytes"
if isinstance(cls.details["create_time"], float):
uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
else: cls.details["uptime"] = f'{uptime}'
else: cls.details["uptime"] = "??:??:??"
if cls.expand:
if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
if SYSTEM == "BSD":
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
else: cls.details["io_write"] = "?"
else:
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
else: cls.details["io_write"] = "?"
if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
cls.details_cpu.append(cls.details["cpu_percent"])
mem = cls.details["memory_percent"]
if mem > 80: mem = round(mem)
elif mem > 60: mem = round(mem * 1.2)
elif mem > 30: mem = round(mem * 1.5)
elif mem > 10: mem = round(mem * 2)
elif mem > 5: mem = round(mem * 10)
else: mem = round(mem * 20)
cls.details_mem.append(mem)
if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
@classmethod
def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: List[str]):
		'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
out: Dict = {}
err: float = 0.0
det_cpu: float = 0.0
infolist: Dict = {}
cls.tree_counter += 1
tree = defaultdict(list)
n: int = 0
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt: return
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
n += 1
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
nonlocal infolist, proc_per_cpu, search, out, det_cpu
name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
cont: bool = True
getinfo: Dict = {}
if cls.collect_interrupt: return
try:
name = psutil.Process(pid).name()
if name == "idle": return
			except psutil.Error:
				cont = False
				name = ""
if pid in infolist:
getinfo = infolist[pid]
if search and not found:
if cls.detailed and pid == cls.detailed_pid:
det_cpu = getinfo["cpu_percent"]
if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
if not cls.case_sensitive:
value = value.lower()
for s in search:
if s in value:
found = True
break
else: continue
break
else: cont = False
if cont:
if getinfo:
if getinfo["num_threads"] == err: threads = 0
else: threads = getinfo["num_threads"]
if getinfo["username"] == err: username = ""
else: username = getinfo["username"]
cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
mem = getinfo["memory_percent"]
if getinfo["cmdline"] == err: cmd = ""
else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
mem_b = getinfo["memory_info"].rss
else:
mem_b = 0
else:
threads = mem_b = 0
username = ""
mem = cpu = 0.0
if pid in cls.collapsed:
collapse = cls.collapsed[pid]
else:
collapse = depth > CONFIG.tree_depth
cls.collapsed[pid] = collapse
if collapse_to and not search:
out[collapse_to]["threads"] += threads
out[collapse_to]["mem"] += mem
out[collapse_to]["mem_b"] += mem_b
out[collapse_to]["cpu"] += cpu
else:
if pid in tree and len(tree[pid]) > 0:
sign: str = "+" if collapse else "-"
inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
out[pid] = {
"indent" : inindent,
"name": name,
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : threads,
"username" : username,
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu,
"depth" : depth,
}
if search: collapse = False
elif collapse and not collapse_to:
collapse_to = pid
if pid not in tree:
return
children = tree[pid][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(tree[pid][-1], tree, indent + " ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
create_tree(min(tree), tree)
cls.det_cpu = det_cpu
if cls.collect_interrupt: return
if cls.tree_counter >= 100:
cls.tree_counter = 0
for pid in list(cls.collapsed):
if not psutil.pid_exists(pid):
del cls.collapsed[pid]
cls.num_procs = len(out)
cls.processes = out.copy()
@classmethod
def sorting(cls, key: str):
index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key in ["right", "l"] else -1)
if index >= len(CONFIG.sorting_options): index = 0
elif index < 0: index = len(CONFIG.sorting_options) - 1
CONFIG.proc_sorting = CONFIG.sorting_options[index]
if "left" in Key.mouse: del Key.mouse["left"]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
@classmethod
def _draw(cls):
ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def help(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
help_items: Dict[str, str] = {
"(Mouse 1)" : "Clicks buttons and selects in process list.",
"Selected (Mouse 1)" : "Show detailed information for selected process.",
"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
"(Esc, shift+m)" : "Toggles main menu.",
"(m)" : "Cycle view presets, order: full->proc->stat->user.",
"(1)" : "Toggle CPU box.",
"(2)" : "Toggle MEM box.",
"(3)" : "Toggle NET box.",
"(4)" : "Toggle PROC box.",
"(d)" : "Toggle disks view in MEM box.",
"(F2, o)" : "Shows options.",
"(F1, shift+h)" : "Shows this window.",
"(ctrl+z)" : "Sleep program and put in background.",
"(ctrl+c, q)" : "Quits program.",
"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
"(Up, k) (Down, j)" : "Select in process list.",
"(Enter)" : "Show detailed information for selected process.",
"(Spacebar)" : "Expand/collapse the selected process in tree view.",
"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
"(Home) (End)" : "Jump to first or last page in process list.",
"(Left, h) (Right, l)" : "Select previous/next sorting column.",
"(b) (n)" : "Select previous/next network device.",
"(s)" : "Toggle showing swap as a disk.",
"(i)" : "Toggle disks io mode with big graphs.",
"(z)" : "Toggle totals reset for current network device",
"(a)" : "Toggle auto scaling for the network graphs.",
"(y)" : "Toggle synced scaling mode for network graphs.",
"(f)" : "Input a NON case-sensitive process filter.",
"(shift+f)" : "Input a case-sensitive process filter.",
"(c)" : "Toggle per-core cpu usage of processes.",
"(r)" : "Reverse sorting order in processes box.",
"(e)" : "Toggle processes tree view.",
"(delete)" : "Clear any entered filter.",
"Selected (shift+t)" : "Terminate selected process with SIGTERM - 15.",
"Selected (shift+k)" : "Kill selected process with SIGKILL - 9.",
"Selected (shift+i)" : "Interrupt selected process with SIGINT - 2.",
"_1" : " ",
"_2" : "For bug reporting and project updates, visit:",
"_3" : "https://github.com/aristocratos/bpytop",
}
while not cls.close:
key = ""
if cls.resized:
y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-36
h, w = Term.height-2-y, 72
if len(help_items) > h:
pages = ceil(len(help_items) / h)
else:
h = len(help_items)
pages = 0
page = 1
out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
if pages:
out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, (keys, desc) in enumerate(help_items.items()):
if pages and n < (page - 1) * h: continue
out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
if key == "mouse_click":
mx, my = Key.get_mouse()
if x <= mx < x + w and y <= my < y + h + 3:
if pages and my == y and x + 56 < mx < x + 61:
key = "up"
elif pages and my == y and x + 63 < mx < x + 68:
key = "down"
else:
key = "escape"
if key == "q":
clean_quit()
elif key in ["escape", "M", "enter", "backspace", "H", "f1"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
redraw = True
elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
redraw = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def options(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
out_misc : str = ""
redraw: bool = True
selected_cat: str = ""
selected_int: int = 0
option_items: Dict[str, List[str]] = {}
cat_list: List[str] = []
cat_int: int = 0
change_cat: bool = False
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
d_quote: str
inputting: bool = False
input_val: str = ""
Theme.refresh()
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
categories: Dict[str, Dict[str, List[str]]] = {
"system" : {
"color_theme" : [
'Set color theme.',
'',
'Choose from all theme files in',
'"/usr/[local/]share/bpytop/themes" and',
'"~/.config/bpytop/themes".',
'',
'"Default" for builtin default theme.',
'User themes are prefixed by a plus sign "+".',
'',
'For theme updates see:',
'https://github.com/aristocratos/bpytop'],
"theme_background" : [
'If the background set by the theme should be shown.',
'',
'Set to False if you want terminal background',
'transparency.'],
"truecolor" : [
'Sets if 24-bit truecolor should be used.',
'(Requires restart to take effect!)',
'',
'Will convert 24-bit colors to 256 color',
'(6x6x6 color cube) if False.',
'',
'Set to False if your terminal doesn\'t have',
'truecolor support and can\'t convert to',
'256-color.'],
"shown_boxes" : [
'Manually set which boxes to show.',
'',
'Available values are "cpu mem net proc".',
'Separate values with whitespace.',
'',
'Toggle between presets with mode key "m".'],
"update_ms" : [
'Update time in milliseconds.',
'',
'Recommended 2000 ms or above for better sample',
'times for graphs.',
'',
'Min value: 100 ms',
'Max value: 86400000 ms = 24 hours.'],
"draw_clock" : [
'Draw a clock at top of screen.',
'(Only visible if cpu box is enabled!)',
'',
'Formatting according to strftime, empty',
'string to disable.',
'',
'Custom formatting options:',
'"/host" = hostname',
'"/user" = username',
'"/uptime" = system uptime',
'',
'Examples of strftime formats:',
'"%X" = locale HH:MM:SS',
'"%H" = 24h hour, "%I" = 12h hour',
'"%M" = minute, "%S" = second',
'"%d" = day, "%m" = month, "%y" = year'],
"background_update" : [
'Update main ui when menus are showing.',
'',
'True or False.',
'',
'Set this to False if the menus are flickering',
'too much for a comfortable experience.'],
"show_battery" : [
'Show battery stats.',
'(Only visible if cpu box is enabled!)',
'',
'Show battery stats in the top right corner',
'if a battery is present.'],
"show_init" : [
'Show init screen at startup.',
'',
'The init screen is purely cosmetic and',
'slows down startup to show status messages.'],
"update_check" : [
'Check for updates at start.',
'',
'Checks for latest version from:',
'https://github.com/aristocratos/bpytop'],
"log_level" : [
'Set loglevel for error.log',
'',
'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
'The level set includes all lower levels,',
'i.e. "DEBUG" will show all logging info.']
},
"cpu" : {
"cpu_graph_upper" : [
'Sets the CPU stat shown in upper half of',
'the CPU graph.',
'',
'"total" = Total cpu usage.',
'"user" = User mode cpu usage.',
'"system" = Kernel mode cpu usage.',
'See:',
'https://psutil.readthedocs.io/en/latest/',
'#psutil.cpu_times',
'for attributes available on specific platforms.'],
"cpu_graph_lower" : [
'Sets the CPU stat shown in lower half of',
'the CPU graph.',
'',
'"total" = Total cpu usage.',
'"user" = User mode cpu usage.',
'"system" = Kernel mode cpu usage.',
'See:',
'https://psutil.readthedocs.io/en/latest/',
'#psutil.cpu_times',
'for attributes available on specific platforms.'],
"cpu_invert_lower" : [
'Toggles orientation of the lower CPU graph.',
'',
'True or False.'],
"cpu_single_graph" : [
'Completely disable the lower CPU graph.',
'',
'Shows only upper CPU graph and resizes it',
'to fit to box height.',
'',
'True or False.'],
"check_temp" : [
'Enable cpu temperature reporting.',
'',
'True or False.'],
"cpu_sensor" : [
'Cpu temperature sensor',
'',
'Select the sensor that corresponds to',
'your cpu temperature.',
'Set to "Auto" for auto detection.'],
"show_coretemp" : [
'Show temperatures for cpu cores.',
'',
'Only works if check_temp is True and',
'the system is reporting core temps.'],
"temp_scale" : [
'Which temperature scale to use.',
'',
'Celsius, default scale.',
'',
'Fahrenheit, the American one.',
'',
'Kelvin, 0 = absolute zero, 1 degree change',
'equals 1 degree change in Celsius.',
'',
'Rankine, 0 = absolute zero, 1 degree change',
'equals 1 degree change in Fahrenheit.'],
"show_cpu_freq" : [
'Show CPU frequency',
'',
'Can cause slowdowns on systems with many',
'cores and psutil versions below 5.8.1'],
"custom_cpu_name" : [
'Custom cpu model name in cpu percentage box.',
'',
'Empty string to disable.'],
"show_uptime" : [
'Shows the system uptime in the CPU box.',
'',
'Can also be shown in the clock by using',
'"/uptime" in the formatting.',
'',
'True or False.'],
},
"mem" : {
"mem_graphs" : [
'Show graphs for memory values.',
'',
'True or False.'],
"show_disks" : [
'Split memory box to also show disks.',
'',
'True or False.'],
"show_io_stat" : [
'Toggle small IO stat graphs.',
'',
'Toggles the small IO graphs for the regular',
'disk usage view.',
'',
'True or False.'],
"io_mode" : [
'Toggles io mode for disks.',
'',
'Shows big graphs for disk read/write speeds',
'instead of used/free percentage meters.',
'',
'True or False.'],
"io_graph_combined" : [
'Toggle combined read and write graphs.',
'',
'Only has effect if "io mode" is True.',
'',
'True or False.'],
"io_graph_speeds" : [
'Set top speeds for the io graphs.',
'',
'Manually set which speed in MiB/s that equals',
'100 percent in the io graphs.',
'(10 MiB/s by default).',
'',
'Format: "device:speed", separate disks with a',
'comma ",".',
'',
'Example: "/dev/sda:100, /dev/sdb:20".'],
"show_swap" : [
'If swap memory should be shown in memory box.',
'',
'True or False.'],
"swap_disk" : [
'Show swap as a disk.',
'',
'Ignores show_swap value above.',
'Inserts itself after first disk.'],
"only_physical" : [
'Filter out non physical disks.',
'',
'Set this to False to include network disks,',
'RAM disks and similar.',
'',
'True or False.'],
"use_fstab" : [
'Read disks list from /etc/fstab.',
'(Has no effect on macOS)',
'',
'This also disables only_physical.',
'',
'True or False.'],
"disks_filter" : [
'Optional filter for shown disks.',
'',
'Should be full path of a mountpoint,',
'"root" replaces "/", separate multiple values',
'with a comma ",".',
'Begin line with "exclude=" to change to exclude',
'filter.',
'Otherwise defaults to "most include" filter.',
'',
'Example: disks_filter="exclude=/boot, /home/user"'],
},
"net" : {
"net_download" : [
'Fixed network graph download value.',
'',
'Default "10M" = 10 MebiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e. "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_upload" : [
'Fixed network graph upload value.',
'',
'Default "10M" = 10 MebiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e. "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_auto" : [
'Start in network graphs auto rescaling mode.',
'',
'Ignores any values set above at start and',
'rescales down to 10 KibiBytes at the lowest.',
'',
'True or False.'],
"net_sync" : [
'Network scale sync.',
'',
'Syncs the scaling for download and upload to',
'whichever currently has the highest scale.',
'',
'True or False.'],
"net_color_fixed" : [
'Set network graphs color gradient to fixed.',
'',
'If True the network graphs color is based',
'on the total bandwidth usage instead of',
'the current autoscaling.',
'',
'The bandwidth usage is based on the',
'"net_download" and "net_upload" values set',
'above.'],
"net_iface" : [
'Network Interface.',
'',
'Manually set the starting Network Interface.',
'Will otherwise automatically choose the NIC',
'with the highest total download since boot.'],
},
"proc" : {
"proc_update_mult" : [
'Processes update multiplier.',
'Sets how often the process list is updated as',
'a multiplier of "update_ms".',
'',
'Set to 2 or higher to greatly decrease bpytop',
'cpu usage. (Only integers)'],
"proc_sorting" : [
'Processes sorting option.',
'',
'Possible values: "pid", "program", "arguments",',
'"threads", "user", "memory", "cpu lazy" and',
'"cpu responsive".',
'',
'"cpu lazy" updates top process over time,',
'"cpu responsive" updates top process directly.'],
"proc_reversed" : [
'Reverse processes sorting order.',
'',
'True or False.'],
"proc_tree" : [
'Processes tree view.',
'',
'Set true to show processes grouped by parents,',
'with lines drawn between parent and child',
'process.'],
"tree_depth" : [
'Process tree auto collapse depth.',
'',
'Sets the depth where the tree view will auto',
'collapse processes at.'],
"proc_colors" : [
'Enable colors in process view.',
'',
'Uses the cpu graph gradient colors.'],
"proc_gradient" : [
'Enable process view gradient fade.',
'',
'Fades from top or current selection.',
'Max fade value is equal to current themes',
'"inactive_fg" color value.'],
"proc_per_core" : [
'Process usage per core.',
'',
'If process cpu usage should be of the core',
'it\'s running on or usage of the total',
'available cpu power.',
'',
'If true and process is multithreaded',
'cpu usage can reach over 100%.'],
"proc_mem_bytes" : [
'Show memory as bytes in process list.',
' ',
'True or False.'],
}
}
loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
cpu_graph_i: Dict[str, int] = { "cpu_graph_upper" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_upper),
"cpu_graph_lower" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_lower)}
temp_scale_i: int = CONFIG.temp_scales.index(CONFIG.temp_scale)
color_i: int
max_opt_len: int = max([len(categories[x]) for x in categories]) * 2
cat_list = list(categories)
while not cls.close:
key = ""
if cls.resized or change_cat:
cls.resized = change_cat = False
selected_cat = list(categories)[cat_int]
option_items = categories[cat_list[cat_int]]
option_len: int = len(option_items) * 2
y = 12 if Term.height < max_opt_len + 13 else Term.height // 2 - max_opt_len // 2 + 7
out_misc = (f'{Banner.draw(y-10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-38
x2 = x + 27
h, w, w2 = min(Term.height-1-y, option_len), 26, 50
h -= h % 2
color_i = list(Theme.themes).index(THEME.current)
out_misc += create_box(x, y - 3, w+w2+1, 3, f'tab{Symbol.right}', line_color=THEME.div_line)
out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
redraw = True
cat_width = floor((w+w2) / len(categories))
out_misc += f'{Fx.b}'
for cx, cat in enumerate(categories):
out_misc += f'{Mv.to(y-2, x + 1 + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 ))}'
if cat == selected_cat:
out_misc += f'{THEME.hi_fg}[{THEME.title}{Fx.u}{cat}{Fx.uu}{THEME.hi_fg}]'
else:
out_misc += f'{THEME.hi_fg}{SUPERSCRIPT[cx+1]}{THEME.title}{cat}'
out_misc += f'{Fx.ub}'
if option_len > h:
pages = ceil(option_len / h)
else:
h = option_len
pages = 0
page = pages if selected_int == -1 and pages > 0 else 1
selected_int = 0 if selected_int >= 0 else len(option_items) - 1
if redraw:
out = ""
cy = 0
selected = list(option_items)[selected_int]
if pages:
out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, opt in enumerate(option_items):
if pages and n < (page - 1) * ceil(h / 2): continue
value = getattr(CONFIG, opt)
t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
v_color = "" if opt == selected else f'{THEME.title}'
d_quote = '"' if isinstance(value, str) else ""
if opt == "color_theme":
counter = f' {color_i + 1}/{len(Theme.themes)}'
elif opt == "proc_sorting":
counter = f' {CONFIG.sorting_options.index(CONFIG.proc_sorting) + 1}/{len(CONFIG.sorting_options)}'
elif opt == "log_level":
counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
elif opt == "cpu_sensor":
counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
elif opt in ["cpu_graph_upper", "cpu_graph_lower"]:
counter = f' {cpu_graph_i[opt] + 1}/{len(CONFIG.cpu_percent_fields)}'
elif opt == "temp_scale":
counter = f' {temp_scale_i + 1}/{len(CONFIG.temp_scales)}'
else:
counter = ""
out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
if opt == selected:
if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "cpu_sensor", "cpu_graph_upper", "cpu_graph_lower", "temp_scale"]:
out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
elif inputting:
out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
else:
out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
else:
out += f'{d_quote + str(value) + d_quote:^24.24}'
out += f'{Term.bg}'
if opt == selected:
h2 = len(option_items[opt]) + 2
y2 = y + (selected_int * 2) - ((page-1) * h)
if y2 + h2 > Term.height: y2 = Term.height - h2
out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
for n, desc in enumerate(option_items[opt]):
out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
cy += 2
if cy >= h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
if not skip or redraw:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
redraw = True
has_sel = False
if key == "mouse_click" and not inputting:
mx, my = Key.get_mouse()
if x < mx < x + w + w2 and y - 4 < my < y:
# if my == y - 2:
for cx, cat in enumerate(categories):
ccx = x + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 )
if ccx - 2 < mx < ccx + 2 + len(cat):
key = str(cx+1)
break
elif x < mx < x + w and y < my < y + h + 2:
mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
if pages and my == y+h+1 and x+11 < mx < x+16:
key = "page_up"
elif pages and my == y+h+1 and x+19 < mx < x+24:
key = "page_down"
elif my == y+h+1:
pass
elif mouse_sel == selected_int:
if mx < x + 6:
key = "left"
elif mx > x + 19:
key = "right"
else:
key = "enter"
elif mouse_sel < len(option_items):
selected_int = mouse_sel
has_sel = True
else:
key = "escape"
if inputting:
if key in ["escape", "mouse_click"]:
inputting = False
elif key == "enter":
inputting = False
if str(getattr(CONFIG, selected)) != input_val:
if selected == "update_ms":
if not input_val or int(input_val) < 100:
CONFIG.update_ms = 100
elif int(input_val) > 86399900:
CONFIG.update_ms = 86399900
else:
CONFIG.update_ms = int(input_val)
elif selected == "proc_update_mult":
if not input_val or int(input_val) < 1:
CONFIG.proc_update_mult = 1
else:
CONFIG.proc_update_mult = int(input_val)
Collector.proc_counter = 1
elif selected == "tree_depth":
if not input_val or int(input_val) < 0:
CONFIG.tree_depth = 0
else:
CONFIG.tree_depth = int(input_val)
ProcCollector.collapsed = {}
elif selected == "shown_boxes":
new_boxes: List = []
for box in input_val.split():
if box in ["cpu", "mem", "net", "proc"]:
new_boxes.append(box)
CONFIG.shown_boxes = " ".join(new_boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
elif isinstance(getattr(CONFIG, selected), str):
setattr(CONFIG, selected, input_val)
if selected.startswith("net_"):
NetCollector.net_min = {"download" : -1, "upload" : -1}
elif selected == "draw_clock":
Box.clock_on = len(CONFIG.draw_clock) > 0
if not Box.clock_on: Draw.clear("clock", saved=True)
elif selected == "io_graph_speeds":
MemBox.graph_speeds = {}
Term.refresh(force=True)
cls.resized = False
elif key == "backspace" and len(input_val):
input_val = input_val[:-1]
elif key == "delete":
input_val = ""
elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
input_val += key
elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
input_val += key
elif key == "q":
clean_quit()
elif key in ["escape", "o", "M", "f2"]:
cls.close = True
break
elif key == "tab" or (key == "down" and selected_int == len(option_items) - 1 and (page == pages or pages == 0)):
if cat_int == len(categories) - 1:
cat_int = 0
else:
cat_int += 1
change_cat = True
elif key == "shift_tab" or (key == "up" and selected_int == 0 and page == 1):
if cat_int == 0:
cat_int = len(categories) - 1
else:
cat_int -= 1
change_cat = True
selected_int = -1 if key != "shift_tab" else 0
elif key in list(map(str, range(1, len(cat_list)+1))) and key != str(cat_int + 1):
cat_int = int(key) - 1
change_cat = True
elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download",
"net_upload", "draw_clock", "tree_depth", "proc_update_mult", "shown_boxes", "net_iface", "io_graph_speeds"]:
inputting = True
input_val = str(getattr(CONFIG, selected))
elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "left" and selected == "proc_update_mult" and CONFIG.proc_update_mult > 1:
CONFIG.proc_update_mult -= 1
Collector.proc_counter = 1
elif key == "right" and selected == "proc_update_mult":
CONFIG.proc_update_mult += 1
Collector.proc_counter = 1
elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
CONFIG.tree_depth -= 1
ProcCollector.collapsed = {}
elif key == "right" and selected == "tree_depth":
CONFIG.tree_depth += 1
ProcCollector.collapsed = {}
elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
setattr(CONFIG, selected, not getattr(CONFIG, selected))
if selected == "check_temp":
if CONFIG.check_temp:
CpuCollector.get_sensors()
else:
CpuCollector.sensor_method = ""
CpuCollector.got_sensors = False
if selected in ["net_auto", "net_color_fixed", "net_sync"]:
if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
NetBox.redraw = True
if selected == "theme_background":
Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(Term.bg)
if selected == "show_battery":
Draw.clear("battery", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
if key == "left":
color_i -= 1
if color_i < 0: color_i = len(Theme.themes) - 1
elif key == "right":
color_i += 1
if color_i > len(Theme.themes) - 1: color_i = 0
Collector.collect_idle.wait()
CONFIG.color_theme = list(Theme.themes)[color_i]
THEME(CONFIG.color_theme)
Term.refresh(force=True)
Timer.finish()
elif key in ["left", "right"] and selected == "proc_sorting":
ProcCollector.sorting(key)
elif key in ["left", "right"] and selected == "log_level":
if key == "left":
loglevel_i -= 1
if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
elif key == "right":
loglevel_i += 1
if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
CONFIG.log_level = CONFIG.log_levels[loglevel_i]
errlog.setLevel(getattr(logging, CONFIG.log_level))
errlog.info(f'Loglevel set to {CONFIG.log_level}')
elif key in ["left", "right"] and selected in ["cpu_graph_upper", "cpu_graph_lower"]:
if key == "left":
cpu_graph_i[selected] -= 1
if cpu_graph_i[selected] < 0: cpu_graph_i[selected] = len(CONFIG.cpu_percent_fields) - 1
if key == "right":
cpu_graph_i[selected] += 1
if cpu_graph_i[selected] > len(CONFIG.cpu_percent_fields) - 1: cpu_graph_i[selected] = 0
setattr(CONFIG, selected, CONFIG.cpu_percent_fields[cpu_graph_i[selected]])
setattr(CpuCollector, selected.replace("_graph", ""), [])
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "temp_scale":
if key == "left":
temp_scale_i -= 1
if temp_scale_i < 0: temp_scale_i = len(CONFIG.temp_scales) - 1
if key == "right":
temp_scale_i += 1
if temp_scale_i > len(CONFIG.temp_scales) - 1: temp_scale_i = 0
CONFIG.temp_scale = CONFIG.temp_scales[temp_scale_i]
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
if key == "left":
cpu_sensor_i -= 1
if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
elif key == "right":
cpu_sensor_i += 1
if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
Collector.collect_idle.wait()
CpuCollector.sensor_swap = True
CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
CpuCollector.get_sensors()
Term.refresh(force=True)
cls.resized = False
elif key in ["up", "mouse_scroll_up"]:
selected_int -= 1
if selected_int < 0: selected_int = len(option_items) - 1
page = floor(selected_int * 2 / h) + 1
elif key in ["down", "mouse_scroll_down"]:
selected_int += 1
if selected_int > len(option_items) - 1: selected_int = 0
page = floor(selected_int * 2 / h) + 1
elif key == "page_up":
if not pages or page == 1:
selected_int = 0
else:
page -= 1
if page < 1: page = pages
selected_int = (page-1) * ceil(h / 2)
elif key == "page_down":
if not pages or page == pages:
selected_int = len(option_items) - 1
else:
page += 1
if page > pages: page = 1
selected_int = (page-1) * ceil(h / 2)
elif has_sel:
pass
else:
redraw = False
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
class Timer:
timestamp: float
return_zero = False
@classmethod
def stamp(cls):
cls.timestamp = time()
@classmethod
def not_zero(cls) -> bool:
if cls.return_zero:
cls.return_zero = False
return False
return cls.timestamp + (CONFIG.update_ms / 1000) > time()
@classmethod
def left(cls) -> float:
t_left: float = cls.timestamp + (CONFIG.update_ms / 1000) - time()
if t_left > CONFIG.update_ms / 1000:
cls.stamp()
return CONFIG.update_ms / 1000
return t_left
@classmethod
def finish(cls):
cls.return_zero = True
cls.timestamp = time() - (CONFIG.update_ms / 1000)
Key.break_wait()
class UpdateChecker:
version: str = VERSION
thread: threading.Thread
@classmethod
def run(cls):
cls.thread = threading.Thread(target=cls._checker)
cls.thread.start()
@classmethod
def _checker(cls):
try:
with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
for line in source:
line = line.decode("utf-8")
if line.startswith("VERSION: str ="):
cls.version = line[(line.index("=")+1):].strip('" \n')
break
except Exception as e:
errlog.exception(f'{e}')
else:
if cls.version != VERSION and which("notify-send"):
try:
subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except Exception as e:
errlog.exception(f'{e}')
class Init:
running: bool = True
initbg_colors: List[str] = []
initbg_data: List[int]
initbg_up: Graph
initbg_down: Graph
resized = False
@classmethod
def start(cls):
Draw.buffer("init", z=1)
Draw.buffer("initbg", z=10)
for i in range(51):
for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
for _i in range(7):
perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
Draw.out("banner")
Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
@classmethod
def success(cls):
if not CONFIG.show_init or cls.resized: return
cls.draw_bg(5)
Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
@staticmethod
def fail(err):
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
sleep(2)
errlog.exception(f'{err}')
clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
@classmethod
def draw_bg(cls, times: int = 5):
for _ in range(times):
sleep(0.05)
x = randint(0, 100)
Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
Draw.out("initbg", "banner", "init")
@classmethod
def done(cls):
cls.running = False
if not CONFIG.show_init: return
if cls.resized:
Draw.now(Term.clear)
else:
cls.draw_bg(10)
Draw.clear("initbg", "banner", "init", saved=True)
if cls.resized: return
del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
nlist: List = []
command: str = ""
cmd_out: str = ""
rem_line: str = ""
if SYSTEM == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif SYSTEM == "MacOS":
command = "sysctl -n machdep.cpu.brand_string"
elif SYSTEM == "BSD":
command = "sysctl hw.model"
rem_line = "hw.model"
try:
cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
except:
pass
if rem_line:
for line in cmd_out.split("\n"):
if rem_line in line:
name = re.sub(".*" + rem_line + ".*:", "", line, 1).lstrip()
else:
name = cmd_out
nlist = name.split(" ")
try:
if "Xeon" in name and "CPU" in name:
name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
elif "Ryzen" in name:
name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
elif "Duo" in name and "@" in name:
name = " ".join(nlist[:nlist.index("@")])
elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
name = nlist[nlist.index("CPU")-1]
except:
pass
name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
name = " ".join(name.split())
return name
def get_cpu_core_mapping() -> List[int]:
mapping: List[int] = []
core_ids: List[int] = []
if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
try:
mapping = [0] * THREADS
num = 0
with open("/proc/cpuinfo", "r") as f:
for line in f:
if line.startswith("processor"):
num = int(line.strip()[(line.index(": ")+2):])
if num > THREADS - 1:
break
elif line.startswith("core id"):
core_id = int(line.strip()[(line.index(": ")+2):])
if core_id not in core_ids:
core_ids.append(core_id)
mapping[num] = core_ids.index(core_id)
if num < THREADS - 1:
raise Exception
except:
mapping = []
if not mapping:
mapping = []
for _ in range(THREADS // CORES):
mapping.extend([x for x in range(CORES)])
return mapping
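# Illustrative note (added comment, not part of the original source): on a
# typical hyperthreaded 4-core/8-thread CPU the mapping produced above is
# something like [0, 1, 2, 3, 0, 1, 2, 3], i.e. logical cpu index -> physical
# core index. The exact order depends on how /proc/cpuinfo enumerates "core id",
# and the fallback simply repeats range(CORES) for each hardware thread group.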
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
'''Create a box from a box object or by given arguments'''
out: str = f'{Term.fg}{Term.bg}'
num: int = 0
if not line_color: line_color = THEME.div_line
if not title_color: title_color = THEME.title
#* Get values from box class if given
if box:
x = box.x
y = box.y
width = box.width
height = box.height
title = box.name
num = box.num
hlines: Tuple[int, int] = (y, y + height - 1)
out += f'{line_color}'
#* Draw all horizontal lines
for hpos in hlines:
out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
#* Draw all vertical lines and fill if enabled
for hpos in range(hlines[0]+1, hlines[1]):
out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
#* Draw corners
out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
#* Draw titles if enabled
if title:
numbered: str = "" if not num else f'{THEME.hi_fg(SUPERSCRIPT[num])}'
out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{Fx.b}{numbered}{title_color}{title}{Fx.ub}{line_color}{Symbol.title_right}'
if title2:
out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
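# Illustrative usage sketch (added comment, not in the original source).
# It assumes THEME and Draw from this module are already initialised; the
# coordinates and title are arbitrary example values:
#   Draw.now(create_box(x=10, y=5, width=40, height=10, title="demo"))
# This draws a 40x10 framed box with a "demo" title at terminal position (5, 10).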
def now_sleeping(signum, frame):
"""Reset terminal settings and stop background input read before putting to sleep"""
Key.stop()
Collector.stop()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
"""Set terminal settings and restart background input read"""
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Key.start()
Term.refresh()
Box.calc_sizes()
Box.draw_bg()
Collector.start()
def quit_sigint(signum, frame):
"""SIGINT redirection to clean_quit()"""
clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
"""Stop background input read, save current config and reset terminal settings before quitting"""
global THREAD_ERROR
if thread:
THREAD_ERROR = errcode
interrupt_main()
return
if THREAD_ERROR: errcode = THREAD_ERROR
Key.stop()
Collector.stop()
if not errcode: CONFIG.save_config()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
if errcode == 0:
errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
else:
errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
if errmsg: print(errmsg)
raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
* bit=True or defaults to bytes
* start=int to set 1024 multiplier starting unit
* short=True always returns 0 decimals and shortens unit to 1 character
'''
out: str = ""
mult: int = 8 if bit else 1
selector: int = start
unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
if isinstance(value, float): value = round(value * 100 * mult)
elif value > 0: value *= 100 * mult
else: value = 0
while len(f'{value}') > 5 and value >= 102400:
value >>= 10
if value < 100:
out = f'{value}'
break
selector += 1
else:
if len(f'{value}') == 4 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2]
elif len(f'{value}') == 3 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2:]
elif len(f'{value}') >= 2:
out = f'{value}'[:-2]
else:
out = f'{value}'
if short:
if "." in out:
out = f'{round(float(out))}'
if len(out) > 3:
out = f'{int(out[0]) + 1}'
selector += 1
out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
if per_second: out += "ps" if bit else "/s"
return out
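# Illustrative examples (added comments, not in the original source), assuming
# the module's default UNITS table ("Byte", "KiB", "MiB", ... / "bit", "Kib", ...):
#   floating_humanizer(1024)                  -> "1.00 KiB"
#   floating_humanizer(1024, per_second=True) -> "1.00 KiB/s"
#   floating_humanizer(100, bit=True)         -> "800 bit"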
def units_to_bytes(value: str) -> int:
if not value: return 0
out: int = 0
mult: int = 0
bit: bool = False
value_i: int = 0
units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
try:
if value.lower().endswith("s"):
value = value[:-1]
if value.lower().endswith("bit"):
bit = True
value = value[:-3]
elif value.lower().endswith("byte"):
value = value[:-4]
if value[-1].lower() in units:
mult = units[value[-1].lower()]
value = value[:-1]
if "." in value and value.replace(".", "").isdigit():
if mult > 0:
value_i = round(float(value) * 1024)
mult -= 1
else:
value_i = round(float(value))
elif value.isdigit():
value_i = int(value)
out = int(value_i) << (10 * mult)
if bit: out = round(out / 8)
except ValueError:
out = 0
return out
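# Illustrative examples (added comments, not in the original source):
#   units_to_bytes("10M")     -> 10485760   (10 * 1024**2)
#   units_to_bytes("100Mbit") -> 13107200   (100 * 1024**2 / 8, rounded)
#   units_to_bytes("512")     -> 512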
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
return max(min_value, min(value, max_value))
def readfile(file: str, default: str = "") -> str:
out: Union[str, None] = None
if os.path.isfile(file):
try:
with open(file, "r") as f:
out = f.read().strip()
except:
pass
return default if out is None else out
def temperature(value: int, scale: str = "celsius") -> Tuple[int, str]:
"""Returns a tuple with integer value and string unit converted from an integer in celsius to: celsius, fahrenheit, kelvin or rankine."""
if scale == "celsius":
return (value, "°C")
elif scale == "fahrenheit":
return (round(value * 1.8 + 32), "°F")
elif scale == "kelvin":
return (round(value + 273.15), "K ")
elif scale == "rankine":
return (round(value * 1.8 + 491.67), "°R")
else:
return (0, "")
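# Illustrative examples (added comments, not in the original source):
#   temperature(25)               -> (25, "°C")
#   temperature(25, "fahrenheit") -> (77, "°F")
#   temperature(25, "kelvin")     -> (298, "K ")
#   temperature(25, "rankine")    -> (537, "°R")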
def process_keys():
mouse_pos: Tuple[int, int] = (0, 0)
filtered: bool = False
box_keys = {"1" : "cpu", "2" : "mem", "3" : "net", "4" : "proc"}
while Key.has_key():
key = Key.get()
found: bool = True
if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
mouse_pos = Key.get_mouse()
if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
pass
elif key == "mouse_click":
key = "mouse_unselect"
else:
key = "_null"
if ProcBox.filtering:
if key in ["enter", "mouse_click", "mouse_unselect"]:
ProcBox.filtering = False
Collector.collect(ProcCollector, redraw=True, only_draw=True)
continue
elif key in ["escape", "delete"]:
ProcCollector.search_filter = ""
ProcBox.filtering = False
elif len(key) == 1:
ProcCollector.search_filter += key
elif key == "backspace" and len(ProcCollector.search_filter) > 0:
ProcCollector.search_filter = ProcCollector.search_filter[:-1]
else:
continue
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
if filtered: Collector.collect_done.wait(0.1)
filtered = True
continue
if key == "_null":
continue
elif key == "q":
clean_quit()
elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "-" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key in ["M", "escape"]:
Menu.main()
elif key in ["o", "f2"]:
Menu.options()
elif key in ["H", "f1"]:
Menu.help()
elif key == "m":
if list(Box.view_modes).index(Box.view_mode) + 1 > len(list(Box.view_modes)) - 1:
Box.view_mode = list(Box.view_modes)[0]
else:
Box.view_mode = list(Box.view_modes)[(list(Box.view_modes).index(Box.view_mode) + 1)]
CONFIG.shown_boxes = " ".join(Box.view_modes[Box.view_mode])
Draw.clear(saved=True)
Term.refresh(force=True)
elif key in box_keys:
boxes = CONFIG.shown_boxes.split()
if box_keys[key] in boxes:
boxes.remove(box_keys[key])
else:
boxes.append(box_keys[key])
CONFIG.shown_boxes = " ".join(boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
Term.refresh(force=True)
else:
found = False
if found: continue
if "proc" in Box.boxes:
if key in ["left", "right", "h", "l"]:
ProcCollector.sorting(key)
elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
if ProcBox.selected_pid in ProcCollector.collapsed:
ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "e":
CONFIG.proc_tree = not CONFIG.proc_tree
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "r":
CONFIG.proc_reversed = not CONFIG.proc_reversed
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "c":
CONFIG.proc_per_core = not CONFIG.proc_per_core
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key in ["/", "f", "F"]:
ProcBox.filtering = True
ProcCollector.case_sensitive = key == "F"
if not ProcCollector.search_filter: ProcBox.start = 0
Collector.collect(ProcCollector, redraw=True, only_draw=True)
elif key in ["T", "K", "I"] and (ProcBox.selected > 0 or ProcCollector.detailed):
pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
if psutil.pid_exists(pid):
if key == "T": sig = signal.SIGTERM
elif key == "K": sig = signal.SIGKILL
elif key == "I": sig = signal.SIGINT
try:
os.kill(pid, sig)
except Exception as e:
errlog.error(f'Exception when sending signal {sig} to pid {pid}')
errlog.exception(f'{e}')
elif key == "delete" and ProcCollector.search_filter:
ProcCollector.search_filter = ""
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key == "enter":
if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
ProcCollector.detailed = True
ProcBox.last_selection = ProcBox.selected
ProcBox.selected = 0
ProcCollector.detailed_pid = ProcBox.selected_pid
ProcBox.resized = True
Collector.proc_counter = 1
elif ProcCollector.detailed:
ProcBox.selected = ProcBox.last_selection
ProcBox.last_selection = 0
ProcCollector.detailed = False
ProcCollector.detailed_pid = None
ProcBox.resized = True
Collector.proc_counter = 1
else:
continue
ProcCollector.details = {}
ProcCollector.details_cpu = []
ProcCollector.details_mem = []
Graphs.detailed_cpu = NotImplemented
Graphs.detailed_mem = NotImplemented
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect", "j", "k"]:
ProcBox.selector(key, mouse_pos)
if "net" in Box.boxes:
if key in ["b", "n"]:
NetCollector.switch(key)
elif key == "z":
NetCollector.reset = not NetCollector.reset
Collector.collect(NetCollector, redraw=True)
elif key == "y":
CONFIG.net_sync = not CONFIG.net_sync
Collector.collect(NetCollector, redraw=True)
elif key == "a":
NetCollector.auto_min = not NetCollector.auto_min
NetCollector.net_min = {"download" : -1, "upload" : -1}
Collector.collect(NetCollector, redraw=True)
if "mem" in Box.boxes:
if key == "g":
CONFIG.mem_graphs = not CONFIG.mem_graphs
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "s":
Collector.collect_idle.wait()
CONFIG.swap_disk = not CONFIG.swap_disk
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "d":
Collector.collect_idle.wait()
CONFIG.show_disks = not CONFIG.show_disks
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "i":
Collector.collect_idle.wait()
CONFIG.io_mode = not CONFIG.io_mode
Collector.collect(MemCollector, interrupt=True, redraw=True)
#? Pre main -------------------------------------------------------------------------------------->
CPU_NAME: str = get_cpu_name()
CORE_MAP: List[int] = get_cpu_core_mapping()
THEME: Theme
def main():
global THEME
Term.width = os.get_terminal_size().columns
Term.height = os.get_terminal_size().lines
#? Init -------------------------------------------------------------------------------------->
if DEBUG: TimeIt.start("Init")
#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
#Term.refresh(force=True)
#? Start a thread checking for updates while running init
if CONFIG.update_check: UpdateChecker.run()
#? Draw banner and init status
if CONFIG.show_init and not Init.resized:
Init.start()
#? Load theme
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
try:
THEME = Theme(CONFIG.color_theme)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup boxes
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
try:
if CONFIG.check_temp: CpuCollector.get_sensors()
Box.calc_sizes()
Box.draw_bg(now=False)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
try:
signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
signal.signal(signal.SIGCONT, now_awake) #* Resume
signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for reading keyboard input
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
try:
if isinstance(sys.stdin, io.TextIOWrapper) and sys.version_info >= (3, 7):
sys.stdin.reconfigure(errors="ignore") # type: ignore
Key.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for data collection and drawing
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
try:
Collector.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Collect data and draw to buffer
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
try:
Collector.collect(draw_now=False)
pass
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Draw to screen
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
try:
Collector.collect_done.wait()
except Exception as e:
Init.fail(e)
else:
Init.success()
Init.done()
Term.refresh()
Draw.out(clear=True)
if CONFIG.draw_clock:
Box.clock_on = True
if DEBUG: TimeIt.stop("Init")
#? Main loop ------------------------------------------------------------------------------------->
def run():
while not False:
Term.refresh()
Timer.stamp()
while Timer.not_zero():
if Key.input_wait(Timer.left()):
process_keys()
Collector.collect()
#? Start main loop
try:
run()
except Exception as e:
errlog.exception(f'{e}')
clean_quit(1)
else:
#? Quit cleanly even if false starts being true...
clean_quit()
if __name__ == "__main__":
main()
|
force_align.py
|
#!/usr/bin/env python
import os
import subprocess
import sys
import threading
from os.path import join, dirname
PATH_FAST_ALIGN = join(dirname(__file__), 'fast_align', 'build')
# Simplified, non-threadsafe version for force_align.py
# Use the version in realtime for development
class Aligner:
def __init__(self, fwd_params, fwd_err, rev_params, rev_err, heuristic='grow-diag-final-and'):
build_root = PATH_FAST_ALIGN
fast_align = os.path.join(build_root, 'fast_align')
atools = os.path.join(build_root, 'atools')
if heuristic == 'fwd':
rev_params = ''
rev_err = ''
elif heuristic == 'rev':
fwd_params = ''
fwd_err = ''
if fwd_err != '':
(fwd_T, fwd_m) = self.read_err(fwd_err)
fwd_cmd = [fast_align, '-i', '-', '-d', '-T', fwd_T, '-m', fwd_m, '-f', fwd_params]
self.fwd_align = popen_io(fwd_cmd)
else:
self.fwd_align = None
if rev_err != '':
(rev_T, rev_m) = self.read_err(rev_err)
rev_cmd = [fast_align, '-i', '-', '-d', '-T', rev_T, '-m', rev_m, '-f', rev_params, '-r']
self.rev_align = popen_io(rev_cmd)
else:
self.rev_align = None
tools_cmd = [atools, '-i', '-', '-j', '-', '-c', heuristic]
self.tools = popen_io(tools_cmd)
def align(self, line):
# f words ||| e words ||| links ||| score
if self.fwd_align is not None:
line_to_write = '{}\n'.format(line)
self.fwd_align.stdin.write(line_to_write.encode('utf-8'))
fwd_line = self.fwd_align.stdout.readline().split('|||')[2].strip()
if self.rev_align is not None:
line_to_write = '{}\n'.format(line)
self.rev_align.stdin.write(line_to_write.encode('utf-8'))
rev_line = self.rev_align.stdout.readline().split('|||')[2].strip()
if self.fwd_align is not None and self.rev_align is not None:
fwd_line_to_write = '{}\n'.format(fwd_line)
rev_line_to_write = '{}\n'.format(rev_line)
self.tools.stdin.write(fwd_line_to_write.encode('utf-8'))
self.tools.stdin.write(rev_line_to_write.encode('utf-8'))
al_line = self.tools.stdout.readline().strip()
return al_line
elif self.fwd_align is not None:
return fwd_line
elif self.rev_align is not None:
return rev_line
else:
return ''
def close(self):
if self.fwd_align is not None:
self.fwd_align.stdin.close()
self.fwd_align.wait()
if self.rev_align is not None:
self.rev_align.stdin.close()
self.rev_align.wait()
self.tools.stdin.close()
self.tools.wait()
def read_err(self, err):
(T, m) = ('', '')
for line in open(err):
# expected target length = source length * N
if 'expected target length' in line:
m = line.split()[-1]
# final tension: N
elif 'final tension' in line:
T = line.split()[-1]
return (T, m)
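# Illustrative usage sketch (added comment, not in the original source). The
# parameter/err file names are placeholders for output produced by fast_align,
# and the printed alignment is only an example of the "i-j" link format:
#   aligner = Aligner('fwd.params', 'fwd.err', 'rev.params', 'rev.err')
#   print(aligner.align('el perro ||| the dog'))   # e.g. "0-0 1-1"
#   aligner.close()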
def popen_io(cmd):
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def consume(s):
for _ in s:
pass
threading.Thread(target=consume, args=(p.stderr,)).start()
return p
def main():
if len(sys.argv[1:]) < 4:
sys.stderr.write('run:\n')
sys.stderr.write(' fast_align -i corpus.f-e -d -v -o -p fwd_params >fwd_align 2>fwd_err\n')
sys.stderr.write(' fast_align -i corpus.f-e -r -d -v -o -p rev_params >rev_align 2>rev_err\n')
sys.stderr.write('\n')
sys.stderr.write('then run:\n')
sys.stderr.write(
' {} fwd_params fwd_err rev_params rev_err [heuristic] <in.f-e >out.f-e.gdfa\n'.format(sys.argv[0]))
sys.stderr.write('\n')
sys.stderr.write(
'where heuristic is one of: (intersect union grow-diag grow-diag-final grow-diag-final-and) '
'default=grow-diag-final-and\n')
sys.exit(2)
aligner = Aligner(*sys.argv[1:])
while True:
line = sys.stdin.readline()
if not line:
break
sys.stdout.write('{}\n'.format(aligner.align(line.strip())))
sys.stdout.flush()
aligner.close()
if __name__ == '__main__':
main()
|
bus_trans_py2.py
|
import socket
import threading
import time
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_host = raw_input('server host: ')
port1 = input('server port: ')
server_port = int(port1)
client_host = raw_input('client host: ')
port2 = input('client port: ')
client_port = int(port2)
#server.bind(('192.168.4.100', 8888))
server.bind((server_host, server_port))
server.listen(5)
print(server.getsockname())
print('waiting for connect...')
connect, (host, port) = server.accept()
peer_name = connect.getpeername()
sock_name = connect.getsockname()
print(u'the client %s:%s has connected.' % (host, port))
print('The peer name is %s and sock name is %s' % (peer_name, sock_name))
#client.connect(('192.168.4.102', 8886))
client.connect((client_host, client_port))
class Message:
def listen(self):
while True:
Rx = connect.recv(1024)
# hex_bytes = bytes.fromhex(data)
hex_string = Rx.encode('hex')
#print(type(hex_string))
#connect.sendall(b'your words has received.')
print('the client says: ' + hex_string)
time.sleep(0.1)
print('enter message:')
# print(hex_string)
# print(data)
def talk(self):
while True:
print('enter message: ')
Tx = raw_input()
if Tx == 'esc':
#server.close()
#client.close()
print('bye')
# the feature undone
else:
try:
hex_data = Tx.decode('hex')
client.sendall(hex_data)
except Exception as e:
print(e)
print('failed to decode hex string')
#data = data.encode('utf-8')
#rec_data = client.recv(1024)
#hex_string = rec_data.hex()
#print('form server receive:' + hex_string)
if __name__ =='__main__':
message=Message()
#threads = []
thread_listen=threading.Thread(target = message.listen, args = (),name='Listen')
thread_talk=threading.Thread(target = message.talk, args = (),name='Talk')
thread_listen.start()
thread_talk.start()
thread_listen.join()
thread_talk.join()
server.close()
client.close()
|
processes.py
|
from collections import abc
from pathlib import Path
import time
import re
from threading import Thread
from api import audio
from api import gui
from init_phase import init_phase
from viability_phase import viability_phase
from evaluation_phase import create_datetime_subdir, evaluation_phase
from evaluation_phase import load_all_learning_profiles
from phase_utils import retrieve_best_learning_profiles, EvaluationMode
from phase_utils import PresentationMode, get_specific_learning_profiles
from phase_utils import sort_by_score
from user_phase import user_phase
from presentation_phase import presentation_phase
def _phase_header(header):
n = len(header)
print()
print("#" * (n + 8))
print(f"# {'=' * (n + 4)} #")
print(f"# = {header} = #")
print(f"# {'=' * (n + 4)} #")
print("#" * (n + 8))
print()
def _pick_learning_profiles(learning_profiles: list):
# Show info
n = len(learning_profiles)
print(f"There are {n} Learning Profiles.")
# Prompt for starting index
start_index = -1
print(f"Pick a starting index between 0 and {n-1}:")
while True:
try:
start_index = int(input("> "))
if 0 <= start_index < n:
break
except ValueError:
continue
# Prompt for stopping index
stop_index = -1
print(f"Pick a stopping index between {start_index} and {n-1}:")
while True:
try:
stop_index = int(input("> "))
if start_index <= stop_index < n:
break
except ValueError:
continue
return learning_profiles[start_index:(stop_index+1)]
def _pick_multiple_learning_profiles(learning_profiles: list):
# Initial prompt
print("Pick what Learning Profiles to evaluate.")
indexed_lps = {i: lp for i, lp in enumerate(learning_profiles)}
picked_inds = []
while True:
# Print unpicked LPs
print(
"Learning Profiles to pick from: (Train DS, Test DS, AL, ML, "
"hyperparameters)")
if len(picked_inds) == len(indexed_lps):
print("\t-")
else:
for i, lp in indexed_lps.items():
if i not in picked_inds:
print(f"\t{i}: {lp.get_name()}")
# Print picked LPs
print("Picked Learning Profiles:")
if not picked_inds:
print("\t-")
else:
for i in sorted(picked_inds):
print(f"\t{i}: {indexed_lps[i].get_id()}")
# Input prompt
print("Enter indices in the format 'i' or 'i-j'.")
print("Drop staged Learning Profiles with 'drop i'.")
print("Write 'done' when you are done.")
# Handle input
try:
idx = input("> ")
if idx == "done": # Check if done
break
elif bool(re.match("^[0-9]+-[0-9]+$", idx)): # Check if range
span_str = idx.split("-")
picked_inds += [i for i in range(
int(span_str[0]), int(span_str[1]) + 1)
if i not in picked_inds]
elif bool(re.match("^drop [0-9]+$", idx)):
picked_inds.remove(int(idx.split()[1]))
elif int(idx) in indexed_lps.keys() \
and int(idx) not in picked_inds: # Check if singular
picked_inds.append(int(idx))
except ValueError:
continue
return [indexed_lps[i] for i in picked_inds]
def _nested_dict_ids(nested):
for _, value in nested.items():
if isinstance(value, abc.Mapping):
yield from _nested_dict_ids(value)
elif isinstance(value, abc.Iterable):
for lp in value:
yield lp.get_id()
else:
raise ValueError(f"Invalid structure (value was '{value}')")
def _best_learning_profiles(input_dir: Path, learning_profiles: list,
n_lps_per_cat: int):
# Load learning profile descriptions and choose best
lp_descs = load_all_learning_profiles(input_dir)
lp_descs_best = retrieve_best_learning_profiles(lp_descs, n_lps_per_cat)
# Use descriptions to retrieve actual learning profiles
return [lp for lp in learning_profiles
if lp.get_id() in _nested_dict_ids(lp_descs_best)]
def model_selection_process(data_dir: Path, output_dir: Path,
sliding_window_length: int,
batch_size: int, num_iterations: int,
seed_percent: float, n_threads: int):
"""
Runs the model selection process.
Args:
data_dir (Path): The directory where all `.csv` and
`.npy` files are located.
output_dir (Path): A directory where all Learning
Profile results will be stored.
sliding_window_length (int): The sliding window size to use.
batch_size (int): The batch size to use.
num_iterations (int): Number of batches to process.
seed_percent (float): Percent of initial seed data
to use before applying Active Learning.
n_threads (int): The number of threads to use.
"""
########################
# Initialization Phase #
########################
_phase_header("INIT PHASE")
learning_profiles = init_phase(
data_dir,
sliding_window_length=sliding_window_length,
batch_size=batch_size,
model_eval=False
)
##########
# Filter #
##########
_phase_header("LEARNING PROFILE FILTER")
filtered_learning_profiles = _pick_learning_profiles(learning_profiles)
###################
# Viability Phase #
###################
_phase_header("VIABILITY PHASE")
viability_phase(filtered_learning_profiles, num_iterations,
seed_percent, n_threads)
####################
# Evaluation Phase #
####################
_phase_header("EVALUATION PHASE")
stamped_output_dir = create_datetime_subdir(output_dir)
evaluation_phase(stamped_output_dir, filtered_learning_profiles)
print("Evaluated successfully!")
# Done
_phase_header("DONE")
def model_evaluation_process(data_dir: Path, input_dir: Path, output_dir: Path,
audio_dir: Path, sliding_window_length: int,
batch_size: int, num_iterations: int,
seed_percent: float, audio_file_ext: str,
n_lps_per_category_item: int):
"""
Runs the model evaluation process.
Args:
data_dir (Path): The directory where all `.csv` and
`.npy` files are located.
input_dir (Path): A directory with Learning Profile results from
the model_selection process.
output_dir (Path): A directory where all Learning
Profile results will be stored.
audio_dir (Path): A directory where all audio files are located.
sliding_window_length (int): The sliding window size to use.
        batch_size (int): The batch size to use.
num_iterations (int): Number of batches to process.
seed_percent (float): Percent of initial seed data
to use before applying Active Learning.
audio_file_ext (str): File extension of the audio files
in `data_dir`.
n_lps_per_category_item (int): The number of
best-performing-learning-profiles per
presentation-mode-category-item (a method/dataset from any of these
categories AL,ML,DS) to continue with from the
model selection phase.
Raises:
FileNotFoundError: If `input_dir` is not a valid directory.
"""
########################
# Initialization Phase #
########################
_phase_header("INIT PHASE")
learning_profiles = init_phase(
data_dir,
sliding_window_length=sliding_window_length,
batch_size=batch_size,
model_eval=True
)
##########
# Filter #
##########
_phase_header("LEARNING PROFILE FILTER")
# Validity check
if not input_dir.is_dir():
raise FileNotFoundError(f"Not a directory: '{input_dir}'")
# Get best learning profiles
filtered_learning_profiles = _best_learning_profiles(
input_dir, learning_profiles, n_lps_per_category_item)
# Pick what learning profiles to evaluate
picked_learning_profiles = _pick_multiple_learning_profiles(
filtered_learning_profiles)
##############
# User Phase #
##############
_phase_header("USER PHASE")
# Initialize audio
audio.init()
# User phase wrapper
def _user_phase_thread_func():
for _ in user_phase(
picked_learning_profiles, audio_dir,
num_iterations, seed_percent,
audio_file_ext):
pass
# Start application
_app = Thread(target=_user_phase_thread_func)
print("Starting User Phase thread...")
_app.start()
# Drive GUI
while _app.is_alive():
time.sleep(.01) # Allow other threads to breathe
gui.update_windows()
print("The GUI loop on main thread was exited " +
"since the User Phase thread was stopped!")
# Exit GUI
print("Destroying GUI...")
gui.destroy()
print("The GUI was successfully destroyed!")
# Deinitialize audio
audio.deinit()
####################
# Evaluation Phase #
####################
_phase_header("EVALUATION PHASE")
stamped_output_dir = create_datetime_subdir(output_dir)
evaluation_phase(stamped_output_dir, picked_learning_profiles)
print("Evaluated successfully!")
# Done
_phase_header("DONE")
def _get_sorted_specific_learning_profiles(lps, eval_mode, pres_mode,
n_lps_per_category_item):
sorted_lps = []
# Get all specific learning profiles
for spec_lps in get_specific_learning_profiles(
lps, pres_mode):
# For each attribute, sort learning profiles by score
# and choose n_lps_per_category_item nr of models per category item
for lp in sort_by_score(spec_lps, eval_mode,
n_lps_per_category_item):
sorted_lps.append(lp)
return sorted_lps
def presentation_process(learning_profile_dir: Path, n_lps: int):
"""
    Runs the presentation process.
Args:
learning_profile_dir (Path): A directory with Learning Profile results
from either model_selection or model_evaluation.
n_lps (int): Max number of Learning Profiles to include in plot,
chooses the best performing ones.
(-1 all Learning Profiles included).
"""
    # Load Learning Profile descriptions
    lps_desc = load_all_learning_profiles(learning_profile_dir)
    # Present all loaded profiles by default
    lps_present = lps_desc
######################
# Presentation Phase #
######################
_phase_header("PRESENTATION PHASE")
print(f"In total there are {len(lps_desc)} Learning profiles.")
# Setup variables
quit = False
picked_eval = None
eval_modes = [eval_mode for eval_mode in EvaluationMode]
pres_modes = [pres_mode for pres_mode in PresentationMode]
# Input loop to gather plot settings
while True:
_phase_header("PLOT SETTINGS")
###################
# Evaluation mode #
###################
# Input prompt
print("Pick evaluation mode by writing the index of the desired"
" evaluation mode.")
for idx, eval_mode in enumerate(EvaluationMode):
print(f"{idx}:\t{eval_mode}")
print("Write 'exit' to quit.")
# Handle evaluation mode input
while True:
try:
idx = input("> ")
if "exit" == idx:
quit = True
break
elif int(idx) >= 0 and int(idx) < len(EvaluationMode):
picked_eval = eval_modes[int(idx)]
break
except ValueError:
continue
if quit:
break
#####################
# Presentation mode #
#####################
# Input prompt
print("Pick presentation mode by writing the index of the wanted"
" presentation mode.")
print("ML = Machine learning, AL = Active learning,"
" DS = Dataset.")
for idx, pres_mode in enumerate(PresentationMode):
print(f"{idx}:\t{pres_mode}")
print(
f"Write 'all' to present {n_lps if n_lps > -1 else len(lps_desc)}"
" learning profiles."
" (No presentation-mode-filtering)")
print("Write 'exit' to quit.")
# Handle presentation mode input
while True:
try:
idx = input("> ")
if "exit" == idx:
quit = True
break
elif "all" == idx:
lps_present = lps_desc
presentation_phase(learning_profiles=lps_present,
eval=picked_eval,
nr_models=n_lps)
break
elif int(idx) >= 0 and int(idx) < len(PresentationMode):
# Set nr of learning profiles per category item
n_lps_per_category_item = None
while True:
print("Write the number of "
"best-performing-learning-profiles"
" per presentation-mode-category-item "
"(a method/dataset from any of these "
"categories AL,ML,DS) "
"to apply presentation-mode-filtering"
"(-1 means all Learning Profiles included)")
n = input("> ")
if "exit" == n:
quit = True
break
elif int(n) == -1 or int(n) > 0:
n_lps_per_category_item = int(n)
break
if quit:
break
# Filter learning profiles given the arguments
lps_present = _get_sorted_specific_learning_profiles(
lps_desc, picked_eval, pres_modes[int(idx)],
n_lps_per_category_item)
# Run presentation phase to plot the results
presentation_phase(learning_profiles=lps_present,
eval=picked_eval,
nr_models=n_lps)
break
except ValueError:
continue
if quit:
break
|
dev_test_dex_print.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_dex_print.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://www.lucit.tech/unicorn-binance-websocket-api.html
# Github: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://unicorn-binance-websocket-api.docs.lucit.tech
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
import logging
import sys
import time
import threading
import os
try:
from unicorn_fy.unicorn_fy import UnicornFy
except ImportError:
print("Please install `unicorn-fy`! https://pypi.org/project/unicorn-fy/")
sys.exit(1)
logging.getLogger("unicorn_binance_websocket_api")
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
unicorn_fied_stream_data = UnicornFy.binance_org_websocket(oldest_stream_data_from_stream_buffer)
print(unicorn_fied_stream_data)
# create instance of BinanceWebSocketApiManager for Binance Chain DEX
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.org-testnet")
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# userAddress streams
binance_dex_user_address = "bnb1v566f3avl2ud5z0jepazsrguzkj367snlx4jm6"
orders_stream_id = binance_websocket_api_manager.create_stream('orders', binance_dex_user_address)
binance_websocket_api_manager.create_stream('accounts', binance_dex_user_address)
binance_websocket_api_manager.create_stream('transfers', binance_dex_user_address)
user_address_multi_stream_id = binance_websocket_api_manager.create_stream(['orders', 'transfers', 'accounts'],
binance_dex_user_address)
time.sleep(5)
binance_websocket_api_manager.print_stream_info(user_address_multi_stream_id)
time.sleep(5)
#markets = 'RAVEN-F66_BNB' # live
markets = ['000-0E1_BNB'] # testnet
channels = ['trades', 'kline_1m', 'kline_5m', 'kline_15m']
binance_websocket_api_manager.create_stream(["allTickers"], ["$all"])
binance_websocket_api_manager.create_stream(["allMiniTickers"], ["$all"])
binance_websocket_api_manager.create_stream(["blockheight"], ["$all"])
binance_websocket_api_manager.create_stream(["trades"], markets)
binance_websocket_api_manager.create_stream(["marketDepth"], markets)
binance_websocket_api_manager.create_stream(["kline_5m"], markets)
binance_websocket_api_manager.create_stream(["ticker"], markets)
binance_websocket_api_manager.create_stream(["miniTicker"], markets)
multiplex_stream_id = binance_websocket_api_manager.create_stream(channels, markets)
stream_id = binance_websocket_api_manager.create_stream(["kline_1m"], markets)
time.sleep(2)
binance_websocket_api_manager.print_stream_info(stream_id)
#markets = ['RAVEN-F66_BNB', 'ANKR-E97_BNB', 'AWC-986_BNB', 'COVA-218_BNB', 'BCPT-95A_BNB', 'WISH-2D5_BNB',
# 'MITH-C76_BNB', 'BNB_BTCB-1DE', 'BNB_USDSB-1AC', 'BTCB-1DE_USDSB-1AC', 'NEXO-A84_BNB'] # live
markets = ['000-0E1_BNB'] # testnet
channels = ['trades', 'kline_1m', 'kline_5m', 'kline_15m', 'marketDepth', 'ticker', 'miniTicker', 'marketDiff']
binance_websocket_api_manager.subscribe_to_stream(stream_id,
markets=markets,
channels=channels)
binance_websocket_api_manager.print_stream_info(stream_id)
markets = ['BCPT-95A_BNB', 'WISH-2D5_BNB',
'MITH-C76_BNB', 'BTCB-1DE_USDSB-1AC']
channels = ['trades', 'kline_15m', 'marketDepth', 'ticker', 'miniTicker', 'marketDiff']
binance_websocket_api_manager.unsubscribe_from_stream(stream_id, channels=channels, markets=markets)
while True:
#binance_websocket_api_manager.print_summary()
time.sleep(1)
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2022 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from bs4 import BeautifulSoup
from contextlib import contextmanager
from copy import deepcopy
from pkg_resources import parse_version
from sphinx.__init__ import __version__ as sphinx_version
from sphinx.application import Sphinx
from sphinx.util.console import color_terminal
from sphinx.util.console import nocolor
from sphinx.util.docutils import docutils_namespace
from sphinxcontrib.confluencebuilder import compat
from sphinxcontrib.confluencebuilder import util
from threading import Event
from threading import Lock
from threading import Thread
import inspect
import json
import os
import shutil
import sys
import time
try:
import http.server as http_server
except ImportError:
import SimpleHTTPServer as http_server
try:
import socketserver as server_socket
except ImportError:
import SocketServer as server_socket
# full extension name
EXT_NAME = 'sphinxcontrib.confluencebuilder'
class ConfluenceInstanceServer(server_socket.TCPServer):
def __init__(self):
"""
confluence instance server
        Spawns a TCP server on a random local port to emulate a
        Confluence instance.
Attributes:
del_req: delete requests cached by handler
del_rsp: delete responses to use in the handler
get_req: get requests cached by handler
get_rsp: get responses to use in the handler
put_req: put requests cached by handler
put_rsp: put responses to use in the handler
"""
LOCAL_RANDOM_PORT = ('127.0.0.1', 0)
server_socket.TCPServer.__init__(self,
LOCAL_RANDOM_PORT, ConfluenceInstanceRequestHandler)
self.mtx = Lock()
self.del_req = []
self.del_rsp = []
self.get_req = []
self.get_rsp = []
self.put_req = []
self.put_rsp = []
def check_unhandled_requests(self):
"""
check if there are any unhandled requests still cached
Provides a helper call to allow a unit test to check if there are any
handled requests that have not been pop'ed from the instance. Provides
an easy way to verify that no unexpected requests have been made.
Returns:
whether or not there are still requests cached
"""
with self.mtx:
if self.del_req or self.get_req or self.put_req:
return True
return False
def pop_delete_request(self):
"""
pop the cached delete request made to the mocked server
Allows a unit test to pop the next available request path/headers that
have been pushed into the mocked Confluence server. This allows a
unit test to verify desired (or undesired) request values.
Returns:
the next delete request; ``None`` if no request was made
"""
try:
with self.mtx:
return self.del_req.pop(0)
except IndexError:
return None
def pop_get_request(self):
"""
pop the cached get request made to the mocked server
Allows a unit test to pop the next available request path/headers that
have been pushed into the mocked Confluence server. This allows a
unit test to verify desired (or undesired) request values.
Returns:
the next get request; ``None`` if no request was made
"""
try:
with self.mtx:
return self.get_req.pop(0)
except IndexError:
return None
def pop_put_request(self):
"""
pop the cached put request made to the mocked server
Allows a unit test to pop the next available request path/headers that
have been pushed into the mocked Confluence server. This allows a
unit test to verify desired (or undesired) request values.
Returns:
the next put request; ``None`` if no request was made
"""
try:
with self.mtx:
return self.put_req.pop(0)
except IndexError:
return None
    def register_delete_rsp(self, code):
"""
register a delete response
Registers a response the instance should return when a DELETE request
is being served.
Args:
code: the response code
"""
with self.mtx:
self.del_rsp.append(code)
def register_get_rsp(self, code, data):
"""
register a get response
Registers a response the instance should return when a GET request is
being served.
Args:
code: the response code
data: the data
"""
if data:
if isinstance(data, dict):
data = json.dumps(data)
data = data.encode('utf-8')
with self.mtx:
self.get_rsp.append((code, data))
def register_put_rsp(self, code, data):
"""
register a put response
Registers a response the instance should return when a PUT request is
being served.
Args:
code: the response code
data: the data
"""
if data:
if isinstance(data, dict):
data = json.dumps(data)
data = data.encode('utf-8')
with self.mtx:
self.put_rsp.append((code, data))
class ConfluenceInstanceRequestHandler(server_socket.ThreadingMixIn,
http_server.SimpleHTTPRequestHandler):
"""
confluence instance request handler
    Provides the handler implementation when a mocked Confluence instance
wishes to serve an HTTP request. This handler will pull responses (if any)
populated into the server instance. If no responses are provided, the
default response will be a 500 error with no data.
"""
def do_DELETE(self):
"""
serve a delete request
This method is called when a DELETE request is being processed by this
handler.
"""
with self.server.mtx:
self.server.del_req.append((self.path, dict(self.headers)))
try:
code = self.server.del_rsp.pop(0)
except IndexError:
code = 500
self.send_response(code)
self.end_headers()
def do_GET(self):
"""
serve a get request
This method is called when a GET request is being processed by this
handler.
"""
with self.server.mtx:
self.server.get_req.append((self.path, dict(self.headers)))
try:
code, data = self.server.get_rsp.pop(0)
except IndexError:
code = 500
data = None
self.send_response(code)
self.end_headers()
if data:
self.wfile.write(data)
def do_PUT(self):
"""
serve a put request
This method is called when a PUT request is being processed by this
handler.
"""
with self.server.mtx:
self.server.put_req.append((self.path, dict(self.headers)))
try:
code, data = self.server.put_rsp.pop(0)
except IndexError:
code = 500
data = None
self.send_response(code)
self.end_headers()
if data:
self.wfile.write(data)
class MockedConfig(dict):
"""
mocked sphinx configuration
Provides a class to mock a Sphinx configuration for testing, to support both
dictionary key and attribute calls.
"""
def __getattr__(self, name):
if name in self:
return self[name]
return None
def __setattr__(self, name, value):
self[name] = value
def clone(self):
cloned = MockedConfig()
for key, value in self.items():
if value is None or callable(value):
cloned[key] = value
else:
cloned[key] = deepcopy(value)
return cloned
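# Hedged example (added for illustration, not part of the original helpers):
# MockedConfig supports both attribute and dictionary-style access, mirroring
# how a Sphinx configuration is typically consumed; the option names below
# are placeholders only.
def _example_mocked_config_usage():
    cfg = MockedConfig()
    cfg.confluence_publish = True        # attribute assignment stores a key
    assert cfg['confluence_publish']     # dictionary access sees the same value
    assert cfg.unknown_option is None    # unset attributes resolve to None
    return cfg.clone()                   # deep copies values where possible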
def enable_sphinx_info(verbosity=None):
"""
    enable verbosity for features handled by this utility module
    When invoked, Sphinx applications prepared by this module will emit
    status output and, optionally, more verbose messages.
Args:
verbosity (optional): configure verbosity on the sphinx application
"""
os.environ['SPHINX_STATUS'] = '1'
if verbosity:
os.environ['SPHINX_VERBOSITY'] = str(verbosity)
@contextmanager
def mock_confluence_instance(config=None, ignore_requests=False):
"""
    spawns a mocked confluence instance against which publishing can be checked
    The following spawns a mocked Confluence instance, which will create a
    local HTTP server to serve API requests from a publisher instance.
Args:
config (optional): the configuration to populate a publisher url on
ignore_requests (optional): whether or not requests made to the server
should be ignored (default: ``False``)
Yields:
the http daemon
"""
serve_thread = None
try:
# spawn a mocked server instance
daemon = ConfluenceInstanceServer()
host, port = daemon.server_address
if config:
config.confluence_server_url = 'http://{}:{}/'.format(host, port)
# start accepting requests
if not ignore_requests:
sync = Event()
def serve_forever(daemon, sync):
sync.set()
daemon.serve_forever()
serve_thread = Thread(target=serve_forever, args=(daemon, sync,))
serve_thread.start()
# wait for the serving thread to be running
sync.wait()
            # yield context for a moment to help ensure the daemon is serving
time.sleep(0.1)
yield daemon
finally:
if serve_thread:
daemon.shutdown()
serve_thread.join()
else:
daemon.socket.close()
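# Hedged example (added for illustration, not part of the original helpers):
# a minimal sketch of how the mocked Confluence instance above might be used
# in a unit test; the registered response payload is an assumed placeholder.
def _example_mock_confluence_usage():
    config = prepare_conf()
    with mock_confluence_instance(config) as daemon:
        # queue a canned GET response before the code under test issues a call
        daemon.register_get_rsp(200, {'results': []})
        # ... the code under test would publish to config.confluence_server_url ...
        # afterwards, the request the server saw (if any) can be inspected
        return daemon.pop_get_request()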
@contextmanager
def mock_getpass(mock):
def _(prompt='Password: ', stream=sys.stdout):
stream.write(prompt)
stream.write('(mocked input> ')
stream.write(mock)
stream.write('\n')
return mock
try:
original = util.getpass2
util.getpass2 = _
yield
finally:
util.getpass2 = original
@contextmanager
def mock_input(mock):
def _(prompt=''):
print(prompt + '(mocked input> ' + mock)
return mock
try:
original = compat.compat_input
compat.compat_input = _
yield
finally:
compat.compat_input = original
@contextmanager
def parse(filename, dirname=None):
"""
parse the output of a generated sphinx document
Parses the provided filename for generated Confluence-supported markup which
can be examined for expected content. This function will return an instance
    of BeautifulSoup so a tester can take advantage of the utility calls the
library provides.
Args:
filename: the filename to parse
dirname (optional): the directory the provided filename exists in
Returns:
the parsed output
"""
if dirname:
target = os.path.join(dirname, filename)
else:
target = filename
target += '.conf'
with open(target, 'r') as fp:
soup = BeautifulSoup(fp, 'html.parser')
yield soup
def prepare_conf():
"""
prepare minimal sphinx configuration for sphinx application
Prepares a minimum number of required configuration values into a
dictionary for unit tests to extend. This dictionary can be passed into
a Sphinx application instance.
"""
config = MockedConfig()
config['extensions'] = [
EXT_NAME,
# include any forced-injected extensions (config support)
'sphinx.ext.imgmath',
]
config['confluence_publish'] = False
# support pre-Sphinx v2.0 installations which default to 'contents'
if parse_version(sphinx_version) < parse_version('2.0'):
config['master_doc'] = 'index'
return config
def prepare_dirs(container=None, f_back_count=1, postfix=None):
"""
return the output directory base for all unit tests
This utility method is used to provide other tests the location to store
output files. This method will ensure the output directory is removed
before returning.
Args:
container (optional): the output container name to use
f_back_count (optional): number of frame objects to move back when
attempting to auto-generate a container name
postfix (optional): postfix to add to the container directory
Returns:
the output directory
"""
if not container:
frame = inspect.currentframe()
for _ in range(f_back_count):
frame = frame.f_back
container = frame.f_code.co_name
lib_dir = os.path.dirname(os.path.realpath(__file__))
test_dir = os.path.join(lib_dir, os.pardir)
base_dir = os.path.join(test_dir, os.pardir)
output_dir = os.path.join(base_dir, 'output')
container_dir = os.path.abspath(os.path.join(output_dir, container))
if postfix:
container_dir += postfix
shutil.rmtree(container_dir, ignore_errors=True)
return container_dir
@contextmanager
def prepare_sphinx(src_dir, config=None, out_dir=None, extra_config=None,
builder=None, relax=False):
"""
prepare a sphinx application instance
Return a prepared Sphinx application instance [1] ready for execution.
[1]: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/application.py
Args:
src_dir: document sources
config (optional): configuration to use
out_dir (optional): output for generated documents
extra_config (optional): additional configuration data to apply
builder (optional): the builder to use
relax (optional): do not generate warnings as errors
"""
# Enable coloring of warning and other messages. Note that this can
# cause sys.stderr to be mocked which is why we pass the new value
# explicitly on the call to Sphinx() below.
if 'MSYSTEM' not in os.environ and not color_terminal():
nocolor()
conf = dict(config) if config else {}
if extra_config:
conf.update(extra_config)
conf_dir = src_dir if config is None else None
warnerr = not relax
sts = None
if 'SPHINX_STATUS' in os.environ:
sts = sys.stdout
verbosity = 0
if 'SPHINX_VERBOSITY' in os.environ:
try:
verbosity = int(os.environ['SPHINX_VERBOSITY'])
except ValueError:
pass
# default to using this extension's builder
if not builder:
builder = 'confluence'
if not out_dir:
# 3 = prepare_dirs, this, contextmanager
out_dir = prepare_dirs(f_back_count=3)
doctrees_dir = os.path.join(out_dir, '.doctrees')
# support pre-Sphinx v4.0 installations which do not have `root_doc` by
# swapping to the obsolete configuration name
if parse_version(sphinx_version) < parse_version('4.0'):
if 'root_doc' in conf:
conf['master_doc'] = conf['root_doc']
del conf['root_doc']
with docutils_namespace():
app = Sphinx(
            src_dir, # path to document sources
conf_dir, # configuration directory
out_dir, # output for generated documents
doctrees_dir, # output for doctree files
builder, # builder to execute
confoverrides=conf, # load provided configuration (volatile)
status=sts, # status output
warning=sys.stderr, # warnings output
warningiserror=warnerr, # treat warnings as errors
verbosity=verbosity) # verbosity
yield app
def prepare_sphinx_filenames(src_dir, filenames, configs=None):
"""
prepare explicit filenames for a sphinx application instance
A Sphinx engine allows accepting a list of filenames it will process;
however, these filenames need to be set to full paths. This is not always
convenient for testing, so this utility allows generating a filename list
with the source directory prefixed for each entry.
In addition, when passing a documentation set to process, Sphinx requires
    that the documentation set has an existing root document. Some testing
    datasets may not have one. If this is detected, this helper will adjust
    the configuration so that the root document points to one of the provided
    filenames, which should prevent issues when the Sphinx application
    prepares an environment. This is only performed when configurations are
    provided to this call. Multiple configuration entries can be provided,
    and only the last configuration entry (which must exist) will be updated
    in the event a change is needed.
Args:
src_dir: document sources
filenames: the documents to process relative to src_dir (no extensions)
configs (optional): list of configurations to check for root doc issue
Returns:
the updated file name list
"""
files = []
for filename in filenames:
files.append(os.path.join(src_dir, filename + '.rst'))
if configs:
root_doc = 'index'
for config in configs:
if config and 'root_doc' in config:
root_doc = config['root_doc']
break
if root_doc not in filenames:
configs[-1]['root_doc'] = filenames[0] # update last config
return files
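# Hedged example (added for illustration): a sketch of how
# prepare_sphinx_filenames expands relative document names and repoints the
# last configuration's root document; 'docs' and 'guide' are assumed names.
def _example_prepare_filenames():
    extra_config = {}
    files = prepare_sphinx_filenames('docs', ['guide'], configs=(None, extra_config))
    # files -> ['docs/guide.rst'] (on POSIX); since 'index' is not among the
    # provided documents, extra_config['root_doc'] is set to 'guide'
    return files, extra_config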
def build_sphinx(src_dir, config=None, out_dir=None, extra_config=None,
builder=None, relax=False, filenames=None):
"""
    prepare and invoke a sphinx application instance
Creates, invokes and cleans up a Sphinx application instance [1].
[1]: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/application.py
Args:
src_dir: document sources
config (optional): configuration to use
out_dir (optional): output for generated documents
extra_config (optional): additional configuration data to apply
builder (optional): the builder to use
relax (optional): do not generate warnings as errors
filenames (optional): specific documents to process
Returns:
the output directory
"""
if not out_dir:
# 2 = prepare_dirs, this
out_dir = prepare_dirs(f_back_count=2)
files = []
force_all = True
if filenames:
# force-all not supported when using explicit filenames
force_all = False
# sphinx application requires full paths for explicit filenames
extra_config = dict(extra_config) if extra_config else {}
files = prepare_sphinx_filenames(src_dir, filenames,
configs=(config, extra_config))
with prepare_sphinx(
src_dir, config=config, out_dir=out_dir, extra_config=extra_config,
builder=builder, relax=relax) as app:
app.build(force_all=force_all, filenames=files)
return out_dir
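# Hedged example (added for illustration, not part of the original helpers):
# a typical unit test pairs prepare_conf() with build_sphinx() and then parses
# the rendered output; 'dataset_dir' is an assumed documentation source path.
def _example_build_sphinx_usage(dataset_dir):
    config = prepare_conf()
    out_dir = build_sphinx(dataset_dir, config=config, relax=True)
    with parse('index', out_dir) as doc:
        # BeautifulSoup queries can then assert on the generated markup
        return doc.find_all('p')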
|
worker_handlers.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code for communicating with the Workers."""
# mypy: disallow-untyped-defs
from __future__ import absolute_import
import collections
import contextlib
import copy
import logging
import queue
import subprocess
import sys
import threading
import time
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import BinaryIO # pylint: disable=unused-import
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from typing import overload
import grpc
from apache_beam.io import filesystems
from apache_beam.io.filesystems import CompressionTypes
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability.fn_api_runner.execution import Buffer
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.utils import proto_utils
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from grpc import ServicerContext
from google.protobuf import message
from apache_beam.runners.portability.fn_api_runner.fn_runner import ExtendedProvisionInfo # pylint: disable=ungrouped-imports
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
# Time-based flush is enabled in the fn_api_runner by default.
DATA_BUFFER_TIME_LIMIT_MS = 1000
_LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
ConstructorFn = Callable[[
Union['message.Message', bytes],
'sdk_worker.StateHandler',
'ExtendedProvisionInfo',
'GrpcServer'
],
'WorkerHandler']
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
# type: () -> None
self._push_queue = queue.Queue(
) # type: queue.Queue[Union[beam_fn_api_pb2.InstructionRequest, Sentinel]]
self._input = None # type: Optional[Iterable[beam_fn_api_pb2.InstructionResponse]]
self._futures_by_id = dict() # type: Dict[str, ControlFuture]
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
# type: () -> None
assert self._input is not None
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
@overload
def push(self, req):
# type: (Sentinel) -> None
pass
@overload
def push(self, req):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
pass
def push(self, req):
# type: (Union[Sentinel, beam_fn_api_pb2.InstructionRequest]) -> Optional[ControlFuture]
if req is BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
# type: () -> Union[Sentinel, beam_fn_api_pb2.InstructionRequest]
return self._push_queue.get()
def set_input(self, input):
# type: (Iterable[beam_fn_api_pb2.InstructionResponse]) -> None
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
# type: () -> None
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
def abort(self, exn):
# type: (Exception) -> None
for future in self._futures_by_id.values():
future.abort(exn)
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = Sentinel.sentinel
def __init__(
self,
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self._worker_manager = worker_manager
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
# following self._req_* variables are used for debugging purpose, data is
# added only when self._log_req is True.
self._req_sent = collections.defaultdict(int) # type: DefaultDict[str, int]
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(
ControlConnection) # type: DefaultDict[str, ControlConnection]
def get_conn_by_worker_id(self, worker_id):
# type: (str) -> ControlConnection
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self,
iterator, # type: Iterable[beam_fn_api_pb2.InstructionResponse]
context # type: ServicerContext
):
# type: (...) -> Iterator[beam_fn_api_pb2.InstructionRequest]
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
raise RuntimeError(
          'All workers communicating through gRPC should have a '
          'worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
# type: () -> None
self._state = self.DONE_STATE
_LOGGER.debug(
'Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
def GetProcessBundleDescriptor(self, id, context=None):
# type: (beam_fn_api_pb2.GetProcessBundleDescriptorRequest, Any) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._worker_manager.get_process_bundle_descriptor(id)
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {} # type: Dict[str, Tuple[ConstructorFn, type]]
_worker_id_counter = -1
_lock = threading.Lock()
control_conn = None # type: ControlConnection
data_conn = None # type: data_plane._GrpcDataChannel
def __init__(self,
control_handler, # type: Any
data_plane_handler, # type: Any
state, # type: sdk_worker.StateHandler
provision_info # type: ExtendedProvisionInfo
):
# type: (...) -> None
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
# type: () -> None
self.stop_worker()
def start_worker(self):
# type: () -> None
raise NotImplementedError
def stop_worker(self):
# type: () -> None
raise NotImplementedError
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
raise NotImplementedError
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
raise NotImplementedError
def data_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def state_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def logging_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
@classmethod
def register_environment(
cls,
urn, # type: str
payload_type # type: Optional[Type[T]]
):
# type: (...) -> Callable[[Callable[[T, sdk_worker.StateHandler, ExtendedProvisionInfo, GrpcServer], WorkerHandler]], Callable[[T, sdk_worker.StateHandler, ExtendedProvisionInfo, GrpcServer], WorkerHandler]]
def wrapper(constructor):
# type: (Callable) -> Callable
cls._registered_environments[urn] = constructor, payload_type # type: ignore[assignment]
return constructor
return wrapper
@classmethod
def create(cls,
environment, # type: beam_runner_api_pb2.Environment
state, # type: sdk_worker.StateHandler
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> WorkerHandler
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
# This takes a WorkerHandlerManager instead of GrpcServer, so it is not
# compatible with WorkerHandler.register_environment. There is a special case
# in WorkerHandlerManager.get_worker_handlers() that allows it to work.
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None) # type: ignore[arg-type]
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self,
unused_payload, # type: None
state, # type: sdk_worker.StateHandler
provision_info, # type: ExtendedProvisionInfo
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self # type: ignore # need Protocol to describe this
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.bundle_processor_cache = sdk_worker.BundleProcessorCache(
SingletonStateHandlerFactory(
sdk_worker.CachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
worker_manager._process_bundle_descriptors)
self.worker = sdk_worker.SdkWorker(
self.bundle_processor_cache,
state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
# type: () -> None
pass
def stop_worker(self):
# type: () -> None
self.bundle_processor_cache.shutdown()
def done(self):
# type: () -> None
pass
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
# A fake endpoint is needed for properly constructing timer info map in
# bundle_processor for fnapi_runner.
return endpoints_pb2.ApiServiceDescriptor(url='fake')
def state_api_service_descriptor(self):
# type: () -> None
return None
def logging_api_service_descriptor(self):
# type: () -> None
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
# type: (Iterable[beam_fn_api_pb2.LogEntry.List], Any) -> Iterator[beam_fn_api_pb2.LogControl]
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer
):
def __init__(self, base_info, worker_manager):
# type: (beam_provision_api_pb2.ProvisionInfo, WorkerHandlerManager) -> None
self._base_info = base_info
self._worker_manager = worker_manager
def GetProvisionInfo(self, request, context=None):
# type: (Any, Optional[ServicerContext]) -> beam_provision_api_pb2.GetProvisionInfoResponse
if context:
worker_id = dict(context.invocation_metadata())['worker_id']
worker = self._worker_manager.get_worker(worker_id)
info = copy.copy(worker.provision_info.provision_info)
info.logging_endpoint.CopyFrom(worker.logging_api_service_descriptor())
info.artifact_endpoint.CopyFrom(worker.artifact_api_service_descriptor())
info.control_endpoint.CopyFrom(worker.control_api_service_descriptor())
else:
info = self._base_info
return beam_provision_api_pb2.GetProvisionInfoResponse(info=info)
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self,
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(
thread_pool_executor.shared_unbounded_instance())
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer(worker_manager)
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(
self.provision_info.provision_info, worker_manager),
self.control_server)
def open_uncompressed(f):
# type: (str) -> BinaryIO
return filesystems.FileSystems.open(
f, compression_type=CompressionTypes.UNCOMPRESSED)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
file_reader=open_uncompressed),
self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer(
DATA_BUFFER_TIME_LIMIT_MS)
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
GrpcStateServicer(state), self.state_server)
self.logging_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(), self.logging_server)
_LOGGER.info('starting control server on port %s', self.control_port)
_LOGGER.info('starting data server on port %s', self.data_port)
_LOGGER.info('starting state server on port %s', self.state_port)
_LOGGER.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
# type: () -> None
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self,
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler,
self._grpc_server.data_plane_handler,
state,
provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
# type: () -> None
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
# type: (int) -> str
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
# type: () -> str
return 'localhost'
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self,
external_payload, # type: beam_runner_api_pb2.ExternalPayload
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(ExternalWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._external_payload = external_payload
def start_worker(self):
# type: () -> None
_LOGGER.info("Requesting worker at %s", self._external_payload.endpoint.url)
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
control_descriptor = endpoints_pb2.ApiServiceDescriptor(
url=self.control_address)
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=control_descriptor,
artifact_endpoint=control_descriptor,
provision_endpoint=control_descriptor,
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
# type: () -> None
pass
def host_from_worker(self):
# type: () -> str
# TODO(BEAM-8646): Reconcile across platforms.
if sys.platform in ['win32', 'darwin']:
return 'localhost'
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: bytes
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(EmbeddedGrpcWorkerHandler,
self).__init__(state, provision_info, grpc_server)
from apache_beam.transforms.environments import EmbeddedPythonGrpcEnvironment
config = EmbeddedPythonGrpcEnvironment.parse_config(payload.decode('utf-8'))
self._state_cache_size = config.get('state_cache_size') or STATE_CACHE_SIZE
self._data_buffer_time_limit_ms = \
config.get('data_buffer_time_limit_ms') or DATA_BUFFER_TIME_LIMIT_MS
def start_worker(self):
# type: () -> None
self.worker = sdk_worker.SdkHarness(
self.control_address,
state_cache_size=self._state_cache_size,
data_buffer_time_limit_ms=self._data_buffer_time_limit_ms,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
# The subprocess module is not threadsafe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
worker_command_line, # type: bytes
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(SubprocessSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
# type: () -> None
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address, self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
@WorkerHandler.register_environment(
common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: beam_runner_api_pb2.DockerPayload
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(DockerSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._container_image = payload.container_image
self._container_id = None # type: Optional[bytes]
def host_from_worker(self):
# type: () -> str
if sys.platform == "darwin":
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
else:
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
# type: () -> None
with SUBPROCESS_LOCK:
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
_LOGGER.info('Unable to pull image %s' % self._container_image)
self._container_id = subprocess.check_output([
'docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
assert self._container_id is not None
while True:
status = subprocess.check_output([
'docker', 'inspect', '-f', '{{.State.Status}}', self._container_id
]).strip()
_LOGGER.info(
          'Waiting for docker to start up. Current status is %s' %
status.decode('utf-8'))
if status == b'running':
_LOGGER.info(
'Docker container is running. container_id = %s, '
'worker_id = %s',
self._container_id,
self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call(['docker', 'container', 'logs', self._container_id])
raise RuntimeError(
'SDK failed to start. Final status is %s' %
status.decode('utf-8'))
time.sleep(1)
self._done = False
t = threading.Thread(target=self.watch_container)
t.daemon = True
t.start()
def watch_container(self):
# type: () -> None
while not self._done:
assert self._container_id is not None
status = subprocess.check_output(
['docker', 'inspect', '-f', '{{.State.Status}}',
self._container_id]).strip()
if status != b'running':
if not self._done:
logs = subprocess.check_output([
'docker', 'container', 'logs', '--tail', '10', self._container_id
],
stderr=subprocess.STDOUT)
_LOGGER.info(logs)
self.control_conn.abort(
RuntimeError(
'SDK exited unexpectedly. '
'Final status is %s. Final log line is %s' % (
status.decode('utf-8'),
logs.decode('utf-8').strip().split('\n')[-1])))
time.sleep(5)
def stop_worker(self):
# type: () -> None
self._done = True
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call(['docker', 'kill', self._container_id])
class WorkerHandlerManager(object):
"""
Manages creation of ``WorkerHandler``s.
Caches ``WorkerHandler``s based on environment id.
"""
def __init__(self,
environments, # type: Mapping[str, beam_runner_api_pb2.Environment]
job_provision_info # type: ExtendedProvisionInfo
):
# type: (...) -> None
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(
list) # type: DefaultDict[str, List[WorkerHandler]]
self._workers_by_id = {} # type: Dict[str, WorkerHandler]
self.state_servicer = StateServicer()
self._grpc_server = None # type: Optional[GrpcServer]
self._process_bundle_descriptors = {
} # type: Dict[str, beam_fn_api_pb2.ProcessBundleDescriptor]
def register_process_bundle_descriptor(self, process_bundle_descriptor):
# type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
self._process_bundle_descriptors[
process_bundle_descriptor.id] = process_bundle_descriptor
def get_process_bundle_descriptor(self, request):
# type: (beam_fn_api_pb2.GetProcessBundleDescriptorRequest) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._process_bundle_descriptors[
request.process_bundle_descriptor_id]
def get_worker_handlers(
self,
environment_id, # type: Optional[str]
num_workers # type: int
):
# type: (...) -> List[WorkerHandler]
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
# special case for EmbeddedWorkerHandler: there's no need for a gRPC
# server, but we need to pass self instead. Cast to make the type check
# on WorkerHandler.create() think we have a GrpcServer instance.
grpc_server = cast(GrpcServer, self)
elif self._grpc_server is None:
self._grpc_server = GrpcServer(
self.state_servicer, self._job_provision_info, self)
grpc_server = self._grpc_server
else:
grpc_server = self._grpc_server
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment,
self.state_servicer,
self._job_provision_info.for_environment(environment),
grpc_server)
_LOGGER.info(
"Created Worker handler %s for environment %s (%s, %r)",
worker_handler,
environment_id,
environment.urn,
environment.payload)
self._cached_handlers[environment_id].append(worker_handler)
self._workers_by_id[worker_handler.worker_id] = worker_handler
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
# type: () -> None
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
_LOGGER.error(
"Error closing worker_handler %s" % worker_handler, exc_info=True)
self._cached_handlers = {} # type: ignore[assignment]
self._workers_by_id = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
def get_worker(self, worker_id):
# type: (str) -> WorkerHandler
return self._workers_by_id[worker_id]
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer,
sdk_worker.StateHandler):
class CopyOnWriteState(object):
def __init__(self, underlying):
# type: (DefaultDict[bytes, Buffer]) -> None
self._underlying = underlying
self._overlay = {} # type: Dict[bytes, Buffer]
def __getitem__(self, key):
# type: (bytes) -> Buffer
if key in self._overlay:
return self._overlay[key]
else:
return StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
# type: (bytes) -> None
self._overlay[key] = []
def commit(self):
# type: () -> DefaultDict[bytes, Buffer]
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self,
underlying, # type: DefaultDict[bytes, Buffer]
overlay, # type: Dict[bytes, Buffer]
key # type: bytes
):
# type: (...) -> None
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
# type: () -> Iterator[bytes]
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
# type: (bytes) -> None
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
StateType = Union[CopyOnWriteState, DefaultDict[bytes, Buffer]]
def __init__(self):
# type: () -> None
self._lock = threading.Lock()
self._state = collections.defaultdict(list) # type: StateServicer.StateType
self._checkpoint = None # type: Optional[StateServicer.StateType]
self._use_continuation_tokens = False
self._continuations = {} # type: Dict[bytes, Tuple[bytes, ...]]
def checkpoint(self):
# type: () -> None
assert self._checkpoint is None and not \
isinstance(self._state, StateServicer.CopyOnWriteState)
self._checkpoint = self._state
self._state = StateServicer.CopyOnWriteState(self._state)
def commit(self):
# type: () -> None
assert isinstance(self._state,
StateServicer.CopyOnWriteState) and \
isinstance(self._checkpoint,
StateServicer.CopyOnWriteState)
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
# type: () -> None
assert self._checkpoint is not None
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
# type: (Any) -> Iterator
yield
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = b'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', b'%s:0' % token_base
else:
token_base, index = continuation_token.split(b':')
ix = int(index)
full_state_cont = self._continuations[token_base]
if ix == len(full_state_cont):
return b'', None
else:
return full_state_cont[ix], b'%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
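  # Worked example of the continuation-token protocol above (illustrative only):
  # with self._use_continuation_tokens = True and stored state [b'a', b'b'] for a key,
  #   get_raw(key)                -> (b'',  b'token_0:0')   # snapshot taken, empty first page
  #   get_raw(key, b'token_0:0')  -> (b'a', b'token_0:1')
  #   get_raw(key, b'token_0:1')  -> (b'b', b'token_0:2')
  #   get_raw(key, b'token_0:2')  -> (b'',  None)           # no more data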
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
@staticmethod
def _to_key(state_key):
# type: (beam_fn_api_pb2.StateKey) -> bytes
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
# type: (StateServicer) -> None
self._state = state
def State(self,
request_stream, # type: Iterable[beam_fn_api_pb2.StateRequest]
context=None # type: Any
):
# type: (...) -> Iterator[beam_fn_api_pb2.StateResponse]
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id, append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id, clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
# type: (sdk_worker.CachingStateHandler) -> None
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> sdk_worker.CachingStateHandler
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
# type: () -> None
"""Does nothing."""
pass
class ControlFuture(object):
def __init__(self,
instruction_id, # type: str
response=None # type: Optional[beam_fn_api_pb2.InstructionResponse]
):
# type: (...) -> None
self.instruction_id = instruction_id
self._response = response
if response is None:
self._condition = threading.Condition()
self._exception = None # type: Optional[Exception]
def is_done(self):
# type: () -> bool
return self._response is not None
def set(self, response):
# type: (beam_fn_api_pb2.InstructionResponse) -> None
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
# type: (Optional[float]) -> beam_fn_api_pb2.InstructionResponse
if not self._response and not self._exception:
with self._condition:
if not self._response and not self._exception:
self._condition.wait(timeout)
if self._exception:
raise self._exception
else:
assert self._response is not None
return self._response
def abort(self, exception):
# type: (Exception) -> None
with self._condition:
self._exception = exception
self._condition.notify_all()
|
adb_profile_chrome.py
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import gzip
import logging
import optparse
import os
import re
import select
import shutil
import sys
import threading
import time
import webbrowser
import zipfile
import zlib
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
_TRACE_VIEWER_ROOT = os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'trace-viewer')
sys.path.append(_TRACE_VIEWER_ROOT)
from build import trace2html
_DEFAULT_CHROME_CATEGORIES = '_DEFAULT_CHROME_CATEGORIES'
def _GetTraceTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
class ChromeTracingController(object):
def __init__(self, adb, package_info, categories, ring_buffer):
self._adb = adb
self._package_info = package_info
self._categories = categories
self._ring_buffer = ring_buffer
self._trace_file = None
self._trace_interval = None
self._trace_start_re = \
re.compile(r'Logging performance trace to file: (.*)')
self._trace_finish_re = \
re.compile(r'Profiler finished[.] Results are in (.*)[.]')
self._adb.StartMonitoringLogcat(clear=False)
def __str__(self):
return 'chrome trace'
def StartTracing(self, interval):
self._trace_interval = interval
self._adb.SyncLogCat()
self._adb.BroadcastIntent(self._package_info.package, 'GPU_PROFILER_START',
'-e categories "%s"' % ','.join(self._categories),
'-e continuous' if self._ring_buffer else '')
# Chrome logs two different messages related to tracing:
#
# 1. "Logging performance trace to file [...]"
# 2. "Profiler finished. Results are in [...]"
#
# The first one is printed when tracing starts and the second one indicates
# that the trace file is ready to be pulled.
try:
self._trace_file = self._adb.WaitForLogMatch(self._trace_start_re,
None,
timeout=5).group(1)
except pexpect.TIMEOUT:
raise RuntimeError('Trace start marker not found. Is the correct version '
'of the browser running?')
def StopTracing(self):
if not self._trace_file:
return
self._adb.BroadcastIntent(self._package_info.package, 'GPU_PROFILER_STOP')
self._adb.WaitForLogMatch(self._trace_finish_re, None, timeout=120)
def PullTrace(self):
# Wait a bit for the browser to finish writing the trace file.
time.sleep(self._trace_interval / 4 + 1)
trace_file = self._trace_file.replace('/storage/emulated/0/', '/sdcard/')
host_file = os.path.join(os.path.curdir, os.path.basename(trace_file))
self._adb.PullFileFromDevice(trace_file, host_file)
return host_file
_SYSTRACE_OPTIONS = [
# Compress the trace before sending it over USB.
'-z',
# Use a large trace buffer to increase the polling interval.
'-b', '16384'
]
# Interval in seconds for sampling systrace data.
_SYSTRACE_INTERVAL = 15
class SystraceController(object):
def __init__(self, adb, categories, ring_buffer):
self._adb = adb
self._categories = categories
self._ring_buffer = ring_buffer
self._done = threading.Event()
self._thread = None
self._trace_data = None
def __str__(self):
return 'systrace'
@staticmethod
def GetCategories(adb):
return adb.RunShellCommand('atrace --list_categories')
def StartTracing(self, interval):
self._thread = threading.Thread(target=self._CollectData)
self._thread.start()
def StopTracing(self):
self._done.set()
def PullTrace(self):
self._thread.join()
self._thread = None
if self._trace_data:
output_name = 'systrace-%s' % _GetTraceTimestamp()
with open(output_name, 'w') as out:
out.write(self._trace_data)
return output_name
def _RunATraceCommand(self, command):
# We use a separate interface to adb because the one from AndroidCommands
# isn't re-entrant.
device = ['-s', self._adb.GetDevice()] if self._adb.GetDevice() else []
cmd = ['adb'] + device + ['shell', 'atrace', '--%s' % command] + \
_SYSTRACE_OPTIONS + self._categories
return cmd_helper.GetCmdOutput(cmd)
def _CollectData(self):
trace_data = []
self._RunATraceCommand('async_start')
try:
while not self._done.is_set():
self._done.wait(_SYSTRACE_INTERVAL)
if not self._ring_buffer or self._done.is_set():
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_dump')))
finally:
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_stop')))
self._trace_data = ''.join([zlib.decompress(d) for d in trace_data])
@staticmethod
def _DecodeTraceData(trace_data):
try:
trace_start = trace_data.index('TRACE:')
except ValueError:
raise RuntimeError('Systrace start marker not found')
trace_data = trace_data[trace_start + 6:]
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
# Skip the initial newline.
return trace_data[1:]
def _GetSupportedBrowsers():
# Add aliases for backwards compatibility.
supported_browsers = {
'stable': constants.PACKAGE_INFO['chrome_stable'],
'beta': constants.PACKAGE_INFO['chrome_beta'],
'dev': constants.PACKAGE_INFO['chrome_dev'],
'build': constants.PACKAGE_INFO['chrome'],
}
supported_browsers.update(constants.PACKAGE_INFO)
unsupported_browsers = ['content_browsertests', 'gtest', 'legacy_browser']
for browser in unsupported_browsers:
del supported_browsers[browser]
return supported_browsers
def _CompressFile(host_file, output):
with gzip.open(output, 'wb') as out:
with open(host_file, 'rb') as input_file:
out.write(input_file.read())
os.unlink(host_file)
def _ArchiveFiles(host_files, output):
with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED) as z:
for host_file in host_files:
z.write(host_file)
os.unlink(host_file)
def _PackageTracesAsHtml(trace_files, html_file):
with open(html_file, 'w') as f:
trace2html.WriteHTMLForTracesToFile(trace_files, f)
for trace_file in trace_files:
os.unlink(trace_file)
def _PrintMessage(heading, eol='\n'):
sys.stdout.write('%s%s' % (heading, eol))
sys.stdout.flush()
def _WaitForEnter(timeout):
select.select([sys.stdin], [], [], timeout)
def _StartTracing(controllers, interval):
for controller in controllers:
controller.StartTracing(interval)
def _StopTracing(controllers):
for controller in controllers:
controller.StopTracing()
def _PullTraces(controllers, output, compress, write_json):
_PrintMessage('Downloading...', eol='')
trace_files = []
for controller in controllers:
trace_files.append(controller.PullTrace())
if not write_json:
html_file = os.path.splitext(trace_files[0])[0] + '.html'
_PackageTracesAsHtml(trace_files, html_file)
trace_files = [html_file]
if compress and len(trace_files) == 1:
result = output or trace_files[0] + '.gz'
_CompressFile(trace_files[0], result)
elif len(trace_files) > 1:
result = output or 'chrome-combined-trace-%s.zip' % _GetTraceTimestamp()
_ArchiveFiles(trace_files, result)
elif output:
result = output
shutil.move(trace_files[0], result)
else:
result = trace_files[0]
_PrintMessage('done')
_PrintMessage('Trace written to %s' % os.path.abspath(result))
return result
def _CaptureAndPullTrace(controllers, interval, output, compress, write_json):
trace_type = ' + '.join(map(str, controllers))
try:
_StartTracing(controllers, interval)
if interval:
_PrintMessage('Capturing %d-second %s. Press Enter to stop early...' % \
(interval, trace_type), eol='')
_WaitForEnter(interval)
else:
_PrintMessage('Capturing %s. Press Enter to stop...' % trace_type, eol='')
raw_input()
finally:
_StopTracing(controllers)
if interval:
_PrintMessage('done')
return _PullTraces(controllers, output, compress, write_json)
def _ComputeChromeCategories(options):
categories = []
if options.trace_frame_viewer:
categories.append('disabled-by-default-cc.debug')
if options.trace_ubercompositor:
categories.append('disabled-by-default-cc.debug*')
if options.trace_gpu:
categories.append('disabled-by-default-gpu.debug*')
if options.trace_flow:
categories.append('disabled-by-default-toplevel.flow')
if options.chrome_categories:
categories += options.chrome_categories.split(',')
return categories
def _ComputeSystraceCategories(options):
if not options.systrace_categories:
return []
return options.systrace_categories.split(',')
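# Example invocations (illustrative; available systrace category names depend on the
# device, run with "-s list" to see them):
#   adb_profile_chrome.py -t 10                       # 10-second Chrome trace with default categories
#   adb_profile_chrome.py -t 10 -b beta -s gfx,view   # combined Chrome + systrace from the beta channel
#   adb_profile_chrome.py --continuous --ring-buffer  # trace until Enter is pressed, keep only the tail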
def main():
parser = optparse.OptionParser(description='Record about://tracing profiles '
'from Android browsers. See http://dev.'
'chromium.org/developers/how-tos/trace-event-'
'profiling-tool for detailed instructions for '
'profiling.')
timed_options = optparse.OptionGroup(parser, 'Timed tracing')
timed_options.add_option('-t', '--time', help='Profile for N seconds and '
'download the resulting trace.', metavar='N',
type='float')
parser.add_option_group(timed_options)
cont_options = optparse.OptionGroup(parser, 'Continuous tracing')
cont_options.add_option('--continuous', help='Profile continuously until '
'stopped.', action='store_true')
cont_options.add_option('--ring-buffer', help='Use the trace buffer as a '
'ring buffer and save its contents when stopping '
'instead of appending events into one long trace.',
action='store_true')
parser.add_option_group(cont_options)
categories = optparse.OptionGroup(parser, 'Trace categories')
categories.add_option('-c', '--categories', help='Select Chrome tracing '
'categories with comma-delimited wildcards, '
'e.g., "*", "cat1*,-cat1a". Omit this option to trace '
'Chrome\'s default categories. Chrome tracing can be '
'disabled with "--categories=\'\'".',
metavar='CHROME_CATEGORIES', dest='chrome_categories',
default=_DEFAULT_CHROME_CATEGORIES)
categories.add_option('-s', '--systrace', help='Capture a systrace with the '
'chosen comma-delimited systrace categories. You can '
'also capture a combined Chrome + systrace by enabling '
'both types of categories. Use "list" to see the '
'available categories. Systrace is disabled by '
'default.', metavar='SYS_CATEGORIES',
dest='systrace_categories', default='')
categories.add_option('--trace-cc',
help='Deprecated, use --trace-frame-viewer.',
action='store_true')
categories.add_option('--trace-frame-viewer',
help='Enable enough trace categories for '
'compositor frame viewing.', action='store_true')
categories.add_option('--trace-ubercompositor',
help='Enable enough trace categories for '
'ubercompositor frame data.', action='store_true')
categories.add_option('--trace-gpu', help='Enable extra trace categories for '
'GPU data.', action='store_true')
categories.add_option('--trace-flow', help='Enable extra trace categories '
'for IPC message flows.', action='store_true')
parser.add_option_group(categories)
output_options = optparse.OptionGroup(parser, 'Output options')
output_options.add_option('-o', '--output', help='Save trace output to file.')
output_options.add_option('--json', help='Save trace as raw JSON instead of '
'HTML.', action='store_true')
output_options.add_option('--view', help='Open resulting trace file in a '
'browser.', action='store_true')
parser.add_option_group(output_options)
browsers = sorted(_GetSupportedBrowsers().keys())
parser.add_option('-b', '--browser', help='Select among installed browsers. '
'One of ' + ', '.join(browsers) + ', "stable" is used by '
'default.', type='choice', choices=browsers,
default='stable')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
parser.add_option('-z', '--compress', help='Compress the resulting trace '
'with gzip. ', action='store_true')
options, args = parser.parse_args()
if options.trace_cc:
    parser.error("""--trace-cc is deprecated.
For basic jank busting uses, use --trace-frame-viewer
For detailed study of ubercompositor, pass --trace-ubercompositor.
When in doubt, just try out --trace-frame-viewer.
""")
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
adb = android_commands.AndroidCommands()
if options.systrace_categories in ['list', 'help']:
_PrintMessage('\n'.join(SystraceController.GetCategories(adb)))
return 0
if not options.time and not options.continuous:
_PrintMessage('Time interval or continuous tracing should be specified.')
return 1
chrome_categories = _ComputeChromeCategories(options)
systrace_categories = _ComputeSystraceCategories(options)
package_info = _GetSupportedBrowsers()[options.browser]
if chrome_categories and 'webview' in systrace_categories:
logging.warning('Using the "webview" category in systrace together with '
'Chrome tracing results in duplicate trace events.')
controllers = []
if chrome_categories:
controllers.append(ChromeTracingController(adb,
package_info,
chrome_categories,
options.ring_buffer))
if systrace_categories:
controllers.append(SystraceController(adb,
systrace_categories,
options.ring_buffer))
if not controllers:
_PrintMessage('No trace categories enabled.')
return 1
result = _CaptureAndPullTrace(controllers,
options.time if not options.continuous else 0,
options.output,
options.compress,
options.json)
if options.view:
webbrowser.open(result)
if __name__ == '__main__':
sys.exit(main())
|
main.py
|
import threading
import time
import bs4
import requests
import StellarPlayer
import re
import urllib.parse
dytt_url = 'http://www.dptkbs.com/'
def concatUrl(url1, url2):
splits = re.split(r'/+',url1)
url = splits[0] + '//'
if url2.startswith('/'):
url = url + splits[1] + url2
else:
url = url + '/'.join(splits[1:-1]) + '/' + url2
return url
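# Example behaviour of concatUrl (illustrative URLs):
#   concatUrl('http://www.dptkbs.com/html/gndy/index.html', '/html/tv/index.html')
#     -> 'http://www.dptkbs.com/html/tv/index.html'      (absolute path: joined onto the host)
#   concatUrl('http://www.dptkbs.com/html/gndy/index.html', 'list_2.html')
#     -> 'http://www.dptkbs.com/html/gndy/list_2.html'   (relative path: joined onto the directory)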
# Scrape the playback link from a movie detail page
def parse_dytt_movie(url):
res = requests.get(url,verify=False)
if res.status_code == 200:
bs = bs4.BeautifulSoup(res.content.decode('gb2312','ignore'),'html.parser')
selector = bs.select('#downlist > table > tbody > tr > td > a')
print(selector)
for item in selector:
return item.get('href')
else:
print(res.text)
# Scrape all movie detail-page links from a category page
def parse_dytt_page_movies(page_url):
urls = []
res = requests.get(page_url,verify=False)
if res.status_code == 200:
bs = bs4.BeautifulSoup(res.content.decode('gb2312','ignore'),'html.parser')
selector = bs.select('#header > div > div.bd2 > div.bd3 > div.bd3r > div.co_area2 > div.co_content8 > ul')
for ul in selector:
for item in ul.select('table a'):
url = concatUrl(page_url,item.get('href'))
title = ''
                # Regular listing page
if item.string:
if not re.match(r'\[(\w+)\]', item.string):
title = item.string
                # Search-result page
else:
for nav_str in item.children:
if nav_str.string:
title = title + nav_str.string
if title:
urls.append({'title':title,'url':url})
else:
print(res.text)
return urls
# Scrape the list of result pages for a category
def parse_dytt_page_num(pageUrl):
print(pageUrl)
pages = []
res = requests.get(pageUrl,verify=False)
if res.status_code == 200:
bs = bs4.BeautifulSoup(res.content.decode('gb2312','ignore'),'html.parser')
selector = bs.select('#header > div > div.bd2 > div.bd3 > div.bd3r > div.co_area2 > div.co_content8 > div select')
for item in selector:
for child in item.children:
if type(child) == bs4.element.Tag:
page = child.get('value')
if page:
pages.append(page)
else:
print(res.text)
return pages
# Scrape all categories from the site navigation
def parse_dytt_category():
urls = []
search_urls = []
blacks = ['经典影片','旧版游戏','游戏下载','收藏本站','APP下载','华语连续剧','设为主页','留言板']
res = requests.get(dytt_url,verify=False)
if res.status_code == 200:
bs = bs4.BeautifulSoup(res.content.decode('gb2312','ignore'), 'html.parser')
selector = bs.select('#menu > div > ul > li')
for item in selector:
for child in item.children:
if type(child) == bs4.element.Tag:
url = child.get('href')
if url:
if not re.match(r'http',url):
url = concatUrl(dytt_url, url)
if not child.string in blacks:
urls.append({'title':child.string,'url':url})
    # Build the search page link
print(urls)
search_urls.append({'url':concatUrl(dytt_url,'/e/search/index.php')})
return urls, search_urls
def search_66ys_page_movies(search_url, search_word):
urls = []
res = requests.post(search_url,data={'show':'title,smalltext','keyboard':search_word.encode('gb2312')},verify=False)
if res.status_code == 200:
bs = bs4.BeautifulSoup(res.content.decode('gb2312','ignore'),'html.parser')
#header > div > div.bd2 > div.bd3 > div.bd3r > div.co_area2 > div.co_content8 > ul > table:nth-child(1) > tbody > tr:nth-child(2) > td:nth-child(2) > b > a:nth-child(2)
ul = bs.select('#header ul table a')
for a in ul:
print(a)
urls.append({'url':concatUrl(dytt_url, a.get('href')),'title':a.get('title')})
return urls
class dyttplugin(StellarPlayer.IStellarPlayerPlugin):
def __init__(self,player:StellarPlayer.IStellarPlayer):
super().__init__(player)
self.categories = []
self.search_urls = []
self.pages = []
self.movies = []
self.pageIndex = 0
self.curCategory = ''
self.cur_page = '第' + str(self.pageIndex + 1) + '页'
self.num_page = ''
self.search_word = ''
self.search_movies = []
self.detail_urls = []
self.gbthread = threading.Thread(target=self._bgThread)
def _bgThread(self):
while len(self.categories) == 0 and not self.isExit:
self.parsePage()
time.sleep(0.001)
print(f'dytt bg thread:{self.gbthread.native_id} exit')
        # Refresh the UI
def update():
if self.player.isModalExist('main'):
self.updateLayout('main',self.makeLayout())
self.loading(True)
if hasattr(self.player,'queueTask'):
self.player.queueTask(update)
else:
update()
def stop(self):
if self.gbthread.is_alive():
print(f'dytt bg thread:{self.gbthread.native_id} is still running')
return super().stop()
def start(self):
self.gbthread.start()
return super().start()
def parsePage(self):
        # Fetch the category navigation
if len(self.categories) == 0:
self.categories, self.search_urls = parse_dytt_category()
if len(self.categories) > 0:
if not self.curCategory:
self.curCategory = self.categories[0]['url']
            # Fetch the number of result pages for this category
if len(self.pages) == 0:
self.pages = parse_dytt_page_num(self.curCategory)
self.num_page = '共' + str(len(self.pages)) + '页'
if len(self.pages) > 0:
            # Fetch the movie entries for the current page
if len(self.movies) == 0:
url = concatUrl(self.curCategory, self.pages[self.pageIndex])
self.movies = parse_dytt_page_movies(url)
def makeLayout(self):
nav_labels = []
for cat in self.categories:
nav_labels.append({'type':'link','name':cat['title'],'@click':'onCategoryClick'})
list_layout = [
{'type':'label','name':'title'},
{'type':'link','name':'详情','width':30,'@click':'onDetailClick'},
{'type':'space','width':10},
{'type':'link','name':'播放','width':30,'@click':'onPlayClick'},
]
if hasattr(self.player,'download'):
list_layout.append({'type':'space','width':10})
list_layout.append({'type':'link','name':'下载','width':30,'@click':'onDownloadClick'})
controls = [
{'group':nav_labels,'height':30},
{'type':'space','height':10},
{'group':
[
{'type':'edit','name':'search_edit','label':'搜索'},
{'type':'button','name':'搜电影','@click':'onSearch'}
]
,'height':30
},
{'type':'space','height':10},
{'type':'list','name':'list','itemlayout':{'group':list_layout},'value':self.movies,'separator':True,'itemheight':40},
{'group':
[
{'type':'space'},
{'group':
[
{'type':'label','name':'cur_page',':value':'cur_page'},
{'type':'link','name':'上一页','@click':'onClickFormerPage'},
{'type':'link','name':'下一页','@click':'onClickNextPage'},
{'type':'link','name':'首页','@click':'onClickFirstPage'},
{'type':'link','name':'末页','@click':'onClickLastPage'},
{'type':'label','name':'num_page',':value':'num_page'},
]
,'width':0.45
,'hAlign':'center'
},
{'type':'space'}
]
,'height':30
},
{'type':'space','height':5}
]
return controls
def show(self):
controls = self.makeLayout()
self.doModal('main',800,600,'',controls)
def onModalCreated(self, pageId):
print(f'dytt onModalCreated {pageId=}')
if pageId == 'main':
if len(self.movies) == 0:
self.loading()
elif pageId != 'search':
self.loadingPage(pageId)
def onSearchInput(self,*args):
print(f'{self.search_word}')
def onSearch(self,*args):
self.search_word = self.player.getControlValue('main','search_edit')
if len(self.search_urls) > 0:
url = self.search_urls[0]['url']
print(f'url={url}')
self.search_movies = search_66ys_page_movies(url, self.search_word)
if len(self.search_movies) > 0:
list_layout = {'group':[{'type':'label','name':'title','width':0.9},{'type':'link','name':'播放','width':30,'@click':'onPlayClick'},{'type':'space'}]}
controls = {'type':'list','name':'list','itemlayout':list_layout,'value':self.search_movies,'separator':True,'itemheight':40}
if not self.player.isModalExist('search'):
self.doModal('search',500,400,self.search_word,controls)
else:
self.player.updateControlValue('search','list',self.search_movies)
else:
self.player.toast('main',f'没有找到 {self.search_word} 相关的资源')
def onCategoryClick(self,pageId,control,*args):
for cat in self.categories:
if cat['title'] == control:
if cat['url'] != self.curCategory:
self.curCategory = cat['url']
self.pageIndex = 0
                    # Fetch the page count for the newly selected category
self.loading()
self.pages = parse_dytt_page_num(self.curCategory)
self.num_page = num_page ='共' + str(len(self.pages)) + '页'
self.player.updateControlValue('main','num_page',num_page)
self.selectPage()
self.loading(True)
break
def onPlayClick(self, pageId, control, item, *args):
if pageId == 'main':
playUrl = parse_dytt_movie(self.movies[item]['url'])
elif pageId == 'search':
playUrl = parse_dytt_movie(self.search_movies[item]['url'])
if playUrl:
self.player.play(playUrl)
def onDownloadClick(self, pageId, control, item, *args):
if pageId == 'main':
playUrl = parse_dytt_movie(self.movies[item]['url'])
elif pageId == 'search':
playUrl = parse_dytt_movie(self.search_movies[item]['url'])
if playUrl:
self.player.download(playUrl)
def onDetailClick(self, pageId, control, item, *args):
url = self.movies[item]['url']
title = self.movies[item]['title']
print(url)
def parse_dytt_detail():
res = requests.get(url,verify=False)
if res.status_code == 200:
controls = []
bs = bs4.BeautifulSoup(res.content.decode('gb2312','ignore'),'html.parser')
                # Parse poster images
selector = bs.select('#Zoom img')
for item in selector:
controls.append({'type':'image','value':item.get('src'),'width':200,'height':300})
                # Parse the synopsis text
skip = False
selector = bs.select('#Zoom td')
for item in selector:
for br in item.children:
if not br.string:
continue
href = None
if type(br) == bs4.element.Tag:
href = br.get('href')
if not re.match(r'主演|导演|演员|编剧',re.sub(r'\W+','',br.string)) or href:
if re.match(r'标签|简介',re.sub(r'\W+','',br.string)):
skip = False
if not skip or href:
if type(br) == bs4.element.NavigableString:
controls.append({'type':'label','value':br.string,'height':20})
elif href:
controls.append({'type':'link','name':br.string,'height':30,'@click':'on_detail_page_play'})
                                # Save the playback link associated with this detail page
self.detail_urls.append({'title':title,'url':href})
else:
skip = True
def update_detail_ui():
self.loadingPage(title, True)
self.updateLayout(title, controls)
if hasattr(self.player,'queueTask'):
self.player.queueTask(update_detail_ui)
else:
update_detail_ui()
t = threading.Thread(target=parse_dytt_detail)
t.start()
self.doModal(title, 600, 800, title, [])
        # Drop the playback links that were saved for this detail page once its dialog closes.
        self.detail_urls = [entry for entry in self.detail_urls if entry['title'] != title]
def on_detail_page_play(self,pageId, *args):
for item in self.detail_urls:
if item['title'] == pageId:
self.player.play(item['url'])
break
def selectPage(self):
if len(self.pages) > self.pageIndex:
self.movies.clear()
self.player.updateControlValue('main','list',self.movies)
url = concatUrl(self.curCategory, self.pages[self.pageIndex])
self.movies = parse_dytt_page_movies(url)
self.player.updateControlValue('main','list',self.movies)
self.cur_page = cur_page = '第' + str(self.pageIndex + 1) + '页'
self.player.updateControlValue('main','cur_page',cur_page)
def onClickFormerPage(self, *args):
if self.pageIndex > 0:
self.pageIndex = self.pageIndex - 1
self.loading()
self.selectPage()
self.loading(True)
def onClickNextPage(self, *args):
num_page = len(self.pages)
if self.pageIndex + 1 < num_page:
self.pageIndex = self.pageIndex + 1
self.loading()
self.selectPage()
self.loading(True)
def onClickFirstPage(self, *args):
if self.pageIndex != 0:
self.pageIndex = 0
self.loading()
self.selectPage()
self.loading(True)
def onClickLastPage(self, *args):
if self.pageIndex != len(self.pages) - 1:
self.pageIndex = len(self.pages) - 1
self.loading()
self.selectPage()
self.loading(True)
def loading(self, stopLoading = False):
if hasattr(self.player,'loadingAnimation'):
self.player.loadingAnimation('main', stop=stopLoading)
def loadingPage(self, page, stopLoading = False):
if hasattr(self.player,'loadingAnimation'):
self.player.loadingAnimation(page, stop=stopLoading)
def newPlugin(player:StellarPlayer.IStellarPlayer,*arg):
plugin = dyttplugin(player)
return plugin
def destroyPlugin(plugin:StellarPlayer.IStellarPlayerPlugin):
plugin.stop()
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
from numpy import ndarray
import transformers
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import tqdm, trange
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .datasets.EncodeDataset import EncodeDataset
from .models import Transformer, Pooling
from . import __version__
class SentenceTransformer(nn.Sequential):
"""
    Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if model_name_or_path is not None and model_name_or_path != "":
logging.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logging.info("Did not find folder {}. Assume to download model from server.".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(default_cache_path, folder_name)
os.makedirs(model_path, exist_ok=True)
if not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logging.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
try:
zip_save_path = os.path.join(model_path, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path)
os.remove(zip_save_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path)
if e.response.status_code == 404:
logging.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logging.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logging.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logging.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
is_pretokenized: bool = False,
device: str = None,
num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
        :param is_pretokenized: If is_pretokenized=True, sentences must be a list of integers, containing the tokenized sentences with each token converted to the respective int.
:param device: Which torch.device to use for the computation
:param num_workers: Number of background-workers to tokenize data. Set to positive number to increase tokenization speed
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)
input_was_string = False
if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
inp_dataset = EncodeDataset(sentences_sorted, model=self, is_tokenized=is_pretokenized)
inp_dataloader = DataLoader(inp_dataset, batch_size=batch_size, collate_fn=self.smart_batching_collate_text_only, num_workers=num_workers, shuffle=False)
iterator = inp_dataloader
if show_progress_bar:
iterator = tqdm(inp_dataloader, desc="Batches")
for features in iterator:
for feature_name in features:
features[feature_name] = features[feature_name].to(device)
with torch.no_grad():
out_features = self.forward(features)
embeddings = out_features[output_value]
if output_value == 'token_embeddings':
#Set token embeddings to 0 for padding tokens
input_mask = out_features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
embeddings = embeddings * input_mask_expanded
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.cpu().detach().numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
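    # Minimal usage sketch for encode() (the model name below is only an example):
    #   model = SentenceTransformer('bert-base-nli-mean-tokens')
    #   embeddings = model.encode(['This is a sentence.', 'This is another one.'])
    #   print(embeddings.shape)   # (2, embedding_dim) numpy array with the default settings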
def start_multi_process_pool(self, target_devices: List[str] = None, encode_batch_size: int = 32):
"""
        Starts a multi-process pool to run the encoding with several independent processes.
        This method is recommended if you want to encode on multiple GPUs. It is advised
        to start only one process per GPU. This method works together with encode_multi_process.
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:param encode_batch_size: Batch size for each process when calling encode
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logging.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logging.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue, encode_batch_size), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], is_pretokenized: bool = False, chunk_size=None):
"""
This method allows to run encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param is_pretokenized: If true, no tokenization will be applied. It is expected that the input sentences are list of ints.
        :param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined automatically.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logging.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, is_pretokenized, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, is_pretokenized, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
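    # Minimal usage sketch for multi-GPU encoding (guard with `if __name__ == '__main__':`
    # because the pool uses the 'spawn' start method; the sentence list is illustrative):
    #   pool = model.start_multi_process_pool()              # one process per visible CUDA device
    #   embeddings = model.encode_multi_process(sentences, pool)
    #   SentenceTransformer.stop_multi_process_pool(pool)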
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue, encode_batch_size):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, is_pretokenized, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, is_pretokenized=is_pretokenized, show_progress_bar=False, convert_to_numpy=True, batch_size=encode_batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal input sequence length the model accepts. Longer inputs will be truncated.
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
return self._last_module().get_sentence_embedding_dimension()
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
logging.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0][0])
labels = []
paired_texts = [[] for _ in range(num_texts)]
max_seq_len = [0] * num_texts
for tokens, label in batch:
labels.append(label)
for i in range(num_texts):
paired_texts[i].append(tokens[i])
max_seq_len[i] = max(max_seq_len[i], self._text_length(tokens[i]))
features = []
for idx in range(num_texts):
max_len = max_seq_len[idx]
feature_lists = {}
for text in paired_texts[idx]:
sentence_features = self.get_sentence_features(text, max_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
features.append(feature_lists)
return {'features': features, 'labels': torch.stack(labels)}
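    # Illustrative shape of the transformation above for a pair-input model:
    #   batch = [([tokens_a1, tokens_b1], label1), ([tokens_a2, tokens_b2], label2)]
    #   -> {'features': [features_for_all_a_texts, features_for_all_b_texts],
    #       'labels': torch.stack([label1, label2])}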
def smart_batching_collate_text_only(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model.
Here, batch is a list of texts
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
max_seq_len = max([self._text_length(text) for text in batch])
feature_lists = {}
for text in batch:
sentence_features = self.get_sentence_features(text, max_seq_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
return feature_lists
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
        Helper function to get the length of the input text. Text can be either
        a list of ints (a single tokenized text) or a list of lists of ints
        (several tokenized texts for the model).
"""
if len(text) == 0 or isinstance(text[0], int):
return len(text)
else:
return sum([len(t) for t in text])
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
output_path_ignore_not_empty: bool = False,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param output_path_ignore_not_empty: By default, training will stop if output_path is not empty. If set to true, this error will be ignored and training proceeds.
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient normalization.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
if not output_path_ignore_not_empty and len(os.listdir(output_path)) > 0:
raise ValueError("Output directory ({}) already exists and is not empty.".format(
output_path))
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
device = self._target_device
for loss_model in loss_models:
loss_model.to(device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch"):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
#logging.info("Restart data_iterator")
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = batch_to_device(data, self._target_device)
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
-1, callback)
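    # Minimal training sketch (dataset, evaluator and loss below are placeholders; the loss
    # class comes from sentence_transformers.losses):
    #   from torch.utils.data import DataLoader
    #   from sentence_transformers import losses
    #   train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
    #   train_loss = losses.CosineSimilarityLoss(model)
    #   model.fit(train_objectives=[(train_dataloader, train_loss)],
    #             evaluator=dev_evaluator,
    #             epochs=1,
    #             warmup_steps=100,
    #             output_path='./output')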
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
def _get_scheduler(self, optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
        Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
http_server_node.py
|
#!/usr/bin/env python
import http.server
import socketserver
import threading
import rospy
import robot_resource.robot_resource as rs
from sensor_msgs.msg import Image
from sensor_msgs.msg import NavSatFix
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == "/robotInfo":
rs.GET_robot_info(self)
elif self.path == "/robotStatus":
rs.GET_robot_status(self)
elif self.path == "/leftCameraFrame":
rs.GET_left_camera_frame(self)
#return http.server.SimpleHTTPRequestHandler.do_GET(self)
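    # Example requests served by this handler (port 1337 is set in main() below; host is illustrative):
    #   curl http://<robot-ip>:1337/robotInfo
    #   curl http://<robot-ip>:1337/robotStatus
    #   curl http://<robot-ip>:1337/leftCameraFrame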
def main():
# Init ROS.
rospy.init_node('http_server', anonymous=True)
# Get params.
statek_name = rospy.get_param("~statek_name", "statek")
left_camera_topic = rospy.get_param("~left_camera_topic", "/" + statek_name + "/stereo/left/image_rect_color")
gps_topic = rospy.get_param("~gps_topic", "/" + statek_name + "/gps/fix")
# Set params to robot resource.
rs.ROS_set_robot_info(statek_name)
# Init subscribers.
rospy.Subscriber(left_camera_topic, Image, rs.ROS_camera_frame_callback, queue_size=1)
rospy.Subscriber(gps_topic, NavSatFix, rs.ROS_gps_fix_callback, queue_size=1)
PORT = 1337
handler = RequestHandler
httpd = socketserver.TCPServer(("", PORT), handler)
http_thread = threading.Thread(name='http_thread', target=httpd.serve_forever)
http_thread.start()
    # Wait for shutdown without spinning at 100% CPU.
    while not rospy.is_shutdown():
        rospy.sleep(0.1)
httpd.shutdown()
http_thread.join()
if __name__ == '__main__':
main()
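# Hedged usage sketch (not part of the original node): once the node is running,
# the endpoints handled above could be queried from another machine with the
# 'requests' package. The host name is an illustrative assumption.
#
#     import requests
#     response = requests.get("http://robot-host:1337/robotInfo")
#     print(response.status_code, response.text)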
|
LR1.py
|
"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
import datetime
import math
import os
import sys
import threading
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pyqtgraph as pg
from PyQt5 import uic, QtWidgets, QtCore
from PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem
class KMeans:
"""
K-means clustering
code from: https://dev.to/rishitdagli/build-k-means-from-scratch-in-python-2140
"""
def __init__(self, k=2, tol=0.001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
def fit(self, data, k=2, tol=0.001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
self.centroids = {}
for i in range(self.k):
self.centroids[i] = data[i]
for i in range(self.max_iter):
self.classifications = {}
for i in range(self.k):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
prev_centroids = dict(self.centroids)
for classification in self.classifications:
self.centroids[classification] = np.average(self.classifications[classification], axis=0)
optimized = True
for c in self.centroids:
original_centroid = prev_centroids[c]
current_centroid = self.centroids[c]
                # Use the absolute percentage shift so positive and negative
                # component changes cannot cancel each other out.
                centroid_shift = np.sum(np.abs((current_centroid - original_centroid) / original_centroid * 100.0))
                if centroid_shift > self.tol:
                    print(centroid_shift)
                    optimized = False
if optimized:
break
def predict(self, data):
distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
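# Hedged usage sketch (not part of the original file): a minimal example of
# exercising the KMeans class above on synthetic 2-D points. The data values
# and the helper name are illustrative assumptions.
def _kmeans_demo():
    data = np.array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.5, 7.9]])
    model = KMeans()
    model.fit(data, k=2)
    # Each point is assigned to the closest of the two learned centroids.
    return [model.predict(point) for point in data]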
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
# Load GUI file
uic.loadUi('LR1.ui', self)
# System variables
self.model = KMeans()
self.dump_file = None
self.reader_running = False
self.dump_paused = False
self.points = []
# Connect GUI controls
self.btn_load_data.clicked.connect(self.load_data)
self.btn_stop_reading.clicked.connect(self.stop_reading)
self.btn_pause.clicked.connect(self.pause)
self.plot_timer = QtCore.QTimer()
self.plot_timer.timeout.connect(self.update_plot)
self.plot_timer.start(100)
# Initialize table
self.init_tables()
# Initialize pyQtGraph charts
self.init_charts()
# Show GUI
self.show()
def init_tables(self):
"""
Initializes table of packets and setup table (whitelist table)
:return:
"""
self.points_table.setColumnCount(3)
self.points_table.verticalHeader().setVisible(False)
self.points_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.points_table.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem('Packet'))
self.points_table.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem('Time'))
self.points_table.setHorizontalHeaderItem(2, QtWidgets.QTableWidgetItem('Data'))
header = self.points_table.horizontalHeader()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
def init_charts(self):
"""
Initializes charts
:return:
"""
self.graphWidget.setBackground((255, 255, 255))
self.graphWidget.showGrid(x=True, y=True, alpha=1.0)
def update_plot(self):
"""
Draws points over pyQTGraph
:return:
"""
if len(self.points) > 0 and not self.dump_paused:
self.graphWidget.clear()
# Find K-means clusters
self.model.fit(np.array(self.points), k=self.slider_clusters.value())
# Draw centroids
centroids_x = []
centroids_y = []
for centroid in self.model.centroids:
centroids_x.append(self.model.centroids[centroid][0])
centroids_y.append(self.model.centroids[centroid][1])
# Draw points by clusters
color_data = np.array(range(len(self.model.classifications) + 1))
color_map = plt.get_cmap('hsv')
min_z = np.min(color_data)
max_z = np.max(color_data)
rgba_img = color_map(1.0 - (color_data - min_z) / (max_z - min_z)) * 255
for classification in self.model.classifications:
features_x = []
features_y = []
for features_et in self.model.classifications[classification]:
features_x.append(features_et[0])
features_y.append(features_et[1])
self.graphWidget.plot(features_x, features_y, pen=None,
symbolBrush=(rgba_img[classification][0],
rgba_img[classification][1],
rgba_img[classification][2]), symbolSize=5)
max_x = np.max(features_x)
min_x = np.min(features_x)
max_y = np.max(features_y)
min_y = np.min(features_y)
self.graphWidget.plot([min_x, min_x, max_x, max_x, min_x], [min_y, max_y, max_y, min_y, min_y],
pen=pg.mkPen(((
rgba_img[classification][0],
rgba_img[classification][1],
rgba_img[classification][2]))),
symbolBrush=None, symbolSize=0)
# Plot centroids
self.graphWidget.plot(centroids_x, centroids_y, pen=None,
symbolBrush=(0, 0, 0), symbolSize=10)
            # Find straight lines with a probabilistic Hough transform and draw them
points_x = np.array([item[0] for item in self.points])
points_y = np.array([item[1] for item in self.points])
min_x = np.min(points_x)
min_y = np.min(points_y)
points_x -= min_x
points_y -= min_y
points_x = points_x / 10
points_y = points_y / 10
points_image = np.zeros((int(np.max(points_y) + 1), int(np.max(points_x) + 1)), np.uint8)
for i in range(len(points_x)):
points_image[int(points_y[i]), int(points_x[i])] = 255
kernel = np.ones((5, 5), np.uint8)
points_image = cv2.dilate(points_image, kernel, iterations=2)
min_line_length = 550
max_line_gap = 70
            # Pass the length/gap limits as keyword arguments (the 5th positional
            # argument of HoughLinesP is an output array); the call returns None
            # when no lines are found.
            lines = cv2.HoughLinesP(points_image, 1, np.pi / 180, 100,
                                    minLineLength=min_line_length, maxLineGap=max_line_gap)
            for line in lines if lines is not None else []:
for x1, y1, x2, y2 in line:
self.graphWidget.plot([x1 * 10 + min_x, x2 * 10 + min_x], [y1 * 10 + min_y, y2 * 10 + min_y],
pen=pg.mkPen((0, 255, 0)),
symbolBrush=None, symbolSize=0)
def load_data(self):
"""
Loads dump file
:return:
"""
if not self.reader_running:
if os.path.exists(self.data_file.text()):
print('Loading data...')
self.dump_file = open(self.data_file.text(), 'r')
self.reader_running = True
thread = threading.Thread(target=self.dump_reader)
thread.start()
else:
print('File', self.data_file.text(), 'doesn\'t exist!')
def pause(self):
"""
Pauses data stream
:return:
"""
self.dump_paused = not self.dump_paused
if self.dump_paused:
self.btn_pause.setText('Resume')
else:
self.btn_pause.setText('Pause')
def stop_reading(self):
"""
Stops reading data from dump file
:return:
"""
self.reader_running = False
self.dump_file.close()
def dump_reader(self):
"""
Reads dump from file
:return:
"""
# Clear table and data arrays
self.points_table.setRowCount(0)
# Create variables
packets_read = 0
last_packet_datetime = None
# Continue reading
while self.reader_running:
# If on pause
while self.dump_paused:
time.sleep(0.1)
# Read line from file
line = self.dump_file.readline()
# Check for line
if line is None or len(line) < 1:
break
data_packet = line.split(' ')
# Sleep defined time
time_string = str(data_packet[0]).replace('>', '')
if last_packet_datetime is None:
last_packet_datetime = datetime.datetime.strptime(time_string, '%H:%M:%S.%f')
packet_datetime = datetime.datetime.strptime(time_string, '%H:%M:%S.%f')
time.sleep((packet_datetime - last_packet_datetime).total_seconds())
last_packet_datetime = packet_datetime
# Add packet to the table
position = self.points_table.rowCount()
self.points_table.insertRow(position)
self.points_table.setItem(position, 0, QTableWidgetItem(str(position)))
self.points_table.setItem(position, 1, QTableWidgetItem(str(time_string)))
# Remove timestamp and ending from packet and convert to int
data_packet = list(map(int, data_packet[1:][:-1]))
self.points_table.setItem(position, 2, QTableWidgetItem(str(data_packet[0]) +
' ... ' + str(data_packet[-1])))
points = []
            # Each sample i corresponds to a beam angle (60 deg offset,
            # 0.36 deg per sample); the range reading is converted to x/y.
            for i in range(len(data_packet)):
                angle = (60.0 + i * 0.36) * math.pi / 180
x = data_packet[i] * math.sin(angle)
y = data_packet[i] * math.cos(angle)
points.append([x, y])
self.points = points.copy()
# Increment counter
packets_read += 1
self.dump_file.close()
print('File reading stopped. Read', packets_read, 'packets')
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle('fusion')
win = Window()
sys.exit(app.exec_())
|
wsdump.py
|
#!/usr/bin/env python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
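# Hedged usage sketch (not part of the original script): typical invocations,
# assuming the echo server from the help text above is reachable.
#
#     python wsdump.py ws://echo.websocket.org/ -t "hello"
#     python wsdump.py wss://example.com/socket -n -v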
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic number
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
application.py
|
import asyncio
import os
import re
import signal
import sys
import threading
import time
from asyncio import (
AbstractEventLoop,
CancelledError,
Future,
Task,
ensure_future,
get_event_loop,
new_event_loop,
set_event_loop,
sleep,
)
from contextlib import contextmanager
from subprocess import Popen
from traceback import format_tb
from typing import (
Any,
Awaitable,
Callable,
Dict,
FrozenSet,
Generator,
Generic,
Hashable,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.clipboard import Clipboard, InMemoryClipboard
from prompt_toolkit.data_structures import Size
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.eventloop import (
get_traceback_from_context,
run_in_executor_with_context,
)
from prompt_toolkit.eventloop.utils import call_soon_threadsafe
from prompt_toolkit.filters import Condition, Filter, FilterOrBool, to_filter
from prompt_toolkit.formatted_text import AnyFormattedText
from prompt_toolkit.input.base import Input
from prompt_toolkit.input.typeahead import get_typeahead, store_typeahead
from prompt_toolkit.key_binding.bindings.page_navigation import (
load_page_navigation_bindings,
)
from prompt_toolkit.key_binding.defaults import load_key_bindings
from prompt_toolkit.key_binding.emacs_state import EmacsState
from prompt_toolkit.key_binding.key_bindings import (
Binding,
ConditionalKeyBindings,
GlobalOnlyKeyBindings,
KeyBindings,
KeyBindingsBase,
KeysTuple,
merge_key_bindings,
)
from prompt_toolkit.key_binding.key_processor import KeyPressEvent, KeyProcessor
from prompt_toolkit.key_binding.vi_state import ViState
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.containers import Container, Window
from prompt_toolkit.layout.controls import BufferControl, UIControl
from prompt_toolkit.layout.dummy import create_dummy_layout
from prompt_toolkit.layout.layout import Layout, walk
from prompt_toolkit.output import ColorDepth, Output
from prompt_toolkit.renderer import Renderer, print_formatted_text
from prompt_toolkit.search import SearchState
from prompt_toolkit.styles import (
BaseStyle,
DummyStyle,
DummyStyleTransformation,
DynamicStyle,
StyleTransformation,
default_pygments_style,
default_ui_style,
merge_styles,
)
from prompt_toolkit.utils import Event, in_main_thread
from .current import get_app_session, set_app
from .run_in_terminal import in_terminal, run_in_terminal
try:
import contextvars
except ImportError:
import prompt_toolkit.eventloop.dummy_contextvars as contextvars # type: ignore
__all__ = [
"Application",
]
E = KeyPressEvent
_AppResult = TypeVar("_AppResult")
ApplicationEventHandler = Callable[["Application[_AppResult]"], None]
_SIGWINCH = getattr(signal, "SIGWINCH", None)
_SIGTSTP = getattr(signal, "SIGTSTP", None)
class Application(Generic[_AppResult]):
"""
The main Application class!
This glues everything together.
:param layout: A :class:`~prompt_toolkit.layout.Layout` instance.
:param key_bindings:
:class:`~prompt_toolkit.key_binding.KeyBindingsBase` instance for
the key bindings.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` to use.
:param full_screen: When True, run the application on the alternate screen buffer.
:param color_depth: Any :class:`~.ColorDepth` value, a callable that
returns a :class:`~.ColorDepth` or `None` for default.
:param erase_when_done: (bool) Clear the application output when it finishes.
:param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
forward and a '?' searches backward. In Readline mode, this is usually
reversed.
:param min_redraw_interval: Number of seconds to wait between redraws. Use
this for applications where `invalidate` is called a lot. This could cause
a lot of terminal output, which some terminals are not able to process.
`None` means that every `invalidate` will be scheduled right away
(which is usually fine).
When one `invalidate` is called, but a scheduled redraw of a previous
`invalidate` call has not been executed yet, nothing will happen in any
case.
    :param max_render_postpone_time: When there is high CPU (a lot of other
        scheduled calls), postpone the rendering for at most `x` seconds. '0' means:
        don't postpone. '.5' means: try to draw at least twice a second.
:param refresh_interval: Automatically invalidate the UI every so many
seconds. When `None` (the default), only invalidate when `invalidate`
has been called.
    :param terminal_size_polling_interval: Poll the terminal size every so many
        seconds. Useful if the application runs in a thread other than the main
        thread where SIGWINCH can't be handled, or on Windows.
Filters:
:param mouse_support: (:class:`~prompt_toolkit.filters.Filter` or
boolean). When True, enable mouse support.
:param paste_mode: :class:`~prompt_toolkit.filters.Filter` or boolean.
:param editing_mode: :class:`~prompt_toolkit.enums.EditingMode`.
:param enable_page_navigation_bindings: When `True`, enable the page
navigation key bindings. These include both Emacs and Vi bindings like
page-up, page-down and so on to scroll through pages. Mostly useful for
creating an editor or other full screen applications. Probably, you
don't want this for the implementation of a REPL. By default, this is
enabled if `full_screen` is set.
Callbacks (all of these should accept an
:class:`~prompt_toolkit.application.Application` object as input.)
:param on_reset: Called during reset.
:param on_invalidate: Called when the UI has been invalidated.
:param before_render: Called right before rendering.
:param after_render: Called right after rendering.
I/O:
(Note that the preferred way to change the input/output is by creating an
`AppSession` with the required input/output objects. If you need multiple
applications running at the same time, you have to create a separate
    `AppSession` using a `with create_app_session():` block.)
:param input: :class:`~prompt_toolkit.input.Input` instance.
:param output: :class:`~prompt_toolkit.output.Output` instance. (Probably
Vt100_Output or Win32Output.)
Usage:
app = Application(...)
app.run()
# Or
await app.run_async()
"""
def __init__(
self,
layout: Optional[Layout] = None,
style: Optional[BaseStyle] = None,
include_default_pygments_style: FilterOrBool = True,
style_transformation: Optional[StyleTransformation] = None,
key_bindings: Optional[KeyBindingsBase] = None,
clipboard: Optional[Clipboard] = None,
full_screen: bool = False,
color_depth: Union[
ColorDepth, Callable[[], Union[ColorDepth, None]], None
] = None,
mouse_support: FilterOrBool = False,
enable_page_navigation_bindings: Optional[
FilterOrBool
] = None, # Can be None, True or False.
paste_mode: FilterOrBool = False,
editing_mode: EditingMode = EditingMode.EMACS,
erase_when_done: bool = False,
reverse_vi_search_direction: FilterOrBool = False,
min_redraw_interval: Union[float, int, None] = None,
max_render_postpone_time: Union[float, int, None] = 0.01,
refresh_interval: Optional[float] = None,
terminal_size_polling_interval: Optional[float] = 0.5,
on_reset: Optional["ApplicationEventHandler[_AppResult]"] = None,
on_invalidate: Optional["ApplicationEventHandler[_AppResult]"] = None,
before_render: Optional["ApplicationEventHandler[_AppResult]"] = None,
after_render: Optional["ApplicationEventHandler[_AppResult]"] = None,
# I/O.
input: Optional[Input] = None,
output: Optional[Output] = None,
) -> None:
# If `enable_page_navigation_bindings` is not specified, enable it in
# case of full screen applications only. This can be overridden by the user.
if enable_page_navigation_bindings is None:
enable_page_navigation_bindings = Condition(lambda: self.full_screen)
paste_mode = to_filter(paste_mode)
mouse_support = to_filter(mouse_support)
reverse_vi_search_direction = to_filter(reverse_vi_search_direction)
enable_page_navigation_bindings = to_filter(enable_page_navigation_bindings)
include_default_pygments_style = to_filter(include_default_pygments_style)
if layout is None:
layout = create_dummy_layout()
if style_transformation is None:
style_transformation = DummyStyleTransformation()
self.style = style
self.style_transformation = style_transformation
# Key bindings.
self.key_bindings = key_bindings
self._default_bindings = load_key_bindings()
self._page_navigation_bindings = load_page_navigation_bindings()
self.layout = layout
self.clipboard = clipboard or InMemoryClipboard()
self.full_screen: bool = full_screen
self._color_depth = color_depth
self.mouse_support = mouse_support
self.paste_mode = paste_mode
self.editing_mode = editing_mode
self.erase_when_done = erase_when_done
self.reverse_vi_search_direction = reverse_vi_search_direction
self.enable_page_navigation_bindings = enable_page_navigation_bindings
self.min_redraw_interval = min_redraw_interval
self.max_render_postpone_time = max_render_postpone_time
self.refresh_interval = refresh_interval
self.terminal_size_polling_interval = terminal_size_polling_interval
# Events.
self.on_invalidate = Event(self, on_invalidate)
self.on_reset = Event(self, on_reset)
self.before_render = Event(self, before_render)
self.after_render = Event(self, after_render)
# I/O.
session = get_app_session()
self.output = output or session.output
self.input = input or session.input
# List of 'extra' functions to execute before a Application.run.
self.pre_run_callables: List[Callable[[], None]] = []
self._is_running = False
self.future: Optional[Future[_AppResult]] = None
self.loop: Optional[AbstractEventLoop] = None
self.context: Optional[contextvars.Context] = None
#: Quoted insert. This flag is set if we go into quoted insert mode.
self.quoted_insert = False
#: Vi state. (For Vi key bindings.)
self.vi_state = ViState()
self.emacs_state = EmacsState()
#: When to flush the input (For flushing escape keys.) This is important
#: on terminals that use vt100 input. We can't distinguish the escape
#: key from for instance the left-arrow key, if we don't know what follows
#: after "\x1b". This little timer will consider "\x1b" to be escape if
#: nothing did follow in this time span.
#: This seems to work like the `ttimeoutlen` option in Vim.
self.ttimeoutlen = 0.5 # Seconds.
#: Like Vim's `timeoutlen` option. This can be `None` or a float. For
#: instance, suppose that we have a key binding AB and a second key
        #: binding A. If the user presses A and then waits, we don't handle
#: this binding yet (unless it was marked 'eager'), because we don't
#: know what will follow. This timeout is the maximum amount of time
#: that we wait until we call the handlers anyway. Pass `None` to
#: disable this timeout.
self.timeoutlen = 1.0
#: The `Renderer` instance.
# Make sure that the same stdout is used, when a custom renderer has been passed.
self._merged_style = self._create_merged_style(include_default_pygments_style)
self.renderer = Renderer(
self._merged_style,
self.output,
full_screen=full_screen,
mouse_support=mouse_support,
cpr_not_supported_callback=self.cpr_not_supported_callback,
)
#: Render counter. This one is increased every time the UI is rendered.
#: It can be used as a key for caching certain information during one
#: rendering.
self.render_counter = 0
# Invalidate flag. When 'True', a repaint has been scheduled.
self._invalidated = False
self._invalidate_events: List[
Event[object]
] = [] # Collection of 'invalidate' Event objects.
self._last_redraw_time = 0.0 # Unix timestamp of last redraw. Used when
# `min_redraw_interval` is given.
#: The `InputProcessor` instance.
self.key_processor = KeyProcessor(_CombinedRegistry(self))
        # If `run_in_terminal` was called. This will point to a `Future` that will be
# set at the point when the previous run finishes.
self._running_in_terminal = False
self._running_in_terminal_f: Optional[Future[None]] = None
# Trigger initialize callback.
self.reset()
def _create_merged_style(self, include_default_pygments_style: Filter) -> BaseStyle:
"""
Create a `Style` object that merges the default UI style, the default
pygments style, and the custom user style.
"""
dummy_style = DummyStyle()
pygments_style = default_pygments_style()
@DynamicStyle
def conditional_pygments_style() -> BaseStyle:
if include_default_pygments_style():
return pygments_style
else:
return dummy_style
return merge_styles(
[
default_ui_style(),
conditional_pygments_style,
DynamicStyle(lambda: self.style),
]
)
@property
def color_depth(self) -> ColorDepth:
"""
The active :class:`.ColorDepth`.
The current value is determined as follows:
- If a color depth was given explicitly to this application, use that
value.
- Otherwise, fall back to the color depth that is reported by the
:class:`.Output` implementation. If the :class:`.Output` class was
created using `output.defaults.create_output`, then this value is
coming from the $PROMPT_TOOLKIT_COLOR_DEPTH environment variable.
"""
depth = self._color_depth
if callable(depth):
depth = depth()
if depth is None:
depth = self.output.get_default_color_depth()
return depth
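    # Hedged usage sketch (not part of the original class): the reported depth
    # can be forced by passing an explicit value or a callable at construction
    # time, e.g. Application(color_depth=ColorDepth.DEPTH_24_BIT, ...) or
    # Application(color_depth=lambda: ColorDepth.DEPTH_8_BIT, ...).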
@property
def current_buffer(self) -> Buffer:
"""
The currently focused :class:`~.Buffer`.
(This returns a dummy :class:`.Buffer` when none of the actual buffers
has the focus. In this case, it's really not practical to check for
`None` values or catch exceptions every time.)
"""
return self.layout.current_buffer or Buffer(
name="dummy-buffer"
) # Dummy buffer.
@property
def current_search_state(self) -> SearchState:
"""
Return the current :class:`.SearchState`. (The one for the focused
:class:`.BufferControl`.)
"""
ui_control = self.layout.current_control
if isinstance(ui_control, BufferControl):
return ui_control.search_state
else:
return SearchState() # Dummy search state. (Don't return None!)
def reset(self) -> None:
"""
Reset everything, for reading the next input.
"""
# Notice that we don't reset the buffers. (This happens just before
# returning, and when we have multiple buffers, we clearly want the
# content in the other buffers to remain unchanged between several
# calls of `run`. (And the same is true for the focus stack.)
self.exit_style = ""
self.background_tasks: List[Task[None]] = []
self.renderer.reset()
self.key_processor.reset()
self.layout.reset()
self.vi_state.reset()
self.emacs_state.reset()
# Trigger reset event.
self.on_reset.fire()
# Make sure that we have a 'focusable' widget focused.
# (The `Layout` class can't determine this.)
layout = self.layout
if not layout.current_control.is_focusable():
for w in layout.find_all_windows():
if w.content.is_focusable():
layout.current_window = w
break
def invalidate(self) -> None:
"""
Thread safe way of sending a repaint trigger to the input event loop.
"""
if not self._is_running:
# Don't schedule a redraw if we're not running.
# Otherwise, `get_event_loop()` in `call_soon_threadsafe` can fail.
# See: https://github.com/dbcli/mycli/issues/797
return
# `invalidate()` called if we don't have a loop yet (not running?), or
# after the event loop was closed.
if self.loop is None or self.loop.is_closed():
return
# Never schedule a second redraw, when a previous one has not yet been
# executed. (This should protect against other threads calling
# 'invalidate' many times, resulting in 100% CPU.)
if self._invalidated:
return
else:
self._invalidated = True
# Trigger event.
self.loop.call_soon_threadsafe(self.on_invalidate.fire)
def redraw() -> None:
self._invalidated = False
self._redraw()
def schedule_redraw() -> None:
call_soon_threadsafe(
redraw, max_postpone_time=self.max_render_postpone_time, loop=self.loop
)
if self.min_redraw_interval:
# When a minimum redraw interval is set, wait minimum this amount
# of time between redraws.
diff = time.time() - self._last_redraw_time
if diff < self.min_redraw_interval:
async def redraw_in_future() -> None:
await sleep(cast(float, self.min_redraw_interval) - diff)
schedule_redraw()
self.loop.call_soon_threadsafe(
lambda: self.create_background_task(redraw_in_future())
)
else:
schedule_redraw()
else:
schedule_redraw()
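    # Hedged usage sketch (not part of the original class): with a minimum
    # redraw interval, rapid invalidate() calls coalesce into a single repaint.
    #
    #     app = Application(min_redraw_interval=0.1, ...)
    #     for _ in range(1000):
    #         app.invalidate()   # schedules at most one redraw per 100 ms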
@property
def invalidated(self) -> bool:
"True when a redraw operation has been scheduled."
return self._invalidated
def _redraw(self, render_as_done: bool = False) -> None:
"""
Render the command line again. (Not thread safe!) (From other threads,
or if unsure, use :meth:`.Application.invalidate`.)
:param render_as_done: make sure to put the cursor after the UI.
"""
def run_in_context() -> None:
# Only draw when no sub application was started.
if self._is_running and not self._running_in_terminal:
if self.min_redraw_interval:
self._last_redraw_time = time.time()
# Render
self.render_counter += 1
self.before_render.fire()
if render_as_done:
if self.erase_when_done:
self.renderer.erase()
else:
# Draw in 'done' state and reset renderer.
self.renderer.render(self, self.layout, is_done=render_as_done)
else:
self.renderer.render(self, self.layout)
self.layout.update_parents_relations()
# Fire render event.
self.after_render.fire()
self._update_invalidate_events()
# NOTE: We want to make sure this Application is the active one. The
# invalidate function is often called from a context where this
# application is not the active one. (Like the
# `PromptSession._auto_refresh_context`).
# We copy the context in case the context was already active, to
# prevent RuntimeErrors. (The rendering is not supposed to change
# any context variables.)
if self.context is not None:
self.context.copy().run(run_in_context)
def _start_auto_refresh_task(self) -> None:
"""
Start a while/true loop in the background for automatic invalidation of
the UI.
"""
if self.refresh_interval is not None and self.refresh_interval != 0:
async def auto_refresh(refresh_interval: float) -> None:
while True:
await sleep(refresh_interval)
self.invalidate()
self.create_background_task(auto_refresh(self.refresh_interval))
def _update_invalidate_events(self) -> None:
"""
Make sure to attach 'invalidate' handlers to all invalidate events in
the UI.
"""
# Remove all the original event handlers. (Components can be removed
# from the UI.)
for ev in self._invalidate_events:
ev -= self._invalidate_handler
# Gather all new events.
# (All controls are able to invalidate themselves.)
def gather_events() -> Iterable[Event[object]]:
for c in self.layout.find_all_controls():
for ev in c.get_invalidate_events():
yield ev
self._invalidate_events = list(gather_events())
for ev in self._invalidate_events:
ev += self._invalidate_handler
def _invalidate_handler(self, sender: object) -> None:
"""
Handler for invalidate events coming from UIControls.
(This handles the difference in signature between event handler and
`self.invalidate`. It also needs to be a method -not a nested
        function-, so that we can remove it again.)
"""
self.invalidate()
def _on_resize(self) -> None:
"""
When the window size changes, we erase the current output and request
again the cursor position. When the CPR answer arrives, the output is
drawn again.
"""
# Erase, request position (when cursor is at the start position)
# and redraw again. -- The order is important.
self.renderer.erase(leave_alternate_screen=False)
self._request_absolute_cursor_position()
self._redraw()
def _pre_run(self, pre_run: Optional[Callable[[], None]] = None) -> None:
"""
Called during `run`.
`self.future` should be set to the new future at the point where this
is called in order to avoid data races. `pre_run` can be used to set a
`threading.Event` to synchronize with UI termination code, running in
another thread that would call `Application.exit`. (See the progress
bar code for an example.)
"""
if pre_run:
pre_run()
# Process registered "pre_run_callables" and clear list.
for c in self.pre_run_callables:
c()
del self.pre_run_callables[:]
async def run_async(
self,
pre_run: Optional[Callable[[], None]] = None,
set_exception_handler: bool = True,
) -> _AppResult:
"""
Run the prompt_toolkit :class:`~prompt_toolkit.application.Application`
until :meth:`~prompt_toolkit.application.Application.exit` has been
called. Return the value that was passed to
:meth:`~prompt_toolkit.application.Application.exit`.
This is the main entry point for a prompt_toolkit
:class:`~prompt_toolkit.application.Application` and usually the only
place where the event loop is actually running.
:param pre_run: Optional callable, which is called right after the
"reset" of the application.
:param set_exception_handler: When set, in case of an exception, go out
of the alternate screen and hide the application, display the
exception, and wait for the user to press ENTER.
"""
assert not self._is_running, "Application is already running."
async def _run_async() -> _AppResult:
"Coroutine."
loop = get_event_loop()
f = loop.create_future()
self.future = f # XXX: make sure to set this before calling '_redraw'.
self.loop = loop
self.context = contextvars.copy_context()
# Counter for cancelling 'flush' timeouts. Every time when a key is
# pressed, we start a 'flush' timer for flushing our escape key. But
# when any subsequent input is received, a new timer is started and
# the current timer will be ignored.
flush_task: Optional[asyncio.Task[None]] = None
# Reset.
# (`self.future` needs to be set when `pre_run` is called.)
self.reset()
self._pre_run(pre_run)
# Feed type ahead input first.
self.key_processor.feed_multiple(get_typeahead(self.input))
self.key_processor.process_keys()
def read_from_input() -> None:
nonlocal flush_task
# Ignore when we aren't running anymore. This callback will
                # be removed from the loop next time. (It could be that it was
# still in the 'tasks' list of the loop.)
# Except: if we need to process incoming CPRs.
if not self._is_running and not self.renderer.waiting_for_cpr:
return
# Get keys from the input object.
keys = self.input.read_keys()
# Feed to key processor.
self.key_processor.feed_multiple(keys)
self.key_processor.process_keys()
# Quit when the input stream was closed.
if self.input.closed:
if not f.done():
f.set_exception(EOFError)
else:
# Automatically flush keys.
if flush_task:
flush_task.cancel()
flush_task = self.create_background_task(auto_flush_input())
async def auto_flush_input() -> None:
# Flush input after timeout.
# (Used for flushing the enter key.)
# This sleep can be cancelled, in that case we won't flush yet.
await sleep(self.ttimeoutlen)
flush_input()
def flush_input() -> None:
if not self.is_done:
# Get keys, and feed to key processor.
keys = self.input.flush_keys()
self.key_processor.feed_multiple(keys)
self.key_processor.process_keys()
if self.input.closed:
f.set_exception(EOFError)
# Enter raw mode, attach input and attach WINCH event handler.
with self.input.raw_mode(), self.input.attach(
read_from_input
), attach_winch_signal_handler(self._on_resize):
self.create_background_task(self._poll_output_size())
# Draw UI.
self._request_absolute_cursor_position()
self._redraw()
self._start_auto_refresh_task()
# Wait for UI to finish.
try:
result = await f
finally:
# In any case, when the application finishes.
# (Successful, or because of an error.)
try:
self._redraw(render_as_done=True)
finally:
# _redraw has a good chance to fail if it calls widgets
# with bad code. Make sure to reset the renderer
# anyway.
self.renderer.reset()
# Unset `is_running`, this ensures that possibly
# scheduled draws won't paint during the following
# yield.
self._is_running = False
# Detach event handlers for invalidate events.
# (Important when a UIControl is embedded in multiple
# applications, like ptterm in pymux. An invalidate
# should not trigger a repaint in terminated
# applications.)
for ev in self._invalidate_events:
ev -= self._invalidate_handler
self._invalidate_events = []
# Wait for CPR responses.
if self.output.responds_to_cpr:
await self.renderer.wait_for_cpr_responses()
# Wait for the run-in-terminals to terminate.
previous_run_in_terminal_f = self._running_in_terminal_f
if previous_run_in_terminal_f:
await previous_run_in_terminal_f
# Store unprocessed input as typeahead for next time.
store_typeahead(self.input, self.key_processor.empty_queue())
return cast(_AppResult, result)
async def _run_async2() -> _AppResult:
self._is_running = True
# Make sure to set `_invalidated` to `False` to begin with,
# otherwise we're not going to paint anything. This can happen if
# this application had run before on a different event loop, and a
# paint was scheduled using `call_soon_threadsafe` with
# `max_postpone_time`.
self._invalidated = False
loop = get_event_loop()
if set_exception_handler:
previous_exc_handler = loop.get_exception_handler()
loop.set_exception_handler(self._handle_exception)
try:
with set_app(self):
try:
result = await _run_async()
finally:
# Wait for the background tasks to be done. This needs to
# go in the finally! If `_run_async` raises
# `KeyboardInterrupt`, we still want to wait for the
# background tasks.
await self.cancel_and_wait_for_background_tasks()
# Set the `_is_running` flag to `False`. Normally this
# happened already in the finally block in `run_async`
# above, but in case of exceptions, that's not always the
# case.
self._is_running = False
# Also remove the Future again. (This brings the
# application back to its initial state, where it also
# doesn't have a Future.)
self.future = None
return result
finally:
if set_exception_handler:
loop.set_exception_handler(previous_exc_handler)
return await _run_async2()
def run(
self,
pre_run: Optional[Callable[[], None]] = None,
set_exception_handler: bool = True,
in_thread: bool = False,
) -> _AppResult:
"""
A blocking 'run' call that waits until the UI is finished.
This will start the current asyncio event loop. If no loop is set for
the current thread, then it will create a new loop. If a new loop was
created, this won't close the new loop (if `in_thread=False`).
:param pre_run: Optional callable, which is called right after the
"reset" of the application.
:param set_exception_handler: When set, in case of an exception, go out
of the alternate screen and hide the application, display the
exception, and wait for the user to press ENTER.
:param in_thread: When true, run the application in a background
thread, and block the current thread until the application
terminates. This is useful if we need to be sure the application
won't use the current event loop (asyncio does not support nested
event loops). A new event loop will be created in this background
thread, and that loop will also be closed when the background
thread terminates. When this is used, it's especially important to
make sure that all asyncio background tasks are managed through
            `get_app().create_background_task()`, so that unfinished tasks are
properly cancelled before the event loop is closed. This is used
for instance in ptpython.
"""
if in_thread:
result: _AppResult
exception: Optional[BaseException] = None
def run_in_thread() -> None:
nonlocal result, exception
try:
result = self.run(
pre_run=pre_run, set_exception_handler=set_exception_handler
)
except BaseException as e:
exception = e
finally:
# Make sure to close the event loop in this thread. Running
# the application creates a new loop (because we're in
# another thread), but it doesn't get closed automatically
# (also not by the garbage collector).
loop = get_event_loop()
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
thread = threading.Thread(target=run_in_thread)
thread.start()
thread.join()
if exception is not None:
raise exception
return result
# We don't create a new event loop by default, because we want to be
# sure that when this is called multiple times, each call of `run()`
# goes through the same event loop. This way, users can schedule
# background-tasks that keep running across multiple prompts.
try:
loop = get_event_loop()
except RuntimeError:
# Possibly we are not running in the main thread, where no event
# loop is set by default. Or somebody called `asyncio.run()`
# before, which closes the existing event loop. We can create a new
# loop.
loop = new_event_loop()
set_event_loop(loop)
return loop.run_until_complete(
self.run_async(pre_run=pre_run, set_exception_handler=set_exception_handler)
)
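    # Hedged usage sketch (not part of the original class): running the
    # application in a background thread keeps the caller's own event loop
    # free, as described in the docstring above.
    #
    #     result = app.run(in_thread=True)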
def _handle_exception(
self, loop: AbstractEventLoop, context: Dict[str, Any]
) -> None:
"""
Handler for event loop exceptions.
This will print the exception, using run_in_terminal.
"""
# For Python 2: we have to get traceback at this point, because
# we're still in the 'except:' block of the event loop where the
# traceback is still available. Moving this code in the
        # 'print_exception' coroutine will lose the exception.
tb = get_traceback_from_context(context)
formatted_tb = "".join(format_tb(tb))
async def in_term() -> None:
async with in_terminal():
# Print output. Similar to 'loop.default_exception_handler',
# but don't use logger. (This works better on Python 2.)
print("\nUnhandled exception in event loop:")
print(formatted_tb)
print("Exception %s" % (context.get("exception"),))
await _do_wait_for_enter("Press ENTER to continue...")
ensure_future(in_term())
def create_background_task(
self, coroutine: Awaitable[None]
) -> "asyncio.Task[None]":
"""
Start a background task (coroutine) for the running application. When
the `Application` terminates, unfinished background tasks will be
cancelled.
If asyncio had nurseries like Trio, we would create a nursery in
`Application.run_async`, and run the given coroutine in that nursery.
Not threadsafe.
"""
task = get_event_loop().create_task(coroutine)
self.background_tasks.append(task)
return task
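    # Hedged usage sketch (not part of the original class): a periodic task
    # owned by a running application ('app' is an illustrative assumption).
    #
    #     async def tick():
    #         while True:
    #             await asyncio.sleep(1)
    #             app.invalidate()
    #
    #     app.create_background_task(tick())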
async def cancel_and_wait_for_background_tasks(self) -> None:
"""
Cancel all background tasks, and wait for the cancellation to be done.
If any of the background tasks raised an exception, this will also
propagate the exception.
(If we had nurseries like Trio, this would be the `__aexit__` of a
nursery.)
"""
for task in self.background_tasks:
task.cancel()
for task in self.background_tasks:
try:
await task
except CancelledError:
pass
async def _poll_output_size(self) -> None:
"""
Coroutine for polling the terminal dimensions.
Useful for situations where `attach_winch_signal_handler` is not sufficient:
- If we are not running in the main thread.
- On Windows.
"""
size: Optional[Size] = None
interval = self.terminal_size_polling_interval
if interval is None:
return
while True:
await asyncio.sleep(interval)
new_size = self.output.get_size()
if size is not None and new_size != size:
self._on_resize()
size = new_size
def cpr_not_supported_callback(self) -> None:
"""
Called when we don't receive the cursor position response in time.
"""
if not self.output.responds_to_cpr:
return # We know about this already.
def in_terminal() -> None:
self.output.write(
"WARNING: your terminal doesn't support cursor position requests (CPR).\r\n"
)
self.output.flush()
run_in_terminal(in_terminal)
@overload
def exit(self) -> None:
"Exit without arguments."
@overload
def exit(self, *, result: _AppResult, style: str = "") -> None:
"Exit with `_AppResult`."
@overload
def exit(
self, *, exception: Union[BaseException, Type[BaseException]], style: str = ""
) -> None:
"Exit with exception."
def exit(
self,
result: Optional[_AppResult] = None,
exception: Optional[Union[BaseException, Type[BaseException]]] = None,
style: str = "",
) -> None:
"""
Exit application.
.. note::
If `Application.exit` is called before `Application.run()` is
called, then the `Application` won't exit (because the
`Application.future` doesn't correspond to the current run). Use a
`pre_run` hook and an event to synchronize the closing if there's a
chance this can happen.
:param result: Set this result for the application.
:param exception: Set this exception as the result for an application. For
a prompt, this is often `EOFError` or `KeyboardInterrupt`.
:param style: Apply this style on the whole content when quitting,
often this is 'class:exiting' for a prompt. (Used when
`erase_when_done` is not set.)
"""
assert result is None or exception is None
if self.future is None:
raise Exception("Application is not running. Application.exit() failed.")
if self.future.done():
raise Exception("Return value already set. Application.exit() failed.")
self.exit_style = style
if exception is not None:
self.future.set_exception(exception)
else:
self.future.set_result(cast(_AppResult, result))
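    # Hedged usage sketch (not part of the original class): a key binding that
    # terminates the application with a result ('bindings' is an illustrative
    # assumption).
    #
    #     @bindings.add("c-c")
    #     def _(event):
    #         event.app.exit(result="cancelled")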
def _request_absolute_cursor_position(self) -> None:
"""
Send CPR request.
"""
# Note: only do this if the input queue is not empty, and a return
# value has not been set. Otherwise, we won't be able to read the
# response anyway.
if not self.key_processor.input_queue and not self.is_done:
self.renderer.request_absolute_cursor_position()
async def run_system_command(
self,
command: str,
wait_for_enter: bool = True,
display_before_text: AnyFormattedText = "",
wait_text: str = "Press ENTER to continue...",
) -> None:
"""
Run system command (While hiding the prompt. When finished, all the
output will scroll above the prompt.)
:param command: Shell command to be executed.
        :param wait_for_enter: Wait for the user to press enter, when the
command is finished.
:param display_before_text: If given, text to be displayed before the
command executes.
:return: A `Future` object.
"""
async with in_terminal():
            # Try to use the same input/output file descriptors as the ones
            # used to run this application.
try:
input_fd = self.input.fileno()
except AttributeError:
input_fd = sys.stdin.fileno()
try:
output_fd = self.output.fileno()
except AttributeError:
output_fd = sys.stdout.fileno()
# Run sub process.
def run_command() -> None:
self.print_text(display_before_text)
p = Popen(command, shell=True, stdin=input_fd, stdout=output_fd)
p.wait()
await run_in_executor_with_context(run_command)
# Wait for the user to press enter.
if wait_for_enter:
await _do_wait_for_enter(wait_text)
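    # Hedged usage sketch (not part of the original class): from inside a key
    # binding handler one might run a shell command like this (the handler
    # context is an assumption).
    #
    #     await event.app.run_system_command("ls -la")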
def suspend_to_background(self, suspend_group: bool = True) -> None:
"""
(Not thread safe -- to be called from inside the key bindings.)
Suspend process.
:param suspend_group: When true, suspend the whole process group.
(This is the default, and probably what you want.)
"""
# Only suspend when the operating system supports it.
# (Not on Windows.)
if _SIGTSTP is not None:
def run() -> None:
# Send `SIGTSTP` to own process.
# This will cause it to suspend.
# Usually we want the whole process group to be suspended. This
# handles the case when input is piped from another process.
if suspend_group:
os.kill(0, _SIGTSTP)
else:
os.kill(os.getpid(), _SIGTSTP)
run_in_terminal(run)
def print_text(
self, text: AnyFormattedText, style: Optional[BaseStyle] = None
) -> None:
"""
Print a list of (style_str, text) tuples to the output.
(When the UI is running, this method has to be called through
`run_in_terminal`, otherwise it will destroy the UI.)
:param text: List of ``(style_str, text)`` tuples.
:param style: Style class to use. Defaults to the active style in the CLI.
"""
print_formatted_text(
output=self.output,
formatted_text=text,
style=style or self._merged_style,
color_depth=self.color_depth,
style_transformation=self.style_transformation,
)
@property
def is_running(self) -> bool:
"`True` when the application is currently active/running."
return self._is_running
@property
def is_done(self) -> bool:
if self.future:
return self.future.done()
return False
def get_used_style_strings(self) -> List[str]:
"""
Return a list of used style strings. This is helpful for debugging, and
for writing a new `Style`.
"""
attrs_for_style = self.renderer._attrs_for_style
if attrs_for_style:
return sorted(
[
re.sub(r"\s+", " ", style_str).strip()
for style_str in attrs_for_style.keys()
]
)
return []
class _CombinedRegistry(KeyBindingsBase):
"""
The `KeyBindings` of key bindings for a `Application`.
This merges the global key bindings with the one of the current user
control.
"""
def __init__(self, app: Application[_AppResult]) -> None:
self.app = app
self._cache: SimpleCache[
Tuple[Window, FrozenSet[UIControl]], KeyBindingsBase
] = SimpleCache()
@property
def _version(self) -> Hashable:
"""Not needed - this object is not going to be wrapped in another
KeyBindings object."""
raise NotImplementedError
def bindings(self) -> List[Binding]:
"""Not needed - this object is not going to be wrapped in another
KeyBindings object."""
raise NotImplementedError
def _create_key_bindings(
self, current_window: Window, other_controls: List[UIControl]
) -> KeyBindingsBase:
"""
Create a `KeyBindings` object that merges the `KeyBindings` from the
`UIControl` with all the parent controls and the global key bindings.
"""
key_bindings = []
collected_containers = set()
# Collect key bindings from currently focused control and all parent
# controls. Don't include key bindings of container parent controls.
container: Container = current_window
while True:
collected_containers.add(container)
kb = container.get_key_bindings()
if kb is not None:
key_bindings.append(kb)
if container.is_modal():
break
parent = self.app.layout.get_parent(container)
if parent is None:
break
else:
container = parent
        # Include global bindings (starting at the top-most container).
for c in walk(container):
if c not in collected_containers:
kb = c.get_key_bindings()
if kb is not None:
key_bindings.append(GlobalOnlyKeyBindings(kb))
# Add App key bindings
if self.app.key_bindings:
key_bindings.append(self.app.key_bindings)
# Add mouse bindings.
key_bindings.append(
ConditionalKeyBindings(
self.app._page_navigation_bindings,
self.app.enable_page_navigation_bindings,
)
)
key_bindings.append(self.app._default_bindings)
# Reverse this list. The current control's key bindings should come
# last. They need priority.
key_bindings = key_bindings[::-1]
return merge_key_bindings(key_bindings)
@property
def _key_bindings(self) -> KeyBindingsBase:
current_window = self.app.layout.current_window
other_controls = list(self.app.layout.find_all_controls())
key = current_window, frozenset(other_controls)
return self._cache.get(
key, lambda: self._create_key_bindings(current_window, other_controls)
)
def get_bindings_for_keys(self, keys: KeysTuple) -> List[Binding]:
return self._key_bindings.get_bindings_for_keys(keys)
def get_bindings_starting_with_keys(self, keys: KeysTuple) -> List[Binding]:
return self._key_bindings.get_bindings_starting_with_keys(keys)
async def _do_wait_for_enter(wait_text: AnyFormattedText) -> None:
"""
Create a sub application to wait for the enter key press.
This has two advantages over using 'input'/'raw_input':
- This will share the same input/output I/O.
- This doesn't block the event loop.
"""
from prompt_toolkit.shortcuts import PromptSession
key_bindings = KeyBindings()
@key_bindings.add("enter")
def _ok(event: E) -> None:
event.app.exit()
@key_bindings.add(Keys.Any)
def _ignore(event: E) -> None:
"Disallow typing."
pass
session: PromptSession[None] = PromptSession(
message=wait_text, key_bindings=key_bindings
)
await session.app.run_async()
@contextmanager
def attach_winch_signal_handler(
handler: Callable[[], None]
) -> Generator[None, None, None]:
"""
Attach the given callback as a WINCH signal handler within the context
manager. Restore the original signal handler when done.
The `Application.run` method will register SIGWINCH, so that it will
properly repaint when the terminal window resizes. However, using
`run_in_terminal`, we can temporarily send an application to the
    background, and run another app in between, which will then overwrite the
SIGWINCH. This is why it's important to restore the handler when the app
terminates.
"""
# The tricky part here is that signals are registered in the Unix event
# loop with a wakeup fd, but another application could have registered
# signals using signal.signal directly. For now, the implementation is
# hard-coded for the `asyncio.unix_events._UnixSelectorEventLoop`.
# No WINCH? Then don't do anything.
sigwinch = getattr(signal, "SIGWINCH", None)
if sigwinch is None or not in_main_thread():
yield
return
# Keep track of the previous handler.
# (Only UnixSelectorEventloop has `_signal_handlers`.)
loop = asyncio.get_event_loop()
previous_winch_handler = getattr(loop, "_signal_handlers", {}).get(sigwinch)
try:
loop.add_signal_handler(sigwinch, handler)
yield
finally:
# Restore the previous signal handler.
loop.remove_signal_handler(sigwinch)
if previous_winch_handler is not None:
loop.add_signal_handler(
sigwinch,
previous_winch_handler._callback,
*previous_winch_handler._args,
)
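# Hedged usage sketch (not part of the original module): standalone use of the
# context manager above, with an illustrative callback.
#
#     with attach_winch_signal_handler(lambda: print("terminal resized")):
#         ...  # run something that should react to terminal resizes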
|
dictation.py
|
#
# Features:
#  Input:
#   ○ Case-insensitive input; everything is normalised to lower case
#   △ Spaces etc. are skipped (for now up to two in a row); should later handle longer tokens such as proper nouns
#   ○ Automatic line breaks at sensible points (one break per unit)
#   ○ One-character hint feature
#   ○ Visual effect on typing mistakes
#  Audio:
#   △ Can play wav files; the sound keeps playing even after the child process exits, so switching to pygame/pyaudio would be better
#   ○ Repeat / pause
#   ☓ Skip forward/back by a few seconds (3s, 5s)
#   ○ Changed -> split the sound file on its silent parts (the split parameters still need tuning)
#   ○ Change the playback position from keyboard and mouse input
#   ○ Data exchange between processes
#  Other:
#   - Scoring feature: count the number of mistakes
#   - Support multiple exercises
#   - Avoid playing the same exercise twice in a row
#   - Show the expected characters masked with '_' in advance
#   - Measure and display the elapsed time
#   ○ Full reset / start over
#
# Keyboard controls:
#   - space   -> start repeat
#   - Shift_L -> back one track
#   - Shift_R -> next track
import tkinter as tk
from pydub import AudioSegment
from pydub.silence import split_on_silence
import os
import subprocess
from multiprocessing import Process,Value
import glob
import time
import re
from functools import partial
class Application(tk.Frame):
count = 0
row = 1
true_word = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0']
skip_word = [" ",".",",",";","'","-","?","’"]
row_limit_word = 60
hint = ''
flag_keyword = False
flag_skip = 0
flag_row = False
def __init__(self,master,sentence,num):
super().__init__(master)
self.pack()
self.sentence = sentence
self.num = num
self.master.geometry('1400x800')
self.master.title('ディクテーション')
self.buffer = tk.StringVar()
self.a = tk.Label(textvariable=self.buffer,font=('',30))
self.a.bind('<Key>',self.input)
self.a.pack()
self.button_function()
self.a.focus_set()
def input(self,event):
        key = event.keysym  # receive the pressed key
self.output(key)
def output(self,key):
self.flag_keyword = False
self.flag_skip = 0
self.flag_row = False
if key == self.sentence[self.count].lower() or key == 'hint':
self.count += 1
self.flag_keyword = True
if self.sentence[self.count] in self.skip_word:
if not self.sentence[self.count+1] in self.skip_word:
self.count += 1
self.flag_skip = 1
else:
self.count += 2
self.flag_skip = 2
if self.count > self.row_limit_word*self.row and self.sentence[self.count-1] == ' ':
self.sentence.insert(self.count,'\n')
self.count += 1
self.row += 1
self.flag_row = True
if (self.flag_skip == 0 and self.flag_keyword == True and key == self.sentence[self.count-1].lower()) \
or (self.flag_skip == 1 and key == self.sentence[self.count-2].lower())\
or self.flag_row == True:
self.a['fg'] = '#000000'
self.buffer.set(''.join(self.sentence[:self.count]))
elif self.flag_skip == 2:
self.a['fg'] = '#000000'
self.buffer.set(''.join(self.sentence[:self.count-1]))
else:
if key in self.true_word:
self.a['fg'] = '#990000'
self.buffer.set(''.join(self.sentence[:self.count])+key)
elif key == 'space':
self.num.value = 1
elif key == 'Shift_L':
self.num.value = 2
elif key == 'Shift_R':
self.num.value = 3
elif key == 'hint':
self.a['fg'] = '#008800'
self.buffer.set(''.join(self.sentence[:self.count]))
elif key == 'reset':
self.buffer.set(''.join(self.sentence[:self.count]))
def button_function(self):
self.hint_b = tk.Button(text='1文字ヒント',width=10,height=1,bg='#00aaff',command=self.one_hint,font=("",15))
self.hint_b.place(x=70,y=740)
self.start_b = tk.Button(text='再生',width=10,height=1,bg='#00aaff',command=partial(self.sound_func,1),font=("",15))
self.start_b.place(x=350,y=740)
self.back_b = tk.Button(text='戻る',width=10,height=1,bg='#00aaff',command=partial(self.sound_func,2),font=("",15))
self.back_b.place(x=630,y=740)
self.next_b = tk.Button(text='進む',width=10,height=1,bg='#00aaff',command=partial(self.sound_func,3),font=("",15))
self.next_b.place(x=910,y=740)
self.reset_b = tk.Button(text='リセット',width=10,height=1,bg='#ff0000',command=self.reset,font=("",15))
self.reset_b.place(x=1190,y=740)
def one_hint(self):
key = 'hint'
self.output(key)
def sound_func(self,num):
self.num.value = num
def reset(self):
self.count = 0
self.row = 1
self.num.value = 0
self.output('reset')
self.sentence = [s for s in self.sentence if s != '\n']
def sound(audio_file,num):
track = 1
while(1):
if track >= 1:
subprocess.run(['aplay',audio_file[track-1]])
track = -track
if num.value != -1:
if num.value == 1: #press space and repeat start
track = -track
elif num.value == 2: #press Shift_L and back track
track += 1
track = -track
elif num.value == 3: #press Shift_R and next track
track -= 1
track = -track
elif num.value == 0: #push reset button
track = 0
num.value = -1
if abs(track) >= len(audio_file) or track == 0:
track = -1
textfile = './data/text/part4.txt'
origin_sound_file = './data/sound/listening85.wav'
split_sound_dir = './data/split_sound/'
def main():
with open(textfile,'r') as f:
line = f.readlines()
sentence = list(line[0])  # the file is expected to contain a single line of text
for path in glob.glob(split_sound_dir+'*.wav'):
os.remove(path)
origin_sound = AudioSegment.from_file(origin_sound_file,format='wav')  # split at silent sections below
chunks = split_on_silence(origin_sound, min_silence_len=150, silence_thresh=-55, keep_silence=6)
for i, chunk in enumerate(chunks):
chunk.export(split_sound_dir+ str(i) +'.wav', format='wav')
audio_file = glob.glob(split_sound_dir+'*.wav')
sort_key = lambda val : int(re.sub("\\D","",val))  # \D strips everything that is not a digit
audio_file.sort(key=sort_key)
num = Value('i',-1)
p = Process(target=sound,args=(audio_file,num))
p.start()
root = tk.Tk()
app = Application(master=root,sentence=sentence,num=num)
app.mainloop()
p.terminate()
if __name__ == "__main__":
main()
|
run_sweep.py
|
import sys
import os
import argparse
import itertools
from experiment_utils import config
from experiment_utils.utils import query_yes_no
import doodad as dd
import doodad.mount as mount
import doodad.easy_sweep.launcher as launcher
from doodad.easy_sweep.hyper_sweep import run_sweep_doodad
import multiprocessing
import random
from doodad.easy_sweep.hyper_sweep import Sweeper
import time
def run_sweep(run_experiment, sweep_params, exp_name, instance_type='c4.xlarge'):
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='local',
help='Mode for running the experiments - local: runs on local machine, '
'ec2: runs on AWS ec2 cluster (requires a proper configuration file)')
parser.add_argument('--num_gpu', '-g', type=int, default=1,
help='Number of GPUs to use for running the experiments')
parser.add_argument('--exps_per_gpu', '-e', type=int, default=1,
help='Number of experiments per GPU simultaneously')
parser.add_argument('--num_cpu', '-c', type=int, default=multiprocessing.cpu_count(),
help='Number of threads to use for running experiments')
args = parser.parse_args(sys.argv[1:])
local_mount = mount.MountLocal(local_dir=config.BASE_DIR, pythonpath=True)
docker_mount_point = os.path.join(config.DOCKER_MOUNT_DIR, exp_name)
sweeper = launcher.DoodadSweeper([local_mount], docker_img=config.DOCKER_IMAGE,
docker_output_dir=docker_mount_point,
local_output_dir=os.path.join(config.DATA_DIR, 'local', exp_name))
sweeper.mount_out_s3 = mount.MountS3(s3_path='', mount_point=docker_mount_point, output=True)
if args.mode == 'ec2':
print("\n" + "**********" * 10 + "\nexp_prefix: {}\nvariants: {}".format(exp_name, len(
list(itertools.product(*[value for value in sweep_params.values()])))))
if query_yes_no("Continue?"):
sweeper.run_sweep_ec2(run_experiment, sweep_params, bucket_name=config.S3_BUCKET_NAME,
instance_type=instance_type,
region='us-west-2', s3_log_name=exp_name, add_date_to_logname=False)
elif args.mode == 'local_docker':
mode_docker = dd.mode.LocalDocker(
image=sweeper.image,
)
run_sweep_doodad(run_experiment, sweep_params, run_mode=mode_docker,
mounts=sweeper.mounts)
elif args.mode == 'local':
sweeper.run_sweep_serial(run_experiment, sweep_params)
elif args.mode == 'local_par':
sweeper.run_sweep_parallel(run_experiment, sweep_params)
elif args.mode == 'multi_gpu':
run_sweep_multi_gpu(run_experiment, sweep_params, num_gpu=args.num_gpu, exps_per_gpu=args.exps_per_gpu)
elif args.mode == 'local_singularity':
mode_singularity = dd.mode.LocalSingularity(
image='~/maml_zoo.simg')
run_sweep_doodad(run_experiment, sweep_params, run_mode=mode_singularity,
mounts=sweeper.mounts)
else:
raise NotImplementedError
def run_sweep_multi_gpu(run_method, params, repeat=1, num_cpu=multiprocessing.cpu_count(), num_gpu=2, exps_per_gpu=2):
sweeper = Sweeper(params, repeat, include_name=True)
gpu_frac = 0.9 / exps_per_gpu
num_runs = num_gpu * exps_per_gpu
cpu_per_gpu = num_cpu // num_gpu  # integer CPU-range bounds for taskset
exp_args = []
for config in sweeper:
exp_args.append((config, run_method))
random.shuffle(exp_args)
processes = [None] * num_runs
run_info = [(i, (i * cpu_per_gpu, (i + 1) * cpu_per_gpu)) for i in range(num_gpu)] * exps_per_gpu
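# Illustrative note (added comment; the numbers are assumed example values): with
# num_cpu=8, num_gpu=2 and exps_per_gpu=2 this gives cpu_per_gpu=4 and
# run_info == [(0, (0, 4)), (1, (4, 8)), (0, (0, 4)), (1, (4, 8))],
# i.e. one (gpu_id, (cpu_start, cpu_end)) slot per concurrently running experiment.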
for kwarg, run in exp_args:
launched = False
while not launched:
for idx in range(num_runs):
if processes[idx] is None or not processes[idx].is_alive():
kwarg['gpu_frac'] = gpu_frac
p = multiprocessing.Process(target=run, kwargs=kwarg)
os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % run_info[idx][0]
os.system("taskset -p -c %d-%d %d" % (run_info[idx][1] + (os.getpid(),)))
p.start()
processes[idx] = p
launched = True
break
if not launched:
time.sleep(10)
|
download.py
|
#By Wang Haolong
import requests as re
import threading as th
import os
Max_Threads=5
class Crawler:
'''To download a book'''
def __init__(self, path, url):
'''init'''
self.path=path
self.url=url
self.url_list=[]
self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
self.create_path()
self.get_url()
self.start_downloading()
def create_path(self):
'''Create path to save files'''
if not os.path.exists(self.path):
os.mkdir(self.path)
print('Save path is ready')
def get_url(self):
'''get urls from the html file'''
self.html_file = re.get(self.url, headers = self.headers).content.decode('utf-8').splitlines()
self.flag = False
self.url_temp=''
for i in self.html_file[292]:
if i == '"':
if self.flag == False:
self.flag = True
else:
if 'http' in self.url_temp and 'jpeg' in self.url_temp:
self.url_list.append(self.url_temp)
self.url_temp = ''
self.flag = False
elif self.flag:
self.url_temp += i
print('Fetched ' + str(len(self.url_list)) + ' resource URLs')
def getpic(self, id, url):
'''Download and save files'''
with open(self.path + '\\' + 'page' + str(id) + '.jpeg', 'wb') as goal_file:
goal_file.write(re.get(url, headers = self.headers).content)
def start_downloading(self):
'''start'''
cnt = 0
threads = []
while len(self.url_list) or len(threads):
# Drop finished threads; build a new list instead of removing while iterating.
threads = [thread for thread in threads if thread.is_alive()]
while len(threads) < Max_Threads and len(self.url_list):
thread = th.Thread(target= self.getpic, args= (cnt, self.url_list.pop(0)))
thread.daemon = True
thread.start()
threads.append(thread)
cnt += 1
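# Illustrative usage sketch (the path and URL are placeholders, not from the source;
# Crawler does all of its work in __init__):
# if __name__ == '__main__':
#     Crawler('./book_pages', 'https://example.com/book.html')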
|
job_manager.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The worker module.
"""
import argparse
import os
import tempfile
import time
from abc import abstractmethod
from copy import deepcopy
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
import boto3
from tqdm import tqdm
from ..configs import append_config_parser
from ..database.table import create_table
from ..logger import get_logger
from ..util import dump_to_yaml, load_from_yaml, upload_s3_file
from .job import Job, JobConfigs, JobEventLogger, JobState
from .result import TuneErrorCode, TuneResult
from .rpc import RPCClient, launch_server
log = get_logger("JobManager")
JOB_MANAGER_TABLE: Dict[str, Type["JobManagerBase"]] = {}
def register_job_manager(name: str, format_help: str) -> Callable:
"""Register job manager.
Parameters
----------
name: str
The job manager name.
format_help: str
The job manager specific config format description.
Returns
-------
reg: Callable
A callable function for registration.
"""
def _do_reg(manager: Type[JobManagerBase]):
if name in JOB_MANAGER_TABLE:
raise RuntimeError("%s has been registered" % name)
# Register job manager
JOB_MANAGER_TABLE[name] = manager
# Register job manager config
cfg_func = lambda: [
(
["--{}".format(name)],
{
"help": "Job manager description in YAML format. "
'Format: "<TVM target string>: {}"'.format(format_help),
},
)
]
append_config_parser("top.tune", "{} job manager options".format(name))(cfg_func)
return manager
return _do_reg
class JobManagerBase:
"""The base class of job manager."""
def __init__(
self,
target: str,
jobs: List[Job],
configs: argparse.Namespace,
):
"""Initialize a job manager.
Parameters
----------
target: str
The target string.
jobs: List[Job]
The job list.
configs: argparse.Namespace
The system configuration.
"""
self.target = target
job_cls: Optional[Type[Job]] = None
# Map from job.stateless_hash to job object.
self.job_set: Dict[int, Job] = {}
# Select jobs for this target.
for job in jobs:
# Filter out other targets.
if not job.is_target_compatible(target):
continue
# Update the target.
clone_job = deepcopy(job)
clone_job.workload.target = target
self.job_set[clone_job.stateless_hash] = clone_job
if job_cls is None:
job_cls = type(clone_job)
if not self.job_set:
raise RuntimeError(
"Terminate one %s due to no jobs for %s" % (type(self).__name__, target)
)
# Create the job configs. If the serialized job config is specified,
# then load it directly to override other CLI configs.
assert job_cls is not None
self.job_configs: JobConfigs = (
load_from_yaml(configs.job_configs)
if configs.job_configs is not None
else job_cls.create_job_configs(configs)
)
# Create a table in DB if needed and get its ARN.
assert self.job_configs.commit_options is not None
self.job_configs.commit_options["table-arn"] = create_table(
self.job_configs.commit_options["table-name"], **self.job_configs.commit_options["db"]
)
# Resume job states from the trace file.
if configs.trace_file is not None:
self.trace_file = configs.trace_file
else:
self.trace_file = "lorien-tune-%s.trace" % str(time.time()).replace(".", "")
log.info("Tuning state is maintained in %s", self.trace_file)
self.replay_trace()
self.resume_job_states()
# Resume job states and put WAITING jobs to the waiting queue.
self.waiting_jobs = [job for job in self.job_set.values() if job.state == JobState.WAITING]
# Start tracing job changes.
self.job_event_logger = JobEventLogger(self.trace_file)
for job in self.job_set.values():
job.trace(self.job_event_logger)
def num_jobs(self) -> int:
"""Return the total number of jobs to be tuned by this manager."""
return len(self.job_set)
@abstractmethod
def desc(self) -> str:
"""Return a description shown at the beginning of progress bar while tuning."""
raise NotImplementedError
@abstractmethod
def tune_impl(self, progress: tqdm):
"""Workload tuning implementation. The tuning results are directly stored in
self.job_n_results.
Parameters
----------
progress: tqdm
The formulated progress bar to be updated progressively.
"""
raise NotImplementedError
@abstractmethod
def resume_job_states(self):
"""Resume the jobs that were being tuned when the state was dumped."""
raise NotImplementedError
def tune(self) -> List[TuneResult]:
"""Tune workloads on the servers via RPC.
Returns
-------
tune_results: List[TuneResult]
The result can be either the absolute performance,
the speedup over the last performance, or the error message.
"""
log.info("Tuning target %s", self.target)
progress = tqdm(
total=self.num_jobs(),
desc=self.desc(),
bar_format="{desc}{percentage:3.0f}%|{bar:50}{r_bar}",
)
self.tune_impl(progress)
print("\n") # Sometimes tqdm miss the last newline.
# Aggregate results
results = []
for job in self.job_set.values():
results.append(job.result)
return results
def replay_trace(self):
"""Update the current state from the trace file."""
if not os.path.exists(self.trace_file):
return
with open(self.trace_file, "r") as filep:
for trace in filep:
try:
_, job_str = trace.replace("\n", "").split("\t")
except ValueError:
log.warning("Invalid trace: %s", trace)
continue
try:
loaded_job = load_from_yaml(job_str, Job)
except RuntimeError as err:
log.warning("Invalid job format: %s", str(err))
continue
if loaded_job.stateless_hash in self.job_set:
# Update state of an existing job.
curr_job = self.job_set[loaded_job.stateless_hash]
curr_job.state = loaded_job.state
curr_job._metadata = dict(loaded_job._metadata)
curr_job.result = deepcopy(loaded_job.result)
else:
# Recover missing jobs.
self.job_set[loaded_job.stateless_hash] = loaded_job
log.info("Successfully resumed job manager state from %s", self.trace_file)
@register_job_manager("local", "no additional config is required")
class LocalJobManager(JobManagerBase):
"""Local job manager class."""
def desc(self) -> str:
return "Local"
def resume_job_states(self):
"""Resume the jobs that were being tuned when the state was dumped."""
for job in self.job_set.values():
job.state = JobState.WAITING if job.state == JobState.TUNING else job.state
def tune_impl(self, progress):
"""Tune workloads with locally.
.. note::
Local tuner will not update the progress bar in order to keep the console concise.
Parameters
----------
progress: tqdm
The formulated progress bar to be updated progressively.
"""
self.job_configs.localize(self.target)
while self.waiting_jobs:
curr_job = self.waiting_jobs.pop()
assert curr_job.stateless_hash in self.job_set
curr_job.state = JobState.TUNING
curr_job.tune(
self.job_configs.tune_options,
self.job_configs.measure_options,
self.job_configs.commit_options,
)
log.info(curr_job.result)
curr_job.state = JobState.FINISHED
@register_job_manager(
"batch",
"{ job_queue: <Job queue>, job_def: <Job definition>, "
"job_bucket: <s3://bucket/folder(optional)>}",
)
class AWSBatchJobManager(JobManagerBase):
"""AWS batch job manager class."""
# Timeout for checking if a job is halted or failed, unit is in seconds.
RELAUNCH_TIMEOUT = 1800
def __init__(
self,
target: str,
jobs: List[Job],
configs: argparse.Namespace,
):
"""Initialize an AWS batch job manager."""
super(AWSBatchJobManager, self).__init__(target, jobs, configs)
# Parse batch environment.
batch_info: Dict[str, Any] = load_from_yaml(configs.batch)
for field in ["target", "job_queue", "job_def", "job_bucket"]:
if field not in batch_info:
raise RuntimeError("%s is missing in AWS batch config" % field)
self.job_queue = batch_info["job_queue"]
self.job_def = batch_info["job_def"]
self.job_bucket = batch_info["job_bucket"]
self.container_env: Dict[str, str] = {}
# Parse AWS credentials.
session = boto3.session.Session()
if session.region_name is None:
raise RuntimeError('AWS region is unset. Please use "aws configure" to setup a region')
self.container_env["AWS_REGION"] = session.region_name
self.terminated_jobs: List[str] = []
self.last_check_time = time.time()
credential = session.get_credentials()
if credential is None:
raise RuntimeError(
'AWS credential is required for AWS batch. Please use "aws configure" to setup'
)
self.container_env["AWS_ACCESS_KEY_ID"] = credential.access_key
self.container_env["AWS_SECRET_ACCESS_KEY"] = credential.secret_key
self.job_configs_str = dump_to_yaml(self.job_configs)
# Map from AWS batch job ID to job.
self.jobid_2_job: Dict[str, Job] = {}
def desc(self) -> str:
return "AWS batch job queue {0}".format(self.job_queue)
def resume_job_states(self):
"""Check if the tuning jobs have correct metadata."""
invalid_jobs = []
for job in self.job_set.values():
if job.state != JobState.TUNING:
continue
try:
job.get_metadata("AWSBatchJobID")
job.get_metadata("AWSBatchJobDetail")
except RuntimeError:
log.warning("AWSBatchJobID or AWSBatchJobDetail is missing in %s", job)
invalid_jobs.append(job.stateless_hash)
continue
# Remove invalid jobs from this tuning.
for job_hash in invalid_jobs:
del self.job_set[job_hash]
def tune_impl(self, progress):
"""Tune workloads with AWS batch.
Parameters
----------
progress: tqdm
The formulated progress bar to be updated progressively.
"""
# Add resumed tuning jobs.
for job in self.job_set.values():
if job.state == JobState.TUNING:
self.jobid_2_job[job.get_metadata("AWSBatchJobID")] = job
# Connect to AWS batch.
batch_client = boto3.client("batch")
# Make the command.
command = ["python3", "-m", "lorien", "tune", "--local", self.target]
command += ["--job-configs", self.job_configs_str]
# Submit for tuning.
while self.waiting_jobs:
curr_job = self.waiting_jobs.pop()
job_str = dump_to_yaml(curr_job)
if len(job_str) > 7000:
# AWS batch limits the job payload to 30 KiB and the container override
# length to 8192, so we cannot directly submit the serialized job
# if it is too large.
with tempfile.NamedTemporaryFile(
mode="w", prefix="lorien_upload_job_", suffix=".yaml"
) as filep:
filep.write(job_str)
filep.flush()
s3_path = "s3://{0}/{1}".format(self.job_bucket, os.path.basename(filep.name))
err_msg = upload_s3_file(filep.name, s3_path)
if err_msg:
raise RuntimeError(err_msg)
job_str = s3_path
job_detail = {
"jobName": "lorien-tuning-job",
"jobQueue": self.job_queue,
"jobDefinition": self.job_def,
"containerOverrides": {
"command": command + ["--job", job_str],
"environment": [
{"name": name, "value": val} for name, val in self.container_env.items()
],
},
}
try:
res = batch_client.submit_job(**job_detail)
self.jobid_2_job[res["jobId"]] = curr_job
curr_job.set_metadata("AWSBatchJobID", res["jobId"])
curr_job.set_metadata("AWSBatchJobDetail", job_detail)
curr_job.state = JobState.TUNING
except Exception as err: # pylint: disable=broad-except
log.warning("Failed to submit %s to AWS batch: %s", str(curr_job), str(err))
result = TuneResult()
result.error_code = TuneErrorCode.FAIL_TO_SUBMIT
result.error_msgs.append("Failed to submit to AWS batch")
assert curr_job.stateless_hash in self.job_set
curr_job.result = result
curr_job.state = JobState.FINISHED
continue
# Calculate finished jobs.
done_count = sum(
[1 if job.state == JobState.FINISHED else 0 for job in self.job_set.values()]
)
progress.update(done_count)
# Check progress.
while done_count < self.num_jobs():
# Divide job IDs into chunks of at most 100, because the describe_jobs API only allows
# at most 100 jobs in each query.
jobs_desc = []
job_ids = list(self.jobid_2_job.keys())
chunks = [
job_ids[i * 100 : (i + 1) * 100] for i in range((len(job_ids) + 100 - 1) // 100)
]
for chunk in chunks:
jobs_desc += batch_client.describe_jobs(jobs=chunk)["jobs"]
# Filter finished jobs
success_ids = [desc["jobId"] for desc in jobs_desc if desc["status"] == "SUCCEEDED"]
fail_ids = [desc["jobId"] for desc in jobs_desc if desc["status"] == "FAILED"]
new_done_count = len(success_ids) + len(fail_ids)
# Remove finished jobs and update results.
for jobid in fail_ids:
curr_job = self.jobid_2_job[jobid]
result = TuneResult()
result.error_code = TuneErrorCode.FAIL_TO_GET_RESULT
result.error_msgs.append("Failed")
assert curr_job.stateless_hash in self.job_set
curr_job.result = result
curr_job.state = JobState.FINISHED
del self.jobid_2_job[jobid]
for jobid in success_ids:
curr_job = self.jobid_2_job[jobid]
result = TuneResult()
result.error_code = TuneErrorCode.FAIL_TO_GET_RESULT
result.error_msgs.append("Success")
assert curr_job.stateless_hash in self.job_set
curr_job.result = result
curr_job.state = JobState.FINISHED
del self.jobid_2_job[jobid]
# Update status.
progress.update(new_done_count)
done_count += new_done_count
time.sleep(1)
self.relaunch_hanging_jobs(batch_client, jobs_desc)
def relaunch_hanging_jobs(
self,
batch_client,
jobs_desc: List[Dict[str, Any]],
):
"""Timed monitoring check for halted/failed jobs and relaunch them
Parameters
----------
batch_client: botocore.client.Batch
boto3 client for AWS batch. Note that we do not annotate boto3 types because
it requires an additional package.
jobs_desc: List[Dict[str, Any]]
Job descriptions extracted for each job.
"""
def get_log_events(log_group: str, stream_name: List[str], filter_pattern: str = ""):
try:
client = boto3.client("logs")
resp = client.filter_log_events(
logGroupName=log_group,
logStreamNames=stream_name,
filterPattern=filter_pattern,
limit=10000,
)
return resp["events"]
except Exception as err: # pylint: disable=broad-except
log.warning(
"Failed to obtain log from AWS Cloudwatch for: %s %s %s",
log_group,
stream_name,
str(err),
)
return []
self.curr_time = time.time()
if self.curr_time - self.last_check_time >= self.RELAUNCH_TIMEOUT:
# Check all running jobs when relaunch timeout is reached
running_jobs = [
[desc["jobId"], desc["container"]["logStreamName"]]
for desc in jobs_desc
if desc["status"] == "RUNNING"
]
for running_job in running_jobs:
job_id, log_stream = running_job[0], running_job[1]
# Prevent redundantly killing terminated jobs
if job_id in self.terminated_jobs:
continue
job = self.jobid_2_job[job_id]
job_detail = job.get_metadata("AWSBatchJobDetail")
events = get_log_events("/aws/batch/job", [log_stream], "Too many errors")
is_in_debug_mode = bool(events)
relaunch = False
if is_in_debug_mode:
events = get_log_events("/aws/batch/job", [log_stream], "LOG_COUNT")
assert len(events) >= 2  # Should not have fewer than 2 event logs.
first_time_interval = events[1]["timestamp"] - events[0]["timestamp"]
curr_timestamp = events[1]["timestamp"]
# Check the runtime of searching a number of trials and terminate the job
# if it now needs 10x longer time to finish the same number of trials.
for event in events[2:]:
time_interval = event["timestamp"] - curr_timestamp
if time_interval > first_time_interval * 10:
relaunch = True
break
curr_timestamp = event["timestamp"]
# Relaunch the same job and terminate the hanging one.
if relaunch:
try:
res = batch_client.submit_job(**job_detail)
except Exception as err: # pylint: disable=broad-except
log.warning("Failed to submit the relaunched job : %s %s", job_id, str(err))
break
try:
batch_client.terminate_job(
jobId=job_id,
reason="In DEBUG mode for a long time. "
"Relaunch a job with ID {}".format(res["jobId"]),
)
self.terminated_jobs.append(job_id)
except Exception as err: # pylint: disable=broad-except
log.warning("Failed to terminate job : %s %s", job_id, str(err))
break
job.set_metadata("AWSBatchJobID", res["jobId"])
job.set_metadata("AWSBatchJobDetail", job_detail)
self.jobid_2_job[res["jobId"]] = job
break
self.last_check_time = self.curr_time
@register_job_manager("rpc", "port")
class RPCJobManager(JobManagerBase):
"""RPC job manager class."""
def __init__(
self,
target: str,
jobs: List[Job],
configs: argparse.Namespace,
):
"""Initialize a RPC job manager."""
super(RPCJobManager, self).__init__(target, jobs, configs)
# Parse server info.
server_info: Dict[str, Any] = load_from_yaml(configs.rpc)
if "port" not in server_info:
raise RuntimeError("port is missing in RPC server config")
server_port = server_info["port"]
# Launch a RPC server with daemon so that it will be terminated with the main thread.
self.server = Thread(target=launch_server, args=(server_port, target), daemon=True)
try:
self.server.start()
except Exception as err: # pylint: disable=broad-except
raise RuntimeError("Failed to launch RPC server: %s" % str(err))
configs.server = "localhost:{}".format(server_port)
configs.target = target
# Launch a RPC client to initialize the server.
log.info("Initializing RPC server")
while True:
try:
self.client = RPCClient(configs, silent=True)
break
except Exception as err: # pylint: disable=broad-except
time.sleep(1)
continue
self.client.init_server(self.job_configs)
if not self.client.is_server_init():
raise RuntimeError("Failed to initialize RPC server")
def desc(self) -> str:
return "{0} RPC workers".format(self.client.num_workers())
def resume_job_states(self):
"""Resume the jobs that were being tuned when the state was dumped."""
for job in self.job_set.values():
job.state = JobState.WAITING if job.state == JobState.TUNING else job.state
def tune_impl(self, progress):
"""Tune workloads with RPC hosts.
Parameters
----------
progress: tqdm
The formulated progress bar to be updated progressively.
"""
# Submit for tuning.
done_count = 0
while self.waiting_jobs or done_count < self.num_jobs():
# Fetch (job, tune result) pairs in YAML string format.
new_results: List[Tuple[str, str]] = self.client.fetch_results()
if new_results:
progress.update(len(new_results))
done_count += len(new_results)
# Process results.
for job_str, result_str in new_results:
# Note that the newly created job is a different object from the one in the job set,
# and we must use the original object to maintain the trace.
job_hash = load_from_yaml(job_str, Job).stateless_hash
result: TuneResult = load_from_yaml(result_str, TuneResult)
assert job_hash in self.job_set
curr_job = self.job_set[job_hash]
curr_job.state = JobState.FINISHED
curr_job.result = result
# Commit results if workers did not do so.
if "tune_logs" in result.metadata:
log_file = os.path.join("/tmp", curr_job.workload.get_log_file_name())
with open(log_file, "w") as filep:
filep.write(result.metadata["tune_logs"])
filep.flush()
result.log_file = log_file
result.commit(
self.job_configs.commit_options, workload=curr_job.workload, silent=True
)
os.remove(log_file)
del result.metadata["tune_logs"]
# Submit new jobs.
while self.waiting_jobs:
curr_job = self.waiting_jobs.pop()
# Keep submitting until server refuses to accept.
if not self.client.submit(dump_to_yaml(curr_job)):
self.waiting_jobs.append(curr_job)
break
curr_job.state = JobState.TUNING
# Update status.
progress.set_description(self.desc())
time.sleep(1)
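# Illustrative sketch (not part of the original module; `jobs`, `configs`, and the
# target string "llvm" are assumptions): a registered manager is looked up through
# JOB_MANAGER_TABLE and driven as below.
#
# manager_cls = JOB_MANAGER_TABLE["local"]
# manager = manager_cls("llvm", jobs, configs)  # jobs: List[Job], configs: argparse.Namespace
# results = manager.tune()                      # List[TuneResult], one entry per job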
|
websocket_client.py
|
# gdax/WebsocketClient.py
# original author: Daniel Paquin
# mongo "support" added by Drew Rice
#
#
# Template object to receive messages from the gdax Websocket Feed
from __future__ import print_function
import json
import base64
import hmac
import hashlib
import time
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from pymongo import MongoClient
from gdax.gdax_auth import get_auth_headers
class WebsocketClient(object):
def __init__(self, url="wss://ws-feed.gdax.com", products=None, message_type="subscribe", mongo_collection=None,
should_print=True, auth=False, api_key="", api_secret="", api_passphrase="", channels=None):
self.url = url
self.products = products
self.channels = channels
self.type = message_type
self.stop = False
self.error = None
self.ws = None
self.thread = None
self.auth = auth
self.api_key = api_key
self.api_secret = api_secret
self.api_passphrase = api_passphrase
self.should_print = should_print
self.mongo_collection = mongo_collection
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.thread.start()
def _connect(self):
if self.products is None:
self.products = ["BTC-USD"]
elif not isinstance(self.products, list):
self.products = [self.products]
if self.url[-1] == "/":
self.url = self.url[:-1]
if self.channels is None:
sub_params = {'type': 'subscribe', 'product_ids': self.products}
else:
sub_params = {'type': 'subscribe', 'product_ids': self.products, 'channels': self.channels}
if self.auth:
timestamp = str(time.time())
message = timestamp + 'GET' + '/users/self'
sub_params.update(get_auth_headers(timestamp, message, self.api_key, self.api_secret, self.api_passphrase))
self.ws = create_connection(self.url)
self.ws.send(json.dumps(sub_params))
if self.type == "heartbeat":
sub_params = {"type": "heartbeat", "on": True}
else:
sub_params = {"type": "heartbeat", "on": False}
self.ws.send(json.dumps(sub_params))
def _listen(self):
while not self.stop:
try:
if int(time.time() % 30) == 0:
# Set a 30 second ping to keep connection alive
self.ws.ping("keepalive")
data = self.ws.recv()
msg = json.loads(data)
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
if self.type == "heartbeat":
self.ws.send(json.dumps({"type": "heartbeat", "on": False}))
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException as e:
pass
self.on_close()
def close(self):
self.stop = True
self.thread.join()
def on_open(self):
if self.should_print:
print("-- Subscribed! --\n")
def on_close(self):
if self.should_print:
print("\n-- Socket Closed --")
def on_message(self, msg):
if self.should_print:
print(msg)
if self.mongo_collection: # dump JSON to given mongo collection
self.mongo_collection.insert_one(msg)
def on_error(self, e, data=None):
self.error = e
self.stop = True
print('{} - data: {}'.format(e, data))
if __name__ == "__main__":
import sys
import gdax
import time
class MyWebsocketClient(gdax.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.gdax.com/"
self.products = ["BTC-USD", "ETH-USD"]
self.message_count = 0
print("Let's count the messages!")
def on_message(self, msg):
print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
def on_close(self):
print("-- Goodbye! --")
wsClient = MyWebsocketClient()
wsClient.start()
print(wsClient.url, wsClient.products)
try:
while True:
print("\nMessageCount =", "%i \n" % wsClient.message_count)
time.sleep(1)
except KeyboardInterrupt:
wsClient.close()
if wsClient.error:
sys.exit(1)
else:
sys.exit(0)
|
stepper_test_thread.py
|
#!/usr/bin/env python3
""" test example file for rpiMotorlib.py L298 stepper tests"""
import time
import pi_stepper as s
import threading
# from stepper_lib import RpiMotorLib
max_speed = 0.002
min_speed = 0.06
GpioPins_MA = [13, 11, 15, 12]
GpioPins_MB = [37, 33, 35, 16]
# GpioPins_MA = [10, 10, 10, 10]
# Declare a named instance of the class; pass a name and the type of motor
# type of motor(Nema) is case sensitive
motor_A = s.Stepper(
"motor_A", "Nema", GpioPins_MA, max_speed, min_speed, "s", "full", False
)
motor_B = s.Stepper(
"motor_B", "Nema", GpioPins_MB, max_speed, min_speed, "d", "full", False
)
def thread_motor(thread_name, motor):
while 1:
motor.motor_run()
# import the random module
import random
# determining the values of the parameters
mu = 100
sigma = 50
def main():
"""main function loop"""
# ====== tests for motor L298STepTest ====
t1 = threading.Thread(name="MA", target=thread_motor, args=("MA", motor_A))
# Started the threads
t1.start()
t2 = threading.Thread(name="MB", target=thread_motor, args=("MB", motor_B))
# Started the threads
t2.start()
motor_A.set_speed(255)
motor_B.set_speed(255)
print("Test #0: move 90°")
motor_A.set_target(180)
motor_B.set_target(-180)
time.sleep(3)
# motor_A.move_speed_control(255, True)
# time.sleep(10)
# while 1:
# # using the gauss() method
# pos = random.gauss(mu, sigma)
# speed = random.choice([10, 50, 100, 150, 200, 250, 255, 245])
# sign = random.choice([0, 1])
# print(
# "---------------"
# + str(speed)
# + " "
# + str(sign)
# + " "
# + str(motor_A.current_pos)
# )
# a = 1
# motor_A.move_speed_control(int(speed), sign)
# time.sleep(5)
# ===================MAIN===============================
if __name__ == "__main__":
print("START")
main()
exit()
# =====================END===============================
|
test_websocket.py
|
import asyncio
import functools
import threading
import requests
import pytest
import websockets
from contextlib import contextmanager
from uvicorn.protocols.http import HttpToolsProtocol
class WebSocketResponse:
persist = False
def __init__(self, scope):
self.scope = scope
async def __call__(self, receive, send):
self.send = send
if self.persist:
while True:
message = await receive()
await self.handle(message)
else:
message = await receive()
await self.handle(message)
async def handle(self, message):
message_type = message["type"].replace(".", "_")
handler = getattr(self, message_type)
await handler(message)
def run_loop(loop):
loop.run_forever()
loop.close()
@contextmanager
def run_server(app):
asyncio.set_event_loop(None)
loop = asyncio.new_event_loop()
protocol = functools.partial(HttpToolsProtocol, app=app, loop=loop)
create_server_task = loop.create_server(protocol, host="127.0.0.1")
server = loop.run_until_complete(create_server_task)
url = "ws://127.0.0.1:%d/" % server.sockets[0].getsockname()[1]
try:
# Run the event loop in a new thread.
thread = threading.Thread(target=run_loop, args=[loop])
thread.start()
# Return the contextmanager state.
yield url
finally:
# Close the loop from our main thread.
loop.call_soon_threadsafe(loop.stop)
thread.join()
def test_invalid_upgrade():
app = lambda scope: None
with run_server(app) as url:
url = url.replace("ws://", "http://")
response = requests.get(
url, headers={"upgrade": "websocket", "connection": "upgrade"}, timeout=5
)
assert response.status_code == 403
def test_accept_connection():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
async def open_connection(url):
async with websockets.connect(url) as websocket:
return websocket.open
with run_server(App) as url:
loop = asyncio.new_event_loop()
is_open = loop.run_until_complete(open_connection(url))
assert is_open
loop.close()
def test_send_text_data_to_client():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
await self.send({"type": "websocket.send", "text": "123"})
async def get_data(url):
async with websockets.connect(url) as websocket:
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(get_data(url))
assert data == "123"
loop.close()
def test_send_binary_data_to_client():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
await self.send({"type": "websocket.send", "bytes": b"123"})
async def get_data(url):
async with websockets.connect(url) as websocket:
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(get_data(url))
assert data == b"123"
loop.close()
def test_send_and_close_connection():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.close", "text": "123"})
async def get_data(url):
async with websockets.connect(url) as websocket:
data = await websocket.recv()
is_open = True
try:
await websocket.recv()
except:
is_open = False
return (data, is_open)
with run_server(App) as url:
loop = asyncio.new_event_loop()
(data, is_open) = loop.run_until_complete(get_data(url))
assert data == "123"
assert not is_open
loop.close()
def test_send_text_data_to_server():
class App(WebSocketResponse):
persist = True
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
async def websocket_receive(self, message):
_text = message.get("text")
await self.send({"type": "websocket.send", "text": _text})
async def send_text(url):
async with websockets.connect(url) as websocket:
await websocket.send("abc")
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(send_text(url))
assert data == "abc"
loop.close()
def test_send_binary_data_to_server():
class App(WebSocketResponse):
persist = True
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept"})
async def websocket_receive(self, message):
_bytes = message.get("bytes")
await self.send({"type": "websocket.send", "bytes": _bytes})
async def send_text(url):
async with websockets.connect(url) as websocket:
await websocket.send(b"abc")
return await websocket.recv()
with run_server(App) as url:
loop = asyncio.new_event_loop()
data = loop.run_until_complete(send_text(url))
assert data == b"abc"
loop.close()
def test_send_after_protocol_close():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.close", "text": "123"})
with pytest.raises(Exception):
await self.send({"type": "websocket.send", "text": "1234"})
async def get_data(url):
async with websockets.connect(url) as websocket:
data = await websocket.recv()
is_open = True
try:
await websocket.recv()
except:
is_open = False
return (data, is_open)
with run_server(App) as url:
loop = asyncio.new_event_loop()
(data, is_open) = loop.run_until_complete(get_data(url))
assert data == "123"
assert not is_open
loop.close()
def test_subprotocols():
class App(WebSocketResponse):
async def websocket_connect(self, message):
await self.send({"type": "websocket.accept", "subprotocol": "proto1"})
async def get_subprotocol(url):
async with websockets.connect(
url, subprotocols=["proto1", "proto2"]
) as websocket:
return websocket.subprotocol
with run_server(App) as url:
loop = asyncio.new_event_loop()
subprotocol = loop.run_until_complete(get_subprotocol(url))
assert subprotocol == "proto1"
loop.close()
|
basic02.py
|
"""
多个进程之间的全局变量的共享问题
1- 多进程的执行顺序和线程一样是乱序的
2- 每个进程互相独立,各有自己的一份全局变量,所以worker1中修改全局变量,worker2不受影响
3- 进程之间变量独立不共享,这一点和thread完全不同
"""
import multiprocessing,time
g_num = 0
def worker1(num):
global g_num
print(g_num)
# modify the global variable here
g_num += 99
while True:
time.sleep(1)
print("worker1获取到的g_num=%d" % g_num)
def worker2(num):
while True:
print("worker2获取到的g_num=%d" % g_num)
time.sleep(1)
def main():
pro1 = multiprocessing.Process(target=worker1, args=(0, ))
pro1.start()
pro2 = multiprocessing.Process(target=worker2, args=(1, ))
pro2.start()
# if __name__ == '__main__':
# main()
"""
进程间如果想要共享全局变量的话使用中间商queue,每个进程都访问这个queue
1- queue队列,先进先出, stack栈的话,先进后出
2- 队列的存取原理是:放一个取一个, 取出队列就没有了
3- 如果队列没有东西进行get,或者put放的超过长度,都会阻塞,一个萝卜一个坑
"""
def testQueue():
# create a queue with a capacity of 3
queue = multiprocessing.Queue(3)
# put: add data (without this, the get() below would block forever)
queue.put(1)
# get: read data
queue.get()
testQueue()
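# Minimal sketch (added for illustration; the helper names are assumptions, not part of
# the original file): sharing data between two processes through a multiprocessing.Queue,
# the pattern described in the docstring above.
def _producer(q):
    for i in range(3):
        q.put(i)  # blocks if the queue is full
def _consumer(q):
    for _ in range(3):
        print("consumed", q.get())  # blocks until an item is available
def _queue_demo():
    q = multiprocessing.Queue(3)
    p1 = multiprocessing.Process(target=_producer, args=(q,))
    p2 = multiprocessing.Process(target=_consumer, args=(q,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
# _queue_demo()  # uncomment to run the demo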
|
bot.py
|
import discord
from discord.ext import commands
from settings import Settings
import statistics
import embed_creator
import json
import os
from decouple import config
import time
import threading
def determine_prefixes(bot, message):
return settings.get_prefix(message.guild.id)
settings = Settings()
client = discord.Client()
bot = commands.Bot(command_prefix=determine_prefixes)
bot.remove_command('help')
#################### SETTINGS ####################
@bot.command()
async def setprefix(ctx, *args):
if ctx.message.author.guild_permissions.administrator:
if (len(args) != 1):
prefix = await bot.get_prefix(ctx.message)
await ctx.send("**Invalid command!** Correct usage: `" + prefix + "setprefix {newprefix}`.")
return False
else:
await settings.set_prefix(ctx.message.guild.id, args[0])
prefix = await bot.get_prefix(ctx.message)
await ctx.send(":white_check_mark: Prefix set to **" + prefix + "**")
#################### COMMANDS ####################
async def is_disabled(ctx):
admin = ctx.message.author.guild_permissions.administrator
disabled = await settings.check_disabled_channel(ctx.message.guild.id, ctx.message.channel.id)
return not disabled or admin
@bot.command()
@commands.check(is_disabled)
async def help(ctx, *args):
prefix = await bot.get_prefix(ctx.message)
user_help_string = ":grey_question: HyperStats Help :grey_question:\n\n Prefix: `" + prefix + "`"
if ctx.message.author.guild_permissions.administrator:
user_help_string += "\n\n`" + prefix + "setprefix {prefix}`: Changes the bot prefix **(admin only)**."
user_help_string += "\n`" + prefix + "disablechannel`: Disables the bot in the channel this command is used in for non-administrators **(admin only)**."
user_help_string += "\n`" + prefix + "enablechannel`: Enables the bot in the channel this command is used in for non-administrators **(admin only)**."
user_help_string += "\n`" + prefix + "listdisabledchannels`: Lists all disabled channels **(admin only)**."
user_help_string += "\n\n`" + prefix + "help`: Displays this menu."
user_help_string += "\n`" + prefix + "about`: Displays information about this bot."
user_help_string += "\n`" + prefix + "link {playername} {platform}`: Links your discord account to your in-game account. Once complete, you can use the below commands without any arguments to view your own stats."
user_help_string += "\n`" + prefix + "unlink`: Unlinks your in-game account from your discord account."
user_help_string += "\n\nFor all of the below commands, platform must be either be empty for PC, or one of `PC`, `Xbox` or `PS`. You can also use `" + prefix + "link {playername} {platform}` to use the below commands to view your own stats without any arguments."
user_help_string += "\n\n`" + prefix + "stats {playername} {platform}`: Displays player stats."
user_help_string += "\n`" + prefix + "weapons {playername} {platform}`: Displays weapon stats for a player."
user_help_string += "\n`" + prefix + "hacks {playername} {platform}`: Displays hack stats for a player."
user_help_string += "\n`" + prefix + "best {playername} {platform}`: Displays career best stats for a player (best in one game)."
await ctx.send(user_help_string)
@bot.command()
@commands.check(is_disabled)
async def stats(ctx, *args):
await api_down(ctx)
linked = await statistics.is_linked(settings, ctx, args, "stats")
if linked:
return
valid = await check_stats_commands(ctx, "stats", args)
if valid:
status = await ctx.send(":hourglass: Finding player " + args[0] + "...")
platform = await statistics.determine_platform(status, args)
await statistics.show_statistics(ctx, status, "stats", args[0], platform)
@bot.command()
@commands.check(is_disabled)
async def weapons(ctx, *args):
await api_down(ctx)
linked = await statistics.is_linked(settings, ctx, args, "weapons")
if linked:
return
valid = await check_stats_commands(ctx, "weapons", args)
if valid:
status = await ctx.send(":hourglass: Finding player " + args[0] + "...")
platform = await statistics.determine_platform(status, args)
await statistics.show_statistics(ctx, status, "weapons", args[0], platform)
@bot.command()
@commands.check(is_disabled)
async def best(ctx, *args):
await api_down(ctx)
linked = await statistics.is_linked(settings, ctx, args, "best")
if linked:
return
valid = await check_stats_commands(ctx, "best", args)
if valid:
status = await ctx.send(":hourglass: Finding player " + args[0] + "...")
platform = await statistics.determine_platform(status, args)
await statistics.show_statistics(ctx, status, "best", args[0], platform)
@bot.command()
@commands.check(is_disabled)
async def hacks(ctx, *args):
await api_down(ctx)
linked = await statistics.is_linked(settings, ctx, args, "hacks")
if linked:
return
valid = await check_stats_commands(ctx, "hacks", args)
if valid:
status = await ctx.send(":hourglass: Finding player " + args[0] + "...")
platform = await statistics.determine_platform(status, args)
await statistics.show_statistics(ctx, status, "hacks", args[0], platform)
@bot.command()
async def disablechannel(ctx, *args):
prefix = await bot.get_prefix(ctx.message)
if ctx.message.author.guild_permissions.administrator:
if (len(args) != 0):
await ctx.send("**:stop_sign: Invalid command!** Correct usage: `" + prefix + "disablechannel`. Use this command in the channel you'd like to disable the bot in!")
else:
disable = await settings.disable_channel(ctx.message.guild.id, ctx.message.channel.id)
if disable:
await ctx.send(":white_check_mark: Bot **disabled** in channel for non-administrators.")
else:
await ctx.send(":stop_sign: Bot **already disabled** in channel.")
@bot.command()
@commands.check(is_disabled)
async def about(ctx, *args):
embed = await embed_creator.create_about_embed()
await ctx.send(embed=embed)
@bot.command()
async def enablechannel(ctx, *args):
prefix = await bot.get_prefix(ctx.message)
if ctx.message.author.guild_permissions.administrator:
if (len(args) != 0):
await ctx.send("**:stop_sign: Invalid command!** Correct usage: `" + prefix + "enablechannel`. Use this command in the channel you'd like to enable the bot in!")
else:
enable = await settings.enable_channel(ctx.message.guild.id, ctx.message.channel.id)
if enable:
await ctx.send(":white_check_mark: Bot **enabled** in channel for non-administrators.")
else:
await ctx.send(":stop_sign: Bot **already enabled** in channel.")
@bot.command()
async def listdisabledchannels(ctx, *args):
prefix = await bot.get_prefix(ctx.message)
if ctx.message.author.guild_permissions.administrator:
if (len(args) != 0):
await ctx.send("**:stop_sign: Invalid command!** Correct usage: `" + prefix + "listdisabledchannels`.")
else:
await ctx.send(await settings.get_disabled_channels(ctx.message.guild.id))
@bot.command()
@commands.check(is_disabled)
async def link(ctx, *args):
await api_down(ctx)
prefix = await bot.get_prefix(ctx.message)
already_linked = await settings.get_linked_user(ctx.message.author.id)
if already_linked is not None:
await ctx.send(":stop_sign: This account is already linked to **" + already_linked['p_name'] + "**!")
return False
if (len(args) != 2 and len(args) != 1):
await ctx.send("**:stop_sign: Invalid command!** Correct usage: `" + prefix + "link {playername} {platform}`. Platform must either be `PC`, `Xbox` or `PS` (or blank for PC).")
return False
status = await ctx.send(":hourglass: Attempting to link player " + args[0] + "...")
platform = await statistics.determine_platform(status, args)
id = await statistics.find_player(status, args[0], platform)
if not id:
await status.edit(content=":exclamation: Failed to find player **" + args[0] + "**! Link failed!")
else:
await settings.add_link(ctx.message.author.id, id["p_id"], id["p_name"], id['p_platform'])
await status.edit(content=":white_check_mark: Your Discord account has been linked to **" + id["p_name"] + "**! You can now use the commands without the extra parameters!")
@bot.command()
@commands.check(is_disabled)
async def unlink(ctx, *args):
already_linked = await settings.get_linked_user(ctx.message.author.id)
if already_linked is not None:
await ctx.send(":stop_sign: Unlinked **" + already_linked['p_name'] + "** from your discord account!")
await settings.remove_link(ctx.message.author.id)
return True
else:
await ctx.send(":stop_sign: No account linked!")
return False
#################### HELPER ####################
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=" " + str(len(bot.guilds)) + " servers | $help"))
@bot.event
async def on_guild_join(guild):
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=" " + str(len(bot.guilds)) + " servers | $help"))
async def check_stats_commands(ctx, command, args):
prefix = await bot.get_prefix(ctx.message)
if (len(args) != 1 and len(args) != 2):
await ctx.send("**:stop_sign: Invalid command!** Correct usage: `" + prefix + command + " {playername} {platform}`. Platform must either be `PC`, `Xbox` or `PS` (or blank for PC).")
return False
return True
async def api_down(ctx):
if (config('APIDOWN') == "true"):
await ctx.send(":exclamation: The API may be unavailable, please try again later.")
def listen_for_commands():
while True:
command = input("Enter command: ")
if (command.lower() == "save"):
settings.save_settings()
print("Settings saved!")
if __name__ == "__main__":
token = config('HYPERSTATSTEST')
threading.Thread(target=listen_for_commands).start()
bot.run(token)
|
test_engine_py3k.py
|
import asyncio
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
@config.fixture(
params=[
(rollback, run_second_execute, begin_nested)
for rollback in (True, False)
for run_second_execute in (True, False)
for begin_nested in (True, False)
]
)
def async_trans_ctx_manager_fixture(self, request, metadata):
rollback, run_second_execute, begin_nested = request.param
from sqlalchemy import Table, Column, Integer, func, select
t = Table("test", metadata, Column("data", Integer))
eng = getattr(self, "bind", None) or config.db
t.create(eng)
async def run_test(subject, trans_on_subject, execute_on_subject):
async with subject.begin() as trans:
if begin_nested:
if not config.requirements.savepoints.enabled:
config.skip_test("savepoints not enabled")
if execute_on_subject:
nested_trans = subject.begin_nested()
else:
nested_trans = trans.begin_nested()
async with nested_trans:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
# for nested trans, we always commit/rollback on the
# "nested trans" object itself.
# only Session(future=False) will affect savepoint
# transaction for session.commit/rollback
if rollback:
await nested_trans.rollback()
else:
await nested_trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context manager. Please complete the "
"context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(
t.insert(), {"data": 12}
)
else:
await trans.execute(
t.insert(), {"data": 12}
)
# outside the nested trans block, but still inside the
# transaction block, we can run SQL, and it will be
# committed
if execute_on_subject:
await subject.execute(t.insert(), {"data": 14})
else:
await trans.execute(t.insert(), {"data": 14})
else:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
if trans_on_subject:
if rollback:
await subject.rollback()
else:
await subject.commit()
else:
if rollback:
await trans.rollback()
else:
await trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside "
"context "
"manager. Please complete the context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(t.insert(), {"data": 12})
else:
await trans.execute(t.insert(), {"data": 12})
expected_committed = 0
if begin_nested:
# begin_nested variant, we inserted a row after the nested
# block
expected_committed += 1
if not rollback:
# not rollback variant, our row inserted in the target
# block itself would be committed
expected_committed += 1
if execute_on_subject:
eq_(
await subject.scalar(select(func.count()).select_from(t)),
expected_committed,
)
else:
with subject.connect() as conn:
eq_(
await conn.scalar(select(func.count()).select_from(t)),
expected_committed,
)
return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
__requires__ = ("async_dialect",)
@testing.fixture
def async_engine(self):
return engines.testing_engine(asyncio=True, transfer_staticpool=True)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", Integer, primary_key=True, autoincrement=False),
Column("user_name", String(20)),
)
@classmethod
def insert_data(cls, connection):
users = cls.tables.users
connection.execute(
users.insert(),
[{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
)
class AsyncEngineTest(EngineFixture):
__backend__ = True
@testing.fails("the failure is the test")
@async_test
async def test_we_are_definitely_running_async_tests(self, async_engine):
async with async_engine.connect() as conn:
eq_(await conn.scalar(text("select 1")), 2)
@async_test
async def test_interrupt_ctxmanager_connection(
self, async_engine, async_trans_ctx_manager_fixture
):
fn = async_trans_ctx_manager_fixture
async with async_engine.connect() as conn:
await fn(conn, trans_on_subject=False, execute_on_subject=True)
def test_proxied_attrs_engine(self, async_engine):
sync_engine = async_engine.sync_engine
is_(async_engine.url, sync_engine.url)
is_(async_engine.pool, sync_engine.pool)
is_(async_engine.dialect, sync_engine.dialect)
eq_(async_engine.name, sync_engine.name)
eq_(async_engine.driver, sync_engine.driver)
eq_(async_engine.echo, sync_engine.echo)
@async_test
async def test_engine_eq_ne(self, async_engine):
e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
e3 = testing.engines.testing_engine(
asyncio=True, transfer_staticpool=True
)
eq_(async_engine, e2)
ne_(async_engine, e3)
is_false(async_engine == None)
@async_test
async def test_no_attach_to_event_loop(self, testing_engine):
"""test #6409"""
import asyncio
import threading
errs = []
def go():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def main():
tasks = [task() for _ in range(2)]
await asyncio.gather(*tasks)
await engine.dispose()
async def task():
async with engine.begin() as connection:
result = await connection.execute(select(1))
result.all()
try:
engine = testing_engine(
asyncio=True, transfer_staticpool=False
)
asyncio.run(main())
except Exception as err:
errs.append(err)
t = threading.Thread(target=go)
t.start()
t.join()
if errs:
raise errs[0]
@async_test
async def test_connection_info(self, async_engine):
async with async_engine.connect() as conn:
conn.info["foo"] = "bar"
eq_(conn.sync_connection.info, {"foo": "bar"})
@async_test
async def test_connection_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
c2 = _async_engine.AsyncConnection(
async_engine, conn.sync_connection
)
eq_(conn, c2)
async with async_engine.connect() as c3:
ne_(conn, c3)
is_false(conn == None)
@async_test
async def test_transaction_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
t1 = await conn.begin()
t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
t1._proxied
)
eq_(t1, t2)
is_false(t1 == None)
def test_clear_compiled_cache(self, async_engine):
async_engine.sync_engine._compiled_cache["foo"] = "bar"
eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
async_engine.clear_compiled_cache()
assert "foo" not in async_engine.sync_engine._compiled_cache
def test_execution_options(self, async_engine):
a2 = async_engine.execution_options(foo="bar")
assert isinstance(a2, _async_engine.AsyncEngine)
eq_(a2.sync_engine._execution_options, {"foo": "bar"})
eq_(async_engine.sync_engine._execution_options, {})
"""
attr uri, pool, dialect, engine, name, driver, echo
methods clear_compiled_cache, update_execution_options,
execution_options, get_execution_options, dispose
"""
@async_test
async def test_proxied_attrs_connection(self, async_engine):
conn = await async_engine.connect()
sync_conn = conn.sync_connection
is_(conn.engine, async_engine)
is_(conn.closed, sync_conn.closed)
is_(conn.dialect, async_engine.sync_engine.dialect)
eq_(conn.default_isolation_level, sync_conn.default_isolation_level)
@async_test
async def test_transaction_accessor(self, async_engine):
async with async_engine.connect() as conn:
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
trans = await conn.begin()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
is_(
trans.sync_transaction, conn.get_transaction().sync_transaction
)
nested = await conn.begin_nested()
is_true(conn.in_transaction())
is_true(conn.in_nested_transaction())
is_(
conn.get_nested_transaction().sync_transaction,
nested.sync_transaction,
)
eq_(conn.get_nested_transaction(), nested)
is_(
trans.sync_transaction, conn.get_transaction().sync_transaction
)
await nested.commit()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
await trans.rollback()
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
@testing.requires.queue_pool
@async_test
async def test_invalidate(self, async_engine):
conn = await async_engine.connect()
is_(conn.invalidated, False)
connection_fairy = await conn.get_raw_connection()
is_(connection_fairy.is_valid, True)
dbapi_connection = connection_fairy.dbapi_connection
await conn.invalidate()
if testing.against("postgresql+asyncpg"):
assert dbapi_connection._connection.is_closed()
new_fairy = await conn.get_raw_connection()
is_not(new_fairy.dbapi_connection, dbapi_connection)
is_not(new_fairy, connection_fairy)
is_(new_fairy.is_valid, True)
is_(connection_fairy.is_valid, False)
@async_test
async def test_get_dbapi_connection_raise(self, async_engine):
conn = await async_engine.connect()
with testing.expect_raises_message(
exc.InvalidRequestError,
"AsyncConnection.connection accessor is not "
"implemented as the attribute",
):
conn.connection
@async_test
async def test_get_raw_connection(self, async_engine):
conn = await async_engine.connect()
pooled = await conn.get_raw_connection()
is_(pooled, conn.sync_connection.connection)
@async_test
async def test_isolation_level(self, async_engine):
conn = await async_engine.connect()
sync_isolation_level = await greenlet_spawn(
conn.sync_connection.get_isolation_level
)
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, sync_isolation_level)
await conn.execution_options(isolation_level="SERIALIZABLE")
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, "SERIALIZABLE")
await conn.close()
@testing.requires.queue_pool
@async_test
async def test_dispose(self, async_engine):
c1 = await async_engine.connect()
c2 = await async_engine.connect()
await c1.close()
await c2.close()
p1 = async_engine.pool
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 2)
await async_engine.dispose()
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 0)
is_not(p1, async_engine.pool)
@testing.requires.independent_connections
@async_test
async def test_init_once_concurrency(self, async_engine):
c1 = async_engine.connect()
c2 = async_engine.connect()
await asyncio.wait([c1, c2])
@async_test
async def test_connect_ctxmanager(self, async_engine):
async with async_engine.connect() as conn:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
@async_test
async def test_connect_plain(self, async_engine):
conn = await async_engine.connect()
try:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
finally:
await conn.close()
@async_test
async def test_connection_not_started(self, async_engine):
conn = async_engine.connect()
testing.assert_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncConnection context has not been started and "
"object has not been awaited.",
conn.begin,
)
@async_test
async def test_transaction_commit(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
await conn.execute(delete(users))
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_savepoint_rollback_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_savepoint_commit_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.commit()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_transaction_rollback(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
trans = conn.begin()
await trans.start()
await conn.execute(delete(users))
await trans.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_conn_transaction_not_started(self, async_engine):
async with async_engine.connect() as conn:
trans = conn.begin()
with expect_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncTransaction context has not been started "
"and object has not been awaited.",
):
await trans.rollback()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_some_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0.1,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_no_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@async_test
async def test_create_async_engine_server_side_cursor(self, async_engine):
testing.assert_raises_message(
asyncio_exc.AsyncMethodRequired,
"Can't set server_side_cursors for async engine globally",
create_async_engine,
testing.db.url,
server_side_cursors=True,
)
class AsyncEventTest(EngineFixture):
"""The engine events all run in their normal synchronous context.
we do not provide an asyncio event interface at this time.
"""
__backend__ = True
@async_test
async def test_no_async_listeners(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "before_cursor_execute", mock.Mock())
conn = await async_engine.connect()
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(conn, "before_cursor_execute", mock.Mock())
@async_test
async def test_sync_before_cursor_execute_engine(self, async_engine):
canary = mock.Mock()
event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[
mock.call(
sync_conn, mock.ANY, "select 1", mock.ANY, mock.ANY, False
)
],
)
@async_test
async def test_sync_before_cursor_execute_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
event.listen(
async_engine.sync_engine, "before_cursor_execute", canary
)
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[
mock.call(
sync_conn, mock.ANY, "select 1", mock.ANY, mock.ANY, False
)
],
)
@async_test
async def test_event_on_sync_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
event.listen(conn.sync_connection, "begin", canary)
async with conn.begin():
eq_(
canary.mock_calls,
[mock.call(conn.sync_connection)],
)
class AsyncInspection(EngineFixture):
__backend__ = True
@async_test
async def test_inspect_engine(self, async_engine):
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncEngine is currently not supported.",
):
inspect(async_engine)
@async_test
async def test_inspect_connection(self, async_engine):
async with async_engine.connect() as conn:
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncConnection is currently not supported.",
):
inspect(conn)
class AsyncResultTest(EngineFixture):
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_all(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
all_ = await result.all()
if filter_ == "mappings":
eq_(
all_,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
all_,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_aiter(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
rows = []
async for row in result:
rows.append(row)
if filter_ == "mappings":
eq_(
rows,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
rows,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations((None,), ("mappings",), argnames="filter_")
@async_test
async def test_keys(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
eq_(result.keys(), ["user_id", "user_name"])
await result.close()
@async_test
async def test_unique_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
union_all(select(users), select(users)).order_by(
users.c.user_id
)
)
all_ = await result.unique().all()
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@async_test
async def test_columns_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
all_ = await result.columns(1).all()
eq_(all_, [("name%d" % i,) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_partitions(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
check_result = []
async for partition in result.partitions(5):
check_result.append(partition)
if filter_ == "mappings":
eq_(
check_result,
[
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(a, b)
]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
elif filter_ == "scalars":
eq_(
check_result,
[
["name%d" % i for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
else:
eq_(
check_result,
[
[(i, "name%d" % i) for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_one_success(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).limit(1).order_by(users.c.user_name)
)
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars()
u1 = await result.one()
if filter_ == "mappings":
eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
elif filter_ == "scalars":
eq_(u1, 1)
else:
eq_(u1, (1, "name%d" % 1))
@async_test
async def test_one_no_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name == "nonexistent")
)
with expect_raises_message(
exc.NoResultFound, "No row was found when one was required"
):
await result.one()
@async_test
async def test_one_multi_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name.in_(["name3", "name5"]))
)
with expect_raises_message(
exc.MultipleResultsFound,
"Multiple rows were found when exactly one was required",
):
await result.one()
@testing.combinations(
("scalars",), ("stream_scalars",), argnames="filter_"
)
@async_test
async def test_scalars(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
if filter_ == "scalars":
result = (await conn.scalars(select(users))).all()
elif filter_ == "stream_scalars":
result = await (await conn.stream_scalars(select(users))).all()
eq_(result, list(range(1, 20)))
class TextSyncDBAPI(fixtures.TestBase):
def test_sync_dbapi_raises(self):
with expect_raises_message(
exc.InvalidRequestError,
"The asyncio extension requires an async driver to be used.",
):
create_async_engine("sqlite:///:memory:")
@testing.fixture
def async_engine(self):
engine = create_engine("sqlite:///:memory:", future=True)
engine.dialect.is_async = True
return _async_engine.AsyncEngine(engine)
@async_test
@combinations(
lambda conn: conn.exec_driver_sql("select 1"),
lambda conn: conn.stream(text("select 1")),
lambda conn: conn.execute(text("select 1")),
argnames="case",
)
async def test_sync_driver_execution(self, async_engine, case):
with expect_raises_message(
exc.AwaitRequired,
"The current operation required an async execution but none was",
):
async with async_engine.connect() as conn:
await case(conn)
@async_test
async def test_sync_driver_run_sync(self, async_engine):
async with async_engine.connect() as conn:
res = await conn.run_sync(
lambda conn: conn.scalar(text("select 1"))
)
assert res == 1
assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
@async_test
async def test_get_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
is_(trans.connection, conn)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_nested_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
n1 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n1)
n2 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n2)
await n2.commit()
is_(conn.get_nested_transaction(), n1)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_connection(self, async_engine):
async with async_engine.connect() as conn:
is_(
AsyncConnection._retrieve_proxy_for_target(
conn.sync_connection
),
conn,
)
def test_regenerate_connection(self, connection):
async_connection = AsyncConnection._retrieve_proxy_for_target(
connection
)
a2 = AsyncConnection._retrieve_proxy_for_target(connection)
is_(async_connection, a2)
is_not(async_connection, None)
is_(async_connection.engine, a2.engine)
is_not(async_connection.engine, None)
@testing.requires.predictable_gc
@async_test
async def test_gc_engine(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
eq_(len(ReversibleProxy._proxy_objects), 0)
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
@testing.requires.predictable_gc
@async_test
async def test_gc_conn(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
async with async_engine.connect() as conn:
eq_(len(ReversibleProxy._proxy_objects), 2)
async with conn.begin() as trans:
eq_(len(ReversibleProxy._proxy_objects), 3)
del trans
del conn
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
def test_regen_conn_but_not_engine(self, async_engine):
sync_conn = async_engine.sync_engine.connect()
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)
is_(async_conn, async_conn2)
is_(async_conn.engine, async_engine)
def test_regen_trans_but_not_conn(self, async_engine):
sync_conn = async_engine.sync_engine.connect()
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
trans = sync_conn.begin()
async_t1 = async_conn.get_transaction()
is_(async_t1.connection, async_conn)
is_(async_t1.sync_transaction, trans)
async_t2 = async_conn.get_transaction()
is_(async_t1, async_t2)
|
collectd_extractor.py
|
# -*- coding: utf-8 -*-
import json
import logging
import threading
import collectd_metrics
from time import sleep
from config import cfg
from confluent_kafka import Producer, Consumer
from influxdb import InfluxDBClient
format_str = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=format_str)
logger = logging.getLogger(__name__)
collectd_cfg = cfg['collectd']
influxdb_cfg = cfg['influxdb']
def delivery_report(err, msg):
""" Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
logger.error('Message delivery failed: {}'.format(err))
def extract(message):
data = json.loads(message.value().decode('utf-8'))
data = data[0]
return collectd_metrics.get_measurements(data)
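# Note: each item returned by collectd_metrics.get_measurements() is consumed
# below in main() as a (name, value, timestamp) tuple: item[0] is the
# measurement name, item[1] its value and item[2] the collectd timestamp in
# milliseconds.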
def main():
# kafka
consumer = Consumer(collectd_cfg['consumer'])
consumer.subscribe([collectd_cfg['raw_data_topic']])
producer = Producer(collectd_cfg['producer'])
# Trigger any available delivery report callbacks from previous produce() calls
# see: https://github.com/confluentinc/confluent-kafka-python/issues/16
producer.poll(0)
# influxdb
influxdb_client = InfluxDBClient(host=influxdb_cfg['server'],
database=influxdb_cfg['database'])
influxdb_client.create_database(influxdb_cfg['database'])
influxdb_client.create_retention_policy(name="infinite",
duration='INF',
replication=1,
database=influxdb_cfg['database'],
default=True
)
influxdb_data_points = []
influxdb_to_send = threading.Event()
influxdb_to_stop = threading.Event()
influxdb_to_stop.clear()
influxdb_to_send.clear()
def set_influxdb_to_send():
while not influxdb_to_stop.is_set():
sleep(0.5)
influxdb_to_send.set()
influxdb_flag_thread = threading.Thread(target=set_influxdb_to_send)
influxdb_flag_thread.start()
logger.info("Start processing collectd data ...")
try:
while True:
msg = consumer.poll(1.0)
if msg is None:
continue
if msg.error():
logger.error("Consumer error: {}".format(msg.error()))
continue
measurements = extract(msg)
# Send extracted data to kafka topics
# Asynchronously produce a message, the delivery report callback
# will be triggered from poll() above, or flush() below, when the message has
# been successfully delivered or failed permanently.
for item in measurements:
producer.produce(topic='collectd',
value=str({item[0]: item[1]}),
timestamp=item[2],
callback=delivery_report)
producer.poll(0)
# Send extracted data to influxdb in batches (flushed whenever the timer
# thread sets the event, roughly every 0.5s)
for item in measurements:
influxdb_data_points.append({"measurement": item[0],
# timestamp from ms in collectd to ns in influxdb
"time": int(item[2]) * 10**6,
"fields": {
"value": item[1],
}
})
if influxdb_to_send.is_set():
influxdb_client.write_points(influxdb_data_points)
influxdb_to_send.clear()
influxdb_data_points = []
except KeyboardInterrupt:
# Wait for any outstanding messages to be delivered and delivery report
# callbacks to be triggered.
producer.flush()
consumer.close()
influxdb_to_stop.set()
if __name__ == '__main__':
main()
|
ip_lib.py
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import re
import threading
import time
import eventlet
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from pyroute2.netlink import exceptions \
as netlink_exceptions # pylint: disable=no-name-in-module
from pyroute2.netlink import rtnl # pylint: disable=no-name-in-module
from pyroute2.netlink.rtnl import \
ifaddrmsg # pylint: disable=no-name-in-module
from pyroute2.netlink.rtnl import ifinfmsg # pylint: disable=no-name-in-module
from pyroute2 import netns # pylint: disable=no-name-in-module
from neutron._i18n import _
from neutron.agent.common import utils
from neutron.common import utils as common_utils
from neutron.privileged.agent.linux import ip_lib as privileged
LOG = logging.getLogger(__name__)
IP_NONLOCAL_BIND = 'net.ipv4.ip_nonlocal_bind'
LOOPBACK_DEVNAME = 'lo'
FB_TUNNEL_DEVICE_NAMES = ['gre0', 'gretap0', 'tunl0', 'erspan0', 'sit0',
'ip6tnl0', 'ip6gre0']
IP_RULE_TABLES = {'default': 253,
'main': 254,
'local': 255}
IP_RULE_TABLES_NAMES = {v: k for k, v in IP_RULE_TABLES.items()}
# Rule indexes: pyroute2.netlink.rtnl
# Rule names: https://www.systutorials.com/docs/linux/man/8-ip-rule/
# NOTE(ralonsoh): 'masquerade' type is printed as 'nat' in 'ip rule' command
IP_RULE_TYPES = {0: 'unspecified',
1: 'unicast',
6: 'blackhole',
7: 'unreachable',
8: 'prohibit',
10: 'nat'}
IP_ADDRESS_SCOPE = {rtnl.rtscopes['RT_SCOPE_UNIVERSE']: 'global',
rtnl.rtscopes['RT_SCOPE_SITE']: 'site',
rtnl.rtscopes['RT_SCOPE_LINK']: 'link',
rtnl.rtscopes['RT_SCOPE_HOST']: 'host'}
IP_ADDRESS_SCOPE_NAME = {v: k for k, v in IP_ADDRESS_SCOPE.items()}
IP_ADDRESS_EVENTS = {'RTM_NEWADDR': 'added',
'RTM_DELADDR': 'removed'}
SYS_NET_PATH = '/sys/class/net'
DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
METRIC_PATTERN = re.compile(r"metric (\S+)")
DEVICE_NAME_PATTERN = re.compile(r"(\d+?): (\S+?):.*")
# NOTE: no metric is interpreted by the kernel as having the highest priority
# (value 0). "ip route" uses the netlink API to communicate with the kernel. In
# IPv6, when the metric value is not set, it defaults to 1024:
# https://access.redhat.com/solutions/3659171
IP_ROUTE_METRIC_DEFAULT = {constants.IP_VERSION_4: 0,
constants.IP_VERSION_6: 1024}
def remove_interface_suffix(interface):
"""Remove a possible "<if>@<endpoint>" suffix from an interface' name.
This suffix can appear in some kernel versions, and intends on specifying,
for example, a veth's pair. However, this interface name is useless to us
as further 'ip' commands require that the suffix be removed.
"""
# If '@' is not present, this will do nothing.
return interface.partition("@")[0]
class AddressNotReady(exceptions.NeutronException):
message = _("Failure waiting for address %(address)s to "
"become ready: %(reason)s")
InvalidArgument = privileged.InvalidArgument
class SubProcessBase(object):
def __init__(self, namespace=None,
log_fail_as_error=True):
self.namespace = namespace
self.log_fail_as_error = log_fail_as_error
try:
self.force_root = cfg.CONF.ip_lib_force_root
except cfg.NoSuchOptError:
# Only callers that need to force use of the root helper
# need to register the option.
self.force_root = False
def _run(self, options, command, args):
if self.namespace:
return self._as_root(options, command, args)
elif self.force_root:
# Force use of the root helper to ensure that commands
# will execute in dom0 when running under XenServer/XCP.
return self._execute(options, command, args, run_as_root=True)
else:
return self._execute(options, command, args)
def _as_root(self, options, command, args, use_root_namespace=False):
namespace = self.namespace if not use_root_namespace else None
return self._execute(options, command, args, run_as_root=True,
namespace=namespace)
def _execute(self, options, command, args, run_as_root=False,
namespace=None):
opt_list = ['-%s' % o for o in options]
ip_cmd = add_namespace_to_cmd(['ip'], namespace)
cmd = ip_cmd + opt_list + [command] + list(args)
return utils.execute(cmd, run_as_root=run_as_root, privsep_exec=True,
log_fail_as_error=self.log_fail_as_error)
def set_log_fail_as_error(self, fail_with_error):
self.log_fail_as_error = fail_with_error
def get_log_fail_as_error(self):
return self.log_fail_as_error
class IPWrapper(SubProcessBase):
def __init__(self, namespace=None):
super(IPWrapper, self).__init__(namespace=namespace)
self.netns = IpNetnsCommand(self)
def device(self, name):
return IPDevice(name, namespace=self.namespace)
def get_devices_info(self, exclude_loopback=True,
exclude_fb_tun_devices=True):
devices = get_devices_info(self.namespace)
retval = []
for device in devices:
if (exclude_loopback and device['name'] == LOOPBACK_DEVNAME or
exclude_fb_tun_devices and
device['name'] in FB_TUNNEL_DEVICE_NAMES):
continue
retval.append(device)
return retval
def get_devices(self, exclude_loopback=True, exclude_fb_tun_devices=True):
retval = []
try:
devices = privileged.get_device_names(self.namespace)
except privileged.NetworkNamespaceNotFound:
return retval
for name in devices:
if (exclude_loopback and name == LOOPBACK_DEVNAME or
exclude_fb_tun_devices and name in FB_TUNNEL_DEVICE_NAMES):
continue
retval.append(IPDevice(name, namespace=self.namespace))
return retval
def get_device_by_ip(self, ip):
"""Get the IPDevice from system which has ip configured.
@param ip: look for the device holding this ip. If this is None,
None is returned.
@type ip: str.
"""
if not ip:
return None
cidr = common_utils.ip_to_cidr(ip)
kwargs = {'address': common_utils.cidr_to_ip(cidr)}
if not common_utils.is_cidr_host(cidr):
kwargs['mask'] = common_utils.cidr_mask_length(cidr)
devices = get_devices_with_ip(self.namespace, **kwargs)
if not devices:
# Search by broadcast address.
broadcast = common_utils.cidr_broadcast_address(cidr)
if broadcast:
devices = get_devices_with_ip(self.namespace,
broadcast=broadcast)
if devices:
return IPDevice(devices[0]['name'], namespace=self.namespace)
def add_tuntap(self, name, mode='tap'):
privileged.create_interface(
name, self.namespace, "tuntap", mode=mode)
return IPDevice(name, namespace=self.namespace)
def add_veth(self, name1, name2, namespace2=None):
peer = {'ifname': name2}
if namespace2 is None:
namespace2 = self.namespace
else:
self.ensure_namespace(namespace2)
peer['net_ns_fd'] = namespace2
privileged.create_interface(
name1, self.namespace, 'veth', peer=peer)
return (IPDevice(name1, namespace=self.namespace),
IPDevice(name2, namespace=namespace2))
def add_macvtap(self, name, src_dev, mode='bridge'):
privileged.create_interface(name,
self.namespace,
"macvtap",
physical_interface=src_dev,
mode=mode)
return IPDevice(name, namespace=self.namespace)
def del_veth(self, name):
"""Delete a virtual interface between two namespaces."""
privileged.delete_interface(name, self.namespace)
def add_dummy(self, name):
"""Create a Linux dummy interface with the given name."""
privileged.create_interface(name, self.namespace, "dummy")
return IPDevice(name, namespace=self.namespace)
def ensure_namespace(self, name):
if not self.netns.exists(name):
ip = self.netns.add(name)
lo = ip.device(LOOPBACK_DEVNAME)
lo.link.set_up()
else:
ip = IPWrapper(namespace=name)
return ip
def namespace_is_empty(self):
return not self.get_devices()
def garbage_collect_namespace(self):
"""Conditionally destroy the namespace if it is empty."""
if self.namespace and self.netns.exists(self.namespace):
if self.namespace_is_empty():
self.netns.delete(self.namespace)
return True
return False
def add_device_to_namespace(self, device):
if self.namespace:
device.link.set_netns(self.namespace)
def add_vlan(self, name, physical_interface, vlan_id):
privileged.create_interface(name,
self.namespace,
"vlan",
physical_interface=physical_interface,
vlan_id=vlan_id)
return IPDevice(name, namespace=self.namespace)
def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
local=None, srcport=None, dstport=None, proxy=False):
kwargs = {'vxlan_id': vni}
if group:
kwargs['vxlan_group'] = group
if dev:
kwargs['physical_interface'] = dev
if ttl:
kwargs['vxlan_ttl'] = ttl
if tos:
kwargs['vxlan_tos'] = tos
if local:
kwargs['vxlan_local'] = local
if proxy:
kwargs['vxlan_proxy'] = proxy
# tuple: min,max
if srcport:
if len(srcport) == 2 and srcport[0] <= srcport[1]:
kwargs['vxlan_port_range'] = (str(srcport[0]), str(srcport[1]))
else:
raise exceptions.NetworkVxlanPortRangeError(
vxlan_range=srcport)
if dstport:
kwargs['vxlan_port'] = dstport
privileged.create_interface(name, self.namespace, "vxlan", **kwargs)
return (IPDevice(name, namespace=self.namespace))
class IPDevice(SubProcessBase):
def __init__(self, name, namespace=None, kind='link'):
super(IPDevice, self).__init__(namespace=namespace)
self._name = name
self.kind = kind
self.link = IpLinkCommand(self)
self.addr = IpAddrCommand(self)
self.route = IpRouteCommand(self)
self.neigh = IpNeighCommand(self)
def __eq__(self, other):
return (other is not None and self.name == other.name and
self.namespace == other.namespace)
def __str__(self):
return self.name
def __repr__(self):
return "<IPDevice(name=%s, namespace=%s)>" % (self._name,
self.namespace)
def exists(self):
"""Return True if the device exists in the namespace."""
return privileged.interface_exists(self.name, self.namespace)
def delete_addr_and_conntrack_state(self, cidr):
"""Delete an address along with its conntrack state
This terminates any active connections through an IP.
:param cidr: the IP address for which state should be removed.
This can be passed as a string with or without /NN.
A netaddr.IPAddress or netaddr.Network representing the IP address
can also be passed.
"""
self.addr.delete(cidr)
self.delete_conntrack_state(cidr)
def delete_conntrack_state(self, cidr):
"""Delete conntrack state rules
Deletes both rules (if existing), the destination and the reply one.
"""
ip_str = str(netaddr.IPNetwork(cidr).ip)
ip_wrapper = IPWrapper(namespace=self.namespace)
# Delete conntrack state for ingress traffic
# If 0 flow entries have been deleted
# conntrack -D will return 1
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
check_exit_code=True,
extra_ok_codes=[1], privsep_exec=True)
except RuntimeError:
LOG.exception("Failed deleting ingress connection state of"
" floatingip %s", ip_str)
# Delete conntrack state for egress traffic
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
check_exit_code=True,
extra_ok_codes=[1], privsep_exec=True)
except RuntimeError:
LOG.exception("Failed deleting egress connection state of"
" floatingip %s", ip_str)
def delete_socket_conntrack_state(self, cidr, dport, protocol):
ip_str = str(netaddr.IPNetwork(cidr).ip)
ip_wrapper = IPWrapper(namespace=self.namespace)
cmd = ["conntrack", "-D", "-d", ip_str, '-p', protocol,
'--dport', dport]
try:
ip_wrapper.netns.execute(cmd, check_exit_code=True,
extra_ok_codes=[1], privsep_exec=True)
except RuntimeError:
LOG.exception("Failed deleting ingress connection state of "
"socket %(ip)s:%(port)s", {'ip': ip_str,
'port': dport})
def disable_ipv6(self):
if not netutils.is_ipv6_enabled():
return
sysctl_name = re.sub(r'\.', '/', self.name)
cmd = ['net.ipv6.conf.%s.disable_ipv6=1' % sysctl_name]
return sysctl(cmd, namespace=self.namespace)
@property
def name(self):
if self._name:
return self._name[:constants.DEVICE_NAME_MAX_LEN]
return self._name
@name.setter
def name(self, name):
self._name = name
class IpDeviceCommandBase(object):
def __init__(self, parent):
self._parent = parent
@property
def name(self):
return self._parent.name
@property
def kind(self):
return self._parent.kind
class IpLinkCommand(IpDeviceCommandBase):
def set_address(self, mac_address):
privileged.set_link_attribute(
self.name, self._parent.namespace, address=mac_address)
def set_allmulticast_on(self):
privileged.set_link_flags(
self.name, self._parent.namespace, ifinfmsg.IFF_ALLMULTI)
def set_mtu(self, mtu_size):
privileged.set_link_attribute(
self.name, self._parent.namespace, mtu=mtu_size)
def set_up(self):
privileged.set_link_attribute(
self.name, self._parent.namespace, state='up')
def set_down(self):
privileged.set_link_attribute(
self.name, self._parent.namespace, state='down')
def set_netns(self, namespace):
privileged.set_link_attribute(
self.name, self._parent.namespace, net_ns_fd=namespace)
self._parent.namespace = namespace
def set_name(self, name):
privileged.set_link_attribute(
self.name, self._parent.namespace, ifname=name)
self._parent.name = name
def set_alias(self, alias_name):
privileged.set_link_attribute(
self.name, self._parent.namespace, ifalias=alias_name)
def create(self):
privileged.create_interface(self.name, self._parent.namespace,
self.kind)
def delete(self):
privileged.delete_interface(self.name, self._parent.namespace)
@property
def address(self):
return self.attributes.get('link/ether')
@property
def state(self):
return self.attributes.get('state')
@property
def allmulticast(self):
return self.attributes.get('allmulticast')
@property
def mtu(self):
return self.attributes.get('mtu')
@property
def qdisc(self):
return self.attributes.get('qdisc')
@property
def qlen(self):
return self.attributes.get('qlen')
@property
def alias(self):
return self.attributes.get('alias')
@property
def link_kind(self):
return self.attributes.get('link_kind')
@property
def attributes(self):
return privileged.get_link_attributes(self.name,
self._parent.namespace)
@property
def exists(self):
return privileged.interface_exists(self.name, self._parent.namespace)
def get_vfs(self):
return privileged.get_link_vfs(self.name, self._parent.namespace)
def set_vf_feature(self, vf_config):
return privileged.set_link_vf_feature(
self.name, self._parent.namespace, vf_config)
class IpAddrCommand(IpDeviceCommandBase):
def add(self, cidr, scope='global', add_broadcast=True):
add_ip_address(cidr, self.name, self._parent.namespace, scope,
add_broadcast)
def delete(self, cidr):
delete_ip_address(cidr, self.name, self._parent.namespace)
def flush(self, ip_version):
flush_ip_addresses(ip_version, self.name, self._parent.namespace)
def list(self, scope=None, to=None, filters=None, ip_version=None):
"""Get device details of a device named <self.name>."""
def filter_device(device, filters):
# Accepted filters: dynamic, permanent, tentative, dadfailed.
for filter in filters:
if filter == 'permanent' and device['dynamic']:
return False
elif not device[filter]:
return False
return True
kwargs = {}
if to:
cidr = common_utils.ip_to_cidr(to)
kwargs = {'address': common_utils.cidr_to_ip(cidr)}
if not common_utils.is_cidr_host(cidr):
kwargs['mask'] = common_utils.cidr_mask_length(cidr)
if scope:
kwargs['scope'] = scope
if ip_version:
kwargs['family'] = common_utils.get_socket_address_family(
ip_version)
devices = get_devices_with_ip(self._parent.namespace, name=self.name,
**kwargs)
if not filters:
return devices
filtered_devices = []
for device in (device for device in devices
if filter_device(device, filters)):
filtered_devices.append(device)
return filtered_devices
def wait_until_address_ready(self, address, wait_time=30):
"""Wait until an address is no longer marked 'tentative'
raises AddressNotReady if times out or address not present on interface
"""
def is_address_ready():
try:
addr_info = self.list(to=address)[0]
except IndexError:
raise AddressNotReady(
address=address,
reason=_('Address not present on interface'))
if not addr_info['tentative']:
return True
if addr_info['dadfailed']:
raise AddressNotReady(
address=address, reason=_('Duplicate address detected'))
return False
errmsg = _("Exceeded %s second limit waiting for "
"address to leave the tentative state.") % wait_time
common_utils.wait_until_true(
is_address_ready, timeout=wait_time, sleep=0.20,
exception=AddressNotReady(address=address, reason=errmsg))
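# Illustrative usage sketch (hypothetical device, namespace and address):
#
#     dev = IPDevice('tap-example', namespace='qdhcp-example')
#     dev.addr.wait_until_address_ready('2001:db8::1', wait_time=10)
#
# AddressNotReady is raised if the address stays tentative past the timeout,
# fails duplicate address detection, or is missing from the interface.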
class IpRouteCommand(IpDeviceCommandBase):
def __init__(self, parent, table=None):
super(IpRouteCommand, self).__init__(parent)
self._table = table
def add_gateway(self, gateway, metric=None, table=None, scope='global'):
self.add_route(None, via=gateway, table=table, metric=metric,
scope=scope)
def delete_gateway(self, gateway, table=None, scope=None):
self.delete_route(None, device=self.name, via=gateway, table=table,
scope=scope)
def list_routes(self, ip_version, scope=None, via=None, table=None,
**kwargs):
table = table or self._table
return list_ip_routes(self._parent.namespace, ip_version, scope=scope,
via=via, table=table, device=self.name, **kwargs)
def list_onlink_routes(self, ip_version):
routes = self.list_routes(ip_version, scope='link')
return [r for r in routes if not r['source_prefix']]
def add_onlink_route(self, cidr):
self.add_route(cidr, scope='link')
def delete_onlink_route(self, cidr):
self.delete_route(cidr, device=self.name, scope='link')
def get_gateway(self, scope=None, table=None,
ip_version=constants.IP_VERSION_4):
routes = self.list_routes(ip_version, scope=scope, table=table)
for route in routes:
if route['via'] and route['cidr'] in constants.IP_ANY.values():
return route
def flush(self, ip_version, table=None, **kwargs):
for route in self.list_routes(ip_version, table=table):
self.delete_route(route['cidr'], device=route['device'],
via=route['via'], table=table, **kwargs)
def add_route(self, cidr, via=None, table=None, metric=None, scope=None,
**kwargs):
table = table or self._table
add_ip_route(self._parent.namespace, cidr, device=self.name, via=via,
table=table, metric=metric, scope=scope, **kwargs)
def delete_route(self, cidr, device=None, via=None, table=None, scope=None,
**kwargs):
table = table or self._table
delete_ip_route(self._parent.namespace, cidr, device=device, via=via,
table=table, scope=scope, **kwargs)
class IPRoute(SubProcessBase):
def __init__(self, namespace=None, table=None):
super(IPRoute, self).__init__(namespace=namespace)
self.name = None
self.route = IpRouteCommand(self, table=table)
class IpNeighCommand(IpDeviceCommandBase):
def add(self, ip_address, mac_address, nud_state=None, **kwargs):
add_neigh_entry(ip_address,
mac_address,
self.name,
namespace=self._parent.namespace,
nud_state=nud_state,
**kwargs)
def delete(self, ip_address, mac_address, **kwargs):
delete_neigh_entry(ip_address,
mac_address,
self.name,
self._parent.namespace,
**kwargs)
def dump(self, ip_version, **kwargs):
return dump_neigh_entries(ip_version,
self.name,
self._parent.namespace,
**kwargs)
def flush(self, ip_version, ip_address):
"""Flush neighbour entries
Given address entry is removed from neighbour cache (ARP or NDP). To
flush all entries pass string 'all' as an address.
From https://man.archlinux.org/man/core/iproute2/ip-neighbour.8.en:
"the default neighbour states to be flushed do not include permanent
and noarp".
:param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
:param ip_address: The prefix selecting the neighbours to flush or
"all"
"""
cidr = netaddr.IPNetwork(ip_address) if ip_address != 'all' else None
for entry in self.dump(ip_version):
if entry['state'] in ('permanent', 'noarp'):
continue
if ip_address == 'all' or entry['dst'] in cidr:
self.delete(entry['dst'], entry['lladdr'])
class IpNetnsCommand(object):
def __init__(self, parent):
self._parent = parent
def add(self, name):
create_network_namespace(name)
wrapper = IPWrapper(namespace=name)
wrapper.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.promote_secondaries=1'],
privsep_exec=True)
return wrapper
def delete(self, name):
delete_network_namespace(name)
def execute(self, cmds, addl_env=None, check_exit_code=True,
log_fail_as_error=True, extra_ok_codes=None,
run_as_root=False, privsep_exec=False):
ns_params = []
if self._parent.namespace:
run_as_root = True
ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
env_params = []
if addl_env:
env_params = (['env'] +
['%s=%s' % pair for pair in addl_env.items()])
cmd = ns_params + env_params + list(cmds)
return utils.execute(cmd, check_exit_code=check_exit_code,
extra_ok_codes=extra_ok_codes,
log_fail_as_error=log_fail_as_error,
run_as_root=run_as_root,
privsep_exec=privsep_exec)
def exists(self, name):
return network_namespace_exists(name)
def vlan_in_use(segmentation_id, namespace=None):
"""Return True if VLAN ID is in use by an interface, else False."""
interfaces = get_devices_info(namespace)
vlans = {interface.get('vlan_id') for interface in interfaces
if interface.get('vlan_id')}
return segmentation_id in vlans
def vxlan_in_use(segmentation_id, namespace=None):
"""Return True if VXLAN VNID is in use by an interface, else False."""
interfaces = get_devices_info(namespace)
vxlans = {interface.get('vxlan_id') for interface in interfaces
if interface.get('vxlan_id')}
return segmentation_id in vxlans
def device_exists(device_name, namespace=None):
"""Return True if the device exists in the namespace."""
return IPDevice(device_name, namespace=namespace).exists()
def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None):
"""Return True if the device with the given IP addresses and MAC address
exists in the namespace.
"""
try:
device = IPDevice(device_name, namespace=namespace)
if mac and mac != device.link.address:
return False
device_ip_cidrs = [ip['cidr'] for ip in device.addr.list()]
for ip_cidr in ip_cidrs:
if ip_cidr not in device_ip_cidrs:
return False
except RuntimeError:
return False
else:
return True
def get_device_mac(device_name, namespace=None):
"""Return the MAC address of the device."""
return IPDevice(device_name, namespace=namespace).link.address
def get_device_mtu(device_name, namespace=None):
"""Return the MTU value of the device."""
return IPDevice(device_name, namespace=namespace).link.mtu
NetworkNamespaceNotFound = privileged.NetworkNamespaceNotFound
NetworkInterfaceNotFound = privileged.NetworkInterfaceNotFound
IpAddressAlreadyExists = privileged.IpAddressAlreadyExists
def add_ip_address(cidr, device, namespace=None, scope='global',
add_broadcast=True):
"""Add an IP address.
:param cidr: IP address to add, in CIDR notation
:param device: Device name to use in adding address
:param namespace: The name of the namespace in which to add the address
:param scope: scope of address being added
:param add_broadcast: should broadcast address be added
"""
net = netaddr.IPNetwork(cidr)
broadcast = None
if add_broadcast and net.version == 4:
# NOTE(slaweq): if cidr is a /32, net.broadcast is None, so the
# same IP address as cidr should be set as the broadcast
broadcast = str(net.broadcast or net.ip)
privileged.add_ip_address(
net.version, str(net.ip), net.prefixlen,
device, namespace, scope, broadcast)
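# Illustrative usage sketch (hypothetical device/namespace names, address from
# the documentation range):
#
#     add_ip_address('192.0.2.10/24', 'qg-example', namespace='qrouter-example')
#
# For a /32 CIDR the broadcast falls back to the address itself, per the NOTE
# above.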
def delete_ip_address(cidr, device, namespace=None):
"""Delete an IP address.
:param cidr: IP address to delete, in CIDR notation
:param device: Device name to use in deleting address
:param namespace: The name of the namespace in which to delete the address
"""
net = netaddr.IPNetwork(cidr)
privileged.delete_ip_address(
net.version, str(net.ip), net.prefixlen, device, namespace)
def flush_ip_addresses(ip_version, device, namespace=None):
"""Flush all IP addresses.
:param ip_version: IP version of addresses to flush
:param device: Device name to use in flushing addresses
:param namespace: The name of the namespace in which to flush the addresses
"""
privileged.flush_ip_addresses(ip_version, device, namespace)
# NOTE(haleyb): These neighbour functions live outside the IpNeighCommand
# class since not all callers require it.
def add_neigh_entry(ip_address, mac_address, device, namespace=None,
nud_state=None, **kwargs):
"""Add a neighbour entry.
:param ip_address: IP address of entry to add
:param mac_address: MAC address of entry to add
:param device: Device name to use in adding entry
:param namespace: The name of the namespace in which to add the entry
:param nud_state: The NUD (Neighbour Unreachability Detection) state of
the entry; defaults to "permanent"
"""
ip_version = common_utils.get_ip_version(ip_address)
nud_state = nud_state or 'permanent'
privileged.add_neigh_entry(ip_version,
ip_address,
mac_address,
device,
namespace,
nud_state,
**kwargs)
def delete_neigh_entry(ip_address, mac_address, device, namespace=None,
**kwargs):
"""Delete a neighbour entry.
:param ip_address: IP address of entry to delete
:param mac_address: MAC address of entry to delete
:param device: Device name to use in deleting entry
:param namespace: The name of the namespace in which to delete the entry
"""
ip_version = common_utils.get_ip_version(ip_address)
privileged.delete_neigh_entry(ip_version,
ip_address,
mac_address,
device,
namespace,
**kwargs)
def dump_neigh_entries(ip_version, device=None, namespace=None, **kwargs):
"""Dump all neighbour entries.
:param ip_version: IP version of entries to show (4 or 6)
:param device: Device name to use in dumping entries
:param namespace: The name of the namespace in which to dump the entries
:param kwargs: Callers add any filters they use as kwargs
:return: a list of dictionaries, each representing a neighbour.
The dictionary format is: {'dst': ip_address,
'lladdr': mac_address,
'device': device_name}
"""
return list(privileged.dump_neigh_entries(ip_version,
device,
namespace,
**kwargs))
def create_network_namespace(namespace, **kwargs):
"""Create a network namespace.
:param namespace: The name of the namespace to create
:param kwargs: Callers add any filters they use as kwargs
"""
privileged.create_netns(namespace, **kwargs)
def delete_network_namespace(namespace, **kwargs):
"""Delete a network namespace.
:param namespace: The name of the namespace to delete
:param kwargs: Callers add any filters they use as kwargs
"""
privileged.remove_netns(namespace, **kwargs)
def list_network_namespaces(**kwargs):
"""List all network namespace entries.
:param kwargs: Callers add any filters they use as kwargs
"""
if cfg.CONF.AGENT.use_helper_for_ns_read:
return privileged.list_netns(**kwargs)
else:
return netns.listnetns(**kwargs)
def network_namespace_exists(namespace, try_is_ready=False, **kwargs):
"""Check if a network namespace exists.
:param namespace: The name of the namespace to check
:param try_is_ready: Try to open the namespace to know if the namespace
is ready to be operated.
:param kwargs: Callers add any filters they use as kwargs
"""
if not try_is_ready:
output = list_network_namespaces(**kwargs)
return namespace in output
try:
privileged.open_namespace(namespace)
return True
except (RuntimeError, OSError):
pass
return False
def list_namespace_pids(namespace):
"""List namespace process PIDs
:param namespace: (string) the name of the namespace
:return: (tuple)
"""
return privileged.list_ns_pids(namespace)
def ensure_device_is_ready(device_name, namespace=None):
dev = IPDevice(device_name, namespace=namespace)
try:
# Ensure the device has a MAC address and is up, even if it is already
# up.
if not dev.link.exists or not dev.link.address:
LOG.info("Device %s cannot be used as it has no MAC "
"address", device_name)
return False
dev.link.set_up()
except RuntimeError:
return False
return True
def iproute_arg_supported(command, arg):
command += ['help']
stdout, stderr = utils.execute(command, check_exit_code=False,
return_stderr=True, log_fail_as_error=False)
return any(arg in line for line in stderr.split('\n'))
def _arping(ns_name, iface_name, address, count, log_exception):
# Due to a Linux kernel bug*, it's advised to spread gratuitous updates
# further apart, injecting an interval between consecutive packets that is
# longer than the 1s currently hardcoded** in arping. To achieve that, we
# call the arping tool 'count' times, each issuing a single ARP update, and
# wait between iterations.
#
# * https://patchwork.ozlabs.org/patch/760372/
# ** https://github.com/iputils/iputils/pull/86
first = True
# Since arping is used to send gratuitous ARP, a response is
# not expected. In some cases (no response) and with some
# platforms (>=Ubuntu 14.04), arping exit code can be 1.
extra_ok_codes = [1]
ip_wrapper = IPWrapper(namespace=ns_name)
for i in range(count):
if not first:
# hopefully enough for kernel to get out of locktime loop
time.sleep(2)
# On the second (and subsequent) arping calls, we can get a
# "bind: Cannot assign requested address" error since
# the IP address might have been deleted concurrently.
# We will log an error below if this isn't the case, so
# no need to have execute() log one as well.
extra_ok_codes = [1, 2]
first = False
# Some Linux kernels* don't honour REPLYs. Send both gratuitous REQUEST
# and REPLY packets (REQUESTs are kept for backwards compatibility, in
# case some network peers, conversely, honor REPLYs and not REQUESTs).
#
# * https://patchwork.ozlabs.org/patch/763016/
for arg in ('-U', '-A'):
arping_cmd = ['arping', arg, '-I', iface_name, '-c', 1,
# Pass -w to set timeout to ensure exit if interface
# removed while running
'-w', 2, address]
try:
ip_wrapper.netns.execute(arping_cmd,
extra_ok_codes=extra_ok_codes,
privsep_exec=True)
except Exception as exc:
# Since this is spawned in a thread and executed 2 seconds
# apart, something may have been deleted while we were
# sleeping. Downgrade message to info and return early
# unless it was the first try.
exists = device_exists_with_ips_and_mac(iface_name,
[address],
mac=None,
namespace=ns_name)
msg = ("Failed sending gratuitous ARP to %(addr)s on "
"%(iface)s in namespace %(ns)s: %(err)s")
logger_method = LOG.exception
if not (log_exception and (first or exists)):
logger_method = LOG.info
logger_method(msg, {'addr': address,
'iface': iface_name,
'ns': ns_name,
'err': exc})
if not exists:
LOG.info("Interface %(iface)s or address %(addr)s "
"in namespace %(ns)s was deleted concurrently",
{'iface': iface_name,
'addr': address,
'ns': ns_name})
return
def send_ip_addr_adv_notif(
ns_name, iface_name, address, count=3, log_exception=True,
use_eventlet=True):
"""Send advance notification of an IP address assignment.
If the address is in the IPv4 family, send gratuitous ARP.
If the address is in the IPv6 family, no advance notification is
necessary, since the Neighbor Discovery Protocol (NDP), Duplicate
Address Discovery (DAD), and (for stateless addresses) router
advertisements (RAs) are sufficient for address resolution and
duplicate address detection.
:param ns_name: Namespace name the GARPs are going to be sent from.
:param iface_name: Name of the interface the GARPs are going to be sent from.
:param address: Advertised IP address.
:param count: (Optional) How many GARPs are going to be sent. Default is 3.
:param log_exception: (Optional) True if possible failures should be logged
on exception level. Otherwise they are logged on
WARNING level. Default is True.
:param use_eventlet: (Optional) True if the arping command will be spawned
using eventlet, False to use Python threads
(threading).
"""
def arping():
_arping(ns_name, iface_name, address, count, log_exception)
if count > 0 and netaddr.IPAddress(address).version == 4:
if use_eventlet:
eventlet.spawn_n(arping)
else:
threading.Thread(target=arping).start()
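# Illustrative usage sketch (hypothetical namespace/interface names, address
# from the documentation range): send three gratuitous ARPs from a Python
# thread instead of eventlet:
#
#     send_ip_addr_adv_notif('qrouter-example', 'qg-example', '203.0.113.5',
#                            count=3, use_eventlet=False)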
def sysctl(cmd, namespace=None, log_fail_as_error=True):
"""Run sysctl command 'cmd'
@param cmd: a list containing the sysctl command to run
@param namespace: network namespace to run command in
@param log_fail_as_error: failure logged as LOG.error
execute() doesn't return the exit status of the command it runs,
it returns stdout and stderr. Setting check_exit_code=True will cause
it to raise a RuntimeError if the exit status of the command is
non-zero, which in sysctl's case is an error. So we're normalizing
that into zero (success) and one (failure) here to mimic what
"echo $?" in a shell would be.
This is all because sysctl is too verbose and prints the value you
just set on success, unlike most other utilities that print nothing.
execute() will have dumped a message to the logs with the actual
output on failure, so it's not lost, and we don't need to print it
here.
"""
cmd = ['sysctl', '-w'] + cmd
ip_wrapper = IPWrapper(namespace=namespace)
try:
ip_wrapper.netns.execute(cmd, run_as_root=True,
log_fail_as_error=log_fail_as_error,
privsep_exec=True)
except RuntimeError as rte:
LOG.warning(
"Setting %(cmd)s in namespace %(ns)s failed: %(err)s.",
{'cmd': cmd,
'ns': namespace,
'err': rte})
return 1
return 0
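# Illustrative usage sketch (hypothetical namespace name); the return value
# mimics "echo $?" as described in the docstring:
#
#     if sysctl(['net.ipv4.conf.all.promote_secondaries=1'],
#               namespace='qrouter-example'):
#         LOG.warning('Enabling promote_secondaries failed')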
def add_namespace_to_cmd(cmd, namespace=None):
"""Add an optional namespace to the command."""
return ['ip', 'netns', 'exec', namespace] + cmd if namespace else cmd
def get_ipv6_lladdr(mac_addr):
return '%s/64' % netaddr.EUI(mac_addr).ipv6_link_local()
def get_ip_nonlocal_bind(namespace=None):
"""Get kernel option value of ip_nonlocal_bind in given namespace."""
cmd = ['sysctl', '-bn', IP_NONLOCAL_BIND]
ip_wrapper = IPWrapper(namespace)
return int(ip_wrapper.netns.execute(cmd, run_as_root=True,
privsep_exec=True))
def set_ip_nonlocal_bind(value, namespace=None, log_fail_as_error=True):
"""Set sysctl knob of ip_nonlocal_bind to given value."""
cmd = ['%s=%d' % (IP_NONLOCAL_BIND, value)]
return sysctl(cmd, namespace=namespace,
log_fail_as_error=log_fail_as_error)
def set_ip_nonlocal_bind_for_namespace(namespace, value, root_namespace=False):
"""Set ip_nonlocal_bind but don't raise exception on failure."""
failed = set_ip_nonlocal_bind(value, namespace=namespace,
log_fail_as_error=False)
if failed and root_namespace:
# Somewhere in the 3.19 kernel timeframe ip_nonlocal_bind was
# changed to be a per-namespace attribute. To be backwards
# compatible we need to try both if at first we fail.
LOG.debug('Namespace (%s) does not support setting %s, '
'trying in root namespace', namespace, IP_NONLOCAL_BIND)
return set_ip_nonlocal_bind(value)
if failed:
LOG.warning(
"%s will not be set to %d in the root namespace in order to "
"not break DVR, which requires this value be set to 1. This "
"may introduce a race between moving a floating IP to a "
"different network node, and the peer side getting a "
"populated ARP cache for a given floating IP address.",
IP_NONLOCAL_BIND, value)
def get_ipv6_forwarding(device, namespace=None):
"""Get kernel value of IPv6 forwarding for device in given namespace."""
cmd = ['sysctl', '-b', "net.ipv6.conf.%s.forwarding" % device]
ip_wrapper = IPWrapper(namespace)
return int(ip_wrapper.netns.execute(cmd, run_as_root=True,
privsep_exec=True))
def _parse_ip_rule(rule, ip_version):
"""Parse a pyroute2 rule and returns a dictionary
Parameters contained in the returned dictionary:
- priority: rule priority
- from: source IP address
- to: (optional) destination IP address
- type: rule type (see RULE_TYPES)
- table: table name or number (see RULE_TABLES)
- fwmark: (optional) FW mark
- iif: (optional) input interface name
- oif: (optional) output interface name
:param rule: pyroute2 rule dictionary
:param ip_version: IP version (4, 6)
:return: dictionary with IP rule information
"""
parsed_rule = {'priority': str(rule['attrs'].get('FRA_PRIORITY', 0))}
from_ip = rule['attrs'].get('FRA_SRC')
if from_ip:
parsed_rule['from'] = common_utils.ip_to_cidr(
from_ip, prefix=rule['src_len'])
if common_utils.is_cidr_host(parsed_rule['from']):
parsed_rule['from'] = common_utils.cidr_to_ip(parsed_rule['from'])
else:
parsed_rule['from'] = constants.IP_ANY[ip_version]
to_ip = rule['attrs'].get('FRA_DST')
if to_ip:
parsed_rule['to'] = common_utils.ip_to_cidr(
to_ip, prefix=rule['dst_len'])
if common_utils.is_cidr_host(parsed_rule['to']):
parsed_rule['to'] = common_utils.cidr_to_ip(parsed_rule['to'])
parsed_rule['type'] = IP_RULE_TYPES[rule['action']]
table_num = rule['attrs']['FRA_TABLE']
for table_name in (name for (name, index) in
IP_RULE_TABLES.items() if index == table_num):
parsed_rule['table'] = table_name
break
else:
parsed_rule['table'] = str(table_num)
fwmark = rule['attrs'].get('FRA_FWMARK')
if fwmark:
fwmask = rule['attrs'].get('FRA_FWMASK')
parsed_rule['fwmark'] = '{0:#x}/{1:#x}'.format(fwmark, fwmask)
iifname = rule['attrs'].get('FRA_IIFNAME')
if iifname:
parsed_rule['iif'] = iifname
oifname = rule['attrs'].get('FRA_OIFNAME')
if oifname:
parsed_rule['oif'] = oifname
return parsed_rule
def list_ip_rules(namespace, ip_version):
"""List all IP rules in a namespace
:param namespace: namespace name
:param ip_version: IP version (4, 6)
:return: list of dictionaries with the rules information
"""
rules = privileged.list_ip_rules(namespace, ip_version)
return [_parse_ip_rule(rule, ip_version) for rule in rules]
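# Usage sketch (namespace name and addresses are illustrative, not from this
# module):
#   list_ip_rules('qrouter-uuid', 4)
# returns entries shaped like
#   {'priority': '0', 'from': '0.0.0.0/0', 'type': 'unicast', 'table': 'local'}
# i.e. the keys documented in _parse_ip_rule(), with host CIDRs collapsed to
# plain IP addresses.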
def _make_pyroute2_args(ip, iif, table, priority, to):
"""Returns a dictionary of arguments to be used in pyroute rule commands
:param ip: (string) source IP or CIDR address (IPv4, IPv6)
:param iif: (string) input interface name
:param table: (string, int) table number (as an int or a string) or table
name ('default', 'main', 'local')
:param priority: (string, int) rule priority
:param to: (string) destination IP or CIDR address (IPv4, IPv6)
:return: a dictionary with the kwargs needed in pyroute rule commands
"""
ip_version = common_utils.get_ip_version(ip)
# In case we need to add a rule based on an incoming interface, no
# IP address is given; the rule default source ("from") address is
# "all".
cmd_args = {'family': common_utils.get_socket_address_family(ip_version)}
if iif:
cmd_args['iifname'] = iif
else:
cmd_args['src'] = common_utils.cidr_to_ip(ip)
cmd_args['src_len'] = common_utils.cidr_mask(ip)
if to:
cmd_args['dst'] = common_utils.cidr_to_ip(to)
cmd_args['dst_len'] = common_utils.cidr_mask(to)
if table:
cmd_args['table'] = IP_RULE_TABLES.get(table) or int(table)
if priority:
cmd_args['priority'] = int(priority)
return cmd_args
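# For illustration, _make_pyroute2_args('192.168.0.0/24', None, 'main', 100,
# None) would produce roughly
#   {'family': socket.AF_INET, 'src': '192.168.0.0', 'src_len': 24,
#    'table': 254, 'priority': 100}
# assuming IP_RULE_TABLES follows the conventional kernel mapping of the
# 'main' table to number 254.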
def _exist_ip_rule(rules, ip, iif, table, priority, to):
"""Check if any rule matches the conditions"""
for rule in rules:
if iif and rule.get('iif') != iif:
continue
if not iif and rule['from'] != ip:
continue
if table and rule.get('table') != str(table):
continue
if priority and rule['priority'] != str(priority):
continue
if to and rule.get('to') != to:
continue
break
else:
return False
return True
def add_ip_rule(namespace, ip, iif=None, table=None, priority=None, to=None):
"""Create an IP rule in a namespace
:param namespace: (string) namespace name
:param ip: (string) source IP or CIDR address (IPv4, IPv6)
:param iif: (Optional) (string) input interface name
:param table: (Optional) (string, int) table number
:param priority: (Optional) (string, int) rule priority
:param to: (Optional) (string) destination IP or CIDR address (IPv4, IPv6)
"""
ip_version = common_utils.get_ip_version(ip)
rules = list_ip_rules(namespace, ip_version)
if _exist_ip_rule(rules, ip, iif, table, priority, to):
return
cmd_args = _make_pyroute2_args(ip, iif, table, priority, to)
privileged.add_ip_rule(namespace, **cmd_args)
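# Example (values are illustrative): a source-routing rule for a subnet,
#   add_ip_rule('qrouter-uuid', '10.0.0.0/24', table=16, priority=100)
# is a no-op when _exist_ip_rule() already finds a matching rule, so the call
# can be made idempotently.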
def delete_ip_rule(namespace, ip, iif=None, table=None, priority=None,
to=None):
"""Delete an IP rule in a namespace
:param namespace: (string) namespace name
:param ip: (string) source IP or CIDR address (IPv4, IPv6)
:param iif: (Optional) (string) input interface name
:param table: (Optional) (string, int) table number
:param priority: (Optional) (string, int) rule priority
:param to: (Optional) (string) destination IP or CIDR address (IPv4, IPv6)
"""
cmd_args = _make_pyroute2_args(ip, iif, table, priority, to)
privileged.delete_ip_rule(namespace, **cmd_args)
def get_attr(pyroute2_obj, attr_name):
"""Get an attribute from a PyRoute2 object"""
rule_attrs = pyroute2_obj.get('attrs', [])
for attr in (attr for attr in rule_attrs if attr[0] == attr_name):
return attr[1]
def _parse_ip_address(pyroute2_address, device_name):
ip = get_attr(pyroute2_address, 'IFA_ADDRESS')
ip_length = pyroute2_address['prefixlen']
event = IP_ADDRESS_EVENTS.get(pyroute2_address.get('event'))
cidr = common_utils.ip_to_cidr(ip, prefix=ip_length)
flags = get_attr(pyroute2_address, 'IFA_FLAGS')
dynamic = not bool(flags & ifaddrmsg.IFA_F_PERMANENT)
tentative = bool(flags & ifaddrmsg.IFA_F_TENTATIVE)
dadfailed = bool(flags & ifaddrmsg.IFA_F_DADFAILED)
scope = IP_ADDRESS_SCOPE[pyroute2_address['scope']]
return {'name': device_name,
'cidr': cidr,
'scope': scope,
'broadcast': get_attr(pyroute2_address, 'IFA_BROADCAST'),
'dynamic': dynamic,
'tentative': tentative,
'dadfailed': dadfailed,
'event': event}
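# The dictionary returned above typically looks like (illustrative values):
#   {'name': 'eth0', 'cidr': '192.168.0.5/24', 'scope': 'global',
#    'broadcast': '192.168.0.255', 'dynamic': False, 'tentative': False,
#    'dadfailed': False, 'event': 'added'}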
def get_devices_with_ip(namespace, name=None, **kwargs):
retval = []
link_args = {}
if name:
link_args['ifname'] = name
scope = kwargs.pop('scope', None)
if scope:
kwargs['scope'] = IP_ADDRESS_SCOPE_NAME[scope]
if not link_args:
ip_addresses = privileged.get_ip_addresses(namespace, **kwargs)
else:
device = get_devices_info(namespace, **link_args)
if not device:
return retval
ip_addresses = privileged.get_ip_addresses(
namespace, index=device[0]['index'], **kwargs)
devices = {} # {device index: name}
for ip_address in ip_addresses:
index = ip_address['index']
name = get_attr(ip_address, 'IFA_LABEL') or devices.get(index)
if not name:
device = get_devices_info(namespace, index=index)
if not device:
continue
name = device[0]['name']
retval.append(_parse_ip_address(ip_address, name))
devices[index] = name
return retval
def get_devices_info(namespace, **kwargs):
devices = privileged.get_link_devices(namespace, **kwargs)
retval = {}
for device in devices:
ret = {'index': device['index'],
'name': get_attr(device, 'IFLA_IFNAME'),
'operstate': get_attr(device, 'IFLA_OPERSTATE'),
'linkmode': get_attr(device, 'IFLA_LINKMODE'),
'mtu': get_attr(device, 'IFLA_MTU'),
'promiscuity': get_attr(device, 'IFLA_PROMISCUITY'),
'mac': get_attr(device, 'IFLA_ADDRESS'),
'broadcast': get_attr(device, 'IFLA_BROADCAST')}
ifla_link = get_attr(device, 'IFLA_LINK')
if ifla_link:
ret['parent_index'] = ifla_link
ifla_linkinfo = get_attr(device, 'IFLA_LINKINFO')
if ifla_linkinfo:
ret['kind'] = get_attr(ifla_linkinfo, 'IFLA_INFO_KIND')
ifla_data = get_attr(ifla_linkinfo, 'IFLA_INFO_DATA')
if ret['kind'] == 'vxlan':
ret['vxlan_id'] = get_attr(ifla_data, 'IFLA_VXLAN_ID')
ret['vxlan_group'] = get_attr(ifla_data, 'IFLA_VXLAN_GROUP')
ret['vxlan_link_index'] = get_attr(ifla_data,
'IFLA_VXLAN_LINK')
elif ret['kind'] == 'vlan':
ret['vlan_id'] = get_attr(ifla_data, 'IFLA_VLAN_ID')
elif ret['kind'] == 'bridge':
ret['stp'] = get_attr(ifla_data, 'IFLA_BR_STP_STATE')
ret['forward_delay'] = get_attr(ifla_data,
'IFLA_BR_FORWARD_DELAY')
retval[device['index']] = ret
for device in retval.values():
if device.get('parent_index'):
parent_device = retval.get(device['parent_index'])
if parent_device:
device['parent_name'] = parent_device['name']
elif device.get('vxlan_link_index'):
device['vxlan_link_name'] = (
retval[device['vxlan_link_index']]['name'])
return list(retval.values())
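# Illustrative entry for a VLAN interface (field values are made up):
#   {'index': 5, 'name': 'eth0.100', 'operstate': 'UP', 'linkmode': 0,
#    'mtu': 1500, 'promiscuity': 0, 'mac': 'fa:16:3e:aa:bb:cc',
#    'broadcast': 'ff:ff:ff:ff:ff:ff', 'parent_index': 2, 'kind': 'vlan',
#    'vlan_id': 100, 'parent_name': 'eth0'}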
def ip_monitor(namespace, queue, event_stop, event_started):
"""Monitor IP address changes
    If namespace is not None, this function must be executed as the root user,
    but it cannot use privsep because it is a blocking function and could
    exhaust the pool of worker threads.
"""
def get_device_name(index):
try:
with privileged.get_iproute(namespace) as ip:
device = ip.link('get', index=index)
if device:
attrs = device[0].get('attrs', [])
for attr in (attr for attr in attrs
if attr[0] == 'IFLA_IFNAME'):
return attr[1]
except netlink_exceptions.NetlinkError as e:
if e.code == errno.ENODEV:
return
raise
def read_ip_updates(_ip, _queue):
"""Read Pyroute2.IPRoute input socket
The aim of this function is to open and bind an IPRoute socket only for
reading the netlink changes; no other operations are done with this
opened socket. This function is executed in a separate thread,
dedicated only to this task.
"""
_ip.bind(async_cache=True)
try:
while True:
ip_addresses = _ip.get()
for ip_address in ip_addresses:
_queue.put(ip_address)
except EOFError:
pass
_queue = eventlet.Queue()
try:
cache_devices = {}
with privileged.get_iproute(namespace) as ip:
for device in ip.get_links():
cache_devices[device['index']] = get_attr(device,
'IFLA_IFNAME')
_ip = privileged.get_iproute(namespace)
ip_updates_thread = threading.Thread(target=read_ip_updates,
args=(_ip, _queue))
ip_updates_thread.start()
event_started.set()
while not event_stop.is_set():
try:
ip_address = _queue.get(timeout=1)
except eventlet.queue.Empty:
continue
if 'index' in ip_address and 'prefixlen' in ip_address:
index = ip_address['index']
name = (get_device_name(index) or
cache_devices.get(index))
if not name:
continue
cache_devices[index] = name
queue.put(_parse_ip_address(ip_address, name))
_ip.close()
ip_updates_thread.join(timeout=5)
except OSError as e:
if e.errno == errno.ENOENT:
raise privileged.NetworkNamespaceNotFound(netns_name=namespace)
raise
def add_ip_route(namespace, cidr, device=None, via=None, table=None,
metric=None, scope=None, proto='static', **kwargs):
"""Add an IP route"""
if table:
table = IP_RULE_TABLES.get(table, table)
ip_version = common_utils.get_ip_version(cidr or via)
privileged.add_ip_route(namespace, cidr, ip_version,
device=device, via=via, table=table,
metric=metric, scope=scope, proto=proto, **kwargs)
def list_ip_routes(namespace, ip_version, scope=None, via=None, table=None,
device=None, **kwargs):
"""List IP routes"""
def get_device(index, devices):
for device in (d for d in devices if d['index'] == index):
return get_attr(device, 'IFLA_IFNAME')
def get_proto(proto_number):
if proto_number in rtnl.rt_proto:
return rtnl.rt_proto[proto_number]
elif str(proto_number) in constants.IP_PROTOCOL_NUM_TO_NAME_MAP:
return constants.IP_PROTOCOL_NUM_TO_NAME_MAP[str(proto_number)]
table = table if table else 'main'
table = IP_RULE_TABLES.get(table, table)
routes = privileged.list_ip_routes(namespace, ip_version, device=device,
table=table, **kwargs)
devices = privileged.get_link_devices(namespace)
ret = []
for route in routes:
cidr = get_attr(route, 'RTA_DST')
if cidr:
cidr = '%s/%s' % (cidr, route['dst_len'])
else:
cidr = constants.IP_ANY[ip_version]
table = int(get_attr(route, 'RTA_TABLE'))
metric = (get_attr(route, 'RTA_PRIORITY') or
IP_ROUTE_METRIC_DEFAULT[ip_version])
proto = get_proto(route['proto'])
value = {
'table': IP_RULE_TABLES_NAMES.get(table, table),
'source_prefix': get_attr(route, 'RTA_PREFSRC'),
'cidr': cidr,
'scope': IP_ADDRESS_SCOPE[int(route['scope'])],
'metric': metric,
'proto': proto,
}
multipath = get_attr(route, 'RTA_MULTIPATH')
if multipath:
value['device'] = None
mp_via = []
for mp in multipath:
mp_via.append({'device': get_device(int(mp['oif']), devices),
'via': get_attr(mp, 'RTA_GATEWAY'),
'weight': int(mp['hops']) + 1})
value['via'] = mp_via
else:
value['device'] = get_device(int(get_attr(route, 'RTA_OIF')),
devices)
value['via'] = get_attr(route, 'RTA_GATEWAY')
ret.append(value)
if scope:
ret = [route for route in ret if route['scope'] == scope]
if via:
ret = [route for route in ret if route['via'] == via]
return ret
def delete_ip_route(namespace, cidr, device=None, via=None, table=None,
scope=None, **kwargs):
"""Delete an IP route"""
if table:
table = IP_RULE_TABLES.get(table, table)
ip_version = common_utils.get_ip_version(cidr or via)
privileged.delete_ip_route(namespace, cidr, ip_version,
device=device, via=via, table=table,
scope=scope, **kwargs)
|
redis_lock.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/9 下午9:00
# @Author : 司云中
# @File : redis_lock.py
# @Software: Pycharm
import uuid
import math
import time
from threading import Thread
import redis
from redis import WatchError
def acquire_lock_with_timeout(conn, lock_name, acquire_timeout=3, lock_timeout=3, **kwargs):
    """
    Single-node distributed lock built on Redis.
    :param conn: Redis connection
    :param lock_name: name of the lock
    :param acquire_timeout: how long to keep trying to acquire the lock, default 3 seconds
    :param lock_timeout: expiry time of the lock itself, default 3 seconds
    :return: the unique lock identifier on success, False on timeout
    """
    identifier = str(uuid.uuid4())
    lock_name = f'redis-lock:{lock_name}'
    lock_timeout = int(math.ceil(lock_timeout))
    end = time.time() + acquire_timeout
    while time.time() < end:
        # SET NX with an expiry: only one client can create the key, and the
        # automatic expiry prevents a crashed holder from causing a deadlock.
        if conn.set(lock_name, identifier, ex=lock_timeout, nx=True):
            return identifier  # return the unique identifier of the lock holder
        time.sleep(0.001)  # back off briefly instead of spinning on Redis
    return False
def release_lock(conn, lock_name, identifier):
    """
    Release the lock.
    :param conn: Redis connection
    :param lock_name: name of the lock
    :param identifier: identifier returned by acquire_lock_with_timeout
    :return: True if the lock was released, False otherwise
    """
    # In redis-py, transactions are implemented through the pipeline wrapper
    with conn.pipeline() as pipe:
        # use the same key prefix as acquire_lock_with_timeout
        lock_name = f'redis-lock:{lock_name}'
        while True:
            try:
                # WATCH the lock key: if another client changes it after MULTI,
                # execute() raises WatchError, so we never delete a lock that
                # is no longer ours.
                pipe.watch(lock_name)
                ident = pipe.get(lock_name)
                if ident and ident.decode('utf-8') == identifier:
                    # start the transaction
                    pipe.multi()
                    pipe.delete(lock_name)
                    pipe.execute()
                    return True
                pipe.unwatch()
                break
            except WatchError:
                pass
    return False
count=50
redis_client = redis.Redis(host="127.0.0.1",
port=6381,
db=10)
def seckill(i):
    print("thread {} -- trying to acquire the lock".format(i))
    identifier = acquire_lock_with_timeout(redis_client, 'resource')
    global count
    if not identifier:
        print("thread {} -- could not acquire the lock".format(i))
        return
    if count < 1:
        print("thread {} -- too slow, the tickets are sold out".format(i))
        release_lock(redis_client, 'resource', identifier)
        return
    count -= 1
    print("thread {} -- got a ticket, {} tickets left".format(i, count))
    release_lock(redis_client, 'resource', identifier)  # release the lock
for i in range(1000):
t = Thread(target=seckill,args=(i,))
t.start()
|
emailServer.py
|
import os
import json
import time
import smtplib
import threading
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from firebase_admin import auth
from email.message import EmailMessage
# Use the application default credentials
jsonKey = json.load(open('ServiceAccountKey.json'))
cred = credentials.Certificate(jsonKey)
app = firebase_admin.initialize_app(cred)
db = firestore.client()
def getAdminAddresses():
docs = db.collection("Users").where(
"userGroup", "in", ["Owner", "Admin"]
).where(
"emailNotifications", "==", True
).stream()
return [doc.to_dict()["email"] for doc in docs]
def getEmails():
docs = db.collection("Emails").stream()
emails = [doc.to_dict() for doc in docs]
emailMessages = []
for e in emails:
msg = EmailMessage()
msg.set_content(e["message"])
msg["Subject"] = e["subject"]
msg["From"] = e["email"]
emailMessages.append(msg)
return emailMessages
def getFaultEmails():
docs = db.collection("Notifications").stream()
faults = [doc.to_dict() for doc in docs]
emails = []
for fault in faults:
try:
loggerId = fault['logger']
message = fault['message']
# get siteID and equipment name
loggerDoc = db.collection(u'Loggers').document(
loggerId).get().to_dict()
siteId = loggerDoc['site']
siteDoc = db.collection(u'Sites').document(siteId).get().to_dict()
equipName = ''
for unit in siteDoc['equipmentUnits']:
if (loggerId in unit['loggers']):
equipName = unit['name']
# iterate over user documents
users = [doc.to_dict() for doc in db.collection("Users").stream()]
for user in users:
                if 'equipmentNotifications' in user:
if (siteId in user['equipmentNotifications']):
subscribed = user['equipmentNotifications'][siteId][equipName]
notificationsOn = ('emailNotifications' in user and user['emailNotifications']) or (
'emailNotifications' not in user)
if subscribed and notificationsOn:
# generate email
emailRecipient = user['email']
emailSubject = f"Fault detected on {equipName}"
emailContent = message
msg = EmailMessage()
msg.set_content(emailContent)
msg["Subject"] = emailSubject
msg["To"] = emailRecipient
emails.append(msg)
        except (KeyError, TypeError):
            # a required field was missing or a referenced document did not exist
            print("a notification document was incorrectly created")
return emails
def deleteFaults():
docs = db.collection('Notifications').stream()
for doc in docs:
db.collection("Notifications").document(doc.id).delete()
def deleteEmails():
docs = db.collection("Emails").stream()
for doc in docs:
db.collection("Emails").document(doc.id).delete()
def sendMail():
EMAIL = "YADA.Sender@gmail.com"
PASS = "HLt8AJpfNgm8Jvn"
adminAddresses = "YADA.Sender@gmail.com, " + ", ".join(getAdminAddresses())
while True:
with smtplib.SMTP_SSL("smtp.gmail.com", 465, timeout=10.0) as server:
server.ehlo()
server.login(EMAIL, PASS)
emails = getEmails()
for e in emails:
server.sendmail(e["From"], adminAddresses, e.as_string())
print(f'Sent message from {e.get("From")}.')
deleteEmails()
notifications = getFaultEmails()
for n in notifications:
server.sendmail(EMAIL, [n["To"], EMAIL], n.as_string())
print(f'Sent message to {n["To"]}.')
deleteFaults()
time.sleep(5.0)
if __name__ == "__main__":
sender = threading.Thread(target=sendMail)
print("Starting sender thread...")
sender.start()
|
portscanhoneypot.py
|
#!/bin/env python3
"""
portscanhoneypot:
Simple honeypot to catch rogue port scans on the network for use as an early warning
beacon of potential threat actors on the network.
Author: Dana Epp (@danaepp)
"""
import os
import os.path
import sys
import getopt
import socket
import threading
import time
from struct import unpack, pack
import logging
from appsettings import AppSettings
from webhooks import WebHook, WebHookType
# Colors
# --------------
RED = '\33[31m'
CYAN = '\33[36m'
GREEN = '\33[32m'
WHITE = '\33[0m'
# --------------
# Ethernet Header Length
ETH_HEADER_LEN = 14
# TCP control flags
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
# Generic timestamp for local logging
get_timestamp = lambda : time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
# Versioning
__version__ = "1.0"
PIDFILE="/var/run/pshp.pid"
class PortScanHoneyPot:
def __init__(self, settings, loglevel=None):
PortScanHoneyPot.setup_logging(loglevel)
self.__daemon = settings.daemon
self.__iface = settings.iface
self.__listening_ips = settings.listening_ips
self.__listening_ports = settings.listening_ports
self.__allowed_hosts = settings.allowed_hosts
self.__logfile = settings.portscanlog
# Setup optional webhook for notifications
if settings.webhook and settings.webhook_type != WebHookType.NONE :
self.__webhook = WebHook(settings.webhook, settings.webhook_type)
else:
self.__webhook = None
@classmethod
def print_banner(cls):
print(f"\n{CYAN}==================================================")
print(f" {WHITE}PortScan Honeypot{CYAN} ({WHITE}v{__version__}{CYAN}) - Developed by @danaepp")
print(f" https://github.com/danaepp/PortScanHoneypot")
print(f"=================================================={WHITE} \n")
@classmethod
def display_usage(cls):
print( 'sudo portscanhoneypot.py\n\t[-c /path/to/config.conf] [-d] [--daemon]\n' )
@classmethod
def setup_logging(cls, log_level):
if log_level is None:
logging.basicConfig(
stream=sys.stdout,
level=log_level,
format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p' )
else:
logging.basicConfig(
filename="pshp_debug.log",
level=log_level,
format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p' )
def write_log(self, line):
if self.__logfile:
self.__logfile.write( f"{line}\n")
self.__logfile.flush()
def process_packet(self, packet, addr):
# Get Ethernet frame header
eth_header = packet[:ETH_HEADER_LEN]
# Break out the ethernet frame
eth = unpack('!6s6sH' , eth_header)
# Get the protocol. We only want to deal with IP packets (8)
eth_protocol = socket.ntohs( eth[2] )
if eth_protocol == 8:
# Parse IP header
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| IHL |Type of Service| Total Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Identification |Flags| Fragment Offset |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Time to Live | Protocol | Header Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Destination Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Options | Padding |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
ip_header = packet[ETH_HEADER_LEN:20 + ETH_HEADER_LEN]
# Unpack the header
iph = unpack('!BBHHHBBH4s4s', ip_header)
# Need to calc IP header size for use later
version_ihl = iph[0]
ihl = version_ihl & 0xF
iph_length = ihl * 4
protocol = iph[6]
s_addr = socket.inet_ntoa(iph[8])
d_addr = socket.inet_ntoa(iph[9])
# Only look for TCP connections (6)
if protocol == 6:
# Parse TCP header
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Port | Destination Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Sequence Number |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Acknowledgment Number |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Data | |U|A|P|R|S|F| |
# | Offset| Reserved |R|C|S|S|Y|I| Window |
# | | |G|K|H|T|N|N| |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Checksum | Urgent Pointer |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Options | Padding |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
t = iph_length + ETH_HEADER_LEN
# Extract raw bytes of TCP header
tcp_header = packet[t:t+20]
# Unpack TCP header
tcph = unpack('!HHLLBBHHH' , tcp_header)
dest_port = tcph[1]
flags = tcph[5]
# We only want to monitor the interface IP for the listening ports...
# drop everything else
if d_addr in self.__listening_ips and dest_port in self.__listening_ports:
self.__process_scanner_packet(flags, s_addr, d_addr, dest_port)
# Process scanner packets tripped up by the honeypot
def __process_scanner_packet(self, flags, s_addr, d_addr, d_port):
# We want to make sure we drop packets from allowed hosts (ie: RMM/NM/Network scanners etc)
if s_addr not in self.__allowed_hosts:
scan_type = self.get_scan_type(flags)
flags_str = self.get_flags(flags)
msg = f"[{get_timestamp()}] {scan_type} scan (flags:{flags_str}) detected from {str(s_addr)} to {str(d_addr)}:{str(d_port)}"
self.write_log(msg)
if self.__webhook:
self.__webhook.notify(msg)
if not self.__daemon:
print( msg )
def get_scan_type(self, flags):
# TCP flags to scan type mapping
scan_types_mapping = {
0: 'TCP NULL',
TH_FIN: 'TCP FIN',
TH_SYN: 'TCP SYN',
TH_SYN|TH_RST: 'TCP SYN',
TH_ACK: 'TCP ACK',
TH_URG|TH_PSH|TH_FIN: 'TCP XMAS',
TH_URG|TH_PSH|TH_FIN|TH_ACK: 'TCP XMAS',
TH_SYN|TH_FIN: 'TCP SYN/FIN',
TH_FIN|TH_ACK: 'TCP FIN/ACK',
TH_SYN|TH_ACK|TH_RST: 'TCP CONN',
TH_URG|TH_PSH|TH_ACK|TH_RST|TH_SYN|TH_FIN: 'TCP ALL-FLAGS'
}
return scan_types_mapping.get(flags, 'unknown')
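    # For example, a bare SYN probe (flags == TH_SYN == 0x02) maps to 'TCP SYN',
    # and a christmas-tree probe with URG|PSH|FIN set maps to 'TCP XMAS'; any
    # combination not listed in the mapping is reported as 'unknown'.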
def get_flags(self, flags):
flags_str = ''
if flags == 0:
flags_str = 'N'
else:
if flags & TH_URG:
flags_str += 'U'
if flags & TH_ACK:
flags_str += 'A'
if flags & TH_PSH:
flags_str += 'P'
if flags & TH_RST:
flags_str += 'R'
if flags & TH_SYN:
flags_str += 'S'
if flags & TH_FIN:
flags_str += 'F'
return flags_str
def sniff(self):
print( f"Starting up honeypot to detect port scans on '{self.__iface}'..." )
try:
sock = socket.socket( socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3) ) # 3 = ETH_P_ALL
except Exception as err:
# Should only hit here if raw sockets aren't allowed
logging.exception(err)
sys.exit()
try:
sock.bind((self.__iface, 0))
except OSError as err:
logging.exception(err)
sys.exit("Bind failed. Aborting.")
except Exception as ex:
logging.exception(ex)
sys.exit("General exception while binding. Aborting")
while True:
try:
packet, addr = sock.recvfrom(65535)
threading.Thread(target=self.process_packet, args=(packet, addr)).start()
except KeyboardInterrupt:
if not self.__logfile.closed:
self.__logfile.close()
sys.exit()
except Exception as ex:
print( "General exception while listening for port scans." )
logging.exception(ex)
def run(self):
if self.__daemon:
# Disconnect from tty
try:
pid = os.fork()
if pid>0:
sys.exit(0)
except OSError as e:
logging.exception(e)
sys.exit("First fork failed during daemonize")
os.setsid()
os.umask(0)
# We need a second fork to fully disconnect the process
try:
pid = os.fork()
if pid>0:
open(PIDFILE,'w').write(str(pid))
sys.exit(0)
except OSError as e:
logging.exception(e)
sys.exit("Second fork failed during daemonize")
# If we get this far, we now have a disconnected daemon process and we can sniff
logging.info( "Launching Port Scan Honeypot as a daemon..." )
self.sniff()
else:
self.print_banner()
self.sniff()
def main(argv):
if os.geteuid() != 0:
msg = "You must have effective 'root' privs to run this program"
logging.error(msg)
sys.exit(msg)
settingsfile = None
loglevel = None
daemon = False
try:
opts, args = getopt.getopt( argv,
"hc:d",
["help", "config=", "debug", "daemon"])
except getopt.GetoptError as err:
logging.exception(err)
sys.exit(2)
for opt, arg in opts:
if opt in ( "-h", "--help"):
PortScanHoneyPot.display_usage()
sys.exit()
elif opt in ( "-c", "--config" ):
settingsfile = arg
elif opt in ( "-d", "--debug" ):
loglevel = logging.DEBUG
elif opt in ( "--daemon" ):
daemon = True
settings = AppSettings(daemon, settingsfile)
honey_pot = PortScanHoneyPot(settings, loglevel)
honey_pot.run()
if __name__ == "__main__":
main(sys.argv[1:])
|
mbase.py
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit from.
"""
from __future__ import print_function
import sys
import os
import subprocess as sp
import shutil
import threading
if sys.version_info > (3, 0):
import queue as Queue
else:
import Queue
from datetime import datetime
import copy
import numpy as np
from flopy import utils
from .version import __version__
# Global variables
iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT.
iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file.
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
# test for exe in current working directory
if is_exe(program):
return program
# test for exe in path statement
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
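# Usage sketch (executable name and path are illustrative): which('mf2005')
# walks os.environ['PATH'] and returns the first executable hit, for example
# '/usr/local/bin/mf2005', or None when nothing executable is found.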
class FileDataEntry(object):
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData(object):
def __init__(self):
self.file_data = []
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
        self.file_data.append(FileDataEntry(fname, unit, binflag=binflag,
                                            output=output, package=package))
class BaseModel(object):
"""
MODFLOW based models base class
Parameters
----------
modelname : string
Name of the model. Model files will be given this name. (default is
        'modflowtest')
namefile_ext : string
name file extension (default is 'nam')
exe_name : string
name of the modflow executable
model_ws : string
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
"""
def __init__(self, modelname='modflowtest', namefile_ext='nam',
exe_name='mf2k.exe', model_ws=None,
structured=True, **kwargs):
"""
BaseModel init
"""
self.__name = modelname
self.namefile_ext = namefile_ext
self.namefile = self.__name + '.' + self.namefile_ext
self.packagelist = []
self.heading = ''
self.exe_name = exe_name
self.external_extension = 'ref'
if model_ws is None: model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
'\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format(
model_ws, os.getcwd()))
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ''
# check for reference info in kwargs
# we are just carrying these until a dis package is added
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", "EPSG:4326")
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
# for pak in self.packagelist:
# f = pak.export(f)
# return f
from .export import utils
return utils.model_helper(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except (IndexError, TypeError):
pn = p.name
msg = "WARNING: unit {} ".format(u) + \
"of package {} already in use".format(pn)
print(msg)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
print('****Warning -- two packages of the same type: ',
type(p), type(pp))
print('replacing existing Package...')
self.packagelist[i] = p
return
if self.verbose:
print('adding Package: ', p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print('removing Package: ', pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
'Package name ' + pname + ' not found in Package list')
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == 'sr':
if self.dis is not None:
return self.dis.sr
else:
return None
if item == 'tr':
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "start_datetime":
if self.dis is not None:
return self.dis.tr.start_datetime
else:
return None
return self.get_package(item)
def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None,
pop_key=True):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = 'Adding'
txt2 = 'to'
else:
txt1 = 'Removing'
txt2 = 'from'
msg = '{} {} '.format(txt1, self.output_fnames[i]) + \
'(unit={}) '.format(self.output_units[i]) + \
'{} the output list.'.format(txt2)
print(msg)
def add_output_file(self, unit, fname=None, extension='cbc',
binflag=True, package=None):
"""
Add an ascii or binary output file file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = self.name + '.' + extension
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = self.name + '.{}.'.format(unit) \
+ extension
# include package name in fname
else:
fname = self.name + '.{}.'.format(package) \
+ extension
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
print("BaseModel.add_output() warning: " +
"replacing existing filename {0}".format(fname))
idx = self.output_fnames.index(fname)
if self.verbose:
                self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
raise Exception(
' either fname or unit must be passed to remove_output()')
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
raise Exception(
' either fname or unit must be passed to get_output()')
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
' either fname or unit must be passed ' +
' to set_output_attribute()')
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == 'binflag':
self.output_binflag[idx] = value
elif key == 'fname':
self.output_fnames[idx] = value
elif key == 'unit':
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get a attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
' either fname or unit must be passed ' +
' to set_output_attribute()')
v = None
if attr is not None:
if idx is not None:
if attr == 'binflag':
v = self.output_binflag[idx]
elif attr == 'fname':
v = self.output_fnames[idx]
elif attr == 'unit':
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
print("BaseModel.add_external() warning: " +
"replacing existing filename {}".format(fname))
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
print("BaseModel.add_external() warning: " +
"replacing existing unit {}".format(unit))
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
raise Exception(
' either fname or unit must be passed to remove_external()')
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(self, filename, ptype=None,
copy_to_model_ws=True):
""" add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
"""
if ptype is None:
ptype = filename.split('.')[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj(object):
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.extra = ['']
fake_package.name = [ptype]
fake_package.extension = [filename.split('.')[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
s = ''
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s += '{:14s} {:5d} '.format(p.name[i], p.unit_number[i]) + \
'{:s} {:s}\n'.format(p.file_name[i], p.extra[i])
return s
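    # A single entry produced by the loop above looks roughly like
    #   'DIS               11 modflowtest.dis \n'
    # i.e. the package name padded to 14 characters, the unit number in a
    # 5-character field, the file name and any extra text, one line per
    # non-zero unit number.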
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError('invalid package name')
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError('invalid package name')
name = name.upper()
for pp in (self.packagelist):
if pp.name[0].upper() == name:
return pp
return None
def get_package_list(self):
"""
Get a list of all the package names.
Parameters
----------
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in (self.packagelist):
val.append(pp.name[0].upper())
return val
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = 'Error: Unsupported model version ({}).'.format(
self.version) + \
' Valid model versions are:'
for v in list(self.version_types.keys()):
err += ' {}'.format(v)
raise Exception(err)
# set namefile heading
heading = '# Name file for ' + \
'{}, '.format(self.version_types[self.version]) + \
'generated by Flopy version {}.'.format(__version__)
self.heading = heading
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
heading = '# {} package for '.format(pak.name[0]) + \
'{}, '.format(self.version_types[self.version]) + \
'generated by Flopy version {}.'.format(__version__)
pak.heading = heading
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
sys.stdout.write(
'\ncreating model workspace...\n {}\n'.format(new_pth))
os.makedirs(new_pth)
            except OSError:
line = '\n{} not valid, workspace-folder '.format(new_pth) + \
'was changed to {}\n'.format(os.getcwd())
print(line)
new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
line = '\nchanging model workspace...\n {}\n'.format(new_pth)
sys.stdout.write(line)
# reset the paths for each package
for pp in (self.packagelist):
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if hasattr(self, "external_path") and self.external_path is not None \
and not os.path.exists(os.path.join(self._model_ws,
self.external_path)):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(self.external_fnames,
self.external_output):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == '':
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + '.' + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + '.' + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super(BaseModel, self).__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr":
assert isinstance(value, utils.SpatialReference)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception("cannot set SpatialReference -"
"ModflowDis not found")
elif key == "tr":
assert isinstance(value, utils.TemporalReference)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception("cannot set TemporalReference -"
"ModflowDis not found")
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception("cannot set start_datetime -"
"ModflowDis not found")
else:
super(BaseModel, self).__setattr__(key, value)
def run_model(self, silent=False, pause=False, report=False,
normal_msg='normal termination'):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Echo run information to screen (default is True).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(self.exe_name, self.namefile, model_ws=self.model_ws,
silent=silent, pause=pause, report=report,
normal_msg=normal_msg)
def load_results(self):
print('load_results not implemented')
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f='{}.chk'.format(self.name), verbose=self.verbose,
level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
                print('\nResetting free_format_input to True to ' +
'preserve the precision of the parameter data.')
self.free_format_input = True
if self.verbose:
print('\nWriting packages:')
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(' Package: ', p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
                # or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(' Package: ', p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(' ')
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
'IMPLEMENTATION ERROR: writenamefile must be overloaded')
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
'IMPLEMENTATION ERROR: set_model_units must be overloaded')
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(f=None, verbose=False,
level=level - 1)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list()))
if not solvers:
chk._add_to_summary('Error', desc='\r No solver package',
package='model')
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary('Error',
desc='\r Multiple solver packages',
package=s)
else:
chk.passed.append('Compatible solver package')
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [k for k, v in package_units.items()
if v == p.unit_number[i]][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary('Error', package=k, value=v,
desc='unit number conflict')
else:
chk.passed.append('Unit number conflicts')
# add package check results to model level check summary
for k, r in results.items():
if r is not None and r.summary_array is not None: # currently SFR doesn't have one
chk.summary_array = np.append(chk.summary_array,
r.summary_array).view(
np.recarray)
chk.passed += ['{} package: {}'.format(r.package.name[0], psd)
for psd in r.passed]
chk.summarize()
return chk
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
# valid keyword arguments
if 'kper' in kwargs:
kper = int(kwargs.pop('kper'))
else:
kper = 0
if 'mflay' in kwargs:
mflay = kwargs.pop('mflay')
else:
mflay = None
if 'filename_base' in kwargs:
fileb = kwargs.pop('filename_base')
else:
fileb = None
if 'file_extension' in kwargs:
fext = kwargs.pop('file_extension')
fext = fext.replace('.', '')
else:
fext = 'png'
if 'key' in kwargs:
key = kwargs.pop('key')
else:
key = None
if self.verbose:
print('\nPlotting Packages')
axes = []
ifig = 0
if SelPackList is None:
for p in self.packagelist:
caxs = p.plot(initial_fig=ifig,
filename_base=fileb, file_extension=fext,
kper=kper, mflay=mflay, key=key)
# unroll nested lists of axes into a single list of axes
if isinstance(caxs, list):
for c in caxs:
axes.append(c)
else:
axes.append(caxs)
# update next active figure number
ifig = len(axes) + 1
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(' Plotting Package: ', p.name[0])
caxs = p.plot(initial_fig=ifig,
filename_base=fileb, file_extension=fext,
kper=kper, mflay=mflay, key=key)
# unroll nested lists of axes into a single list of axes
if isinstance(caxs, list):
for c in caxs:
axes.append(c)
else:
axes.append(caxs)
# update next active figure number
ifig = len(axes) + 1
break
if self.verbose:
print(' ')
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', SelPackList)
"""
import warnings
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(exe_name, namefile, model_ws='./',
silent=False, pause=False, report=False,
normal_msg='normal termination',
              use_async=False, cargs=None):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Echo run information to screen (default is True).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
    use_async : boolean
        asynchronously read model stdout and report with timestamps. Good for
        models that take a long time to run; not needed for models that run
        very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to lower case for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg.lower()]
elif isinstance(normal_msg, list):
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == 'Windows':
if not exe_name.lower().endswith('.exe'):
exe = which(exe_name + '.exe')
if exe is None:
s = 'The program {} does not exist or is not executable.'.format(
exe_name)
raise Exception(s)
else:
if not silent:
s = 'FloPy is using the following executable to run the model: {}'.format(
exe)
print(s)
if not os.path.isfile(os.path.join(model_ws, namefile)):
        s = 'The namefile for this model does not exist: {}'.format(namefile)
raise Exception(s)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b''):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name, namefile]
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = sp.Popen(argv,
stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)
    if not async_:
while True:
line = proc.stdout.readline()
c = line.decode('utf-8')
if c != '':
for msg in normal_msg:
if msg in c.lower():
success = True
break
c = c.rstrip('\r\n')
if not silent:
print('{}'.format(c))
                if report:
buff.append(c)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == '':
break
line = line.decode().lower().strip()
if line != '':
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = "(elapsed:{0})-->{1}".format(tsecs, line)
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    buff.extend([l.decode().lower() for l in proc.stdout.readlines()])
    proc.stdout.close()
    for line in buff:
        for msg in normal_msg:
            if msg in line:
                print("success")
                success = True
                break
        if success:
            break
if pause:
input('Press Enter to continue...')
return success, buff
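# Illustrative sketch (added, not part of flopy): a minimal way run_model()
# might be called.  The executable name 'mf2005', the namefile 'model.nam'
# and the workspace './model' are hypothetical placeholders.
def _example_run_model():
    success, buff = run_model('mf2005', 'model.nam', model_ws='./model',
                              silent=True, report=True)
    if not success:
        # echo the captured output when the run did not terminate normally
        for line in buff:
            print(line)
    return success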
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
# this script should be executed in parent dir of scripts
def client(i,results,loopTimes):
print("client %d start" %i)
IMAGE_PROCESS_HOME=os.environ['TESTCASE4_HOME'] + "/image-process"
command = "%s/scripts/run-single.sh -R -t " %(IMAGE_PROCESS_HOME) + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes):
for j in range(warmupTimes):
IMAGE_PROCESS_HOME=os.environ['TESTCASE4_HOME'] + "/image-process"
r = os.popen("%s/scripts/action_invoke.sh" %IMAGE_PROCESS_HOME)
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "java8action"
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','']
i = 0
count = 0
        # scan the line for two 13-digit epoch-millisecond timestamps
        while count < 2 and i < len(line):
            if line[i].isdigit():
                parsedTimes[count] = line[i:i+13]
                i += 13
                count += 1
            else:
                i += 1
parsedResults.append(parsedTimes)
return parsedResults
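# Worked example (added comment, assumed log format): a line such as
#     "invokeTime: 1583902216010 endTime: 1583902216242"
# is parsed into ['1583902216010', '1583902216242'], i.e. the two 13-digit
# epoch-millisecond timestamps printed by run-single.sh.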
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
print("%s / %d requests finished in %.2f seconds" %(requestNum, (loop * client), (duration/1000)))
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
if requestNum > 0:
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
resultfile.write("%s / %d requests finished in %.2f seconds\n" %(requestNum, (loop * client), (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
if requestNum > 0:
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
if __name__ == '__main__':
    main()
|
Network_TF.py
|
"""
Licensed under the Unlicense License;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://unlicense.org
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import threading
import tensorflow as tf
import numpy as np
class Network:
def __init__(self, synaptic_weights_0=np.array([]), synaptic_weights_1=np.array([]),
images=np.array([]), labels=np.array([]),
train_iterations=20, train_output_labels=4, debug=True):
self.synaptic_weights_0 = synaptic_weights_0
self.synaptic_weights_1 = synaptic_weights_1
self.images = images
self.labels = labels
self.train_iterations = train_iterations
self.debug = debug
self.train_output_labels = train_output_labels
self.model = None
def save_weights(self, file):
if self.debug:
print('Saving model...')
self.model.save(os.path.splitext(file)[0] + '.h5')
if self.debug:
print('Done. File', file, 'saved.')
def load_weights(self, file):
if self.debug:
print('Loading model...')
self.model = tf.keras.models.load_model(os.path.splitext(file)[0] + '.h5')
if self.debug:
print('Done. Model from file', file, 'loaded.')
def start_training(self):
self.model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(600,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(self.train_output_labels)
])
self.model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# TRAINING
thread = threading.Thread(target=self.training)
thread.start()
def predict(self):
predicted_classes = []
predictions = self.model.predict(self.images)
for prediction in predictions:
predicted_classes.append(np.argmax(prediction))
return predicted_classes
def training(self):
# noinspection PyBroadException
try:
self.labels = np.reshape(self.labels, len(self.labels[0]))
self.model.fit(self.images, self.labels, epochs=self.train_iterations)
except:
print(sys.exc_info())
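# Illustrative sketch (added, not part of the original module): one way the
# Network class above might be driven.  The shapes follow the defaults used
# in start_training() (600-value flattened samples, 4 output labels); the
# random data is a placeholder.
def _example_usage():
    images = np.random.rand(32, 600).astype(np.float32)
    labels = np.array([np.random.randint(0, 4, 32)])
    net = Network(images=images, labels=labels,
                  train_iterations=5, train_output_labels=4)
    net.start_training()  # training runs in a background thread
    # wait for the training thread to finish before calling predict() or
    # save_weights(), e.g. by joining the thread or sleeping long enough
    return net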
|
_support.py
|
"""Support functions for pyxmpp2 test suite."""
import os
import sys
import logging
import unittest
TEST_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(TEST_DIR, "data")
RESOURCES = ['network', 'lo-network', 'gsasl']
if "TEST_USE" in os.environ:
RESOURCES = os.environ["TEST_USE"].split()
if "TEST_STACKDUMP_FILE" in os.environ:
import traceback
import threading
import time
def stack_dumper():
stackdump_file = open(os.environ.get("TEST_STACKDUMP_FILE"), "w")
while True:
time.sleep(5)
stackdump_file.write(time.ctime() + "\n")
frames = sys._current_frames()
for frame in frames.values():
traceback.print_stack(frame, file = stackdump_file)
stackdump_file.write("\n")
stackdump_file.flush()
thr = threading.Thread(target = stack_dumper)
thr.daemon = True
thr.start()
# pylint: disable=W0602,C0103
logging_ready = False
def setup_logging():
"""Set up logging for the tests.
Log level used depends on number of '-v' in sys.argv
"""
# pylint: disable=W0603
global logging_ready
if logging_ready:
return
if sys.argv.count("-v") > 2:
logging.basicConfig(level=logging.DEBUG)
elif sys.argv.count("-v") == 2:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
logging_ready = True
def filter_tests(suite):
"""Make a new TestSuite from `suite`, removing test classes
with names starting with '_'."""
result = unittest.TestSuite()
for test in suite:
if isinstance(test, unittest.TestSuite):
result.addTest(filter_tests(test))
elif not test.__class__.__name__.startswith("_"):
result.addTest(test)
return result
def load_tests(loader, tests, pattern):
"""Use default test list, just remove the classes which names start with
'_'."""
# pylint: disable=W0613
suite = filter_tests(tests)
return suite
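# Usage note (added, illustrative): the environment variables read above can
# be set when invoking the test runner, e.g.
#     TEST_USE="network gsasl" TEST_STACKDUMP_FILE=stacks.log \
#         python -m unittest discover -v -v
# Two '-v' flags raise the log level to INFO, three or more to DEBUG.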
|
send_recv.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import threading
import socket
import struct
class socket_manager():
def __init__(self,logger=None,host='0.0.0.0',port=30000,group='224.0.0.119'):
self._be_multicast = False
self._multicast_group = group
self._multicast_port = port
address = (host,port)
self._cli_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self._srv_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self._srv_s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self._srv_s.bind(address)
if group is not None:
self._be_multicast = True
self._srv_s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
self._srv_s.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(group) + socket.inet_aton('0.0.0.0'))
self._cli_s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
self._cli_s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 0)
#self._cli_s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton("192.168.1.7"))
self._cli_s.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(group) + socket.inet_aton('0.0.0.0'))
#self._srv_s.setblocking(0)
self._srv_callback = None
self._thead = None
self._logger = logger
self._stop = False
self._addr_atol = lambda x:int(repr(x).replace('.','').replace(':','').replace("'",''))
def __socket_recv_run(self,s):
while self._stop is False:
data, addr = self._srv_s.recvfrom(2048)
if not data:
self._logger.warning( "client has exist" )
continue
self._logger.debug( "received: data %d bytes from %s\n", len(data), addr)
if self._srv_callback is not None:
self._srv_callback(self._addr_atol(addr[0]),data)
if self._be_multicast is True:
self._srv_s.setsockopt(socket.IPPROTO_IP,
socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(self._multicast_group) + socket.inet_aton('0.0.0.0'))
self._srv_s.close()
self._srv_s = None
def start_run(self,callback):
self._srv_callback = callback
self._thead = threading.Thread(target=self.__socket_recv_run,args = (1,), name = 'thread-socket')
self._thead.setDaemon(True)
self._thead.start()
def stop_run(self):
self._stop = True
def do_send(self,msg,host='224.0.0.119',port=30000):
address = (host,port)
if self._be_multicast is True:
address = (self._multicast_group,self._multicast_port)
#self._logger.debug( "send multicast data %d bytes to %s\n", len(msg), address[0])
return self._cli_s.sendto(msg, address)
    def get_inet_aton(self,addr_str):
        # struct.unpack gives the same value on Python 2 (str) and Python 3 (bytes)
        return struct.unpack('!I', socket.inet_aton(addr_str))[0]
def get_inet_ntoa(self,addr_uint):
return '%d.%d.%d.%d'%((addr_uint>>24)&0xff, (addr_uint>>16)&0xff, (addr_uint>>8)&0xff, addr_uint&0xff)
def get_own_addr_hash_list(self):
myname = socket.getfqdn(socket.gethostname())
ipList = socket.gethostbyname_ex(myname)
print (myname,ipList)
#print (self._srv_s.getsockname())
addr_int_list = []
for addr in ipList[2]:
addr_int_list.append(self.get_inet_aton(addr))
print (addr_int_list)
return addr_int_list
def __finalize__(self):
if self._srv_s is not None:
self._srv_s.close()
self._srv_s = None
if self._cli_s is not None:
self._cli_s.close()
self._cli_s = None
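# Illustrative sketch (added, not part of the original module): a minimal way
# the socket_manager above might be wired up.  The logger name and payload
# are placeholders; group/port fall back to the class defaults.
def _example_usage():
    import logging
    logging.basicConfig(level=logging.DEBUG)
    mgr = socket_manager(logger=logging.getLogger('send_recv'))
    def on_packet(addr_as_int, data):
        print(addr_as_int, data)
    mgr.start_run(on_packet)           # receive in a daemon thread
    mgr.do_send(b'hello multicast')    # send to the multicast group
    mgr.stop_run()                     # ask the receive loop to stop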
|
gym_gazeboros_ac.py
|
#!/usr/bin/env python
from datetime import datetime
import copy
import traceback
import os, subprocess, time, signal
#from cv_bridge import CvBridge
import gym
import math
import random
# u
import numpy as np
import cv2 as cv
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the .action file and messages used by the move base action
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import TransformStamped
from rosgraph_msgs.msg import Clock
from costmap_converter.msg import ObstacleArrayMsg
from costmap_converter.msg import ObstacleMsg
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Twist
from gazebo_msgs.srv import SetModelState
from gym.utils import seeding
import threading
import _thread
from squaternion import Quaternion
from simple_pid import PID
import pickle
import logging
logger = logging.getLogger(__name__)
# Environment Parameters
class EnvConfig:
# Boolean to make robots spawn at constant locations
USE_TESTING = False
# Set to move obstacles out of the way in case they exist but you don't want them in the way
USE_OBSTACLES = False
# Pattern to init obstacles
# 0: Places obstacles between robot and person
# 1: Places obstacles randomly within circle
OBSTACLE_MODE = 1
# Radius(meters) away from person robot for random placement(mode 1) of objects
OBSTACLE_RADIUS_AWAY = 3
# Obstacle size
OBSTACLE_SIZE = 0.5
# Allows/Denies Robot TEB Local Planner to avoid obstacles
SEND_TEB_OBSTACLES = True
# Gets person robot to use move base
PERSON_USE_MB = True
# Episode Length
EPISODE_LEN = 15
# Returns Human State only in get_observations if True
RETURN_HINN_STATE = True
# Size to reduce laser scan to
SCAN_REDUCTION_SIZE = 20
# If True, calls init_simulator() on set_agent() call
INIT_SIM_ON_AGENT = False
# If True, moves jackal bot out of the way and puts obstacles around person
TRAIN_HINN = False
class History():
def __init__(self, window_size, update_rate, save_rate=10):
self.idx = 0
self.update_rate = update_rate
self.save_rate = save_rate
self.lock = threading.Lock()
self.memory_size = int(math.ceil(save_rate/update_rate*window_size)+1)
self.data = [None for x in range(self.memory_size)]
self.prev_add_time = rospy.Time.now().to_sec() - 1
self.window_size = window_size
self.avg_frame_rate = None
self.time_data_= []
def add_element(self, element):
"""
element: the data that we put inside the history data array
"""
if abs(rospy.Time.now().to_sec() - self.prev_add_time) < 1./self.save_rate:
return
with self.lock:
self.idx = (self.idx + 1) % self.window_size
self.prev_add_time = rospy.Time.now().to_sec()
if self.data[self.idx] is None:
for idx in range(self.memory_size):
self.data[idx] = element
self.data[self.idx] = element
if not len(self.time_data_) > 50:
self.time_data_.append(self.prev_add_time)
if len(self.time_data_) > 3:
prev_t = self.time_data_[0]
time_intervals = []
for t in self.time_data_[1:]:
time_intervals.append(t - prev_t)
prev_t = t
self.avg_frame_rate = 1.0 / np.average(time_intervals)
def get_elemets(self):
return_data = []
while self.avg_frame_rate is None:
time.sleep(0.1)
skip_frames = -int(math.ceil(self.avg_frame_rate / self.update_rate))
with self.lock:
index = self.idx #(self.idx - 1)% self.window_size
if self.window_size * abs(skip_frames) >= self.memory_size:
rospy.logerr("error in get element memory not enough update rate{} avg_frame_rate{} mem_size {} skipf: {}".format(self.update_rate, self.avg_frame_rate, self.memory_size, skip_frames))
for i in range (self.window_size):
return_data.append(self.data[index])
index = (index + skip_frames) % self.window_size
return return_data
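    # Added note: the loop above is intended to return window_size samples
    # spaced roughly 1/update_rate seconds apart, stepping backwards through
    # the ring buffer by about avg_frame_rate/update_rate entries per sample.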
def get_latest(self):
with self.lock:
return self.data[self.idx]
class Robot():
def __init__(self, name, max_angular_speed=1, max_linear_speed=1, relative=None, agent_num=None, use_goal=False, use_movebase=False, use_jackal=False, window_size=10, is_testing=False):
self.name = name
self.use_jackal = use_jackal
self.init_node = False
self.alive = True
self.prev_call_gazeboros_ = None
if relative is None:
relative = self
self.relative = relative
self.is_testing = is_testing
if self.is_testing:
self.all_pose_ = []
self.last_time_added = rospy.Time.now().to_sec()
self.log_history = []
self.agent_num = agent_num
self.init_node = True
self.deleted = False
self.update_rate_states = 2.0
self.window_size_history = window_size
self.current_vel_ = Twist()
self.goal = {"pos": None, "orientation": None}
self.use_goal = use_goal
self.use_movebase = use_movebase
self.max_angular_vel = max_angular_speed
self.max_linear_vel = max_linear_speed
self.max_rel_pos_range = 5.0 # meter
self.width_laserelement_image = 100
self.height_laser_image = 50
self.state_ = {'position': (None, None),
'orientation': None}
if self.use_jackal:
self.cmd_vel_pub = rospy.Publisher('/{}/jackal_velocity_controller/cmd_vel'.format(name), Twist, queue_size=1)
else:
self.cmd_vel_pub = rospy.Publisher('/{}/cmd_vel'.format(name), Twist, queue_size=1)
if ("tb3" in self.name and self.use_movebase) or ("person" in self.name and EnvConfig.PERSON_USE_MB):
# Create an action client called "move_base" with action definition file "MoveBaseAction"
self.action_client_ = actionlib.SimpleActionClient('/move_base_{}'.format(self.name),MoveBaseAction)
# Waits until the action server has started up and started listening for goals.
self.action_client_.wait_for_server(rospy.rostime.Duration(0.4))
else:
self.action_client_ = None
if "person" == self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.is_collided = False
self.is_pause = False
self.reset = False
self.scan_image = None
def calculate_ahead(self, distance):
x = self.state_['position'][0] + math.cos(self.state_["orientation"]) * distance
y = self.state_['position'][1] + math.sin(self.state_["orientation"]) * distance
return (x,y)
def movebase_cancel_goals(self):
self.action_client_.cancel_all_goals()
self.stop_robot()
def movebase_client_goal(self, goal_pos, goal_orientation):
# Creates a new goal with the MoveBaseGoal constructor
move_base_goal = MoveBaseGoal()
move_base_goal.target_pose.header.frame_id = "tb3_{}/odom".format(self.agent_num)
move_base_goal.target_pose.header.stamp = rospy.Time.now()
move_base_goal.target_pose.pose.position.x = goal_pos[0]
move_base_goal.target_pose.pose.position.y = goal_pos[1]
quaternion_rotation = Quaternion.from_euler(0, goal_orientation, 0)
move_base_goal.target_pose.pose.orientation.x = quaternion_rotation[3]
move_base_goal.target_pose.pose.orientation.y = quaternion_rotation[1]
move_base_goal.target_pose.pose.orientation.z = quaternion_rotation[2]
move_base_goal.target_pose.pose.orientation.w = quaternion_rotation[0]
# Sends the move_base_goal to the action server.
self.action_client_.send_goal(move_base_goal)
# Waits for the server to finish performing the action.
#wait = self.action_client_.wait_for_result(rospy.rostime.Duration(0.4))
# If the result doesn't arrive, assume the Server is not available
# if not wait:
# rospy.logerr("Action server not available!")
# else:
# # Result of executing the action
# return self.action_client_.get_result()
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['position']
def get_orientation(self):
counter_problem = 0
while self.state_['orientation'] is None:
if self.reset:
return None
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['orientation']
def is_current_state_ready(self):
return (self.state_['position'][0] is not None)
def is_observation_ready(self):
return (self.pos_history.avg_frame_rate is not None and\
self.orientation_history.avg_frame_rate is not None and\
self.velocity_history.avg_frame_rate is not None)
def update(self, init_pose):
self.alive = True
self.goal = {"pos": None, "orientation": None}
if "person" == self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history.add_element((0,0))
self.pos_history.add_element((init_pose["pos"][0],init_pose["pos"][1]))
self.orientation_history.add_element(init_pose["orientation"])
self.log_history = []
if self.is_testing:
self.all_pose_ = []
#self.prev_call_gazeboros_ = None
#self.is_collided = False
self.is_pause = False
self.reset = False
def add_log(self, log):
self.log_history.append(log)
def remove(self):
self.reset = True
def set_state(self, state):
self.state_["position"] = state["position"]
self.state_["orientation"] = state["orientation"]
self.state_["velocity"] = state["velocity"]
self.orientation_history.add_element(state["orientation"])
self.pos_history.add_element(state["position"])
self.velocity_history.add_element(state["velocity"])
if self.is_testing and abs (rospy.Time.now().to_sec()- self.last_time_added) > 0.01:
self.all_pose_.append(self.state_.copy())
self.last_time_added = rospy.Time.now().to_sec()
def get_state(self):
return self.state_
def get_velocity(self):
return self.velocity_history.get_latest()
def pause(self):
self.is_pause = True
self.stop_robot()
def resume(self):
self.is_pause = False
def take_action(self, action, target_orientation=None):
if self.is_pause:
return
if self.use_goal:
if "person" in self.name:
pose = self.get_pos()
pos_global = [pose[0]+action[0], pose[1]+action[1]]
else:
pos = GazeborosEnv.denormalize(action[0:2], self.max_rel_pos_range)
pos_global = GazeborosEnv.get_global_position(pos, self.relative)
if target_orientation:
self.goal["orientation"] = target_orientation
else:
self.goal["orientation"] = self.get_orientation()
self.goal["pos"] = pos_global
if self.use_movebase:
self.movebase_client_goal(pos_global, self.goal["orientation"])
else:
linear_vel = max(min(action[0]*self.max_linear_vel, self.max_linear_vel), -self.max_linear_vel)
angular_vel = max(min(action[1]*self.max_angular_vel, self.max_angular_vel), -self.max_angular_vel)
cmd_vel = Twist()
cmd_vel.linear.x = linear_vel #float(self.current_vel_.linear.x -(self.current_vel_.linear.x - linear_vel)*0.9)
cmd_vel.angular.z = angular_vel #-float(self.current_vel_.angular.z - (self.current_vel_.angular.z - angular_vel)*0.9)
self.current_vel_ = cmd_vel
self.cmd_vel_pub.publish(cmd_vel)
def stop_robot(self):
self.cmd_vel_pub.publish(Twist())
def angle_distance_to_point(self, pos):
current_pos = self.get_pos()
if current_pos[0] is None:
return None, None
angle = math.atan2(pos[1] - current_pos[1], pos[0] - current_pos[0])
distance = math.hypot(pos[0] - current_pos[0], pos[1] - current_pos[1])
angle = (angle - self.state_["orientation"] + math.pi) % (math.pi * 2) - math.pi
return angle, distance
def publish_cmd_vel(self, linear, angular):
cmd_vel = Twist()
angular_vel = min(max(angular, -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(linear, 0), self.max_linear_vel)
cmd_vel.linear.x = float(linear_vel)
cmd_vel.angular.z = float(angular_vel)
self.cmd_vel_pub.publish(cmd_vel)
def use_selected_person_mod(self, person_mode):
while person_mode<=6:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
angular_vel = 0
linear_vel = 0
if person_mode == 0:
linear_vel = self.max_linear_vel
if person_mode == 1:
#linear_vel = self.max_linear_vel * random.random()
linear_vel = self.max_linear_vel * 0.35
elif person_mode == 2:
linear_vel = self.max_linear_vel/2
angular_vel = self.max_angular_vel/6
elif person_mode == 3:
linear_vel = self.max_linear_vel/2
angular_vel = -self.max_angular_vel/6
elif person_mode == 4:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = -self.max_angular_vel/6
elif person_mode == 5:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = self.max_angular_vel/6
elif person_mode == 6:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = angular_vel - (angular_vel - (random.random()-0.5)*2)/2.
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.002)
def go_to_goal(self):
while True:
if self.reset:
return
while self.goal["pos"] is None:
time.sleep(0.1)
continue
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
time_prev = rospy.Time.now().to_sec()
while not distance < 0.1 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 1.5)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
self.stop_robot()
def go_to_pos(self, pos, stop_after_getting=False):
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
print (self.get_pos())
return
time_prev = rospy.Time.now().to_sec()
while not distance < 0.2 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 2)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
if stop_after_getting:
self.stop_robot()
def get_goal(self):
counter_problem = 0
while self.goal["pos"] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for goal to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
# if not self.use_movebase:
# pos = GazeborosEnv.get_global_position(self.goal["pos"], self)
# goal = {"pos":pos, "orientation":None}
# else:
# goal = self.goal
return self.goal
def get_laser_image(self):
return np.expand_dims(self.scan_image, axis=2)
class GazeborosEnv(gym.Env):
def __init__(self, is_evaluation=False):
self.is_evaluation_ = is_evaluation
# self.bridge = CvBridge()
# self.image_pub = rospy.Publisher("image_observation", Image)
# self.image_pub_gt = rospy.Publisher("image_observation_gt", Image)
self.is_reseting = True
self.use_path = True
self.use_jackal = True
self.lock = _thread.allocate_lock()
self.path_follower_test_settings = {0:(0,0, "straight",False), 1:(2,0, "right", False), 2:(3,0, "left", False),\
3:(1,4, "straight_Behind", False), 4:(2,3, "right_behind", False), 5:(3,3, "left_behind", False), 6:(7,2, "traj_1", True, True),\
7:(7, 12, "traj_2", True, True), 8:(7, 43, "traj_3", True),\
9:(2,1, "right_left", False), 10:(2,2, "right_right", False),\
11:(3,1, "left_left", False), 12:(3,2, "left_right", False)\
}
#self.path_follower_test_settings = {0:(7, 43, "traj_3", True)#(7,2, "traj_1", True, True), 1:(7, 12, "traj_2", True, True)}
self.is_testing = EnvConfig.USE_TESTING
self.small_window_size = False
self.use_predifined_mode_person = True
self.use_goal = True
self.use_orientation_in_observation = True
self.collision_distance = 0.3
self.best_distance = 1.5
self.robot_mode = 0
self.window_size = 10
self.use_movebase = True
self.use_reachability = False
self.use_obstacles = EnvConfig.USE_OBSTACLES
self.obstacle_mode = EnvConfig.OBSTACLE_MODE
self.obstacle_names = []
self.person_scan = [1000.0 for i in range(EnvConfig.SCAN_REDUCTION_SIZE)]
self.person_use_move_base = EnvConfig.PERSON_USE_MB
self.person_mode = 0
self.position_thread = None
self.path_follower_current_setting_idx = 0
self.use_supervise_action = False
self.mode_person = 0
self.use_noise = True
self.is_use_test_setting = False
self.use_reverse = True
if self.small_window_size:
self.window_size = 5
if self.is_testing:
self.use_noise = False
self.use_reverse = False
self.is_use_test_setting = True
self.fallen = False
self.is_max_distance = False
self.use_random_around_person_ = False
self.max_mod_person_ = 7
self.wait_observation_ = 0
# being use for observation visualization
self.center_pos_ = (0, 0)
self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_BGR2RGB).reshape(255,3).tolist()
self.color_index = 0
self.first_call_observation = True
self.test_simulation_ = False
observation_dimensions = 46
if self.use_orientation_in_observation:
observation_dimensions += 1
if self.small_window_size:
observation_dimensions -= 20
if EnvConfig.RETURN_HINN_STATE:
observation_dimensions = 23
observation_dimensions += EnvConfig.SCAN_REDUCTION_SIZE
self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(observation_dimensions,))
self.current_obsevation_image_ = np.zeros([2000,2000,3])
self.current_obsevation_image_.fill(255)
self.prev_action = (0, 0)
self.action_space = gym.spaces.Box(low=np.array([-1.0, -1.0]), high=np.array([1.0, 1.0]), dtype=np.float32)
self.min_distance = 1
self.max_distance = 2.5
if self.test_simulation_ or self.is_evaluation_:
self.max_numb_steps = 80
elif self.is_use_test_setting:
self.max_numb_steps = 100
else:
self.max_numb_steps = 80
self.reward_range = [-1, 1]
self.reachabilit_value = None
if self.use_reachability:
with open('data/reachability.pkl', 'rb') as f:
self.reachabilit_value = pickle.load(f)
def get_person_pos(self):
theta = self.person.get_orientation()
xy = self.person.get_pos()
return [xy[0], xy[1], theta]
def get_system_velocities(self):
robot_state = self.robot.get_state()
person_state = self.person.get_state()
robot_lin_velocity = robot_state["velocity"][0]
robot_angular_velocity = robot_state["velocity"][1]
robot_orientation = robot_state["orientation"]
person_lin_velocity = person_state["velocity"][0]
person_angular_velocity = person_state["velocity"][1]
x_distance_between = person_state["position"][0] - robot_state["position"][0]
y_distance_between = person_state["position"][1] - robot_state["position"][1]
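        # Interpretation (added comment): the expressions below approximate the
        # rates of change of the person-to-robot displacement and of the
        # relative heading, combining each agent's linear velocity along its
        # own heading with the person's angular velocity acting on the
        # displacement between them.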
dx_dt = -person_lin_velocity + robot_lin_velocity * math.cos(robot_orientation) + person_angular_velocity * y_distance_between
dy_dt = robot_lin_velocity * math.sin(robot_orientation) - person_angular_velocity * x_distance_between
da_dt = robot_angular_velocity - person_angular_velocity
return (dx_dt, dy_dt, da_dt)
def get_test_path_number(self):
rospy.loginfo("current path idx: {}".format(self.path_follower_current_setting_idx))
return self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
def use_test_setting(self):
self.is_use_test_setting = True
def set_person_mode(self, setting):
self.person_mode = setting
def set_use_obstacles(self, setting):
self.use_obstacles = setting
def set_agent(self, agent_num):
try:
self.node = rospy.init_node('gym_gazeboros_{}'.format(agent_num))
except Exception as e:
rospy.logerr("probably already init in another node {}".format(e))
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
date_time = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
self.agent_num = agent_num
self.obstacle_pub_ = rospy.Publisher('/move_base_node_tb3_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.person_obstacle_pub_ = rospy.Publisher('/move_base_node_person_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.create_robots()
self.path = {}
self.paths = []
self.log_file = None
try:
with open('data/person_trajectories_rl.pkl', 'rb') as f:
paths = pickle.load(f)
for path in paths:
angle_person = path['start_person']['orientation']
for angle in [x for x in range(0, 360, 10)]:
for angle_robot_person in [x for x in range(0, 360, 90)]:
path_angle = path.copy()
angle_from_person = np.deg2rad(angle) + angle_person
angle_person_robot = np.deg2rad(angle_robot_person) + angle_person
path_angle['start_robot']['pos'] = (path_angle['start_person']['pos'][0] + math.cos(angle_from_person)*2, path_angle['start_person']['pos'][1] + math.sin(angle_from_person)*2)
path_angle['start_robot']['orientation'] = angle_person_robot
path_angle['name'] = path['name'] + " " + str(angle) +" " + str(angle_robot_person)
self.paths.append(path_angle)
self.path_idx = -1
self.path = self.paths[self.path_idx]
except Exception as e:
print("error happend in writing {}".format(e))
self.agent_num = agent_num
self.state_cb_prev_time = None
self.model_states_sub = rospy.Subscriber("/gazebo/model_states", ModelStates, self.model_states_cb)
self.scan_sub = rospy.Subscriber("/person_{}/scan".format(self.agent_num), LaserScan, self.scan_cb)
if EnvConfig.INIT_SIM_ON_AGENT:
with self.lock:
self.init_simulator()
def scan_cb(self, msg):
reduced_size = EnvConfig.SCAN_REDUCTION_SIZE
large_n = 1000.0
div = int(len(msg.ranges)/reduced_size)
reduced_scan = []
count = 0
a_size = 0
avg = 0
# Reduce from 720 to reduced size
for r in msg.ranges:
if r > 0 and r < 20:
avg += r
a_size += 1
count += 1
if count == div:
if a_size != 0:
avg /= a_size
else:
avg = large_n
reduced_scan.append(avg)
count = 0
a_size = 0
avg = 0
self.person_scan = reduced_scan
pass
def create_obstacle_msg(self, name, pose):
obstacle_msg = ObstacleMsg()
obstacle_msg.id = 1
point = Point32()
point.x = pose.position.x
point.y = pose.position.y
point.z = pose.position.z
obstacle_msg.polygon.points.append(point)
# TODO probably needs some tweaking but works for regular cyn/box
# - I think the robot could be ok to get closer to the obstacles?
# TODO polygon for box instead of using a circle
obstacle_msg.radius = EnvConfig.OBSTACLE_SIZE/2
obstacle_msg.orientation.x = pose.orientation.x
obstacle_msg.orientation.y = pose.orientation.y
obstacle_msg.orientation.z = pose.orientation.z
obstacle_msg.orientation.w = pose.orientation.w
obstacle_msg.velocities.twist.linear.x = 0
obstacle_msg.velocities.twist.angular.z = 0
return obstacle_msg
def model_states_cb(self, states_msg):
# Grab Obstacle Names for Agent
if not self.obstacle_names:
for name in states_msg.name:
if "obstacle" in name:
for char in name:
if char.isdigit():
if int(char) == self.agent_num:
self.obstacle_names.append(name)
obstacle_msg_array = ObstacleArrayMsg()
obstacle_msg_array.header.stamp = rospy.Time.now()
obstacle_msg_array.header.frame_id = "tb3_{}/odom".format(self.agent_num)
person_obs_msg_array = ObstacleArrayMsg()
person_obs_msg_array.header.stamp = rospy.Time.now()
person_obs_msg_array.header.frame_id = "person_{}/odom".format(self.agent_num)
for model_idx in range(len(states_msg.name)):
found = False
for robot in [self.robot, self.person]:
if states_msg.name[model_idx] == robot.name:
found = True
break
elif "obstacle" in states_msg.name[model_idx] and EnvConfig.SEND_TEB_OBSTACLES:
obstacle_msg_array.obstacles.append(
self.create_obstacle_msg(
states_msg.name[model_idx], states_msg.pose[model_idx]
)
)
person_obs_msg_array.obstacles.append(
self.create_obstacle_msg(
states_msg.name[model_idx], states_msg.pose[model_idx]
)
)
if not found:
continue
pos = states_msg.pose[model_idx]
euler = Quaternion(w=pos.orientation.w, x=pos.orientation.x, y=pos.orientation.y, z=pos.orientation.z).to_euler()
if EnvConfig.PERSON_USE_MB:
orientation = euler[2]
else:
# Preserve how Payam had it setup...
orientation = euler[0]
fall_angle = np.deg2rad(90)
if abs(abs(euler[1]) - fall_angle)< 0.1 or abs(abs(euler[2]) - fall_angle)<0.1:
self.fallen = True
# get velocity
twist = states_msg.twist[model_idx]
linear_vel = twist.linear.x
angular_vel = twist.angular.z
pos_x = pos.position.x
pos_y = pos.position.y
state = {}
state["velocity"] = (linear_vel, angular_vel)
state["position"] = (pos_x, pos_y)
state["orientation"] = orientation
robot.set_state(state)
if self.use_movebase:
obstacle_msg = ObstacleMsg()
obstacle_msg.id = 0
for x in range (5):
for y in range (5):
point = Point32()
point.x = pos.position.x + (x-2)*0.1
point.y = pos.position.y + (y-2)*0.1
point.z = pos.position.z
obstacle_msg.polygon.points.append(point)
obstacle_msg.orientation.x = pos.orientation.x
obstacle_msg.orientation.y = pos.orientation.y
obstacle_msg.orientation.z = pos.orientation.z
obstacle_msg.orientation.w = pos.orientation.w
obstacle_msg.velocities.twist.linear.x = twist.linear.x
                obstacle_msg.velocities.twist.angular.z = twist.angular.z
if robot.name == self.person.name:
obstacle_msg.header = obstacle_msg_array.header
obstacle_msg_array.obstacles.append(obstacle_msg)
else:
obstacle_msg.header = person_obs_msg_array.header
person_obs_msg_array.obstacles.append(obstacle_msg)
self.obstacle_pub_.publish(obstacle_msg_array)
self.person_obstacle_pub_.publish(person_obs_msg_array)
def create_robots(self):
self.person = Robot('person_{}'.format(self.agent_num),
max_angular_speed=1, max_linear_speed=.6, agent_num=self.agent_num, window_size=self.window_size, is_testing=self.is_testing, use_goal=self.use_goal, use_movebase=self.use_movebase)
relative = self.person
self.robot = Robot('tb3_{}'.format(self.agent_num),
max_angular_speed=1.8, max_linear_speed=0.8, relative=relative, agent_num=self.agent_num, use_goal=self.use_goal, use_movebase=self.use_movebase ,use_jackal=self.use_jackal, window_size=self.window_size, is_testing=self.is_testing)
def find_random_point_in_circle(self, radious, min_distance, around_point):
max_r = 2
r = (radious - min_distance) * math.sqrt(random.random()) + min_distance
theta = random.random() * 2 * math.pi
x = around_point[0] + r * math.cos(theta)
y = around_point[1] + r * math.sin(theta)
return (x, y)
def set_mode_person_based_on_episode_number(self, episode_number):
if episode_number < 500:
self.mode_person = 0
elif episode_number < 510:
self.mode_person = 1
elif episode_number < 700:
self.mode_person = 3
elif episode_number < 900:
self.mode_person = 5
elif episode_number < 1000:
self.mode_person = 6
else:
#self.mode_person = 7
if random.random()>0.5:
self.mode_person = 7
else:
self.mode_person = random.randint(0, 6)
def get_init_pos_robot_person(self):
if self.is_evaluation_:
idx_start = 0
elif self.is_use_test_setting:
idx_start = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
else:
idx_start = random.randint(0, len(self.path["points"]) - 20)
self.current_path_idx = idx_start
if not self.is_use_test_setting and self.use_reverse and random.random() > 0.5:
self.path["points"].reverse()
if self.person_use_move_base:
x = random.uniform(-3,3)
y = random.uniform(-3,3)
init_pos_person = {"pos": (x, y), "orientation":random.uniform(0, math.pi)}
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot, "orientation":random.uniform(0, math.pi)}
return init_pos_robot, init_pos_person
if self.is_evaluation_:
init_pos_person = self.path["start_person"]
init_pos_robot = self.path["start_robot"]
elif self.is_use_test_setting and not self.path_follower_test_settings[self.path_follower_current_setting_idx][3]:
init_pos_person = {"pos": (0, 0), "orientation":0}
mode = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
if mode == 0:
orinetation_person_rob = 0
elif mode == 1:
orinetation_person_rob = -math.pi /4.
elif mode == 2:
orinetation_person_rob = math.pi /4.
elif mode == 3:
orinetation_person_rob = -math.pi
else:
orinetation_person_rob = math.pi/8*7
pos_robot = (1.5*math.cos(orinetation_person_rob), 1.5*math.sin(orinetation_person_rob))
init_pos_robot = {"pos": pos_robot, "orientation":0}
elif not self.use_path:
init_pos_person = {"pos": (0, 0), "orientation": random.random()*2*math.pi - math.pi}
ahead_person = (init_pos_person['pos'][0] + math.cos(init_pos_person["orientation"]) * 2, init_pos_person['pos'][1] + math.sin(init_pos_person["orientation"]) * 2)
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot,\
"orientation": init_pos_person["orientation"]}#random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
elif self.use_random_around_person_:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
init_pos_robot = {"pos": self.find_random_point_in_circle(1.5, 1, self.path["points"][idx_start]),\
"orientation": random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
else:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
if self.is_use_test_setting and len(self.path_follower_test_settings[self.path_follower_current_setting_idx])>4 and self.path_follower_test_settings[self.path_follower_current_setting_idx][4] :
orinetation_person_rob = math.pi/2.2
pos_robot = (self.path["points"][idx_start][0] + 2*math.cos(orinetation_person_rob+init_pos_person["orientation"]), self.path["points"][idx_start][1] + 2*math.sin(orinetation_person_rob+init_pos_person["orientation"]))
init_pos_robot = {"pos": pos_robot, "orientation":self.calculate_angle_using_path(idx_start+5)}
else:
idx_robot = idx_start + 1
while (math.hypot(self.path["points"][idx_robot][1] - self.path["points"][idx_start][1],
self.path["points"][idx_robot][0] - self.path["points"][idx_start][0]) < 1.6):
idx_robot += 1
init_pos_robot = {"pos": self.path["points"][idx_robot],\
"orientation": self.calculate_angle_using_path(idx_robot)}
if not self.is_testing:
init_pos_robot["pos"] = (init_pos_robot["pos"][0]+ random.random()-0.5, \
init_pos_robot["pos"][1]+ random.random()-0.5)
init_pos_robot["orientation"] = GazeborosEnv.wrap_pi_to_pi(init_pos_robot["orientation"] + random.random()-0.5)
return init_pos_robot, init_pos_person
def set_marker_pose(self, xy):
pose = {"pos": (xy[0], xy[1]), "orientation": 0}
self.set_pos("marker",pose)
def set_pos(self, name, pose):
set_model_msg = ModelState()
set_model_msg.model_name = name
self.prev_action = (0,0)
quaternion_rotation = Quaternion.from_euler(0, pose["orientation"], 0)
set_model_msg.pose.orientation.x = quaternion_rotation[3]
set_model_msg.pose.orientation.y = quaternion_rotation[1]
set_model_msg.pose.orientation.z = quaternion_rotation[2]
set_model_msg.pose.orientation.w = quaternion_rotation[0]
if self.use_jackal and "tb3" in name:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.1635
elif "marker" in name:
set_model_msg.pose.position.z = 1.6
else:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.099
set_model_msg.pose.position.x = pose["pos"][0]
set_model_msg.pose.position.y = pose["pos"][1]
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp(set_model_msg)
def get_obstacle_init_pos(self, init_pos_robot, init_pos_person):
num_obstacles = len(self.obstacle_names)
out_of_the_way_pose = {"pos": (15,15), "orientation":0}
if not self.use_obstacles:
return [out_of_the_way_pose for i in range(num_obstacles)]
elif self.obstacle_mode == 0:
# Place obstacles between robot and person
# Calculate distance between robots, subtract some buffer room
x_range = abs(init_pos_robot["pos"][0] - init_pos_person["pos"][0])
y_range = abs(init_pos_robot["pos"][1] - init_pos_person["pos"][1])
if x_range != 0:
x_range -= EnvConfig.OBSTACLE_SIZE
if y_range != 0:
y_range -= EnvConfig.OBSTACLE_SIZE
# Check if we have enough space for obstacles between robots
x_buffer_space = y_buffer_space = -1
num_obs_to_place = num_obstacles + 1
while x_buffer_space < 0 and y_buffer_space < 0:
num_obs_to_place -= 1
x_buffer_space = x_range - (EnvConfig.OBSTACLE_SIZE * num_obs_to_place)
y_buffer_space = y_range - ((EnvConfig.OBSTACLE_SIZE * num_obs_to_place))
if num_obs_to_place == 0:
# No space for obstacles so put them away
rospy.logwarn("Not enough space for obstacles between robots.")
return [out_of_the_way_pose for i in range(num_obstacles)]
x_spacing = x_range / num_obs_to_place
y_spacing = y_range / num_obs_to_place
if init_pos_robot["pos"][0] < init_pos_person["pos"][0]:
base_x = init_pos_robot["pos"][0]
else:
base_x = init_pos_person["pos"][0]
if init_pos_robot["pos"][1] < init_pos_person["pos"][1]:
base_y = init_pos_robot["pos"][1]
else:
base_y = init_pos_person["pos"][1]
# Place obstacles on line between robot and person
obstacle_positions = []
for i in range(num_obs_to_place):
base_x += x_spacing
base_y += y_spacing
obstacle_positions.append({"pos": (base_x, base_y), "orientation":0})
obstacle_positions.extend([out_of_the_way_pose for i in range(num_obstacles - num_obs_to_place)])
return obstacle_positions
elif self.obstacle_mode == 1:
# Put obstacles randomly within area
obstacle_radius = EnvConfig.OBSTACLE_RADIUS_AWAY
min_distance_away_from_robot = EnvConfig.OBSTACLE_SIZE
obstacle_positions = []
for obs_idx in range(num_obstacles):
random_point = self.find_random_point_in_circle(obstacle_radius, min_distance_away_from_robot, init_pos_robot["pos"])
random_point = self.prevent_overlap(init_pos_person["pos"], random_point, min_distance_away_from_robot)
obstacle_positions.append({"pos": random_point, "orientation":0})
return obstacle_positions
# Prevent point b from overlapping point a
def prevent_overlap(self, point_a, point_b, min_distance):
x = point_b[0]
y = point_b[1]
if abs(point_b[0] - point_a[0]) < min_distance:
x += min_distance
if abs(point_b[1] - point_a[1]) < min_distance:
y += min_distance
return (x, y)
def set_obstacle_pos(self, init_pos_robot, init_pos_person):
obs_positions = self.get_obstacle_init_pos(init_pos_robot, init_pos_person)
for obs_idx in range(len(self.obstacle_names)):
self.set_pos(self.obstacle_names[obs_idx], obs_positions[obs_idx])
def init_simulator(self):
self.number_of_steps = 0
rospy.loginfo("init simulation called")
self.is_pause = True
init_pos_robot, init_pos_person = self.get_init_pos_robot_person()
self.center_pos_ = init_pos_person["pos"]
self.color_index = 0
self.fallen = False
self.is_max_distance = False
self.first_call_observation = True
rospy.loginfo("Waiting for path follower to die")
if self.position_thread:
self.position_thread.join()
rospy.loginfo("Done waiting")
self.current_obsevation_image_.fill(255)
if self.use_movebase:
self.robot.movebase_cancel_goals()
if self.person_use_move_base:
self.person.movebase_cancel_goals()
rospy.sleep(0.5)
self.person.stop_robot()
self.robot.stop_robot()
# if self.use_movebase:
# self.prev_action = (0,0, 0)
# else:
self.prev_action = (0,0)
if EnvConfig.TRAIN_HINN:
init_pos_robot = {"pos": (30,30), "orientation": 0}
# Set positions of robots and obstacles
self.set_pos(self.robot.name, init_pos_robot)
self.set_pos(self.person.name, init_pos_person)
if EnvConfig.TRAIN_HINN:
self.set_obstacle_pos(init_pos_person, init_pos_robot)
else:
self.set_obstacle_pos(init_pos_robot, init_pos_person)
self.robot.update(init_pos_robot)
self.person.update(init_pos_person)
self.path_finished = False
self.position_thread = threading.Thread(target=self.path_follower, args=(self.current_path_idx, self.robot, init_pos_person,))
self.position_thread.daemon = True
self.is_reseting = False
self.position_thread.start()
self.wait_observation_ = 0
self.is_reseting = False
self.robot.reset = False
self.person.reset = False
# self.resume_simulator()
rospy.loginfo("init simulation finished")
self.is_pause = False
def pause(self):
self.is_pause = True
self.person.pause()
self.robot.pause()
def resume_simulator(self):
rospy.loginfo("resume simulator")
self.is_pause = False
self.person.resume()
self.robot.resume()
rospy.loginfo("resumed simulator")
def calculate_angle_using_path(self, idx):
return math.atan2(self.path["points"][idx+1][1] - self.path["points"][idx][1], self.path["points"][idx+1][0] - self.path["points"][idx][0])
@staticmethod
def denormalize(value, max_val):
if type(value) == tuple or type(value) == list:
norm_val = [float(x) * max_val for x in value]
else:
norm_val = value * float(max_val)
return norm_val
@staticmethod
def normalize(value, max_val, zero_to_one=None):
if type(value) == tuple or type(value) == list:
norm_val = [x/float(max_val) for x in value]
else:
norm_val = value/float(max_val)
if zero_to_one is not None:
if type(value) == tuple or type(value) == list:
norm_val = [(x + 1)/2 for x in norm_val]
else:
norm_val = (norm_val + 1)/2.
return norm_val
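    # Worked example (added comment): normalize(2.5, 5.0) == 0.5 and
    # denormalize(0.5, 5.0) == 2.5, so the two helpers are inverses for a
    # given max_val; with zero_to_one set, the 0.5 would be remapped to 0.75.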
@staticmethod
def get_global_position(pos_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn ("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
        # transform the relative position into the center's coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
return global_pos
@staticmethod
def get_global_position_orientation(pos_goal, orientation_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn ("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
relative_pos2 = np.asarray((relative_pos[0] +math.cos(orientation_goal) , relative_pos[1] + math.sin(orientation_goal)))
        # transform the relative position into the center's coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
global_pos2 = np.asarray(relative_pos2 + center_pos)
new_orientation = np.arctan2(global_pos2[1]-global_pos[1], global_pos2[0]-global_pos[0])
return global_pos, new_orientation
@staticmethod
def wrap_pi_to_pi(angle):
while angle > math.pi:
angle -= 2*math.pi
while angle < - math.pi:
angle += 2*math.pi
return angle
@staticmethod
def get_relative_heading_position(relative, center):
while not relative.is_current_state_ready() or not center.is_current_state_ready():
if relative.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.1)
rospy.loginfo ("waiting for observation to be ready heading pos")
relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
        # transform the relative position into the center's coordinate frame
relative_pos = np.asarray(relative.state_['position'] - center_pos)
relative_pos2 = np.asarray((relative_pos[0] +math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
angle_relative = np.arctan2(relative_pos2[1]-relative_pos[1], relative_pos2[0]-relative_pos[0])
return -angle_relative, relative_pos
@staticmethod
def get_relative_position(pos, center):
while not center.is_current_state_ready():
if center.reset:
rospy.loginfo("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.loginfo("waiting for observation to be ready relative pos")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
relative_pos = np.asarray(pos)
        # transform from the relative frame into the center's coordinate frame
relative_pos = np.asarray(relative_pos - center_pos)
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
return relative_pos
def set_robot_to_auto(self):
self.robot_mode = 1
def respect_orientation(self, xy, orientation):
x = math.cos(orientation) * xy[0] - math.sin(orientation) * xy[1]
y = math.sin(orientation) * xy[0] + math.cos(orientation) * xy[1]
return [x,y]
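    # Example (illustrative): respect_orientation([1, 0], math.pi / 2) rotates the
    # forward command into the given frame and returns approximately [0, 1].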
def path_follower(self, idx_start, robot, person_init_pose):
"""
Move base person mode:
1: Attempt left curved path
2: Attempt right curved path
3: Random
4: Zig zag
0/default: Attempt straight path
"""
if self.person_use_move_base:
print(f"person_mode = {self.person_mode}")
if self.person_mode == 1:
interval = 3
for i in range(math.floor(EnvConfig.EPISODE_LEN/interval)):
action = [0.5,i*0.5]
action = self.respect_orientation(action, person_init_pose["orientation"])
target_orientation = person_init_pose["orientation"] + i * math.pi/EnvConfig.EPISODE_LEN/2
self.person.take_action(action, target_orientation=target_orientation)
rospy.sleep(interval)
elif self.person_mode == 2:
interval = 3
for i in range(math.floor(EnvConfig.EPISODE_LEN/interval)):
action = [0.5,-i * 0.5]
action = self.respect_orientation(action, person_init_pose["orientation"])
target_orientation = person_init_pose["orientation"] - i * math.pi/EnvConfig.EPISODE_LEN/2
self.person.take_action(action, target_orientation=target_orientation)
rospy.sleep(interval)
elif self.person_mode == 3:
interval = 5
for i in range(math.floor(EnvConfig.EPISODE_LEN/interval)):
x = random.uniform(-1,1)
y = random.uniform(-1,1)
self.person.take_action([x, y])
rospy.sleep(interval)
elif self.person_mode == 4:
y = 0.5
interval = 3
for i in range(math.floor(EnvConfig.EPISODE_LEN/interval)):
action = [1,y]
action = self.respect_orientation(action, person_init_pose["orientation"])
target_orientation = person_init_pose["orientation"] - y * math.pi/4
self.person.take_action(action, target_orientation=target_orientation)
y *= -1
rospy.sleep(interval)
else:
# 0/Default: Straight path
for i in range(EnvConfig.EPISODE_LEN):
action = [2,0]
action = self.respect_orientation(action, person_init_pose["orientation"])
self.person.take_action(action)
rospy.sleep(1)
else:
counter = 0
while self.is_pause:
if self.is_reseting:
rospy.loginfo( "path follower return as reseting ")
return
time.sleep(0.001)
if counter > 10000:
rospy.loginfo( "path follower waiting for pause to be false")
counter = 0
counter += 1
rospy.loginfo( "path follower waiting for lock pause:{} reset:{}".format(self.is_pause, self.is_reseting))
if self.lock.acquire(timeout=10):
rospy.sleep(1.5)
rospy.loginfo("path follower got the lock")
if self.is_use_test_setting:
mode_person = self.path_follower_test_settings[self.path_follower_current_setting_idx][0]
elif self.test_simulation_:
mode_person = -1
elif self.is_evaluation_:
mode_person = 2
elif self.use_predifined_mode_person:
mode_person = self.mode_person
else:
mode_person = random.randint(0, 7)
#if self.agent_num == 2:
# mode_person = random.randint(1, self.max_mod_person_)
#else:
# mode_person = 0
# if self.agent_num == 0:
# mode_person = 5
# elif self.agent_num == 1:
# mode_person = 2
# elif self.agent_num == 2:
# mode_person = 3
# elif self.agent_num == 3:
# mode_person = 7
# else:
# mode_person = random.randint(1, self.max_mod_person_)
# if mode_person == 0:
# person_thread = threading.Thread(target=self.person.go_to_goal, args=())
# person_thread.start()
if self.use_goal and not self.use_movebase:
self.robot_thread = threading.Thread(target=self.robot.go_to_goal, args=())
self.robot_thread.start()
for idx in range (idx_start, len(self.path["points"])-3):
point = (self.path["points"][idx][0], self.path["points"][idx][1])
self.current_path_idx = idx
counter_pause = 0
while self.is_pause:
counter_pause+=1
rospy.loginfo("pause in path follower")
if self.is_reseting or counter_pause > 200:
# if mode_person == 0:
# person_thread.join()
self.lock.release()
return
time.sleep(0.001)
try:
if mode_person <= 6:
self.person.use_selected_person_mod(mode_person)
else:
self.person.go_to_pos(point, stop_after_getting=True)
time.sleep(0.001)
# person_thread.start()
# if self.robot_mode == 1:
# noisy_point = (self.path["points"][idx+3][0] +min(max(np.random.normal(),-0.5),0.5), self.path["points"][idx+3][1] +min(max(np.random.normal(),-0.5),0.5))
# robot_thread = threading.Thread(target=self.robot.go_to_pos, args=(noisy_point,True,))
# robot_thread.start()
# robot_thread.join()
# person_thread.join()
except Exception as e:
rospy.logerr("path follower {}, {}".format(self.is_reseting, e))
traceback.print_exc()
break
if self.is_reseting:
self.person.stop_robot()
break
self.lock.release()
rospy.loginfo("path follower release the lock")
self.path_finished = True
else:
rospy.loginfo("problem in getting the log in path follower")
# robot.stop_robot()
def get_laser_scan(self):
return self.robot.get_laser_image()
def get_laser_scan_all(self):
images = self.robot.scan_image_history.get_elemets()
counter = 0
while len(images)!=self.robot.scan_image_history.window_size and counter<250:
images = self.robot.scan_image_history.get_elemets()
time.sleep(0.005)
counter +=1
if counter > 100:
rospy.loginfo("wait for laser scan to get filled sec: {}/25".format(counter / 10))
if counter>=250:
            raise RuntimeError(
                'timed out waiting for the laser scan history to fill in get_laser_scan_all')
images = np.asarray(images)
return (images.reshape((images.shape[1], images.shape[2], images.shape[0])))
def get_observation(self):
# got_laser = False
# while not got_laser:
# try:
# laser_all = self.get_laser_scan_all()
# got_laser = True
# except Exception as e:
# rospy.logerr("laser_error reseting")
# # self.reset(reset_gazebo = True)
while self.robot.pos_history.avg_frame_rate is None or self.person.pos_history.avg_frame_rate is None or self.robot.velocity_history.avg_frame_rate is None or self.person.velocity_history.avg_frame_rate is None:
if self.is_reseting:
return None
time.sleep(0.001)
pos_his_robot = np.asarray(self.robot.pos_history.get_elemets())
heading_robot = self.robot.state_["orientation"]
pos_his_person = np.asarray(self.person.pos_history.get_elemets())
heading_person = self.person.state_["orientation"]
robot_vel = np.asarray(self.robot.get_velocity())
person_vel = np.asarray(self.person.get_velocity())
poses = np.concatenate((pos_his_robot, pos_his_person))
if self.use_noise:
poses += np.random.normal(loc=0, scale=0.1, size=poses.shape)
heading_robot += np.random.normal(loc=0, scale=0.2)
heading_person += np.random.normal(loc=0, scale=0.2)
robot_vel += np.random.normal(loc=0, scale=0.1, size=robot_vel.shape)
person_vel += np.random.normal(loc=0, scale=0.1, size=person_vel.shape)
heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)/(math.pi)
pos_rel = []
for pos in (poses):
relative = GazeborosEnv.get_relative_position(pos, self.robot.relative)
pos_rel.append(relative)
pos_history = np.asarray(np.asarray(pos_rel)).flatten()/6.0
velocities = np.concatenate((person_vel, robot_vel))/self.robot.max_angular_vel
if self.use_orientation_in_observation:
velocities_heading = np.append(velocities, heading_relative)
else:
velocities_heading = velocities
final_ob = np.append(np.append(pos_history, velocities_heading), self.prev_action)
if EnvConfig.RETURN_HINN_STATE:
final_ob = np.append(np.append(person_vel, heading_person), pos_his_person)
# if EnvConfig.USE_OBSTACLES:
final_ob = np.append(final_ob, self.person_scan)
return final_ob
def __del__(self):
return
def visualize_observation(self):
observation_image = np.zeros([2000,2000,3])
observation_image_gt = np.zeros([2000,2000,3])
observation_image = observation_image.astype(np.uint8)
observation_image_gt = observation_image_gt.astype(np.uint8)
observation_image.fill(255)
observation_image_gt.fill(255)
while self.robot.pos_history.avg_frame_rate is None or self.person.pos_history.avg_frame_rate is None or self.robot.velocity_history.avg_frame_rate is None or self.person.velocity_history.avg_frame_rate is None:
if self.is_reseting:
return None
time.sleep(0.001)
pos_his_robot = self.robot.pos_history.get_elemets()
heading_robot = self.robot.state_["orientation"]
pos_his_person = self.person.pos_history.get_elemets()
heading_person = self.person.state_["orientation"]
heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)/(math.pi)
center_pos = pos_his_robot[-1]
for pos in pos_his_robot:
relative = GazeborosEnv.get_relative_position(pos, self.robot)
pos_rel = GazeborosEnv.to_image_coordinate(relative, (0, 0))
pos_gt = GazeborosEnv.to_image_coordinate(pos, center_pos)
observation_image = self.add_circle_observation_to_image(relative, (255, 0, 0), 10, center_pos=(0,0), image=observation_image)
observation_image_gt = self.add_circle_observation_to_image(pos, (255, 0, 0), 10, center_pos=center_pos, image=observation_image_gt)
for pos in pos_his_person:
relative = GazeborosEnv.get_relative_position(pos, self.robot)
pos_rel = GazeborosEnv.to_image_coordinate(relative, (0, 0))
pos_gt = GazeborosEnv.to_image_coordinate(pos, center_pos)
observation_image = self.add_circle_observation_to_image(relative, (0, 255, 0), 10, image = observation_image, center_pos=(0,0))
observation_image_gt = self.add_circle_observation_to_image(pos, (0, 255, 0), 10, image=observation_image_gt, center_pos=center_pos)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(observation_image, encoding="bgr8"))
self.image_pub_gt.publish(self.bridge.cv2_to_imgmsg(observation_image_gt, encoding="bgr8"))
@staticmethod
def to_image_coordinate(pos, center_pos):
return (int((pos[0] - center_pos[0])*50+1000), int((pos[1] - center_pos[1])*50+1000))
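    # The mapping above puts `center_pos` at pixel (1000, 1000) of the 2000x2000
    # canvas with a scale of 50 pixels per meter, so a point 2 m away from the
    # center lands 100 pixels away from the image center.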
def add_line_observation_to_image(self, pos, pos2):
color = self.colors_visualization[self.color_index]
pos_image = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
pos_image2 = GazeborosEnv.to_image_coordinate(pos2, self.center_pos_)
if pos_image[0] >2000 or pos_image[0] < 0 or pos_image[1] >2000 or pos_image[1] < 0:
rospy.logerr("problem with observation: {}".format(pos_image))
return
self.new_obsevation_image_ = cv.line(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 1)
def add_triangle_observation_to_image(self, pos, orientation):
color = self.colors_visualization[self.color_index]
pos_image = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
pos_triangle1 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)
pos_triangle2 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation+math.pi/2)*0.1, pos[1]+math.sin(orientation+math.pi/2)*0.1), self.center_pos_)
pos_triangle3 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation-math.pi/2)*0.1, pos[1]+math.sin(orientation-math.pi/2)*0.1), self.center_pos_)
poses = [pos_triangle1, pos_triangle2, pos_triangle3]
print(poses)
for pos in poses:
if pos[0] >2000 or pos[0] < 0 or pos[1] >2000 or pos[1] < 0:
rospy.logerr("problem with observation: {}".format(pos))
return
self.new_obsevation_image_ = cv.drawContours(self.new_obsevation_image_, [np.asarray(poses)], 0, color, -1)
def add_arrow_observation_to_image(self, pos, orientation):
color = self.colors_visualization[self.color_index]
pos_image = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
pos_image2 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)
if pos_image[0] >2000 or pos_image[0] < 0 or pos_image[1] >2000 or pos_image[1] < 0:
rospy.logerr("problem with observation: {}".format(pos_image))
return
self.new_obsevation_image_ = cv.arrowedLine(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 2, tipLength=0.5)
def add_circle_observation_to_image(self, pos, color, radious, center_pos=None, image=None):
if image is None:
image = self.new_obsevation_image_
if center_pos is None:
center_pos = self.center_pos_
pos_image = GazeborosEnv.to_image_coordinate(pos, center_pos)
if pos_image[0] >2000 or pos_image[0] < 0 or pos_image[1] >2000 or pos_image[1] < 0:
rospy.logerr("problem with observation: {}".format(pos_image))
return
return (cv.circle(image , (pos_image[0], pos_image[1]), radious, color, 2))
def get_supervised_action(self):
while not self.person.is_current_state_ready() and not self.is_reseting:
time.sleep(0.1)
if self.is_reseting:
return np.asarray([0,0])
self.use_supervise_action = True
pos = self.person.calculate_ahead(1.5)
pos_person = self.person.get_pos()
pos_relative = GazeborosEnv.get_relative_position(pos, self.robot.relative)
pos_person_relative = GazeborosEnv.get_relative_position(pos_person, self.robot.relative)
pos_norm = GazeborosEnv.normalize(pos_relative, self.robot.max_rel_pos_range)
orientation = GazeborosEnv.normalize(math.atan2(pos_relative[1] - pos_person_relative[1], pos_relative[0] - pos_person_relative[0]), math.pi)
return np.asarray((pos_norm[0], pos_norm[1], orientation))
def update_observation_image(self):
self.new_obsevation_image_ = np.copy(self.current_obsevation_image_)
robot_pos = self.robot.get_pos()
robot_orientation = self.robot.get_orientation()
person_pos = self.person.get_pos()
person_orientation = self.person.get_orientation()
if self.use_goal:
current_goal = self.robot.get_goal()
if person_orientation is None or robot_orientation is None:
rospy.logerr("person or robot orientation is None")
return
if self.first_call_observation:
# self.new_obsevation_image_ = self.add_circle_observation_to_image(robot_pos, [152,100,100], 10)
# self.new_obsevation_image_ = self.add_circle_observation_to_image(person_pos,[0,100,100], 10)
self.first_call_observation = False
if self.is_collided():
self.new_obsevation_image_ = self.add_circle_observation_to_image(robot_pos, [152,200,200], 10)
self.new_obsevation_image_ = self.add_circle_observation_to_image(person_pos,[200,100,100], 10)
self.add_arrow_observation_to_image(robot_pos, robot_orientation)
self.add_triangle_observation_to_image(person_pos, person_orientation)
if self.use_goal:
if self.use_movebase:
goal_orientation = current_goal["orientation"]
else:
goal_orientation = robot_orientation
self.add_circle_observation_to_image(current_goal["pos"], self.colors_visualization[self.color_index], 5)
#self.add_line_observation_to_image(robot_pos, current_goal["pos"])
else:
self.add_line_observation_to_image(robot_pos, person_pos)
alpha = 0.50
self.current_obsevation_image_ = cv.addWeighted(self.new_obsevation_image_, alpha, self.current_obsevation_image_, 1 - alpha, 0)
def get_current_observation_image(self):
image = self.current_obsevation_image_
image = image/255.
if self.is_testing:
self.save_current_path()
return image
def take_action(self, action):
self.prev_action = action[:2]
self.robot.take_action(action)
if not self.person_use_move_base:
if self.wait_observation_ <= 0:
self.update_observation_image()
self.wait_observation_ = 7
self.color_index += 2
if self.color_index >= len(self.colors_visualization):
self.color_index = len(self.colors_visualization) - 1
self.wait_observation_ -= 1
return
def is_skip_run(self):
if self.fallen:
return True
else:
return False
def is_successful(self):
if self.is_collided() or self.is_max_distance or self.fallen:
return False
else:
return True
def step(self, action):
self.number_of_steps += 1
self.take_action(action)
# instead of one reward get all the reward during wait
# rospy.sleep(0.4)
sleep_time = 0.10
rewards = []
if sleep_time > 0.1:
for t in range (10):
rospy.sleep(sleep_time/10.)
rewards.append(self.get_reward())
reward = np.mean(rewards)
else:
rospy.sleep(sleep_time)
reward = self.get_reward()
ob = self.get_observation()
episode_over = False
if not EnvConfig.RETURN_HINN_STATE:
rel_person = GazeborosEnv.get_relative_heading_position(self.robot, self.person)[1]
distance = math.hypot(rel_person[0], rel_person[1])
if self.path_finished:
rospy.loginfo("path finished")
episode_over = True
if self.is_collided():
self.update_observation_image()
episode_over = True
rospy.loginfo('collision happened episode over')
                reward -= 0.5  # maybe penalize less at the start of learning
elif distance > 5:
self.update_observation_image()
self.is_max_distance = True
episode_over = True
rospy.loginfo('max distance happened episode over')
elif self.number_of_steps > self.max_numb_steps:
self.update_observation_image()
episode_over = True
if self.fallen:
episode_over = True
rospy.loginfo('fallen')
reward = min(max(reward, -1), 1)
if self.agent_num == 0:
rospy.loginfo("action {} reward {}".format(action, reward))
if episode_over and not EnvConfig.RETURN_HINN_STATE:
self.person.reset = True
#reward += 1
return ob, reward, episode_over, {}
def is_collided(self):
rel_person = GazeborosEnv.get_relative_heading_position(self.robot, self.person)[1]
distance = math.hypot(rel_person[0], rel_person[1])
if distance < self.collision_distance or self.robot.is_collided:
return True
return False
def get_distance(self):
_, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
return math.hypot(pos_rel[0],pos_rel[1])
def get_angle_person_robot(self):
_, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])
return (GazeborosEnv.wrap_pi_to_pi(angle_robot_person))
def get_reward(self):
reward = 0
angle_robot_person, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])
angle_robot_person = np.rad2deg(GazeborosEnv.wrap_pi_to_pi(angle_robot_person))
distance = math.hypot(pos_rel[0], pos_rel[1])
        # Shaping terms based on collision, distance to the person, and relative bearing
if self.is_collided():
reward -= 1
if distance < 0.5:
reward = -1.3
elif abs(distance - self.best_distance) < 0.5:
reward += 0.5 * (0.5 - abs(distance - self.best_distance))
elif distance >= self.best_distance+0.5:
reward -= 0.25 * (distance - (self.best_distance+0.5))
elif distance < self.best_distance-0.5:
reward -= (self.best_distance - 0.5 - distance)/(self.best_distance - 0.5)
if abs(angle_robot_person) < 25:
reward += 0.5 * (25 - abs(angle_robot_person)) / 25
else:
reward -= 0.25 * abs(angle_robot_person) / 180
if abs(distance - self.best_distance) < 0.5 and abs(angle_robot_person) < 25:
reward += 0.25
# if not 90 > angle_robot_person > 0:
# reward -= distance/6.0
# elif self.min_distance < distance < self.max_distance:
# reward += 0.1 + (90 - angle_robot_person) * 0.9 / 90
# elif distance < self.min_distance:
# reward -= 1 - distance / self.min_distance
# else:
# reward -= distance / 7.0
reward = min(max(reward, -1), 1)
return reward
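    # Worked example (illustrative numbers, assuming best_distance = 1.7): at a
    # distance of 2.0 m and a bearing of 10 degrees, the distance term adds
    # 0.5 * (0.5 - 0.3) = 0.1, the angle term adds 0.5 * (25 - 10) / 25 = 0.3, and
    # the combined bonus adds 0.25, for a clipped reward of 0.65.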
def save_log(self):
pickle.dump({"person_history":self.person.log_history, "robot_history":self.robot.log_history}, self.log_file)
self.log_file.close()
def reset(self, reset_gazebo=False):
self.is_pause = True
self.is_reseting = True
self.robot.reset = True
self.person.reset = True
rospy.loginfo("trying to get the lock for reset")
# if reset_gazebo:
# self.reset_gazebo()
with self.lock:
rospy.loginfo("got the lock")
not_init = True
try:
if self.is_evaluation_:
if self.log_file is not None:
pickle.dump({"person_history":self.person.log_history, "robot_history":self.robot.log_history}, self.log_file)
self.log_file.close()
self.path_idx += 1
print("start path_id: {}".format(self.path_idx))
if self.path_idx < len(self.paths)-1:
self.path = self.paths[self.path_idx]
self.log_file = open(self.path["name"], "wb")
else:
print("all done")
self.person.stop_robot()
exit(0)
self.init_simulator()
not_init = False
except RuntimeError as e:
rospy.logerr("error happend reseting: {}".format(e))
if not_init:
rospy.loginfo("not init so run reset again")
return (self.reset())
else:
rospy.sleep(2)
return self.get_observation()
def save_current_path(self):
all_pos_robot = self.robot.all_pose_
all_pos_person = self.person.all_pose_
directory = "data/traj_simulations"
name = ""
if self.use_goal:
if self.use_supervise_action:
name += "base_"
else:
name += "planner_"
else:
name += "cmd_"
name += self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
if not os.path.exists(directory):
os.makedirs(directory)
with open(os.path.join(directory, name + ".pkl") , "wb") as f:
pickle.dump({"robot":all_pos_robot, "person":all_pos_person, "name":name}, f)
self.robot.all_pose_ = []
self.person.all_pose_ = []
def next_setting(self):
self.path_follower_current_setting_idx += 1
def is_finish(self):
if self.path_follower_current_setting_idx >= len(self.path_follower_test_settings)-1:
return True
return False
def render(self, mode='human', close=False):
""" Viewer only supports human mode currently. """
return
def calculate_rechability_derivite(self, x, y, v, theta):
get_idx = lambda x: int(math.floor(x))
pos_norm = GazeborosEnv.normalize((x, y), self.robot.max_rel_pos_range, True)
orientation_norm = GazeborosEnv.normalize(theta, math.pi, True)
velocity_norm = GazeborosEnv.normalize(v, self.robot.max_linear_vel, True)
x_idx = get_idx(pos_norm[0]*(self.reachabilit_value.shape[0]-1))
y_idx = get_idx(pos_norm[1]*(self.reachabilit_value.shape[1]-1))
orientation_idx = get_idx(orientation_norm * (self.reachabilit_value.shape[3] -1))
v_idx = get_idx(velocity_norm * (self.reachabilit_value.shape[2]-1))
rospy.loginfo("x: {} y: {} theta {}".format(x_idx, y_idx, orientation_idx))
v_idx = max(min(v_idx, self.reachabilit_value.shape[2]-2), 0)
orientation_idx = max(min(orientation_idx, self.reachabilit_value.shape[3]-2), 0)
x_idx = max(min(x_idx, self.reachabilit_value.shape[0]-1), 0)
y_idx = max(min(y_idx, self.reachabilit_value.shape[1]-1), 0)
derivative_v = (self.reachabilit_value[x_idx, y_idx, v_idx+1, orientation_idx] -\
self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx])/2
derivative_theta = (self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx+1] -\
self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx])/2
rospy.loginfo("x: {} y: {} theta {}".format(x_idx, y_idx, orientation_idx))
return derivative_v, derivative_theta, self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx]
def reachability_action(self):
relative = GazeborosEnv.get_relative_position(self.robot.get_pos(), self.person)
orientation = GazeborosEnv.wrap_pi_to_pi(self.robot.get_orientation() - self.person.get_orientation())
print(np.rad2deg(orientation), np.rad2deg(self.person.get_orientation()), np.rad2deg(self.robot.get_orientation()) )
velocity = self.robot.get_velocity()[0]
derivative_v, derivative_theta, v = self.calculate_rechability_derivite(relative[0], relative[1], velocity, orientation)
rospy.loginfo("d_v: {:0.5f} W: {:0.5f} v {:0.1f}".format(derivative_v, derivative_theta, v))
action = [0,0]
if v<1:
if derivative_v > 0:
action[0] = 1
else:
action[0] = -1
if derivative_theta > 0:
action[1] = 1
else:
action[1] = -1
return action
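    # The logic above acts roughly as a bang-bang controller: the linear and
    # angular commands are pushed to +/-1 according to the signs of the
    # finite-difference derivatives of the reachability value, nudging the robot
    # toward states with a higher value.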
#def read_bag():
# gazeboros_n = GazeborosEnv()
# gazeboros_n.set_agent(0)
#
# while gazeboros_n.robot.prev_call_gazeboros_ is None or rospy.Time.now().to_sec() - gazeboros_n.robot.prev_call_gazeboros_ < 5:
# rospy.sleep(0.1)
# gazeboros_n.save_log()
# print("done")
#read_bag()
def test():
gazeboros_env = GazeborosEnv()
gazeboros_env.set_agent(0)
step = 0
while (True):
step +=1
#action = gazeboros_env.get_supervised_action()
#action = gazeboros_env.reachability_action()
#gazeboros_env.step(action)
rel_person = GazeborosEnv.get_relative_heading_position(gazeboros_env.robot, gazeboros_env.person)[1]
relative_pos2 = GazeborosEnv.get_relative_position(gazeboros_env.robot.get_pos(), gazeboros_env.robot.relative)
orientation1 = np.rad2deg(np.arctan2(rel_person[1], rel_person[0]))
distance = math.hypot(relative_pos2[0], relative_pos2[1])
heading_robot = gazeboros_env.robot.state_["orientation"]
heading_person = gazeboros_env.person.state_["orientation"]
heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)
orientation_heading = np.rad2deg(heading_relative)
#print (f"ob: {gazeboros_env.get_observation()}")
print (f"reward: {gazeboros_env.get_reward()}")
print (f"pos: {rel_person} vs {relative_pos2}")
print (f"orientation_h: {orientation_heading} dist: {distance} orin: {orientation1}")
print (f"orientation_robo: {np.rad2deg(heading_robot)} orintation pers: {np.rad2deg(heading_person)}")
print ("\n\n")
#if step % 50==0:
# print("reseting")
# gazeboros_env.reset()
#gazeboros_env.visualize_observation()
rospy.sleep(1)
#test()
|
wav_to_tfrecord.py
|
import tensorflow as tf
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import os
import sys
sys.path.append('../')
from util import audio
import re
import json
from hparams import hparams
import time
import argparse
import random
import multiprocessing
with open('./datasets/normal.json', 'r') as f:
_symbols = json.load(f)
_pad = '_'
_eos = '~'
symbols = [_pad, _eos] + _symbols
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Build an int64 Feature from a scalar value.
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# Build a bytes Feature from a string/bytes value.
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _symbols_to_sequence(symbols):
out = []
for s in symbols:
try:
out.append(_symbol_to_id[s])
        except KeyError:
out.append(_symbol_to_id['_'])
return out
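# Illustrative behaviour: symbols missing from normal.json fall back to the pad
# symbol '_', so (assuming 'a' is in the table and '?' is not)
# _symbols_to_sequence(['a', '?']) returns [_symbol_to_id['a'], _symbol_to_id['_']].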
def _process_utterance(wav_path, seq, id):
'''Preprocesses a single utterance audio/text pair.
    Computes the linear- and mel-scale spectrograms for the utterance and packs them,
    together with the text sequence and speaker identity, into a tf.train.SequenceExample.
Args:
wav_path: Path to the audio file containing the speech input
seq: The text spoken in the input audio file
id : identity
Returns:
        A tf.train.SequenceExample holding the text sequence, spectrograms, raw wav samples, and metadata.
'''
# Load the audio to a numpy array:
wav = audio.load_wav(wav_path)
# Compute the linear-scale spectrogram from the wav:
spectrogram = audio.spectrogram(wav).astype(np.float32).T
# Compute a mel-scale spectrogram from the wav:
mel_spectrogram = audio.melspectrogram(wav).astype(np.float32).T
input_lengths = len(seq)
n_frames = spectrogram.shape[0]
input_features = [tf.train.Feature(int64_list=tf.train.Int64List(value=[input_])) for input_ in seq]
mel_features = [tf.train.Feature(float_list=tf.train.FloatList(value=input_)) for input_ in mel_spectrogram]
spec_features = [tf.train.Feature(float_list=tf.train.FloatList(value=input_)) for input_ in spectrogram]
wav_feature = [tf.train.Feature(float_list=tf.train.FloatList(value=[sample])) for sample in wav]
feature_list = {
'inputs': tf.train.FeatureList(feature=input_features),
'mel': tf.train.FeatureList(feature=mel_features),
'spec': tf.train.FeatureList(feature=spec_features),
'wav': tf.train.FeatureList(feature=wav_feature),
}
feature_lists = tf.train.FeatureLists(feature_list=feature_list)
n_frame_ = tf.train.Feature(int64_list=tf.train.Int64List(value=[n_frames]))
input_lengths_ = tf.train.Feature(int64_list=tf.train.Int64List(value=[input_lengths]))
identity_ = tf.train.Feature(int64_list=tf.train.Int64List(value=[int(id)]))
context = tf.train.Features(feature={
"n_frame": n_frame_,
"input_lengths": input_lengths_,
"identity":identity_
})
example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)
return example
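# A minimal decoding sketch (not part of the original pipeline) showing how the
# SequenceExample written above could be read back. The feature names mirror the
# writer above; the per-frame sizes hparams.num_mels and hparams.num_freq are
# assumptions about the hparams module and may need adjusting.
def _parse_utterance_example(serialized_example):
    context_features = {
        'n_frame': tf.FixedLenFeature([], tf.int64),
        'input_lengths': tf.FixedLenFeature([], tf.int64),
        'identity': tf.FixedLenFeature([], tf.int64),
    }
    sequence_features = {
        'inputs': tf.FixedLenSequenceFeature([], tf.int64),
        'mel': tf.FixedLenSequenceFeature([hparams.num_mels], tf.float32),
        'spec': tf.FixedLenSequenceFeature([hparams.num_freq], tf.float32),
        'wav': tf.FixedLenSequenceFeature([], tf.float32),
    }
    # Parse the scalar context features and the per-timestep feature lists.
    context, sequences = tf.parse_single_sequence_example(
        serialized_example,
        context_features=context_features,
        sequence_features=sequence_features)
    return context, sequences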
def write_worker(q_out, tfrecord_file):
pre_time = time.time()
count = 1
writer = tf.python_io.TFRecordWriter(tfrecord_file)
while True:
deq = q_out.get()
if deq is None:
break
serial = deq
writer.write(serial.SerializeToString())
if count % 100 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, 'count:', count)
pre_time = cur_time
count += 1
def audio_encoder(item, q_out):
wav_root = item[0]
content = item[1]
seq = _symbols_to_sequence(content)
seq = np.asarray(seq)
id = item[2]
example = _process_utterance(wav_root, seq, id)
q_out.put(example)
def read_worker(q_in, q_out):
while True:
item = q_in.get()
if item is None:
break
audio_encoder(item, q_out)
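# Queue protocol used above: each reader pulls (wav_path, text, id) items from its
# input queue, converts them to SequenceExamples via audio_encoder, and pushes the
# results to the shared output queue; a None item is the shutdown sentinel for both
# the readers and the writer.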
def wav_to_tfrecord_read_from_text(args, text_path, data_name, id_num):
tfrecord_dir = os.path.join(args.output, "tfrecord_tacotron_" + data_name)
os.makedirs(tfrecord_dir, exist_ok=True)
tfrecord_file = os.path.join(tfrecord_dir, 'tfrecord_tacotron_' + data_name +
'_id_num_' + str(id_num) + '.tfrecord')
q_in = [multiprocessing.Queue(1024) for i in range(args.num_workers)] # num_thread default = 32
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(q_in[i], q_out)) for i in range(args.num_workers)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, tfrecord_file))
write_process.start()
with open(text_path, 'r') as f:
ct = 0
for line in f:
line = eval(line)
q_in[ct % len(q_in)].put(line)
ct += 1
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
try:
with open('./train_data_dict.json', 'r') as f:
train_data_dict = json.load(f)
except (OSError, ValueError):
train_data_dict = {}
train_data_dict[data_name] = tfrecord_file
with open('./train_data_dict.json', 'w') as f:
json.dump(train_data_dict, f)
|
clientDrone.py
|
import subprocess
import socket
import time
import logging
import urllib.request
import threading
from utils import *
from globals import *
def start_video_stream():
logging.info('Starting video stream')
cmd = "raspivid -g 24 -n -w 1280 -h 720 -b 1000000 -fps 24 -t 0 -o udp://{}:{}"
cmd = cmd.format(server_ip, port_drone_video)
logging.info(cmd)
ps = subprocess.Popen("exec " + cmd, stdout=subprocess.PIPE, shell=True)
#output = ps.communicate()[0]
#print(output)
return ps
def listen_control_loop(socketClient: ConcurrentSocket):
sub_process = None
while True:
try:
pair_data_address = socketClient.recv()
logging.info('Received {} from {}'.format(pair_data_address[0], pair_data_address[1]))
        except socket.error as exc:
            logging.error('socket.error : {}'.format(exc))
            continue
        command = pair_data_address[0].decode('utf-8')
if command == CMD_START:
logging.info('command {} received'.format(command))
if sub_process is not None:
sub_process.kill()
sub_process = start_video_stream()
elif command == CMD_STOP:
logging.info('command {} received'.format(command))
if sub_process is not None:
sub_process.kill()
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO,
handlers=[
logging.FileHandler("drone.log"),
logging.StreamHandler()
])
logging.info('Camera is running')
socket_client = ConcurrentSocket()
thread_send_alive = threading.Thread(target = send_alive_loop, args = (socket_client, (server_ip, port_drone_control),))
thread_send_alive.start()
thread_control = threading.Thread(target = listen_control_loop, args = (socket_client,))
thread_control.start()
thread_send_alive.join()
thread_control.join()
|
main.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from tensorflow.python.ops import custom_gradient # pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops # pylint:disable=g-direct-tensorflow-import
def get_variable_by_name(var_name):
"""Given a variable name, retrieves a handle on the tensorflow Variable."""
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def _filter_fn(item):
try:
return var_name == item.op.name
except AttributeError:
# Collection items without operation are ignored.
return False
candidate_vars = list(filter(_filter_fn, global_vars))
if len(candidate_vars) >= 1:
# Filter out non-trainable variables.
candidate_vars = [v for v in candidate_vars if v.trainable]
else:
raise ValueError("Unsuccessful at finding variable {}.".format(var_name))
if len(candidate_vars) == 1:
return candidate_vars[0]
elif len(candidate_vars) > 1:
raise ValueError(
"Unsuccessful at finding trainable variable {}. "
"Number of candidates: {}. "
"Candidates: {}".format(var_name, len(candidate_vars), candidate_vars))
else:
# The variable is not trainable.
return None
custom_gradient.get_variable_by_name = get_variable_by_name
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from efficientdet import dataloader
import det_model_fn
import efficientdet.hparams_config as hparams_config
import efficientdet.utils as utils
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=2,
help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 2, 1, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
'Number of iterations per checkpoint save')
flags.DEFINE_string(
'train_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('val_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'train_and_eval'],
'Mode to run: train, eval or train_and_eval')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_train', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', False,
    'This option helps to mitigate a CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval, and memory will be cleared. '
    'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.strategy == 'tpu':
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train', 'train_and_eval'):
if FLAGS.train_file_pattern is None:
raise RuntimeError('Must specify --train_file_pattern for train.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.val_file_pattern is None:
raise RuntimeError('Must specify --val_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
# The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
# partition is performed on `features` and all partitionable tensors of
# `labels`, see the partition logic below.
# In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; following the API, both terms are used interchangeably here.
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array '
                         'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
iterations_per_loop=FLAGS.iterations_per_loop,
model_dir=FLAGS.model_dir,
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
strategy=FLAGS.strategy,
backbone_ckpt=FLAGS.backbone_ckpt,
ckpt=FLAGS.ckpt,
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
profile=FLAGS.profile,
mode=FLAGS.mode)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.strategy != 'tpu':
if FLAGS.use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = FLAGS.model_dir
model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
max_instances_per_image = config.max_instances_per_image
if FLAGS.eval_samples:
eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
FLAGS.eval_batch_size)
else:
eval_steps = None
total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
train_steps = total_examples // FLAGS.train_batch_size
logging.info(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
train_input_fn = dataloader.InputReader(
FLAGS.train_file_pattern,
is_training=True,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
eval_input_fn = dataloader.InputReader(
FLAGS.val_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
if FLAGS.strategy == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if FLAGS.strategy == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
params['batch_size'] = global_batch_size // params['num_shards']
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
# train and eval need different estimator due to different batch size.
train_est = get_estimator(FLAGS.train_batch_size)
eval_est = get_estimator(FLAGS.eval_batch_size)
# start train/eval flow.
if FLAGS.mode == 'train':
train_est.train(input_fn=train_input_fn, max_steps=train_steps)
if FLAGS.eval_after_train:
eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
elif FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout):
logging.info('Starting to evaluate.')
try:
eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
        # The checkpoint might have been deleted by the time eval finished.
        # We simply skip such cases.
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif FLAGS.mode == 'train_and_eval':
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
current_epoch = 0
def run_train_and_eval(e):
print('\n =====> Starting training, epoch: %d.' % e)
train_est.train(
input_fn=train_input_fn,
max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
print('\n =====> Starting evaluation, epoch: %d.' % e)
eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
epochs_per_cycle = 1 # higher number has less graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if FLAGS.run_epoch_in_child_process:
p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
p.start()
p.join()
if p.exitcode != 0:
return p.exitcode
else:
tf.reset_default_graph()
run_train_and_eval(e)
else:
logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
app.run(main)
|
cluster.py
|
# Copyright (c) 2021 MIT
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import time
import signal
import sys, os
import subprocess
import json
import xmlrpc.server
import xmlrpc.client
import re
import threading
from os.path import expanduser
from argparse import ArgumentParser, REMAINDER
from typing import Optional, IO, List, Any
from jobDescription import TrainingJob
import grpc
import runtime_pb2
import runtime_pb2_grpc
# import examples.vgg as vgg # TODO: this is used for debugging. Remove this later.
extra_args = [] # unparsed arguments stored here are forwarded to runtimes
HAS_EXCEPTION = False
def excepthook(args):
global HAS_EXCEPTION
print("In excepthook", args)
HAS_EXCEPTION = True
threading.excepthook = excepthook
def waitthreads(threadList):
for thread in threadList:
while thread.is_alive() and not HAS_EXCEPTION:
time.sleep(0.1)
if HAS_EXCEPTION:
sys.exit(-1)
thread.join()
def discover_gpu_numa():
from subprocess import check_output
gpus = check_output("nvidia-smi -x -q | grep \"gpu id\"", shell=True).decode("utf-8").splitlines()
try:
has_numactl = os.system("numactl ls > /dev/null 2>&1") == 0
except:
has_numactl = False
if not has_numactl:
return [-1] * len(gpus)
nodes = []
for g in gpus:
gid = g.split("\"")[1][4:].lower()
node = check_output(f"cat /sys/bus/pci/devices/{gid}/numa_node", shell=True).decode("utf-8").strip()
nodes.append(int(node))
return nodes
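# Illustrative output (machine dependent): on a two-socket host with four GPUs this
# might return [0, 0, 1, 1]; when numactl is unavailable every entry is -1 and the
# NUMA pinning in launchRuntimeAll is skipped.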
class CppRuntimeProxy:
def __init__(self, addressWithPort: str):
self.channel = grpc.insecure_channel(addressWithPort) # ex) 'localhost:50051'
self.stub = runtime_pb2_grpc.RuntimeStub(self.channel)
def scheduleTraining(self, name, jobInJson, dataDir, tensorTagsInJson, jobRankToGlobalRankInJson, jobParamsInJson):
response = self.stub.ScheduleTraining(runtime_pb2.ScheduleTrainingRequest(
name=name, job_in_json=jobInJson, data_dir=dataDir,
tensor_tags_in_json=tensorTagsInJson,
job_rank_to_global_rank_in_json=jobRankToGlobalRankInJson, job_meta_params_in_json=jobParamsInJson))
print("received: " + response.message)
def poke(self):
response = self.stub.Poke(runtime_pb2.Empty())
# print("received: " + response.message)
def shutdown(self):
response = self.stub.Shutdown(runtime_pb2.Empty())
print("received: " + response.message)
def initCommBackend(self):
# response = self.stub.(runtime_pb2.Empty())
# print("received: " + response.message)
pass
# print("initCommBackend() not implemented")
def initCommNCCL(self, message, msgType, groupId, members):
response = self.stub.InitCommNCCL(runtime_pb2.InitCommNCCLMsg(
message=message, msg_type=msgType, group_id=groupId, members=members))
print("received: " + response.message)
        return response.group_id
def initCommGRPC(self, rankToIpMap):
rankToIpMapInJson = json.dumps(rankToIpMap)
print("In initCommGRPC, rankToIpMapInJson: " + rankToIpMapInJson)
response = self.stub.InitCommGRPC(runtime_pb2.InitCommGRPCRequest(
rank_to_ip_map_in_json = rankToIpMapInJson
))
print("received: " + response.message)
def initCommGroups(self, jobName, commGroupsInJson):
print("initCommGroups not implemented")
class Location:
def __init__(self, address: str, port: int, device: int, userId: str, sshKeyPath: str, isCpp: bool):
self.address = address
self.port = port
self.device = device
self.userId = userId
self.sshKeyPath = sshKeyPath
self.serverId = None
self.proxy = None
self.isCpp = isCpp
self.is_local = address == "127.0.0.1"
self.process = None
self.numa_node = -1
def getProxy(self, maxRetry = 180):
if self.proxy != None:
# print("getProxy() returned from cached proxy value.")
return self.proxy
# Python runtime
retryGap = 1
retryCount = 0
while retryCount < maxRetry and not HAS_EXCEPTION:
try:
if self.isCpp: # CPP runtime
self.proxy = CppRuntimeProxy("%s:%d"%(self.address, self.port))
# print("cppProxy created for %s:%d"%(self.address, self.port))
else:
self.proxy = xmlrpc.client.ServerProxy("http://%s:%d/"%(self.address, self.port))
self.proxy.poke()
return self.proxy
except (ConnectionRefusedError, grpc.RpcError): # ConnectionRefusedError is for xmlrpc.
print("Cannot connect to %s:%d. Will retry in %d sec." %
(self.address, self.port, retryGap))
time.sleep(retryGap)
# retryGap += 2 # exponential back off.
retryCount += 1
assert False, "couldn't connect"
return None
def downloadFile(self, remotePath: str, localPath: str):
assert not self.is_local
print(" Downloading %s to %s at %s" % (remotePath, localPath, self.address))
kwargs = dict()
kwargs['stderr'] = subprocess.STDOUT
# sh_command = ['mkdir', '-p', localPath]
# subprocess.check_call(sh_command, **kwargs)
sh_command = ['scp', '-i', self.sshKeyPath, '%s@%s:%s' % (self.userId, self.address, remotePath), localPath]
subprocess.check_call(sh_command, **kwargs)
def uploadFile(self, localFilePath, remotePath):
assert not self.is_local
print(" Uploading %s to %s at %s" % (localFilePath, remotePath, self.address))
kwargs = dict()
# kwargs['shell'] = True
kwargs['stderr'] = subprocess.STDOUT
sh_command = ['scp', '-i', self.sshKeyPath, localFilePath, '%s@%s:%s' % (self.userId, self.address, remotePath)]
subprocess.check_call(sh_command, **kwargs)
def rsh(self, command):
kwargs = dict()
kwargs['stderr'] = subprocess.STDOUT
# sh_command = ['ssh', '-v', '-i', '~/.ssh/ulma-sjp.pem', 'ubuntu@%s' % self, '%s' % command]
if self.is_local:
sh_command = command
kwargs["shell"] = True
else:
sh_command = ['ssh', '-i', self.sshKeyPath, '-o', 'StrictHostKeyChecking=no', '%s@%s' % (self.userId, self.address), '%s' % command]
try:
subprocess.check_call(sh_command, **kwargs)
except subprocess.CalledProcessError as e:
output = e.output
exit(1)
return
def __monitor(self):
self.process.wait()
sys.exit(0)
def rshAsync(self, command, **kwargs):
print("Sending cmd: %s" % command)
if self.is_local:
sh_command = command
kwargs["shell"] = True
else:
sh_command = ['ssh', '-i', self.sshKeyPath, '-o StrictHostKeyChecking=no', '%s@%s' % (self.userId, self.address),
'%s' % command]
self.process = subprocess.Popen(sh_command, **kwargs)
t = threading.Thread(target=Location.__monitor, args=(self,), daemon=True)
t.start()
return self.process
def upSync(self, localPath, remotePath):
if self.is_local:
assert False
return
try:
subprocess.check_call(['rsync', '-e', 'ssh -i %s -o StrictHostKeyChecking=no' % self.sshKeyPath,
'-rh', "--exclude=*__pycache__", localPath, "%s@%s:%s" % (self.userId, self.address, remotePath)],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
output = e.output
exit(1)
class ClusterCoordinator(xmlrpc.server.SimpleXMLRPCServer):
""" GPU cluster coordinator. It accepts training jobs from clients and schedule them to runtimes. """
def __init__(self, addrToBind: str, portToBind: int, locations: List[Location], workDir: str, be_batch_size: int):
super(ClusterCoordinator, self).__init__((addrToBind, portToBind))
self.myAddr = addrToBind
self.myPort = portToBind
self.locations = locations
self.workDir = workDir
self.processes = [] # from subprocess calls used for launching runtime.
self.nextTagStartOffset = 1
self.be_batch_size = be_batch_size
self.commGroups = set()
self.ongoingJobs = {} # Dict of contexts of ongoing jobs. Indexed by job name.
f = open("runtimeResult.data", "w")
f.close()
def _dispatch(self, method, params):
""" Custom dispatcher for XML-RPC server. """
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC for security.
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
######################################################
## RPC handlers
######################################################
def export_poke(self):
return 'Returned from poke at %s' % self.myAddr
def export_scheduleTraining(self, jobName: str, trainingJobInJSON: str, runbe):
job = TrainingJob("test", None, None, 0, 0, "")
job.loadJSON(trainingJobInJSON)
print("received job")
gpusUsed = job.getGpusUsed()
moduleDescList = [job.dumpSingleRunnableModule(rank) for rank in range(gpusUsed)]
tensorTags = self.buildCommTensorTags(moduleDescList)
tensorTagsInJson = json.dumps(tensorTags)
for rank in range(gpusUsed):
with open(f"/tmp/rank{rank}.json", "wb") as f:
f.write(bytes(moduleDescList[rank].encode("utf-8")))
commSets = self.buildNeededCommGroups(moduleDescList)
for s in commSets:
self.initCommBackendAll("nccl", s)
jobRankToGlobalRank = list(range(gpusUsed))
jobRankToGlobalRankInJson = json.dumps(jobRankToGlobalRank)
        # TODO: should pick locations that don't have another priority job scheduled.
if len(self.locations) < gpusUsed:
return "Not enough servers available. %d gpus available while %d needed" % (len(self.locations), gpusUsed)
jobParams = {
"run_with_be": runbe,
"nr_gpus": gpusUsed,
"cifar_training": "cifar" in jobName,
"lossfn": "CrossEntropyLoss" if "gpt2" in jobName else "NLL",
"autocast": True,
}
jobParamsInJson = json.dumps(jobParams)
threadList = []
def requestScheduleTraining(proxy, jobInJson):
proxy.scheduleTraining(jobName, jobInJson, "SYNTHETIC", tensorTagsInJson, jobRankToGlobalRankInJson, jobParamsInJson)
for rank in range(gpusUsed):
location = self.locations[rank]
moduleDesc = moduleDescList[rank]
thread = threading.Thread(name='reqScheTrain%d'%rank, target=requestScheduleTraining, args=(location.getProxy(), moduleDesc))
threadList.append(thread)
thread.start()
waitthreads(threadList)
self.ongoingJobs[jobName] = {"iterTime": 0, "gpuMsec": 0, "gpusUsed": gpusUsed, "gpusFinished": 0, "globalBatchSize": job.globalBatchSize}
self.ongoingJobs[jobName].update({"beImagesPerIter": 0.0, "idleMsPerIter": 0.0})
# for rank in range(gpusUsed):
# location = self.locations[rank]
# moduleDesc = moduleDescList[rank] # job.dumpSingleRunnableModule(rank)
# print(location.getProxy().scheduleTraining(jobName, moduleDesc, "SYNTHETIC", tensorTagsInJson, jobRankToGlobalRankInJson))
return 'done'
def export_notifyTrainingFinished(self, runtimeAddress: str, name: str, beImagesPerIter: float, idleMsPerIter: float, remainingJobCount: int, fpTime: float, bpTime: float, iterTime: float):
print("Training for %s is completed at %s. (%d jobs are remaining) fp: %3.1f bp: %3.1f iterTime: %3.1f" % (name, runtimeAddress, remainingJobCount, fpTime, bpTime, iterTime))
iterTime /= 1000
self.ongoingJobs[name]["iterTime"] = max(self.ongoingJobs[name]["iterTime"], iterTime)
self.ongoingJobs[name]["gpuMsec"] += (fpTime + bpTime) / 1000
self.ongoingJobs[name]["gpusFinished"] += 1
self.ongoingJobs[name]["beImagesPerIter"] += beImagesPerIter
self.ongoingJobs[name]["idleMsPerIter"] += idleMsPerIter
if self.ongoingJobs[name]["gpusFinished"] == self.ongoingJobs[name]["gpusUsed"]:
toprints = [
"{globalBatchSize:2}", "{gpusUsed:2}", "{iterTime:4.1f}",
"{gpuMsec:4.1f}", "{beImagesPerIter:3.1f}",
"{idleMsPerIter:3.1f}"
]
print("Training for {} is completed entirely.".format(name))
cols = ["GlobalBatchSize", "GpusUsed", "IterTime", "GpuMsec", "BeImagesPerIter", "IdleMsPerIter"]
print(" " + " ".join(cols))
dataline = " " + " ".join(toprints).format(**self.ongoingJobs[name])
print(dataline)
f = open("runtimeResult.data", "a")
f.write(dataline + "\n")
f.close()
return 'done'
def export_addGpuNode(self):
print("NOT YET IMPLEMENTED.")
######################################################
## Internal helper methods
######################################################
def buildCommTensorTags(self, moduleDescList):
# TODO: need tag allocator that can recycle tags.
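        # Worked example (hypothetical tensor names): if one layer transfers
        # "xfer0" with xferSamples == 4 and another transfers "xfer1" with
        # xferSamples == 2, the loop below yields
        #   {"xfer0": 0, "xfer0_back": 4, "xfer1": 8, "xfer1_back": 10}
        # so each transfer direction reserves its own contiguous block of tags.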
tag = 0
tensorTags = {}
for moduleDesc in moduleDescList:
spec = json.loads(moduleDesc)
for ldsc in spec["layers"]:
if "xfers" in ldsc: # either sender or receiver need to assign tag.
for item in ldsc["xfers"]:
tensorTags[item["name"]] = tag
tag += item["prop"]["xferSamples"]
tensorTags[item["name"] + "_back"] = tag
tag += item["prop"]["xferSamples"]
return tensorTags
def buildNeededCommGroups(self, moduleDescList):
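        # Hypothetical example: if the first module description assigns one layer
        # to GPUs [0, 1] and another to GPUs [0, 1, 2, 3], this returns the groups
        # (0, 1) and (0, 1, 2, 3) as a list (order unspecified); layers running on
        # a single GPU need no communication group and are skipped.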
groups = set()
desc = json.loads(moduleDescList[0])
for l in desc['layers']:
activeset = tuple(sorted(l['gpuAssignment']))
if len(activeset) > 1:
groups.add(activeset)
return list(groups)
######################################################
## Runtime cluster management
######################################################
def installPackages(self):
""" Install required software at each runtime server """
pipPackages = ["torch", "jsonpickle", "torchvision"]
# "pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html"]
for location in self.locations:
for pipPackage in pipPackages:
location.rsh("pip install %s" % pipPackage)
def launchRuntimeAll(self, c10dBackend: str, profile: bool, cppRuntime: bool, manualLaunch: bool):
""" Launch runtime at all remote locations. Also registers the sighandler
that cleanly shuts down all remote runtime servers.
"""
# Using the absolute path for compatibility with C++ runtime.
logdir = args.logdir
if not logdir:
logdir = os.getcwd() + "/logs/"
upSyncedAddrs = set()
for i, location in enumerate(self.locations):
if (location.address not in upSyncedAddrs):
# TODO: skip if location's addr is same as the current node.
# location.upSync(".", self.workDir)
upSyncedAddrs.add(location.address)
# pass master ip and port.
stdoutFp = open(f"{logdir}/runtime%d.out"%i, "a", buffering=1)
stderrFp = open(f"{logdir}/runtime%d.err"%i, "a", buffering=1)
nsysPrefix = ""
if "--cuda_profile" in extra_args:# and location.device == 0: # Only run 1 nsys per host.
nsysPrefix = "nsys profile -f true -o net%d -c cudaProfilerApi -t cuda,nvtx --export sqlite " % i # -s none
if manualLaunch:
print("Skipping ssh launching runtime. Must have launched them manually.")
elif cppRuntime:
if location.numa_node >= 0:
numacmd = "numactl -N{nn} -m{nn}".format(nn=location.numa_node)
else:
numacmd = ""
self.processes.append(location.rshAsync(
f"CUDA_VISIBLE_DEVICES={location.device} {numacmd} {nsysPrefix} {self.workDir}/csrc/build/runtime" + \
" --myAddr %s:%d --device 0 --c10dBackend %s --rank %d --worldSize %d --logdir %s --be_batch_size %d %s" % \
(location.address, location.port, c10dBackend, i, len(self.locations), logdir, self.be_batch_size, " ".join(extra_args)) #+ \
, stdout=stdoutFp, stderr=stderrFp))
else:
self.processes.append(location.rshAsync(
# nsysPrefix + "python3 " + self.workDir + "runtime.py" + \
"source ~/.profile; " + nsysPrefix + "python3 " + self.workDir + "runtime.py" + \
" --coordinatorAddr %s:%d --myAddr %s:%d --device %d --c10dBackend %s --rank %d --worldSize %d --be_batch_size %d %s" % \
(self.myAddr, self.myPort, location.address, location.port, location.device, c10dBackend, i, len(self.locations), self.be_batch_size, "--profile" if profile else "") #+ \
, stdout=stdoutFp, stderr=stderrFp))
sig_names = {2: "SIGINT", 15: "SIGTERM"}
last_return_code = None
def sigkill_handler(signum, frame):
print("signum:%d Trying to shutdown all runtime." % signum)
self.shutdownRuntimeAll()
# self.waitForRuntimeAll()
for process in self.processes:
print(f"Killing subprocess {process.pid}")
try:
process.terminate()
# process.kill()
except Exception:
pass
if last_return_code is not None:
raise subprocess.CalledProcessError(returncode=last_return_code, cmd=cmd)
if signum in sig_names:
print(f"Main process received {sig_names[signum]}, exiting")
sys.exit(1)
signal.signal(signal.SIGINT, sigkill_handler)
# signal.signal(signal.SIGTERM, sigkill_handler)
time.sleep(2) ## + (15 if profile else 0))
for location in self.locations:
proxy = location.getProxy()
proxy.poke()
def shutdownRuntimeAll(self):
""" Ask all remote runtime servers to stop. Returns after all servers ack the shutdown request. """
for location in self.locations:
try:
proxy = location.getProxy(maxRetry=1)
                if proxy is not None:
print(proxy.shutdown())
# print(location.getProxy(maxRetry=1).shutdown())
except xmlrpc.client.Fault:
print("pipe broken while shuting down %s" % location.address)
except grpc.RpcError:
print("GRPC error while shuting down %s" % location.address)
def initCommBackendAll(self, c10dBackend, commGroupSet):
assert(sorted(commGroupSet) == list(commGroupSet))
if tuple(commGroupSet) in self.commGroups:
return
self.commGroups.add(tuple(commGroupSet))
if c10dBackend == "nccl":
group_id = self.locations[commGroupSet[0]].getProxy().initCommNCCL("Generate comm group ID", 0, bytes(128), list(commGroupSet))
threadList = []
def requestInitCommBackend(proxy):
# print(proxy.initCommBackend())
if c10dBackend == "grpc":
print(proxy.initCommGRPC(rankToIpMap))
if c10dBackend == "nccl":
proxy.initCommNCCL("Join comm group", 1, group_id, list(commGroupSet))
for i in commGroupSet:
location = self.locations[i]
thread = threading.Thread(name='init_comm%d'%i, target=requestInitCommBackend, args=(location.getProxy(),))
thread.start()
threadList.append(thread)
waitthreads(threadList)
def initCommGroupsAll(self, jobName: str, commGrpDict: dict, jobRankToGlobalRank: list):
""" A helper function that will ask all runtimes to create new c10d comm groups.
Used while scheduling a new training job. This method should be invoked before
scheduling a new training job to any runtime that will participate in training.
"""
commGrpDictWithGlobalRanks = {}
for grpName in commGrpDict:
grpRanks = commGrpDict[grpName]
globalGrpRanks = [jobRankToGlobalRank[rank] for rank in grpRanks]
commGrpDictWithGlobalRanks[grpName] = globalGrpRanks
commGrpDictWithGlobalRanksInJson = json.dumps(commGrpDictWithGlobalRanks)
threadList = []
def requestInitCommGroups(proxy, jobName, commGroupsInJson):
# print(proxy.initCommGroups(jobName, commGroupsInJson))
proxy.initCommGroups(jobName, commGroupsInJson)
for i, location in enumerate(self.locations):
thread = threading.Thread(name='init_commGroups%d'%i, target=requestInitCommGroups,
args=(location.getProxy(), jobName, commGrpDictWithGlobalRanksInJson,))
thread.start()
threadList.append(thread)
waitthreads(threadList)
def waitForRuntimeAll(self):
""" Waits until all runtime processes terminate. Development use only. """
# TODO: replace this method with xmlrpc server event loop.
print("Waiting for ssh process to terminate.")
for p in self.processes:
p.wait()
####################################################################################
## Initial launch scripts
####################################################################################
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="ClusterCoordinator initial launch "
"script that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--addrToBind", type=str, default="localhost:12340",
help="IP:port to listen for requests to the cluster coordinator")
parser.add_argument("--c10dBackend", type=str, default="nccl",
help="pytorch c10d communication backend. Type either nccl or gloo")
parser.add_argument("--logLevel", type=int, default=1,
help="Logging level. 0: verbose, 1: Info, 2: Error") # NOT YET IMPLEMENTED.
parser.add_argument("--pathToConfig", type=str, default="clusterConfig.json",
help="The full path to the cluster configuration files")
parser.add_argument('--install', default=False, action='store_true',
help="When this option is set, it will install required pip packages to all servers")
parser.add_argument('--profile', default=False, action='store_true',
help="To launch runtimes with night system profiling.")
parser.add_argument("--be_batch_size", type=int, default=0,
help="launch runtimes with be beatch size")
parser.add_argument('--cpp', default=False, action='store_true',
help="To launch CPP version runtimes.")
parser.add_argument('--manualLaunch', default=False, action='store_true',
help="Do not runtimes automatically. Primarily for using gdb on runtime processes.")
parser.add_argument("--logdir", type=str, default="", help="Full path of log directory")
# For installing nsys.. (with other cuda toolkit..)
# wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
# sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
# sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
# sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ /"
# sudo apt-get update
# sudo apt-get -y install cuda
return parser.parse_known_args()
def main():
global args, extra_args
args, extra_args = parse_args()
# clusterConfig = json.load(open(args.pathToConfig))
global rankToIpMap
rankToIpMap = {}
commGrpRanksWorld = []
locations = []
# for serverConfig in clusterConfig["serverList"]:
# print("Found %s" % str(serverConfig))
port = 11250
gpus = discover_gpu_numa()
for i, node in enumerate(gpus):
rankToIpMap[str(len(locations))] = f"127.0.0.1:{port}"
commGrpRanksWorld.append(len(locations))
loc = Location("127.0.0.1", port, i, None, None, args.cpp)
loc.numa_node = node
locations.append(loc)
port += 1
addrToBindCombo = re.split('[-:]', args.addrToBind)
addrToBind = addrToBindCombo[0]
portToBind = int(addrToBindCombo[1])
coordinator = ClusterCoordinator(addrToBind, portToBind, locations, os.getcwd(), args.be_batch_size)
if args.install:
coordinator.installPackages()
# Just make sure there's no previously left runtimes.
# CPP runtimes seem to terminate appropriately. So, there's no need to shutdown leftovers.
if not args.cpp:
print("Cleaning up potentially leftover runtime servers from previous experiment.")
coordinator.shutdownRuntimeAll()
time.sleep(10)
coordinator.launchRuntimeAll(args.c10dBackend, profile=args.profile, cppRuntime=args.cpp, manualLaunch=args.manualLaunch)
print("All runtime nodes are up and running. Now, initializing communication backend..")
coordinator.initCommBackendAll(args.c10dBackend, commGrpRanksWorld)
print("Communication backends are ready at all locations.")
print("Now, cluster is ready to accept training jobs.")
sys.stdout.flush()
coordinator.timeout = 1
while not HAS_EXCEPTION:
coordinator.handle_request()
time.sleep(5)
if __name__ == "__main__":
main()
|
httpread.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, random
import moment
import multiprocessing
id_list = []
for i in range(0,1000):
id_list.append(str(i)+"tao")
post_url = "http://192.168.99.100:4000/user/"
numprocess = 100
# r = requests.get(post_url+"10000tao")
# print r.text
def sendreadrequest():
st = moment.now().epoch()
for i in range(0,100):
j = random.randint(0,999)
#r = requests.put(post_url+para_list[i]["_id"],para_list[i])
r = requests.get(post_url+id_list[j])
#print r.text
#print r.status_code
if(r.status_code != 200 or "status" in r.json()):
print i
print "read failed"
break
runt = moment.now().epoch() - st
print runt
##########################################################
plist = []
for i in range (0,numprocess):
p = multiprocessing.Process(target = sendreadrequest)
plist.append(p)
for i in range (0,numprocess):
plist[i].start()
# sendrequest()
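# Suggested addition (not in the original script): join the worker processes so
# the parent waits for every reader to finish before exiting, e.g.:
# for p in plist:
#     p.join()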
|
test_mp.py
|
from multiprocessing import Process, Queue
from time import sleep
def f(q):
q.put([42, None, 'hello'])
sleep(3)
q.put([111])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
    print(q.get())  # prints "[42, None, 'hello']"
    print(q.get())  # prints "[111]" after the worker's 3-second sleep
p.join()
|
signals.py
|
from django.db.models.signals import post_save
from News.models import News, NewsTag, Columns, Author, Article
from django.conf import settings
from task.models import Task
from News.management.commands.scraper import run_news_scraper, get_news_links, \
run_articles_scraper, run_columns_scraper
from threading import Thread
import os
def handler_run_parser(sender, instance, **kwargs):
if kwargs.get('created'):
if instance.task == 'run_parser':
Thread(target=run_columns_scraper, args=(1, instance)).start()
Thread(target=run_articles_scraper, args=(1, instance)).start()
Thread(target=run_news_scraper, args=(1, instance)).start()
elif instance.task == 'get_articles':
Thread(target=run_articles_scraper, args=(1, instance)).start()
elif instance.task == 'get_fresh_links':
Thread(target=get_news_links, args=(1, instance)).start()
elif instance.task == 'get_columns':
Thread(target=run_columns_scraper, args=(1, instance)).start()
elif instance.task == 'get_news':
Thread(target=run_news_scraper, args=(1, instance)).start()
elif instance.task == 'count_images':
count_images = len(os.listdir(os.path.join(settings.BASE_DIR, 'media/images/')))
instance.status = f'Images = {count_images}'
instance.save()
elif instance.task == 'count__total_news':
news = News.objects.count()
instance.status = f'There are {news} news in database'
instance.save()
elif instance.task == 'count__articles':
count_items = Article.objects.count()
instance.status = f'There are {count_items} Articles'
instance.save()
elif instance.task == 'count__columns':
count_items = Columns.objects.count()
instance.status = f'There are {count_items} Columns'
instance.save()
elif instance.task == 'count__categories':
count_categories = NewsTag.objects.count()
instance.status = f'There are {count_categories} categories in your database'
instance.save()
elif instance.task == 'count__author':
authors = Author.objects.count()
instance.status = f'There are {authors} Authors in your database'
instance.save()
post_save.connect(handler_run_parser, sender=Task)
|
test_internal.py
|
import os
import random
import threading
from hks_pylib.logger.logger import Display
from hks_pylib.logger.standard import StdUsers
from hks_pynetwork.internal import LocalNode, ForwardNode
from hks_pylib.logger import StandardLoggerGenerator
from hks_pynetwork.external import STCPSocket
from hks_pylib.cryptography.ciphers.symmetrics import AES_CTR, AES_CBC
logger_generator = StandardLoggerGenerator("tests/test_internal.log")
KEY = os.urandom(32)
N_SAMPLE_DATA = random.randint(10, 20)
NODE1_SAMPLE_DATA_LIST = [os.urandom(random.randint(100, 200)) for _ in range(N_SAMPLE_DATA)]
NODE2_SAMPLE_DATA_LIST = [os.urandom(random.randint(100, 200)) for _ in range(N_SAMPLE_DATA)]
def node1():
node = LocalNode(
name="NODE1",
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
err = None
for node1_data, node2_data in zip(NODE1_SAMPLE_DATA_LIST, NODE2_SAMPLE_DATA_LIST):
node.send("NODE2", node1_data)
source, data, obj = node.recv()
if source != "NODE2":
err = "ERROR NODE1 SOURCE NOT MATCH"
if data != node2_data:
err = "ERROR NODE1 DATA NOT MATCH"
node.close()
if err:
raise Exception(err)
def node2():
node = LocalNode(
name="NODE2",
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
err = None
for node1_data, node2_data in zip(NODE1_SAMPLE_DATA_LIST, NODE2_SAMPLE_DATA_LIST):
source, data, obj = node.recv()
node.send("NODE1", node2_data)
if source != "NODE1":
err = "ERROR NODE2 SOURCE NOT MATCH"
if data != node1_data:
err = "ERROR NODE2 DATA NOT MATCH"
node.close()
if err:
raise Exception(err)
def test_local_node():
t1 = threading.Thread(target=node2)
t1.start()
t2 = threading.Thread(target=node1)
t2.start()
t1.join()
t2.join()
def server():
server = STCPSocket(
cipher=AES_CTR(KEY),
name="Server",
buffer_size=1024,
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
server.bind(("127.0.0.1", 19999))
server.listen()
socket, addr = server.accept()
err = None
for node1_data, node2_data in zip(NODE1_SAMPLE_DATA_LIST, NODE2_SAMPLE_DATA_LIST):
data = socket.recv()
if data != node2_data:
err = "SERVER ERROR DATA NOT MATCH"
break
socket.send(node1_data)
socket.close()
if err:
raise Exception(err)
    server.close()  # close the listening socket as well (the accepted connection was closed above)
def client():
client = STCPSocket(
cipher=AES_CTR(KEY),
name="Client",
buffer_size=1024,
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
client.connect(("127.0.0.1", 19999))
node = LocalNode(
"CLIENT",
logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
forwarder = ForwardNode(
node=node,
socket=client,
name="Forwarder of client",
implicated_die=True,
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL}
)
threading.Thread(target=forwarder.start).start()
err = None
for node1_data, node2_data in zip(NODE1_SAMPLE_DATA_LIST, NODE2_SAMPLE_DATA_LIST):
node.send(forwarder.name, node2_data)
source, data, obj = node.recv()
if source != forwarder.name:
err = "ERROR CLIENT SOURCE NOT MATCH"
break
if data != node1_data:
err = "ERROR CLIENT DATA NOT MATCH"
break
node.close()
if err:
raise Exception(err)
def test_forwardnode():
t1 = threading.Thread(target=server)
t1.start()
t2 = threading.Thread(target=client)
t2.start()
t1.join()
t2.join()
|
quoteproviders.py
|
from collections import namedtuple
import requests, pickle
from abc import ABCMeta, abstractmethod
from bs4 import BeautifulSoup
from utilities import *
from random import randint
from threading import Thread
# Tuple object
Quote = namedtuple('Quote', ['text', 'author'])
class QuoteException(Exception):
def __init__(self, message):
        super().__init__(message)
# Base abstract class
class QuoteProvider(metaclass=ABCMeta):
def __init__(self, filename='quotes.txt', url=None):
self.url = url
self.filename = filename
self.quotes = list()
# Public API
def save(self, quote):
''' Saves a quote object in a pickle file '''
try:
with open(self.filename, 'ab') as quotefile:
pickle.dump(quote, quotefile)
return True
except Exception as err:
raise QuoteException("Could not save quote!\nErr: %s" % err)
return False
def exists(self, quote):
''' Checks if a quote object exists in a pickle file '''
try:
with open(self.filename, 'rb') as quotefile:
while True:
data = pickle.load(quotefile)
if quote == data:
return True
except:
pass
return False
def randomize(self):
''' Return a random quote from the list '''
if len(self.quotes) > 0:
number = randint(0, len(self.quotes) - 1)
return self.quotes[number]
@abstractmethod
def load(self):
''' Function that must be overwritten in sub-classes, it handles loading all the quotes into 'self.quotes' '''
return
# Private API
@abstractmethod
def __parse__(self, input):
''' Function that must be overwritten in sub-classes, it handles parsing the return output from 'self.html' '''
return
@abstractmethod
def __fetch__(self, url):
''' abstract method that handles fetching data and adding to 'self.quotes' '''
pass
def __request__(self, url):
''' Make a GET request on a specific uri and return all the response from said GET request. '''
url = url or self.url
if not url or not Utilities.validate_uri(url):
raise QuoteException("Url not valid!")
r = requests.get(url)
if r.status_code == 200:
return r.text
else:
raise QuoteException("%s could not return quotes!" % self.url)
def __html__(self, html):
''' Return a BeautifulSoup object from a given text string '''
if not html:
raise QuoteException("No html arg!")
try:
            return BeautifulSoup(html, 'html.parser')
except Exception as err:
raise QuoteException('Could not parse text into BeautifulSoup!')
# Subclass
class GoodreadQuote(QuoteProvider):
def __init__(self):
return super().__init__(url='')
def __parse__(self, input):
return
def load(self):
return
def __fetch__(self, url):
return
# Subclass
class BrainyQuote(QuoteProvider):
def __init__(self):
super().__init__(url='http://www.brainyquote.com/quotes/keywords/list%s.html')
# Overwritten
def __parse__(self, input):
try:
if not input:
raise QuoteException("Can't parse input!")
# find all divs with correct class
for div in [ x for x in input.find_all('div', attrs={'class': 'boxyPaddingBig'}) ]:
# get text and author
text, auth = [ y for y in div.text.split('\n') if y != '"' and y ]
yield (text, auth)
except Exception as err:
raise QuoteException("Can't parse input!\nErr: %s" % err)
def load(self):
''' Load all data in a multi threaded env '''
threads = []
        for i in range(14): # base page plus 13 numbered pages ("_1" .. "_13")
url = self.url % ('_{0}'.format(i) if i > 0 else '')
t = Thread(target=self.__fetch__, args=(url,))
threads.append(t)
t.start()
for thread in threads:
thread.join()
def __fetch__(self, url):
        ''' Utilizes all methods to fetch the data from the pre-specified configuration '''
# GET request for data
data = self.__request__(url)
# Change into HTML
html = self.__html__(data)
# Parse html and iterate
for data in self.__parse__(html):
text, auth = data
quote = Quote(text, auth)
self.quotes.append(quote)
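# Hypothetical usage sketch (not part of the original module; assumes network
# access and that the 'utilities' module is importable): load quotes from
# BrainyQuote across all pages, then print one at random.
if __name__ == '__main__':
    provider = BrainyQuote()
    provider.load()
    quote = provider.randomize()
    if quote:
        print('"%s" - %s' % (quote.text, quote.author))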
|
camera.py
|
"""camera.py
This code implements the Camera class, which encapsulates code to
handle IP CAM, USB webcam or the Jetson onboard camera. In
addition, this Camera class is further extended to take a video
file or an image file as input.
"""
import logging
import threading
import numpy as np
import cv2
def add_camera_args(parser):
"""Add parser augument for camera options."""
parser.add_argument('--file', dest='use_file',
help='use a video file as input (remember to '
'also set --filename)',
action='store_true')
parser.add_argument('--image', dest='use_image',
help='use an image file as input (remember to '
'also set --filename)',
action='store_true')
parser.add_argument('--filename', dest='filename',
help='video file name, e.g. test.mp4',
default=None, type=str)
parser.add_argument('--rtsp', dest='use_rtsp',
help='use IP CAM (remember to also set --uri)',
action='store_true')
parser.add_argument('--uri', dest='rtsp_uri',
help='RTSP URI, e.g. rtsp://192.168.1.64:554',
default=None, type=str)
parser.add_argument('--latency', dest='rtsp_latency',
help='latency in ms for RTSP [200]',
default=200, type=int)
parser.add_argument('--usb', dest='use_usb',
help='use USB webcam (remember to also set --vid)',
action='store_true')
parser.add_argument('--vid', dest='video_dev',
help='device # of USB webcam (/dev/video?) [0]',
default=0, type=int)
parser.add_argument('--width', dest='image_width',
help='image width [640]',
default=640, type=int)
parser.add_argument('--height', dest='image_height',
help='image height [480]',
default=480, type=int)
return parser
def open_cam_rtsp(uri, width, height, latency):
"""Open an RTSP URI (IP CAM)."""
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! omxh264dec ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! videoconvert ! '
'appsink').format(uri, latency, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
"""Open a USB webcam.
We want to set width and height here, otherwise we could just do:
return cv2.VideoCapture(dev)
"""
gst_str = ('v4l2src device=/dev/video{} ! '
'video/x-raw, width=(int){}, height=(int){} ! '
'videoconvert ! appsink').format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
"""Open the Jetson onboard camera.
On versions of L4T prior to 28.1, you might need to add
'flip-method=2' into gst_str.
"""
gst_str = ('nvcamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)2592, height=(int)1458, '
'format=(string)I420, framerate=(fraction)30/1 ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! videoconvert ! '
'appsink').format(width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def grab_img(cam):
"""This 'grab_img' function is designed to be run in the sub-thread.
Once started, this thread continues to grab a new image and put it
into the global 'img_handle', until 'thread_running' is set to False.
"""
while cam.thread_running:
_, cam.img_handle = cam.cap.read()
if cam.img_handle is None:
logging.warning('grab_img(): cap.read() returns None...')
break
cam.thread_running = False
class Camera():
"""Camera class which supports reading images from theses video sources:
1. Video file
2. Image (jpg, png, etc.) file, repeating indefinitely
3. RTSP (IP CAM)
4. USB webcam
5. Jetson onboard camera
"""
def __init__(self, args):
self.args = args
self.is_opened = False
self.use_thread = False
self.thread_running = False
self.img_handle = None
self.img_width = 0
self.img_height = 0
self.cap = None
self.thread = None
def open(self):
"""Open camera based on command line arguments."""
assert self.cap is None, 'Camera is already opened!'
args = self.args
if args.use_file:
            self.cap = cv2.VideoCapture(args.filename)
# ignore image width/height settings here
self.use_thread = False
elif args.use_image:
self.cap = 'OK'
self.img_handle = cv2.imread(args.filename)
# ignore image width/height settings here
if self.img_handle is not None:
self.is_opened = True
self.img_height, self.img_width, _ = self.img_handle.shape
self.use_thread = False
elif args.use_rtsp:
self.cap = open_cam_rtsp(
args.rtsp_uri,
args.image_width,
args.image_height,
args.rtsp_latency
)
self.use_thread = True
elif args.use_usb:
self.cap = open_cam_usb(
args.video_dev,
args.image_width,
args.image_height
)
self.use_thread = True
else: # by default, use the jetson onboard camera
self.cap = open_cam_onboard(
args.image_width,
args.image_height
)
self.use_thread = True
if self.cap != 'OK':
if self.cap.isOpened():
# Try to grab the 1st image and determine width and height
_, img = self.cap.read()
if img is not None:
self.img_height, self.img_width, _ = img.shape
self.is_opened = True
def start(self):
assert not self.thread_running
if self.use_thread:
self.thread_running = True
self.thread = threading.Thread(target=grab_img, args=(self,))
self.thread.start()
def stop(self):
self.thread_running = False
if self.use_thread:
self.thread.join()
def read(self):
if self.args.use_file:
_, img = self.cap.read()
if img is None:
#logging.warning('grab_img(): cap.read() returns None...')
# looping around
self.cap.release()
self.cap = cv2.VideoCapture(self.args.filename)
_, img = self.cap.read()
return img
elif self.args.use_image:
return np.copy(self.img_handle)
else:
return self.img_handle
def release(self):
assert not self.thread_running
if self.cap != 'OK':
self.cap.release()
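# Hypothetical usage sketch (not part of the original module; assumes a valid
# source is selected via the flags from add_camera_args, e.g. --usb or --rtsp):
# open the camera, grab a single frame, then shut everything down cleanly.
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = add_camera_args(ArgumentParser(description='Camera demo'))
    args = parser.parse_args()
    cam = Camera(args)
    cam.open()
    if cam.is_opened:
        cam.start()
        img = cam.read()
        print('Frame shape:', None if img is None else img.shape)
        cam.stop()
    cam.release()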
|
5.rlock_multi_thread.py
|
'''
Note: an RLock is mutually exclusive between threads; only the thread that
already holds it may re-acquire (re-enter) it.
'''
import logging
import time
from datetime import datetime
from threading import RLock, Thread, current_thread
from typing import List
def req1(lock: RLock, flag: int, list: List):
    lock.acquire()
    lock.acquire()  # re-entrant: the same thread may acquire the RLock a second time
if flag == 0:
list.append(1)
else:
list.append(2)
lock.release()
lock.release()
def req2(lock: RLock, value: int):
try:
if value == 0:
lock.acquire()
logging.info(f'{current_thread().name} 1')
            # release the lock after 1 second
time.sleep(1)
lock.release()
if value == 1:
lock.release()
except Exception as e:
logging.info(f'{current_thread().name} {e}')
pass
def req3(lock: RLock, value: int):
logging.info(f'{current_thread().name} start')
    # mutual exclusion between threads: thread-4 will block here
lock.acquire()
logging.info(f'{current_thread().name} end')
def main1(lock: RLock):
list = []
req1(lock, 0, list)
req1(lock, 1, list)
assert [1, 2] == list, 'main1 error'
def main2(lock: RLock):
tasks = [Thread(target=req2, args=(lock, i),
name=f'thread-{i+1}') for i in range(2)]
for i in tasks:
i.start()
for i in tasks:
i.join()
def main3(lock: RLock):
tasks = [Thread(target=req3, args=(lock, i),
name=f'thread-{i+3}') for i in range(2)]
for i in tasks:
i.start()
for i in tasks:
i.join()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(message)s')
lock = RLock()
    logging.info('==> a single thread acquires the lock twice')
main1(lock)
    logging.info('==> thread-2 cannot release the lock acquired by thread-1')
main2(lock)
    logging.info('==> mutual exclusion between threads: thread-3 never releases the lock, so thread-4 cannot acquire it')
main3(lock)
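    # Observation (not in the original script): because thread-3 exits without
    # releasing the lock, thread-4 blocks in acquire() forever, so main3()'s
    # join() never returns and this script does not terminate on its own.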
|
multi_threading.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time, threading
#:
def loop():
print("thread %s is running..." % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print("thread %s >>> %s" % (threading.current_thread().name, n))
time.sleep(1)
print("thread %s ended." % threading.current_thread().name)
print("thread %s is running..." % threading.current_thread().name)
t = threading.Thread(target=loop, name="LoopThread")
t.start()
t.join()
print("thread %s ended." % threading.current_thread().name)
|
test_abstract_wrapper.py
|
import logging
import pathlib
import sys
import xmlrpc.client
from tempfile import TemporaryDirectory
from threading import Thread
from time import sleep
from finorch.config.config import WrapperConfigManager
from finorch.sessions.abstract_client import AbstractClient
from finorch.sessions.abstract_session import AbstractSession
from finorch.sessions.abstract_wrapper import AbstractWrapper
from finorch.utils.cd import cd
def test_constructor():
cls = AbstractWrapper()
assert cls._xml_rpc_server is None
def test_set_server():
cls = AbstractWrapper()
cls.set_server("test server")
assert cls._xml_rpc_server == "test server"
def test_prepare_log_file():
with TemporaryDirectory() as tmpdir:
with cd(tmpdir):
AbstractWrapper.prepare_log_file()
logging.info("Test Log Entry")
with open(pathlib.Path.cwd() / 'wrapper.log', 'r') as f:
assert f.readline().split('-')[-1].strip() == "Test Log Entry"
def test_start_wrapper():
# Dummy abstract session
class MyAbstractSession(AbstractSession):
callsign = "local"
client_klass = AbstractClient
wrapper_klass = AbstractWrapper
with TemporaryDirectory() as tmpdir:
def exec_thread():
with cd(tmpdir):
AbstractWrapper.prepare_log_file()
AbstractWrapper.start_wrapper(MyAbstractSession)
t = Thread(target=exec_thread)
t.start()
sleep(0.5)
# Check that a port config was set
assert int(WrapperConfigManager(tmpdir).get_port())
# Thread should finish almost instantly
assert not t.is_alive()
def test_start_wrapper_exception():
# Dummy Wrapper
class MyWrapper(AbstractWrapper):
def run(self):
raise Exception("Exception")
# Dummy abstract session
class MyAbstractSession(AbstractSession):
callsign = "local"
client_klass = AbstractClient
wrapper_klass = MyWrapper
with TemporaryDirectory() as tmpdir:
def exec_thread():
with cd(tmpdir):
AbstractWrapper.prepare_log_file()
AbstractWrapper.start_wrapper(MyAbstractSession)
t = Thread(target=exec_thread)
t.start()
sleep(0.5)
# Check that a port config was set
assert int(WrapperConfigManager(tmpdir).get_port())
# Thread should finish almost instantly
assert not t.is_alive()
def test_terminate():
terminating = False
terminated = False
# Dummy Wrapper
class MyWrapper(AbstractWrapper):
def run(self):
while not terminating:
sleep(0.1)
print("Terminating", file=sys.stderr)
nonlocal terminated
terminated = True
# Dummy abstract session
class MyAbstractSession(AbstractSession):
callsign = "dummy"
client_klass = AbstractClient
wrapper_klass = MyWrapper
with TemporaryDirectory() as tmpdir:
def exec_thread():
with cd(tmpdir):
AbstractWrapper.prepare_log_file()
AbstractWrapper.start_wrapper(MyAbstractSession)
while not terminated:
sleep(0.1)
sleep(0.1)
t = Thread(target=exec_thread)
t.start()
sleep(0.5)
port = int(WrapperConfigManager(tmpdir).get_port())
client_rpc = xmlrpc.client.ServerProxy(
f'http://localhost:{port}/rpc',
allow_none=True,
use_builtin_types=True
)
assert t.is_alive()
client_rpc.terminate()
terminating = True
sleep(0.5)
# Thread should be finished
assert not t.is_alive()
|
STABLE Functional Pipeline 06132019.py
|
import sys
import re
import os
import shutil
from multiprocessing import Process
#import pandas as pd
#import numpy as np
#from functools import reduce
#from pandas import ExcelWriter
#from pandas import ExcelFile
#import matplotlib.pyplot as plt
#from scipy.stats import ttest_ind
## init gets user input for parameters of the program.
##0== hardcoded test info for OA/CL set. 1== user popup/console input. 2== read input from text file. File structure may already be established, and maybe reading names from a text file
#Make program crashable and resumable.
#try to parallelize run_find_gene()
def main():
##TODO CALL PRE_CHECK AT THE APPROPRIATE LOCATION
header = ["0: .gff3 and Settings","1: *.vcf","2: *FILT_NVC","3: Findgene Output","4: compgene outputs","5: Final Outputs"]
filelist = [["","",""],[],[],[],[""],[]] #filelist is a list of all inputted and logged files.
dir_path = os.path.dirname(os.path.realpath(__file__)) #to figure out where the program is being executed. This is home base
os.chdir(dir_path) #move the cwd to the dir_path
cwd = os.getcwd() #This will be useful for doing things within the current directory
#0=[.gff3,filt.gff3, setup_file], 1=*.vcf, 2=*filt_vcf, 3= *findgene output, 4=compgene outputs, 5= OUTPUTS [make_pxl output, make_csv Output, filelist_output]
##INITIALIZE
#make sure settings file is searched for and added to filelist
runmode=0 #mode in which the program will be operated TODO MAKE THIS USER INPUT
types = get_types(runmode) #the names of the different types of conditions (control, OA, HA, ...)
ed_type = get_ed_type(runmode)# are we looking for A/I or C/U?
find_gene_mode = get_find_gene_mode()
init(types, runmode) #test mode init(0)
#init(1) # User input mode
#init(2, "INPATH")# Read in from file mode
##USER LOADS FILES
loaded = ""
#ADD: ability to read loaded from input
while(loaded!="c"):
print(types)
loaded=input("Load Input files into their appropriate folders. Enter \'c\' to Continue, or \'e\' to Exit the program.\t").strip().lower()
if loaded=="e":
break
if loaded =="e":
sys.exit() #exit before data intensive steps
#RUN pre_check()
sys.stdout.write("\n Running pre_check")
run_pre_check(dir_path, cwd)
sys.stdout.write("\n Completed pre_check")
##Add all to file list
sys.stdout.write("\n Running Standardize")
standardize(filelist, types, dir_path, cwd, ed_type)#add correct inputs
sys.stdout.write("\n Completed Standardize")
#VCF Filter
sys.stdout.write("\n Running VCF_Filter")
run_vcf_filter(filelist, dir_path, ed_type)
sys.stdout.write("\n Completed VCF_Filter")
#GFF3 Filter
sys.stdout.write("\n Running GFF3_Filter")
run_gff3_filter(filelist, dir_path, ed_type)
sys.stdout.write("\n Completed GFF3_Filter")
#Findgene
sys.stdout.write("\n Running FINDGENE in mode " + find_gene_mode)
run_find_gene(filelist, dir_path, ed_type, find_gene_mode)
sys.stdout.write("\n Completed FINDGENEin mode" + find_gene_mode)
#COMPGENE
sys.stdout.write("\n Running COMPGENE")
run_compgene(filelist, dir_path, ed_type)
sys.stdout.write("\n Completed COMPGENE")
#MakePXL
sys.stdout.write("\n Running MakePXL")
os.chdir(dir_path)
pxl_name = "./outputs/PXL.txt" #TODO MODIFY TO BE OF FORMAT: (a or c)_[types]_PXL.txt
make_pxl(filelist[4][-1], filelist[3], pxl_name, ed_type)
    filelist[5].append(pxl_name) #changed to .append rather than direct indexing to fix index out of range issue.
#make_pxl(filelist[4][-1]) #store in outputs and filelist[5][0]
sys.stdout.write("\n Completed MakePXL")
#MakeCSV
sys.stdout.write("\n Running MakeCSV")
csv_name = "./outputs/CSV.csv" #can modify for more flexibility later
make_csv(filelist[5][0], csv_name, ed_type) #store in outputs and filelist[5][1]
filelist[5].append(csv_name)
sys.stdout.write("\n Completed MakePXL")
#Output filelist for confirmation that all files were correctly processed
sys.stdout.write("\n Outputting Filelist")
filelist_name = "./outputs/filelist_output.txt"
filelist[5].append(filelist_name)
output_files_used(filelist, header, dir_path, ed_type)
sys.stdout.write("\n Completed Outputting Filelist")
sys.stdout.write("\n Execution Completed, Ending Program")
sys.exit()#Ends the program
def get_ed_type(runmode):
ed_type = ""
if (runmode == 0 or runmode == 1):
while(ed_type!="a" and ed_type!="c"):
ed_type = input("Run for A-to-I or C-to-U editing? Enter a or c\t").lower().strip() #note: incorrect inputs will just trigger the loop to repeat.
return ed_type
elif runmode ==2:
#MAKE ABLE TO READ INPUT FROM A FILE
return
else:
sys.stdout.write("\n ERROR in get_ed_type. runmode-based error. Exiting program")
sys.exit()
def clear_pipe(dir_path, cwd):
folder = dir_path+"/intermeds"
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
def run_pre_check(dir_path, cwd):
pre_check_meanings = {1:"pre_check passed",2:"pre_check failed in inputs",3:"pre_check failed in intermeds",4:"pre_check failed in outputs"}
pre_check_val = pre_check(dir_path, cwd)
if pre_check_val!=1:
sys.stdout.write("\n"+pre_check_meanings[pre_check_val])
quit_in_pre = input("pre_check detected errors. Proceed anyway? c to continue, r to clear intermeds and outputs, n to quit program. c/r/n\t").lower().strip()
if(quit_in_pre=="n"):
sys.stdout.write("Exiting. Goodbye")
sys.exit()
elif(quit_in_pre=="r"):
clear_pipe(dir_path, cwd)
run_pre_check(dir_path, cwd) #Recursively runs the pre_check to try to fix errors. quits when no error detected OR when y is input
elif(quit_in_pre =="c"):
sys.stdout.write("\n pre_check process bypassed (with command c)")
return
else:
sys.stdout.write("\n"+pre_check_meanings[pre_check_val])
return
def pre_check(dir_path, cwd):
"""Checks if files are correctly set up. Returns 1 if files correctly set up. returns 2 if error in inputs, 3 if error in intermeds, 4 if error in outputs.
pre_check processes inputs then intermeds then outputs, and returns on the first potential error detected.
An error in inputs may preclude detecting an issue in intermeds or outputs. intermeds error could preclude detecting in outputs"""
sys.stdout.write("\n Performing Pre-Check of file setup")
sys.stdout.write("\n Checking "+dir_path+"/inputs")
#check that inputs are not formatted to processed name format
#get list of directories beginning with t
dirs = os.listdir(dir_path+"/inputs")
for dire in dirs:
if dire[0]=="o":
continue
elif(dire[0]=="t"):
files = os.listdir(dir_path+"/inputs/"+dire)
for file in files:
if "Galstep" in file:
sys.stdout.write("Error: File Names already standardized at: "+dir_path+"/inputs/"+dire+"/"+file)
sys.stdout.write("Input issue -- FAIL")
return 2
else:
sys.stdout.write("unexpected directory detected. potential issue at: "+dir_path+"/inputs/"+dire)
sys.stdout.write("Input issue -- FAIL")
return 2
#check each file for "Galstep." If Match: return 2, print input fail
sys.stdout.write(" inputs unformatted - PASS")
#check that intermediates empty
sys.stdout.write("\n Checking "+dir_path+"/intermeds")
if not os.listdir(dir_path+"/intermeds"):
print(dir_path+"/intermeds\t is empty - PASS")
pass
else:
print(dir_path+"/intermeds\t is NOT empty - FAIL")
return 3
#check that outputs empty
sys.stdout.write("\n Checking "+dir_path+"/outputs")
if not os.listdir(dir_path+"/outputs"):
print(dir_path+"/outputs\t is empty - PASS")
pass
else:
print(dir_path+"/outputs\t is NOT empty - FAIL")
return 4
return 1
def init(types, runmode=1, in_path="",):
if(runmode == 0): #condition to skip re-placing the files for test mode
skip_init = input("skip init? y/n\t")
if (skip_init=="y"):
return
os.mkdir("inputs") #for raw input files
#note: when testing, make sure that the current path is still . and is NOT ./inputs
#if necessary, reset cursor
os.mkdir("intermeds") #files produced internally (filtered versions, etc) that end user is not expected to see
os.mkdir("outputs") #files that the end user would want to see (comparison files, pxl, excel)
numtypes=0 #number of types of files to be compared. For example, 2 types is control and experimental. 3 skips init entirely
#types_raw =[] #input variable #maybe extra, delete soon
typenames =[] #List of touples patterned (typenumber, verbal name) #return
#ed_type = ""
#already_init = False
#If/elif conditions to get the info. create stuff after the info is recorded.
#if (False): """INPUT FOLDER ALREADY EXISTS. This conditional must be edited"""
# already_init = True
#make flexible for if init performed or not already
if runmode == 3: #escape
return
elif runmode==0:
#hardcoded test info
print("Standard CL/OA test mode selected. 2 types")
numtypes=2
#typenames=[("type1","CL"),("type2","OA")]
#ed_type = "AG"
#already_init = False #Make this true if the file tree is already initialized in mode ==2
elif runmode ==1:
#user input
print("Manual Input Selected.")
        numtypes = int(input("Number of types (integer, 2 is standard for control and experimental)\t"))
# """raw_types=raw_input("In Order, what are the names of these types (separated by commas)").split(",").strip()
# for i in range(0,len(raw_types)):
# typenames.append(("type{}"format(str(i)),raw_types[i]))
# ed_type=raw_input("What type of editing to look for ? A/G or C/U? Enter A or C").strip()
# #add an input checker to make sure this can take only a,A,c,C
# if(ed_type.islower()): #checks if the edtype is lower case
# ed_type=ed_type.upper()#changes it to upper case if necessary
elif runmode ==2: #TODO: MAKE EXECUTABLE FROM A FILE
print("Input from text file mode selected")
#from inpath
# read input from a text file at in_path
#be ready for the file structure to already be initialized. If this is the case, just get the names and editing type and
#already_init = True
else:
#print error and exit
print("Error in init. Exiting Init component.")
return
for i in range(0,numtypes): #make sure this works, flexibly
dirname = "./inputs/type{}_{}".format(str(i),types[i])
os.mkdir(dirname)
os.mkdir("./inputs/other")
def get_types(runmode):
types = []
if runmode == 3: #Input of a new runmode
        runmode = int(input("input new mode (0 for standard test, 1 for manual entry, 2 for document entry)\t"))
if runmode == 0: #test
types =["CL","OA"]
elif runmode == 1: #user input
        types = [t.strip() for t in input("In Order, what are the names of these types (separated by commas)\t").split(",")]
elif runmode == 2: #read from file
#read types from file
pass
return types
def standardize(filelist, types, dir_path, cwd, ed_type):
directories = os.listdir(dir_path+"/inputs")
for directory in directories:
if directory != ("other"):
standardize_NVCs(filelist, types, dir_path, cwd, directory, ed_type)
other_contents = os.listdir(dir_path+"/inputs/other")
for item in other_contents:
if(".gff3" in item):
filelist[0][0]="./inputs/other/"+item
elif("settings" in item):
print("settings document detected. Previously added to filelist")
continue #TODO make this work from the settings document
else:
print("Unrecognized input format. Excluded from filelist:\t"+ item)
def standardize_NVCs(filelist, types, dir_path, cwd, directory, ed_type):
for filename in os.listdir("./inputs/"+directory): #TODO Confirm the ./ not needed.
os.chdir(dir_path)
oldname = filename #Format: Galaxy34-[Naive_Variant_Caller_(NVC)_on_data_19_and_data_26].vcf
#sys.stdout.write("\noldname\t"+oldname) #debug
type_info = directory.split("/")[-1].strip()
gal_num = oldname.split("-")[0].split("y")[1].strip() #isolate the number
fasta_step = oldname.split("_")[6].strip()
#sys.stdout.write("\noldname\t"+oldname) #debug
#sys.stdout.write("\n\nbam_step = oldname.split(_)[9]") #debug
#sys.stdout.write(str(oldname.split("_"))) #debug
bam_step = oldname.split("_")[9].split(".")[0].split("]")[0].strip() #Added the "]" here to remove the bracket left in names.
newname = "NVC_"+ed_type+"_"+type_info+"_Galstep"+gal_num+"_ON_DATA_"+fasta_step+"_"+bam_step+".vcf"#New Format (v2): NVC_EDTYPE_TYPE#_TYPE_GALAXY STEP NUMBER_ON_DATA_FASTASTEP#_BAMSTEP#.vcf
os.chdir("./inputs/"+directory)
os.rename(oldname, newname)
filelist[1].append("./inputs/"+directory+"/"+newname)#make sure the path gets in here
os.chdir(dir_path)
def get_find_gene_mode(): #gives the mode for running findgene.
user_in = input("Sequential (Legacy) or Parallel (Experimental) findgene mode? s/p\t").lower().strip()
return user_in
def run_vcf_filter(filelist,dir_path, ed_type): #Helper function to run several calls of VCF_filter1
for vcf_file in filelist[1]:
os.chdir(dir_path)
#print(vcf_file)
clean_name = vcf_file.split("/")[3].split(".")[0].strip()
outpath = "./intermeds/"+clean_name+"_FILT.txt"
vcf_filter1(vcf_file, outpath, ed_type)
filelist[2].append(outpath)#make sure the path gets in here #would change this for parallel
def run_gff3_filter(filelist, dir_path, ed_type):
os.chdir(dir_path)
clean_name = filelist[0][0].split("/")[3].split(".")[0].strip()
outpath = "./intermeds/"+clean_name+"_FILT_gff3.txt"
gff3_filter(filelist[0][0], outpath, ed_type)
filelist[0][1] = outpath #make sure the path gets in here
def run_find_gene(filelist, dir_path, ed_type, find_gene_mode):
if find_gene_mode == "s":
sequential_run_findgene(filelist, dir_path, ed_type)
elif find_gene_mode == "p":
parallel_run_findgene(filelist, dir_path, ed_type)
else:
sys.stdout.write("\n Wrong find_genemode_mode input. defaulting to sequential (Legacy mode)")
sequential_run_findgene(filelist, dir_path, ed_type)
def sequential_run_findgene(filelist, dir_path, ed_type):
for filt_vcf in filelist[2]:
os.chdir(dir_path)
clean_name = filt_vcf.split("/")[2].split(".")[0].strip()
outpath = "./intermeds/"+clean_name+"_GENES.txt"
sys.stdout.write("\n RUNNING find_gene of "+outpath)
find_gene(filt_vcf, filelist[0][1],outpath, ed_type)
filelist[3].append(outpath)#make sure the path gets in here
sys.stdout.write("\n COMPLETED find_gene of "+outpath)
def parallel_run_findgene(filelist, dir_path, ed_type):
proc = []
for filt_vcf in filelist[2]:
os.chdir(dir_path)
clean_name = filt_vcf.split("/")[2].split(".")[0].strip()
outpath = "./intermeds/"+clean_name+"_GENES.txt"
p = Process(target = find_gene, args = (filt_vcf, filelist[0][1],outpath, ed_type), name = "FINDGENE RUN OF: "+outpath)
p.start()
proc.append(p)
filelist[3].append(outpath)#make sure the path gets in here
for p in proc:
p.join() #Locks further execution of the main thread until all processes have executed
def run_compgene(filelist, dir_path,ed_type):
for i in range(0,len(filelist[3])-1): #1 fewer comparison than there are items in the list. TODO Confirm Range auto removes this hold.
if(i==0):
outpath = "./intermeds/COMP_0.txt"
comp_gene(filelist[3][0],filelist[3][1],outpath, ed_type)#compare First 2 files
filelist[4].append(outpath) #TODO FATAL bug here. Not sure why. maybe [4][0]. Confirm .append() has fixed this.
else:
#compare filelist[4][-1] to filelist[3][i] #TODO potentially to i+1?
outpath = "./intermeds/COMP_{}.txt".format(str(i))
comp_gene(filelist[4][-1],filelist[3][i+1],outpath, ed_type)#compare most recent comparison file with next file on filelist
filelist[4].append(outpath)
def output_files_used(filelist, header, dir_path, ed_type):
new_file = open(filelist[5][2], 'x')
for i in range(0,len(header)):
new_file.write(header[i]+":\t")
for j in range(0,len(filelist[i])):
new_file.write(filelist[i][j]+"\t")
new_file.write("\n")
def vcf_filter1(vcf, filter_vcf, ed_type):
ref = ""
alt = ""
if(ed_type =="a"):
ref = "A"
alt = "G"
elif(ed_type=="c"):
ref = "C"
alt = "T"
try:
new_file = open(filter_vcf, 'x')
open_vcf = open(vcf, 'r')
open_vcf.seek(0)
for line in open_vcf:
#TODO getting a bug in C/U execution in vcf_filter1() conditional. seeing if breaking up the if works
vcf_line = line.split('\t')
if "#" in line or vcf_line[0] == '/n': #TODO Changes here for C/U -- this should work
continue
if vcf_line[3] != ref or vcf_line[4] != alt: #Split the conditional to avoid index out of range problems
continue
#print(line)
n1 = vcf_line[9].split(':')[-1]
n2 = n1.split(',')[0:-1]
numref = 0
numalt = 0
for x in n2:
num = int(x.split('=')[-1])
if ref in x:
numref += num
elif alt in x:
numalt += num
else:
sys.stdout.write("\nSomething was wrong with the input file in VCF_Filter")
new_file.write(line + '\t' + ':'+ref+'=' + str(numref) + ',' + ''+alt+'=' + str(numalt) + ',' + '\n')
# Sample Output into filtered file
# scaffold_0 26626 . A G . . AC=1;AF=0.00833333333333;SB=2.79069767442 GT:AC:AF:SB:NC 0/0:1:0.00833333333333:2.79069767442:+A=77,-A=42,-G=1,
# :A=119,G=1,
#print(line + '\t' + ':A=' + str(numA) + ',' + 'G=' + str(numG) + ',' + '\n')
#print(n2)
except FileExistsError:
        print('the file ' + filter_vcf + ' already exists')
#vcf_filter1('C1.vcf','C1_filtered.txt')
#counts num of a-to-g events
# def count(file):
# count_a_to_g = 0
# test = 0
# with open(file, 'r') as open_file: #'r' is opening the file in read mode
# for line in open_file: #loops through all lines in file
# if "A\tG" in line:
# if "AC=1" in line:
# count_a_to_g = count_a_to_g
# else:
# count_a_to_g = count_a_to_g + 1 #change to +=1?
# print ("Number of A-to-I Editing Sites is:", count_a_to_g)
# #count('data.vcf')
#count('C1_filtered.txt')
#filter out so only get genes IN GFF3
def gff3_filter(gff3, filter_gff3, ed_type):
"""Filters a raw .gff3 file to get only genes, reducing file size by approximately 1/10"""
try:
new_file = open(filter_gff3, 'x')
open_gff3 = open(gff3, 'r')
open_gff3.seek(0)
for line in open_gff3:
gff3_line = line.split('\t')
if len(gff3_line) > 8:
#print(gff3_line)
gff3_id = gff3_line[8]
id_check = gff3_id.split(':')
if id_check[0] == 'ID=gene':
new_file.write(line)
open_gff3.seek(0)
new_file.close()
open_gff3.close()
except FileExistsError:
print('the file ' + filter_gff3 + ' already exists')
#gff3_filter('dsechellia.gff3', 'filt_dsechellia.gff3')
#takes the result of vcf_filter and gff3_filter
#find_gene(vcf, gff3, file, ed_type) takes a filtered vcf file, a filtered gff3 file, and an output path (the name you want for the new file),
#and writes a new file with the scaffold, vcf position, and gene ID (more fields can be added where the output line is written below).
#The files must be passed to the function in this order or it will not work.
def find_gene(vcf, gff3, file, ed_type):
ref = ""
alt = ""
if(ed_type =="a"):
ref = "A"
alt = "G"
elif(ed_type=="c"):
ref = "C"
alt = "T"
try:
new_file= open(file, 'x')
open_vcf = open(vcf, 'r')
open_gff3 = open(gff3, 'r')
open_vcf.seek(0) #see line 39
open_gff3.seek(0) #see line 39
for line0 in open_vcf:
vcf_line = line0.split('\t')
if vcf_line[0] == '\n': #if line is empty (just white space) skip to next line in vcf file
continue
#vcf_line[-1].split(':')
get_nums = vcf_line[-1].split(':')
num = get_nums[-1].strip()
num_strip = num.split(',')
num0 = num_strip[0].split('=')
num1 = num_strip[1].split('=')
if len(num0) < 2: #no ref
continue
if len(num1) < 2: #no alts
continue
#print(len(num0))
#print(len(num1))
refct = int(num0[1])
altct = int(num1[1])
if refct == 0: #if no ref obs then throw out
continue
if refct + altct < 10: #if num ref + num alt is < 10 throw out
continue
vcf_scaffold = vcf_line[0]
vcf_pos = vcf_line[1]
for line1 in open_gff3:
#TODO Possibly speed up this step by using a regex search (maybe using grep?)
gff3_line = line1.split('\t')
#print(gff3_line)
gff3_scaffold = gff3_line[0]
if vcf_scaffold == gff3_scaffold: #checking scaffolds against each other
#print(vcf_scaffold)
gff3_initial = gff3_line[3]
gff3_final = gff3_line[4]
gff3_id = gff3_line[8]
id_check = gff3_id.split(':')
if id_check[0] != 'ID=gene':
continue
if int(vcf_pos) >= int(gff3_initial) and int(vcf_pos) <= int(gff3_final): #If it's within the gene
#print(gff3_line)
new_file.write(vcf_scaffold + ' ' + vcf_pos + ' ' + gff3_id + ' '+ref+' = ' + num0[1] + ' '+alt+' = ' + num1[1] + '\n')
open_gff3.seek(0) #stackoverflow told me to do this and it worked.. resets the cursor
#to the first for loop..
new_file.close()
open_vcf.close()
open_gff3.close()
except FileExistsError:
print('the file ' + file + ' already exist')
#find_gene('C1_filtered.txt','filt_dsechellia.gff3','C1_genes.txt')
#comp_genes(f0, f1, new_file) = new_file with the SNPs that are
#common to both f0 and f1
def comp_gene(f0, f1, file, ed_type):
ref = ""
alt = ""
if(ed_type =="a"):
ref = "A"
alt = "G"
elif(ed_type=="c"):
ref = "C"
alt = "T"
new_file = open(file, 'x')
open_f0 = open(f0, 'r')
open_f1 = open(f1, 'r')
open_f0.seek(0)
open_f1.seek(0)
for line0 in open_f0:
l0 = line0.split(' ')
#print(l0)
scaffold0 = l0[0]
if scaffold0 == '' or scaffold0 == ref or scaffold0 == '\n':
continue
pos0 = l0[1].strip()
#print(pos0)
for line1 in open_f1:
l1 = line1.split(' ')
#print(l1)
#print(l1[0] + ' ' + l1[1])
scaffold1 = l1[0]
if scaffold1 == '' or scaffold1 == ref or scaffold1 == '\n':
continue
pos1 = l1[1].strip()
#print(scaffold1 + 'ONE')
if scaffold0 == scaffold1 and pos0 == pos1:
new_file.write(scaffold0 + ' ' + pos0 + '\n')
open_f1.seek(0)
new_file.close()
open_f0.close()
open_f1.close()
def make_pxl(compOutput, genes, outpath, ed_type):
compO = open(compOutput, 'r')
new_file = open(outpath, 'x')
ref = ""
alt = ""
if(ed_type =="a"):
ref = "A"
alt = "G"
elif(ed_type=="c"):
ref = "C"
alt = "T"
for line in compO:
#print(line)
l = line.split(' ')
if len(l) != 2:
continue
scafC = l[0] #scaffold of comparison location
posC = l[1] #position of comparison location on scaffold
#print('========POS C========')
#print(posC)
new_file.write(scafC + '\t' + posC) #Writes the scaffold and position to the PXL
#print(scafC + '\t' + posC)
for file in genes:
f = open(file, 'r')
#print(f)
for line0 in f:
l0 = line0.split(' ')
#print(l0)
#print(l0)
scafG = l0[0] # For matching to scafC
posG = l0[1] #For matching to posC
                if posG == 'A': #if it's not a full data line (the gene file format has a line break before the A and G counts)
continue #Unless some weirdness occurs in processing, this condition should never trigger. The comment to the left may be wrong.
if posG == 'C':
continue #TODO See if this fixes the bug for only comp C runs.
#print('=========POS G=======')
#print(posG)
if int(posC) == int(posG): #If the lines' posiitons do match: #TODO CONFIRM FIXES WORKED
#print('TRUE')
if scafC == scafG: #If the lines' scaffolds do match:
#find out how to get name of a file (f)
new_file.write('^' + '\t' + file + '\t' + next(f).strip()+'\t'+l0[-1].strip()+"\n") #TODO for some reason, a \n is being added by next(f). This is a problem, causes maek_pxl to bug out. tried to fix with .strip()
#print('^' + '\t' + file + '\t' + next(f))
def make_csv(pxl, outpath, ed_type):
ref = ""
alt = ""
if(ed_type =="a"):
ref = "A"
alt = "G"
elif(ed_type=="c"):
ref = "C"
alt = "T"
pxl = open(pxl, 'r')
new_file = open(outpath, 'x')
new_file.write("Scaffold,Position,RunFileName,Ref("+ref+"),Alt("+alt+"),GeneInfo\n") #TODO ADD GENEINFO INTO HERE
scaf=""
pos=""
for line in pxl:
if(line[0]!="^"):
#print(line)
splittab = line.split("\t")
#print(splittab)
scaf = splittab[0]
pos = splittab[1].strip("\n")
elif(line[0]=="^"):
splitcar = line.split("\t")
splitspa = splitcar[2].split(" ")
#print(splitspa)
new_file.write(scaf+","+pos+","+splitcar[1]+","+splitspa[2]+","+splitspa[5]+","+splitcar[3]) #TODO Confirm this works. 06-11-19 Fixed Typos
#print(scaf+","+pos+","+splitcar[1]+","+splitspa[3]+","+splitspa[6].strip())
if __name__ == "__main__": #sets up a main area. This will not work well if imported
main()
|