source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
app.py | from flask import Flask, jsonify, render_template, request
import os
from time import sleep
from multiprocessing import Process
import configparser
from spotify_background_color import SpotifyBackgroundColor
from current_spotify_playback import CurrentSpotifyPlayback, NoArtworkException
from led_controller import LEDController
# Flask application serving the LED-control web UI.
app = Flask(__name__)

# Spotify API credentials are read from the environment so they never live
# in source control; any of these is None if the variable is unset.
CLIENT_ID = os.environ.get('SPOTIPY_CLIENT_ID')
CLIENT_SECRET = os.environ.get('SPOTIPY_CLIENT_SECRET')
REDIRECT_URI = os.environ.get('SPOTIPY_REDIRECT_URI')
REFRESH_TOKEN = os.environ.get('SPOTIPY_REFRESH_TOKEN')
@app.route('/')
def main():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/spotify')
def spotify():
    """Start the Spotify color-sync worker (if needed) and show its page.

    NOTE(review): the module global `spotify` is rebound to a
    CurrentSpotifyPlayback instance in the __main__ block below, shadowing
    this view function after startup. Flask holds its own reference so
    routing still works, but renaming one of the two would be clearer.
    """
    global p
    # Only spawn a new worker when the previous one exited or was
    # terminated; a fresh Process object is needed because a Process can
    # only ever be started once.
    if not p.is_alive():
        p = Process(target=main_spotify, args=())
        p.start()
    return render_template('spotify.html')
@app.route('/manual')
def manual():
    """Stop the Spotify sync worker and show the manual color-control page."""
    global p
    try:
        p.terminate()
    except AttributeError:
        # terminate() raises AttributeError when the Process was never
        # started; in that case there is simply nothing to stop.
        pass
    return render_template('manual.html')
@app.route('/color', methods=['GET', 'POST'])
def color():
    """Get or set the current LED color.

    POST: expects a JSON body with 'r', 'g' and 'b' keys (a missing key
    raises KeyError, surfacing as a 500) and applies the color at once.
    GET: returns the color the LEDs are currently showing.
    """
    if request.method == 'POST':
        data = request.json
        r = data['r']
        g = data['g']
        b = data['b']
        led.set_color(r, g, b, delay=0)  # delay=0: switch without fading
        return jsonify(status='updating', data=data)
    else:
        curr_r, curr_g, curr_b = led.get_color()
        return jsonify(status='current',
                       data={'r': curr_r, 'g': curr_g, 'b': curr_b})
@app.route('/off')
def off():
    """Stop the Spotify worker (if any) and switch the LEDs off."""
    global p
    try:
        p.terminate()
    except AttributeError:
        # The worker process was never started; nothing to stop.
        pass
    led.set_color(0, 0, 0)
    return render_template('off.html')
def main_spotify():
    """Worker-process loop: match the LED color to the playing album art.

    Runs forever; relies on the module globals `spotify`, `name` and `led`
    created in the __main__ block before this process is spawned.
    """
    old_song_id = ''
    while True:
        spotify.update_current_playback()
        if spotify.connected_to_chromecast(name):
            # Recompute the color only when the song actually changed.
            if spotify.new_song(old_song_id):
                try:
                    artwork = spotify.get_artwork()
                    background_color = SpotifyBackgroundColor(
                        img=artwork, image_processing_size=(100, 100))
                    r, g, b = background_color.best_color(
                        k=8, color_tol=0)
                except NoArtworkException:
                    # Tracks without artwork fall back to plain white.
                    r, g, b = 255, 255, 255
                led.set_color(r, g, b)
                old_song_id = spotify.get_current_song_id()
        else:
            # Not casting to the configured Chromecast: forget the last
            # song and turn the LEDs off (only if not already off).
            old_song_id = ''
            r, g, b = led.get_color()
            if r != 0 or g != 0 or b != 0:
                led.set_color(0, 0, 0)
        sleep(2)  # poll the playback state every 2 seconds
if __name__ == '__main__':
    # Hardware and Chromecast settings come from config.ini in the CWD.
    config = configparser.ConfigParser()
    config.read('config.ini')
    GPIO_PINS = config['GPIO PINS']
    red_pin = int(GPIO_PINS['red_pin'])
    green_pin = int(GPIO_PINS['green_pin'])
    blue_pin = int(GPIO_PINS['blue_pin'])
    name = config['CHROMECAST']['name']
    led = LEDController(red_pin, green_pin, blue_pin)
    # NOTE(review): this rebinding shadows the `spotify` view function above.
    spotify = CurrentSpotifyPlayback(CLIENT_ID, CLIENT_SECRET,
                                     REDIRECT_URI, REFRESH_TOKEN)
    # Create (but do not start) the worker so routes can query/replace it.
    p = Process(target=main_spotify, args=())
    app.run(host='0.0.0.0')
|
test_callbacks.py | import os
import sys
import multiprocessing
import numpy as np
import pytest
from keras import optimizers
np.random.seed(1337)
from keras import callbacks
from keras.models import Sequential
from keras.layers.core import Dense
from keras.utils.test_utils import get_test_data
from keras import backend as K
from keras.utils import np_utils
# Tiny toy-problem dimensions shared by every test below, chosen so each
# fit() call finishes in well under a second.
input_dim = 2
nb_hidden = 4
nb_class = 2
batch_size = 5
train_samples = 20
test_samples = 20
def test_ModelCheckpoint():
    """ModelCheckpoint should write a checkpoint file in every mode.

    Covers mode='auto'/'min'/'max' and save_best_only=False/True. Each
    case asserts the checkpoint file was created, then removes it. The
    four originally copy-pasted cases are folded into one helper.
    """
    filepath = 'checkpoint.h5'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    def run_case(monitor, save_best_only, mode):
        # Train one epoch with a ModelCheckpoint and verify the file exists.
        cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                          save_best_only=save_best_only,
                                          mode=mode)]
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
        assert os.path.exists(filepath)
        os.remove(filepath)

    run_case('val_loss', False, 'auto')  # case 1
    run_case('val_loss', False, 'min')   # case 2
    run_case('val_acc', False, 'max')    # case 3
    run_case('val_acc', True, 'max')     # case 4
def test_EarlyStopping():
    """Smoke-test EarlyStopping in explicit 'max' and inferred 'auto' modes."""
    (X_train, y_train), (X_test, y_test) = get_test_data(
        nb_train=train_samples, nb_test=test_samples,
        input_shape=(input_dim,), classification=True, nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])

    # Same model trained twice, once per (mode, patience) configuration;
    # both runs monitor validation accuracy for up to 20 epochs.
    for mode, patience in (('max', 0), ('auto', 2)):
        stopper = callbacks.EarlyStopping(patience=patience,
                                          monitor='val_acc', mode=mode)
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=[stopper],
                  nb_epoch=20)
def test_EarlyStopping_reuse():
    """A single EarlyStopping instance can be reused across fit() calls."""
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)

    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy',
                  metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    initial_weights = model.get_weights()

    first = model.fit(data, labels, callbacks=[stopper])
    assert len(first.epoch) >= patience

    # Restarting from the same weights must again train for at least
    # `patience` epochs, i.e. the callback's internal state was reset.
    model.set_weights(initial_weights)
    second = model.fit(data, labels, callbacks=[stopper])
    assert len(second.epoch) >= patience
def test_LearningRateScheduler():
    """LearningRateScheduler should set lr = 1/(1+epoch) every epoch.

    After 5 epochs the last scheduled epoch index is 4, so the final
    learning rate must be 1/5 = 0.2.
    """
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    # Bug fix: compare the ABSOLUTE difference. Without abs() the signed
    # difference is negative (hence trivially < epsilon) whenever the
    # stored lr undershoots 0.2, so the assertion could never fail.
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
    """ReduceLROnPlateau should cut the LR only when progress stalls."""
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        # Re-seed so both cases start from identical initial weights.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(nb_class, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5,
              verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01,
                       atol=K.epsilon())

    # With epsilon=0 and a short run the plateau is never detected, so the
    # LR must stay at its initial 0.1.
    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5,
              verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1,
                       atol=K.epsilon())
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason="Requires tensorflow backend")
def test_TensorBoard():
    """Smoke-test the TensorBoard callback with fit() and fit_generator().

    Runs several training configurations and only checks that the log
    directory gets created (event-file contents are not inspected).

    Cleanup: removed an unused ``import tensorflow as tf``, an unused
    ``KTF`` backend import, and a dead ``data_generator_graph`` helper
    that was never called.
    """
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def data_generator(train):
        # Endless generator over the train or test split, batch by batch.
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    # case 1 Sequential
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
    cbks = [tsb]

    # fit with validation data
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
    # fit with validation data and accuracy
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
    # fit generator with validation data
    model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                        validation_data=(X_test, y_test),
                        callbacks=cbks)
    # fit generator without validation data
    model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                        callbacks=cbks)
    # fit generator with validation data and accuracy
    model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                        validation_data=(X_test, y_test),
                        callbacks=cbks)
    # fit generator without validation data and accuracy
    model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                        callbacks=cbks)

    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
def test_LambdaCallback():
    """LambdaCallback's on_train_end hook must fire after training ends."""
    (X_train, y_train), (X_test, y_test) = get_test_data(
        nb_train=train_samples, nb_test=test_samples,
        input_shape=(input_dim,), classification=True, nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and
    # be terminated after training has completed.
    def spin():
        while True:
            pass

    worker = multiprocessing.Process(target=spin)
    worker.start()
    cleanup_callback = callbacks.LambdaCallback(
        on_train_end=lambda logs: worker.terminate())

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=[cleanup_callback], nb_epoch=5)
    worker.join()
    assert not worker.is_alive()
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason="Requires tensorflow backend")
def test_TensorBoard_with_ReduceLROnPlateau():
    """The two callbacks must coexist: LR reduction while logging to TB."""
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

    # Only verify the log directory was produced, then clean it up.
    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
if __name__ == '__main__':
    # Allow running this test module directly: python test_callbacks.py
    pytest.main([__file__])
|
test_setup.py | """Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import os
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_START, CONSTRAINT_FILE
import homeassistant.config as config_util
from homeassistant import setup, loader
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers import discovery
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, \
assert_setup_component, get_test_config_dir
# NOTE(review): ORIG_TIMEZONE appears unused within this module — possibly
# kept for parity with sibling test modules; confirm before removing.
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
# Path of the version file inside the test configuration directory.
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
class TestSetup:
    """Test the bootstrap utils."""

    hass = None
    backup_cache = None

    # pylint: disable=invalid-name, no-self-use
    def setup_method(self, method):
        """Setup the test."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Clean up."""
        self.hass.stop()

        # if os.path.isfile(VERSION_PATH):
        #     os.remove(VERSION_PATH)

    def test_validate_component_config(self):
        """Test validating component configuration."""
        config_schema = vol.Schema({
            'comp_conf': {
                'hello': str
            }
        }, required=True)
        loader.set_component(
            'comp_conf', MockModule('comp_conf', config_schema=config_schema))

        # Missing, None, empty and over-specified configs must all fail;
        # DATA_SETUP is popped between attempts to reset setup bookkeeping.
        with assert_setup_component(0):
            assert not setup.setup_component(self.hass, 'comp_conf', {})

        self.hass.data.pop(setup.DATA_SETUP)

        with assert_setup_component(0):
            assert not setup.setup_component(self.hass, 'comp_conf', {
                'comp_conf': None
            })

        self.hass.data.pop(setup.DATA_SETUP)

        with assert_setup_component(0):
            assert not setup.setup_component(self.hass, 'comp_conf', {
                'comp_conf': {}
            })

        self.hass.data.pop(setup.DATA_SETUP)

        with assert_setup_component(0):
            assert not setup.setup_component(self.hass, 'comp_conf', {
                'comp_conf': {
                    'hello': 'world',
                    'invalid': 'extra',
                }
            })

        self.hass.data.pop(setup.DATA_SETUP)

        # Only the exactly-matching config succeeds.
        with assert_setup_component(1):
            assert setup.setup_component(self.hass, 'comp_conf', {
                'comp_conf': {
                    'hello': 'world',
                }
            })

    def test_validate_platform_config(self):
        """Test validating platform configuration."""
        platform_schema = PLATFORM_SCHEMA.extend({
            'hello': str,
        })
        loader.set_component(
            'platform_conf',
            MockModule('platform_conf', platform_schema=platform_schema))
        loader.set_component(
            'platform_conf.whatever', MockPlatform('whatever'))

        # A config with extra keys yields zero platforms set up (but the
        # component itself still loads).
        with assert_setup_component(0):
            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': {
                    'hello': 'world',
                    'invalid': 'extra',
                }
            })

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('platform_conf')

        # An invalid secondary entry ('platform_conf 2') is ignored while
        # the valid primary entry still sets up one platform.
        with assert_setup_component(1):
            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': {
                    'platform': 'whatever',
                    'hello': 'world',
                },
                'platform_conf 2': {
                    'invalid': True
                }
            })

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('platform_conf')

        with assert_setup_component(0):
            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': {
                    'platform': 'not_existing',
                    'hello': 'world',
                }
            })

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('platform_conf')

        with assert_setup_component(1):
            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': {
                    'platform': 'whatever',
                    'hello': 'world',
                }
            })

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('platform_conf')

        # A list of platform dicts is accepted as well.
        with assert_setup_component(1):
            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': [{
                    'platform': 'whatever',
                    'hello': 'world',
                }]
            })

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('platform_conf')

        # Any falsey platform config will be ignored (None, {}, etc)
        with assert_setup_component(0) as config:
            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': None
            })
            assert 'platform_conf' in self.hass.config.components
            assert not config['platform_conf']  # empty

            assert setup.setup_component(self.hass, 'platform_conf', {
                'platform_conf': {}
            })
            assert 'platform_conf' in self.hass.config.components
            assert not config['platform_conf']  # empty

    def test_component_not_found(self):
        """setup_component should not crash if component doesn't exist."""
        assert not setup.setup_component(self.hass, 'non_existing')

    def test_component_not_double_initialized(self):
        """Test we do not setup a component twice."""
        mock_setup = mock.MagicMock(return_value=True)

        loader.set_component('comp', MockModule('comp', setup=mock_setup))

        assert setup.setup_component(self.hass, 'comp')
        assert mock_setup.called

        mock_setup.reset_mock()

        # Second setup must be a no-op.
        assert setup.setup_component(self.hass, 'comp')
        assert not mock_setup.called

    @mock.patch('homeassistant.util.package.install_package',
                return_value=False)
    def test_component_not_installed_if_requirement_fails(self, mock_install):
        """Component setup should fail if requirement can't install."""
        self.hass.config.skip_pip = False
        loader.set_component(
            'comp', MockModule('comp', requirements=['package==0.0.1']))

        assert not setup.setup_component(self.hass, 'comp')
        assert 'comp' not in self.hass.config.components

    @mock.patch('homeassistant.setup.os.path.dirname')
    @mock.patch('homeassistant.util.package.running_under_virtualenv',
                return_value=True)
    @mock.patch('homeassistant.util.package.install_package',
                return_value=True)
    def test_requirement_installed_in_venv(
            self, mock_install, mock_venv, mock_dirname):
        """Test requirement installed in virtual environment."""
        mock_venv.return_value = True
        mock_dirname.return_value = 'ha_package_path'
        self.hass.config.skip_pip = False
        loader.set_component(
            'comp', MockModule('comp', requirements=['package==0.0.1']))
        assert setup.setup_component(self.hass, 'comp')
        assert 'comp' in self.hass.config.components
        # In a venv no --target dir is passed, only the constraint file.
        assert mock_install.call_args == mock.call(
            'package==0.0.1',
            constraints=os.path.join('ha_package_path', CONSTRAINT_FILE))

    @mock.patch('homeassistant.setup.os.path.dirname')
    @mock.patch('homeassistant.util.package.running_under_virtualenv',
                return_value=False)
    @mock.patch('homeassistant.util.package.install_package',
                return_value=True)
    def test_requirement_installed_in_deps(
            self, mock_install, mock_venv, mock_dirname):
        """Test requirement installed in deps directory."""
        mock_dirname.return_value = 'ha_package_path'
        self.hass.config.skip_pip = False
        loader.set_component(
            'comp', MockModule('comp', requirements=['package==0.0.1']))
        assert setup.setup_component(self.hass, 'comp')
        assert 'comp' in self.hass.config.components
        # Outside a venv packages land in the config dir's 'deps' folder.
        assert mock_install.call_args == mock.call(
            'package==0.0.1', target=self.hass.config.path('deps'),
            constraints=os.path.join('ha_package_path', CONSTRAINT_FILE))

    def test_component_not_setup_twice_if_loaded_during_other_setup(self):
        """Test component setup while waiting for lock is not setup twice."""
        result = []

        @asyncio.coroutine
        def async_setup(hass, config):
            """Tracking Setup."""
            result.append(1)

        loader.set_component(
            'comp', MockModule('comp', async_setup=async_setup))

        def setup_component():
            """Setup the component."""
            setup.setup_component(self.hass, 'comp')

        # Race two concurrent setups of the same component.
        thread = threading.Thread(target=setup_component)
        thread.start()
        setup.setup_component(self.hass, 'comp')

        thread.join()

        # Only one of the two may have actually run async_setup.
        assert len(result) == 1

    def test_component_not_setup_missing_dependencies(self):
        """Test we do not setup a component if not all dependencies loaded."""
        deps = ['non_existing']
        loader.set_component('comp', MockModule('comp', dependencies=deps))

        assert not setup.setup_component(self.hass, 'comp', {})
        assert 'comp' not in self.hass.config.components

        self.hass.data.pop(setup.DATA_SETUP)

        # Once the dependency exists, setup succeeds.
        loader.set_component('non_existing', MockModule('non_existing'))
        assert setup.setup_component(self.hass, 'comp', {})

    def test_component_failing_setup(self):
        """Test component that fails setup."""
        loader.set_component(
            'comp', MockModule('comp', setup=lambda hass, config: False))

        assert not setup.setup_component(self.hass, 'comp', {})
        assert 'comp' not in self.hass.config.components

    def test_component_exception_setup(self):
        """Test component that raises exception during setup."""
        def exception_setup(hass, config):
            """Setup that raises exception."""
            raise Exception('fail!')

        loader.set_component('comp', MockModule('comp', setup=exception_setup))

        # The exception must be swallowed; setup just reports failure.
        assert not setup.setup_component(self.hass, 'comp', {})
        assert 'comp' not in self.hass.config.components

    def test_component_setup_with_validation_and_dependency(self):
        """Test all config is passed to dependencies."""
        def config_check_setup(hass, config):
            """Setup method that tests config is passed in."""
            if config.get('comp_a', {}).get('valid', False):
                return True
            raise Exception('Config not passed in: {}'.format(config))

        loader.set_component('comp_a',
                             MockModule('comp_a', setup=config_check_setup))
        loader.set_component('switch.platform_a', MockPlatform('comp_b',
                                                               ['comp_a']))

        # Setting up 'switch' must pull in its platform's dependency
        # 'comp_a' and hand it the full config.
        setup.setup_component(self.hass, 'switch', {
            'comp_a': {
                'valid': True
            },
            'switch': {
                'platform': 'platform_a',
            }
        })
        assert 'comp_a' in self.hass.config.components

    def test_platform_specific_config_validation(self):
        """Test platform that specifies config."""
        platform_schema = PLATFORM_SCHEMA.extend({
            'valid': True,
        }, extra=vol.PREVENT_EXTRA)

        mock_setup = mock.MagicMock(spec_set=True)

        loader.set_component(
            'switch.platform_a',
            MockPlatform(platform_schema=platform_schema,
                         setup_platform=mock_setup))

        with assert_setup_component(0, 'switch'):
            assert setup.setup_component(self.hass, 'switch', {
                'switch': {
                    'platform': 'platform_a',
                    'invalid': True
                }
            })
            assert mock_setup.call_count == 0

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('switch')

        with assert_setup_component(0):
            assert setup.setup_component(self.hass, 'switch', {
                'switch': {
                    'platform': 'platform_a',
                    'valid': True,
                    'invalid_extra': True,
                }
            })
            assert mock_setup.call_count == 0

        self.hass.data.pop(setup.DATA_SETUP)
        self.hass.config.components.remove('switch')

        with assert_setup_component(1):
            assert setup.setup_component(self.hass, 'switch', {
                'switch': {
                    'platform': 'platform_a',
                    'valid': True
                }
            })
            assert mock_setup.call_count == 1

    def test_disable_component_if_invalid_return(self):
        """Test disabling component if invalid return."""
        # setup returning None (neither True nor False) unloads the module.
        loader.set_component(
            'disabled_component',
            MockModule('disabled_component', setup=lambda hass, config: None))

        assert not setup.setup_component(self.hass, 'disabled_component')
        assert loader.get_component('disabled_component') is None
        assert 'disabled_component' not in self.hass.config.components

        self.hass.data.pop(setup.DATA_SETUP)
        # setup returning False fails setup but keeps the module loaded.
        loader.set_component(
            'disabled_component',
            MockModule('disabled_component', setup=lambda hass, config: False))

        assert not setup.setup_component(self.hass, 'disabled_component')
        assert loader.get_component('disabled_component') is not None
        assert 'disabled_component' not in self.hass.config.components

        self.hass.data.pop(setup.DATA_SETUP)
        loader.set_component(
            'disabled_component',
            MockModule('disabled_component', setup=lambda hass, config: True))

        assert setup.setup_component(self.hass, 'disabled_component')
        assert loader.get_component('disabled_component') is not None
        assert 'disabled_component' in self.hass.config.components

    def test_all_work_done_before_start(self):
        """Test all init work done till start."""
        call_order = []

        def component1_setup(hass, config):
            """Setup mock component."""
            discovery.discover(hass, 'test_component2',
                               component='test_component2')
            discovery.discover(hass, 'test_component3',
                               component='test_component3')
            return True

        def component_track_setup(hass, config):
            """Setup mock component."""
            call_order.append(1)
            return True

        loader.set_component(
            'test_component1',
            MockModule('test_component1', setup=component1_setup))

        loader.set_component(
            'test_component2',
            MockModule('test_component2', setup=component_track_setup))

        loader.set_component(
            'test_component3',
            MockModule('test_component3', setup=component_track_setup))

        @callback
        def track_start(event):
            """Track start event."""
            call_order.append(2)

        self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)

        self.hass.add_job(setup.async_setup_component(
            self.hass, 'test_component1', {}))

        self.hass.block_till_done()
        self.hass.start()

        # Both discovered components set up (1, 1) before START fired (2).
        assert call_order == [1, 1, 2]
@asyncio.coroutine
def test_component_cannot_depend_config(hass):
    """Test config is not allowed to be a dependency."""
    # 'config' is a reserved core component and must be rejected.
    result = yield from setup._async_process_dependencies(
        hass, None, 'test', ['config'])

    assert not result
@asyncio.coroutine
def test_component_warn_slow_setup(hass):
    """Warn we log when a component setup takes a long time."""
    loader.set_component('test_component1', MockModule('test_component1'))

    with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
            as mock_call:
        result = yield from setup.async_setup_component(
            hass, 'test_component1', {})
        assert result
        assert mock_call.called
        # NOTE(review): three call_later invocations are observed; the
        # first is the slow-setup warning timer inspected below.
        assert len(mock_call.mock_calls) == 3

        timeout, logger_method = mock_call.mock_calls[0][1][:2]

        assert timeout == setup.SLOW_SETUP_WARNING
        assert logger_method == setup._LOGGER.warning

        # The warning timer must be cancelled once setup completes.
        assert mock_call().cancel.called
@asyncio.coroutine
def test_platform_no_warn_slow(hass):
    """Do not warn for long entity setup time."""
    loader.set_component(
        'test_component1',
        MockModule('test_component1', platform_schema=PLATFORM_SCHEMA))

    with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
            as mock_call:
        result = yield from setup.async_setup_component(
            hass, 'test_component1', {})
        assert result
        # Platform-style components skip the slow-setup warning timer.
        assert not mock_call.called
|
safaribooks.py | #!/usr/bin/env python3
# coding: utf-8
import re
import os
import sys
import json
import shutil
import pathlib
import getpass
import logging
import argparse
import requests
import traceback
from html import escape
from random import random
from lxml import html, etree
from multiprocessing import Process, Queue, Value
from urllib.parse import urljoin, urlparse, parse_qs, quote_plus
# Directory containing this script; the cookie jar lives next to it.
PATH = os.path.dirname(os.path.realpath(__file__))
COOKIES_FILE = os.path.join(PATH, "cookies.json")

# Host/URL layout of the O'Reilly (formerly Safari Books Online) services.
ORLY_BASE_HOST = "oreilly.com"  # PLEASE INSERT URL HERE
SAFARI_BASE_HOST = "learning." + ORLY_BASE_HOST
API_ORIGIN_HOST = "api." + ORLY_BASE_HOST
ORLY_BASE_URL = "https://www." + ORLY_BASE_HOST
SAFARI_BASE_URL = "https://" + SAFARI_BASE_HOST
API_ORIGIN_URL = "https://" + API_ORIGIN_HOST
PROFILE_URL = SAFARI_BASE_URL + "/profile/"

# DEBUG: route traffic through a local intercepting proxy when enabled.
USE_PROXY = False
PROXIES = {"https": "https://127.0.0.1:8080"}
class Display:
    """Terminal + file logging front-end for SafariBooks.

    Mirrors every message to a log file and renders colored status lines
    on the terminal (ANSI colors are disabled on Windows).
    """

    BASE_FORMAT = logging.Formatter(
        fmt="[%(asctime)s] %(message)s",
        datefmt="%d/%b/%Y %H:%M:%S"
    )

    SH_DEFAULT = "\033[0m" if "win" not in sys.platform else ""  # TODO: colors for Windows
    SH_YELLOW = "\033[33m" if "win" not in sys.platform else ""
    SH_BG_RED = "\033[41m" if "win" not in sys.platform else ""
    SH_BG_YELLOW = "\033[43m" if "win" not in sys.platform else ""

    def __init__(self, log_file):
        """Open the log file, hook the excepthook and init progress state."""
        self.output_dir = ""
        self.output_dir_set = False

        self.log_file = os.path.join(PATH, log_file)

        self.logger = logging.getLogger("SafariBooks")
        self.logger.setLevel(logging.INFO)
        logs_handler = logging.FileHandler(filename=self.log_file)
        logs_handler.setFormatter(self.BASE_FORMAT)
        logs_handler.setLevel(logging.INFO)
        self.logger.addHandler(logs_handler)

        self.columns, _ = shutil.get_terminal_size()

        self.logger.info("** Welcome to SafariBooks! **")

        self.book_ad_info = False
        # Shared flags/counters: multiprocessing.Value so worker processes
        # can update them.
        self.css_ad_info = Value("i", 0)
        self.images_ad_info = Value("i", 0)
        self.last_request = (None,)
        self.in_error = False

        self.state_status = Value("i", 0)
        # Route unhandled exceptions through our own reporter.
        sys.excepthook = self.unhandled_exception

    def set_output_dir(self, output_dir):
        """Record (and announce) the directory the EPUB is written to."""
        self.info("Output directory:\n    %s" % output_dir)
        self.output_dir = output_dir
        self.output_dir_set = True

    def unregister(self):
        """Close the log handler and restore the default excepthook."""
        self.logger.handlers[0].close()
        sys.excepthook = sys.__excepthook__

    def log(self, message):
        """Write a message to the log file, decoding bytes when possible."""
        try:
            self.logger.info(str(message, "utf-8", "replace"))

        except (UnicodeDecodeError, Exception):
            # str(x, ...) raises TypeError for non-bytes input; fall back
            # to logging the object as-is.
            self.logger.info(message)

    def out(self, put):
        """Print a line to stdout, clearing the current terminal row first."""
        pattern = "\r{!s}\r{!s}\n"
        try:
            s = pattern.format(" " * self.columns, str(put, "utf-8", "replace"))

        except TypeError:
            s = pattern.format(" " * self.columns, put)

        sys.stdout.write(s)

    def info(self, message, state=False):
        """Log and display an informational message ([*] or, if state, [-])."""
        self.log(message)

        output = (self.SH_YELLOW + "[*]" + self.SH_DEFAULT if not state else
                  self.SH_BG_YELLOW + "[-]" + self.SH_DEFAULT) + " %s" % message
        self.out(output)

    def error(self, error):
        """Log and display an error message once ([#])."""
        if not self.in_error:
            self.in_error = True

        self.log(error)

        output = self.SH_BG_RED + "[#]" + self.SH_DEFAULT + " %s" % error
        self.out(output)

    def exit(self, error):
        """Report a fatal error, dump the last request and terminate."""
        self.error(str(error))

        if self.output_dir_set:
            output = (self.SH_YELLOW + "[+]" + self.SH_DEFAULT +
                      " Please delete the output directory '" + self.output_dir + "'"
                      " and restart the program.")
            self.out(output)

        output = self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Aborting..."
        self.out(output)

        self.save_last_request()
        sys.exit(1)

    def unhandled_exception(self, _, o, tb):
        """sys.excepthook replacement: log the traceback and abort."""
        self.log("".join(traceback.format_tb(tb)))
        self.exit("Unhandled Exception: %s (type: %s)" % (o, o.__class__.__name__))

    def save_last_request(self):
        """Log details of the last HTTP request, if one was recorded."""
        if any(self.last_request):
            self.log("Last request done:\n\tURL: {0}\n\tDATA: {1}\n\tOTHERS: {2}\n\n\t{3}\n{4}\n\n{5}\n"
                     .format(*self.last_request))

    def intro(self):
        """Print one of two randomly chosen ASCII-art banners."""
        output = self.SH_YELLOW + ("""
    ____     ___       _
   / __/__ _/ _/__ _____(_)
  _\ \/ _ `/ _/ _ `/ __/ /
 /___/\_,_/_/ \_,_/_/ /_/
      / _ )___  ___  / /__ ___
     / _  / _ \/ _ \/  '_/(_-<
    /____/\___/\___/_/\_\/___/
""" if random() > 0.5 else """
 ██████╗     ██████╗ ██╗  ██╗   ██╗██████╗
██╔═══██╗    ██╔══██╗██║  ╚██╗ ██╔╝╚════██╗
██║   ██║    ██████╔╝██║   ╚████╔╝  ▄███╔╝
██║   ██║    ██╔══██╗██║    ╚██╔╝   ▀▀══╝
╚██████╔╝    ██║  ██║███████╗██║    ██╗
 ╚═════╝     ╚═╝  ╚═╝╚══════╝╚═╝    ╚═╝
""") + self.SH_DEFAULT
        output += "\n" + "~" * (self.columns // 2)

        self.out(output)

    def parse_description(self, desc):
        """Return the plain text of an HTML description, or "n/d"."""
        if not desc:
            return "n/d"

        try:
            return html.fromstring(desc).text_content()

        except (html.etree.ParseError, html.etree.ParserError) as e:
            self.log("Error parsing the description: %s" % e)
            return "n/d"

    def book_info(self, info):
        """Display the main metadata fields of a book-info API response."""
        description = self.parse_description(info.get("description", None)).replace("\n", " ")
        for t in [
            ("Title", info.get("title", "")), ("Authors", ", ".join(aut.get("name", "") for aut in info.get("authors", []))),
            ("Identifier", info.get("identifier", "")), ("ISBN", info.get("isbn", "")),
            ("Publishers", ", ".join(pub.get("name", "") for pub in info.get("publishers", []))),
            ("Rights", info.get("rights", "")),
            ("Description", description[:500] + "..." if len(description) >= 500 else description),
            ("Release Date", info.get("issued", "")),
            ("URL", info.get("web_url", ""))
        ]:
            self.info("{0}{1}{2}: {3}".format(self.SH_YELLOW, t[0], self.SH_DEFAULT, t[1]), True)

    def state(self, origin, done):
        """Render a progress bar for `done` of `origin` completed items."""
        progress = int(done * 100 / origin)
        bar = int(progress * (self.columns - 11) / 100)
        # Only redraw when progress advanced (avoids flicker from workers).
        if self.state_status.value < progress:
            self.state_status.value = progress
            sys.stdout.write(
                "\r    " + self.SH_BG_YELLOW + "[" + ("#" * bar).ljust(self.columns - 11, "-") + "]" +
                self.SH_DEFAULT + ("%4s" % progress) + "%" + ("\n" if progress == 100 else "")
            )

    def done(self, epub_file):
        """Display the final success message with the output file path."""
        self.info("Done: %s\n\n" % epub_file +
                  "    If you like it, please * this project on GitHub to make it known:\n"
                  "        https://github.com/lorenzodifuccia/safaribooks\n"
                  "    e don't forget to renew your Safari Books Online subscription:\n"
                  "        " + SAFARI_BASE_URL + "\n\n" +
                  self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Bye!!")

    @staticmethod
    def api_error(response):
        """Build a user-facing message from an API error response dict."""
        message = "API: "
        if "detail" in response and "Not found" in response["detail"]:
            message += "book's not present in Safari Books Online.\n" \
                       "    The book identifier is the digits that you can find in the URL:\n" \
                       "    `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"

        else:
            os.remove(COOKIES_FILE)
            # Bug fix: the original `+=` expression relied on the low
            # precedence of the conditional operator, so when the response
            # had a "detail" key the login-hint suffix was silently dropped
            # (and when it had none, the "Out-of-Session" prefix was lost).
            # Parenthesizing the conditional yields the intended message.
            message += "Out-of-Session%s.\n" % (
                " (%s)" % response["detail"] if "detail" in response else ""
            ) + Display.SH_YELLOW + "[+]" + Display.SH_DEFAULT + \
                " Use the `--cred` or `--login` options in order to perform the auth login to Safari."

        return message
class WinQueue(list):
    """Minimal Queue-like shim backed by a plain list.

    Used on Windows where passing a multiprocessing.Queue to a Process
    fails ("can't pickle _thread.RLock objects" — see original TODO).
    """

    def put(self, el):
        """Queue-compatible alias for append()."""
        self.append(el)

    def qsize(self):
        """Queue-compatible alias for len()."""
        return len(self)
class SafariBooks:
    """Downloads a book from Safari Books Online and packages it as an EPUB."""

    LOGIN_URL = ORLY_BASE_URL + "/member/auth/login/"
    LOGIN_ENTRY_URL = SAFARI_BASE_URL + "/login/unified/?next=/home/"

    API_TEMPLATE = SAFARI_BASE_URL + "/api/v1/book/{0}/"

    # Chapter page prologue; {0} receives the per-page CSS markup.
    BASE_01_HTML = "<!DOCTYPE html>\n" \
                   "<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"" \
                   " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" \
                   " xsi:schemaLocation=\"http://www.w3.org/2002/06/xhtml2/" \
                   " http://www.w3.org/MarkUp/SCHEMA/xhtml2.xsd\"" \
                   " xmlns:epub=\"http://www.idpf.org/2007/ops\">\n" \
                   "<head>\n" \
                   "{0}\n" \
                   "<style type=\"text/css\">" \
                   "body{{margin:1em;background-color:transparent!important;}}" \
                   "#sbo-rt-content *{{text-indent:0pt!important;}}#sbo-rt-content .bq{{margin-right:1em!important;}}"

    # Extra CSS injected for E-Readers (see the --kindle CLI option).
    KINDLE_HTML = "#sbo-rt-content *{{word-wrap:break-word!important;" \
                  "word-break:break-word!important;}}#sbo-rt-content table,#sbo-rt-content pre" \
                  "{{overflow-x:unset!important;overflow:unset!important;" \
                  "overflow-y:unset!important;white-space:pre-wrap!important;}}"

    # Chapter page epilogue; {1} receives the chapter body markup.
    BASE_02_HTML = "</style>" \
                   "</head>\n" \
                   "<body>{1}</body>\n</html>"

    # META-INF/container.xml pointing at the OPF package document.
    CONTAINER_XML = "<?xml version=\"1.0\"?>" \
                    "<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">" \
                    "<rootfiles>" \
                    "<rootfile full-path=\"OEBPS/content.opf\" media-type=\"application/oebps-package+xml\" />" \
                    "</rootfiles>" \
                    "</container>"

    # Format: ID, Title, Authors, Description, Subjects, Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl
    CONTENT_OPF = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" \
                  "<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"bookid\" version=\"2.0\" >\n" \
                  "<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " \
                  " xmlns:opf=\"http://www.idpf.org/2007/opf\">\n" \
                  "<dc:title>{1}</dc:title>\n" \
                  "{2}\n" \
                  "<dc:description>{3}</dc:description>\n" \
                  "{4}" \
                  "<dc:publisher>{5}</dc:publisher>\n" \
                  "<dc:rights>{6}</dc:rights>\n" \
                  "<dc:language>en-US</dc:language>\n" \
                  "<dc:date>{7}</dc:date>\n" \
                  "<dc:identifier id=\"bookid\">{0}</dc:identifier>\n" \
                  "<meta name=\"cover\" content=\"{8}\"/>\n" \
                  "</metadata>\n" \
                  "<manifest>\n" \
                  "<item id=\"ncx\" href=\"toc.ncx\" media-type=\"application/x-dtbncx+xml\" />\n" \
                  "{9}\n" \
                  "</manifest>\n" \
                  "<spine toc=\"ncx\">\n{10}</spine>\n" \
                  "<guide><reference href=\"{11}\" title=\"Cover\" type=\"cover\" /></guide>\n" \
                  "</package>"

    # Format: ID, Depth, Title, Author, NAVMAP
    TOC_NCX = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\n" \
              "<!DOCTYPE ncx PUBLIC \"-//NISO//DTD ncx 2005-1//EN\"" \
              " \"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd\">\n" \
              "<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">\n" \
              "<head>\n" \
              "<meta content=\"ID:ISBN:{0}\" name=\"dtb:uid\"/>\n" \
              "<meta content=\"{1}\" name=\"dtb:depth\"/>\n" \
              "<meta content=\"0\" name=\"dtb:totalPageCount\"/>\n" \
              "<meta content=\"0\" name=\"dtb:maxPageNumber\"/>\n" \
              "</head>\n" \
              "<docTitle><text>{2}</text></docTitle>\n" \
              "<docAuthor><text>{3}</text></docAuthor>\n" \
              "<navMap>{4}</navMap>\n" \
              "</ncx>"

    # Browser-like defaults applied to every session request.
    HEADERS = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Referer": LOGIN_ENTRY_URL,
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/90.0.4430.212 Safari/537.36"
    }

    # Matches Set-Cookie max-age values with a decimal point (see handle_cookie_update).
    COOKIE_FLOAT_MAX_AGE_PATTERN = re.compile(r'(max-age=\d*\.\d*)', re.IGNORECASE)
def __init__(self, args):
    """Run the whole download pipeline for the book identified by `args.bookid`.

    Steps: authenticate (saved cookies or credentials), fetch metadata and
    the chapter list, download chapters/CSS/images, package as an EPUB.

    FIX: the three cookie-file accesses used bare `open()` expressions that
    leaked the file handle; they now use context managers.
    """
    self.args = args
    self.display = Display("info_%s.log" % escape(args.bookid))
    self.display.intro()

    self.session = requests.Session()
    if USE_PROXY:  # DEBUG
        self.session.proxies = PROXIES
        self.session.verify = False

    self.session.headers.update(self.HEADERS)

    self.jwt = {}

    if not args.cred:
        if not os.path.isfile(COOKIES_FILE):
            self.display.exit("Login: unable to find `cookies.json` file.\n"
                              " Please use the `--cred` or `--login` options to perform the login.")
        with open(COOKIES_FILE) as cookies_file:
            self.session.cookies.update(json.load(cookies_file))
    else:
        self.display.info("Logging into Safari Books Online...", state=True)
        self.do_login(*args.cred)
        if not args.no_cookies:
            with open(COOKIES_FILE, 'w') as cookies_file:
                json.dump(self.session.cookies.get_dict(), cookies_file)

    self.check_login()

    self.book_id = args.bookid
    self.api_url = self.API_TEMPLATE.format(self.book_id)

    self.display.info("Retrieving book info...")
    self.book_info = self.get_book_info()
    self.display.book_info(self.book_info)

    self.display.info("Retrieving book chapters...")
    self.book_chapters = self.get_book_chapters()
    self.chapters_queue = self.book_chapters[:]

    # Chapter retrieval and TOC parsing recurse per chapter: raise the limit if needed.
    if len(self.book_chapters) > sys.getrecursionlimit():
        sys.setrecursionlimit(len(self.book_chapters))

    self.book_title = self.book_info["title"]
    self.base_url = self.book_info["web_url"]

    self.clean_book_title = "".join(self.escape_dirname(self.book_title).split(",")[:2]) \
                            + " ({0})".format(self.book_id)

    books_dir = os.path.join(PATH, "Books")
    if not os.path.isdir(books_dir):
        os.mkdir(books_dir)

    self.BOOK_PATH = os.path.join(books_dir, self.clean_book_title)
    self.display.set_output_dir(self.BOOK_PATH)
    self.css_path = ""
    self.images_path = ""
    self.create_dirs()

    self.chapter_title = ""
    self.filename = ""
    self.chapter_stylesheets = []
    self.css = []
    self.images = []

    self.display.info("Downloading book contents... (%s chapters)" % len(self.book_chapters), state=True)
    # NOTE(review): KINDLE_HTML is injected when --kindle is NOT passed, which
    # contradicts the --kindle help text; polarity kept as-is — confirm intent.
    self.BASE_HTML = self.BASE_01_HTML + (self.KINDLE_HTML if not args.kindle else "") + self.BASE_02_HTML

    self.cover = False
    self.get()
    if not self.cover:
        # No cover was detected in the first page: fall back to the API cover
        # image and prepend a synthetic cover chapter.
        self.cover = self.get_default_cover() if "cover" in self.book_info else False
        cover_html = self.parse_html(
            html.fromstring("<div id=\"sbo-rt-content\"><img src=\"Images/{0}\"></div>".format(self.cover)), True
        )

        self.book_chapters = [{
            "filename": "default_cover.xhtml",
            "title": "Cover"
        }] + self.book_chapters

        self.filename = self.book_chapters[0]["filename"]
        self.save_page_html(cover_html)

    self.css_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
    self.display.info("Downloading book CSSs... (%s files)" % len(self.css), state=True)
    self.collect_css()

    self.images_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
    self.display.info("Downloading book images... (%s files)" % len(self.images), state=True)
    self.collect_images()

    self.display.info("Creating EPUB file...", state=True)
    self.create_epub()

    if not args.no_cookies:
        with open(COOKIES_FILE, "w") as cookies_file:
            json.dump(self.session.cookies.get_dict(), cookies_file)

    self.display.done(os.path.join(self.BOOK_PATH, self.book_id + ".epub"))
    self.display.unregister()

    if not self.display.in_error and not args.log:
        os.remove(self.display.log_file)
def handle_cookie_update(self, set_cookie_headers):
    """Re-set cookies whose max-age is a float (which requests mishandles)."""
    for header in set_cookie_headers:
        if not self.COOKIE_FLOAT_MAX_AGE_PATTERN.search(header):
            continue
        # `name=value` is the first `;`-separated field of a Set-Cookie header.
        name, value = header.split(";")[0].split("=")
        self.session.cookies.set(name, value)
def requests_provider(self, url, is_post=False, data=None, perform_redirect=True, **kwargs):
    """Perform a GET/POST through the shared session.

    Follows redirects manually (so float max-age cookies can be fixed up)
    and records the last request on the display for error reporting.

    Returns:
        The requests Response, or the integer 0 on any network failure.

    FIX: **kwargs (e.g. `stream=True`, `json=...`, extra headers) are now
    forwarded when following a redirect; they were previously dropped
    (this resolves the old "How about **kwargs?" TODO).
    """
    try:
        response = getattr(self.session, "post" if is_post else "get")(
            url,
            data=data,
            allow_redirects=False,
            **kwargs
        )

        self.handle_cookie_update(response.raw.headers.getlist("Set-Cookie"))

        self.display.last_request = (
            url, data, kwargs, response.status_code, "\n".join(
                ["\t{}: {}".format(*h) for h in response.headers.items()]
            ), response.text
        )

    except (requests.ConnectionError, requests.ConnectTimeout, requests.RequestException) as request_exception:
        self.display.error(str(request_exception))
        return 0

    if response.is_redirect and perform_redirect:
        return self.requests_provider(response.next.url, is_post, None, perform_redirect, **kwargs)

    return response
@staticmethod
def parse_cred(cred):
if ":" not in cred:
return False
sep = cred.index(":")
new_cred = ["", ""]
new_cred[0] = cred[:sep].strip("'").strip('"')
if "@" not in new_cred[0]:
return False
new_cred[1] = cred[sep + 1:]
return new_cred
def do_login(self, email, password):
    """Perform the unified O'Reilly login flow and populate session cookies.

    Exits through `display.exit` on any network, credential or parsing failure.
    """
    response = self.requests_provider(self.LOGIN_ENTRY_URL)
    if response == 0:
        self.display.exit("Login: unable to reach Safari Books Online. Try again...")

    next_parameter = None
    try:
        # The entry page's final URL carries a `next` query parameter that has
        # to be echoed back as the login redirect URI.
        next_parameter = parse_qs(urlparse(response.request.url).query)["next"][0]
    except (AttributeError, ValueError, IndexError):
        self.display.exit("Login: unable to complete login on Safari Books Online. Try again...")

    redirect_uri = API_ORIGIN_URL + quote_plus(next_parameter)
    response = self.requests_provider(
        self.LOGIN_URL,
        is_post=True,
        json={
            "email": email,
            "password": password,
            "redirect_uri": redirect_uri
        },
        perform_redirect=False
    )
    if response == 0:
        self.display.exit("Login: unable to perform auth to Safari Books Online.\n Try again...")

    if response.status_code != 200:  # TODO To be reviewed
        try:
            # Scrape the error page for credential errors and ReCaptcha blocks.
            error_page = html.fromstring(response.text)
            errors_message = error_page.xpath("//ul[@class='errorlist']//li/text()")
            recaptcha = error_page.xpath("//div[@class='g-recaptcha']")
            messages = ([" `%s`" % error for error in errors_message
                         if "password" in error or "email" in error] if len(errors_message) else []) + \
                       ([" `ReCaptcha required (wait or do logout from the website).`"] if len(
                           recaptcha) else [])
            self.display.exit(
                "Login: unable to perform auth login to Safari Books Online.\n" + self.display.SH_YELLOW +
                "[*]" + self.display.SH_DEFAULT + " Details:\n" + "%s" % "\n".join(
                    messages if len(messages) else [" Unexpected error!"])
            )
        except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
            self.display.error(parsing_error)
            self.display.exit(
                "Login: your login went wrong and it encountered in an error"
                " trying to parse the login details of Safari Books Online. Try again..."
            )

    self.jwt = response.json()  # TODO: save JWT Tokens and use the refresh_token to restore user session
    # Follow the redirect URI returned by the login endpoint to finalize the session.
    response = self.requests_provider(self.jwt["redirect_uri"])
    if response == 0:
        self.display.exit("Login: unable to reach Safari Books Online. Try again...")
def check_login(self):
    """Verify that the current session can access the profile page."""
    profile_response = self.requests_provider(PROFILE_URL, perform_redirect=False)

    if profile_response == 0:
        self.display.exit("Login: unable to reach Safari Books Online. Try again...")
    elif profile_response.status_code != 200:
        self.display.exit("Authentication issue: unable to access profile page.")
    elif 'user_type":"Expired"' in profile_response.text:
        # The profile JSON embeds the subscription state as a raw string.
        self.display.exit("Authentication issue: account subscription expired.")

    self.display.info("Successfully authenticated.", state=True)
def get_book_info(self):
    """Fetch and sanitize the book-info JSON from the API.

    Exits on network/API errors. Missing (None) fields are normalized to
    the string 'n/a' and the user-specific reading position is dropped.
    """
    response = self.requests_provider(self.api_url)
    if response == 0:
        self.display.exit("API: unable to retrieve book info.")

    info = response.json()
    # A single-key payload is the API's error envelope.
    if not isinstance(info, dict) or len(info.keys()) == 1:
        self.display.exit(self.display.api_error(info))

    info.pop("last_chapter_read", None)

    return {key: ('n/a' if value is None else value) for key, value in info.items()}
def get_book_chapters(self, page=1):
    """Fetch the full chapter list from the API, cover chapters first.

    Recurses across paginated results; the recursion limit is raised when
    the book has more chapters than the current limit allows.
    """
    response = self.requests_provider(urljoin(self.api_url, "chapter/?page=%s" % page))
    if response == 0:
        self.display.exit("API: unable to retrieve book chapters.")

    response = response.json()

    # A single-key payload is the API's error envelope.
    if not isinstance(response, dict) or len(response.keys()) == 1:
        self.display.exit(self.display.api_error(response))

    if "results" not in response or not len(response["results"]):
        self.display.exit("API: unable to retrieve book chapters.")

    if response["count"] > sys.getrecursionlimit():
        sys.setrecursionlimit(response["count"])

    result = []
    # Move chapters that look like a cover to the front of the list.
    result.extend([c for c in response["results"] if "cover" in c["filename"] or "cover" in c["title"]])
    for c in result:
        del response["results"][response["results"].index(c)]

    result += response["results"]

    # Recurse into the next API page, if any.
    return result + (self.get_book_chapters(page + 1) if response["next"] else [])
def get_default_cover(self):
    """Download the cover image advertised by the book info.

    Returns the saved filename ("default_cover.<ext>"), or False when the
    download fails.
    """
    response = self.requests_provider(self.book_info["cover"], stream=True)
    if response == 0:
        self.display.error("Error trying to retrieve the cover: %s" % self.book_info["cover"])
        return False

    # Derive the file extension from the Content-Type (e.g. image/jpeg -> jpeg).
    file_ext = response.headers["Content-Type"].split("/")[-1]
    with open(os.path.join(self.images_path, "default_cover." + file_ext), 'wb') as i:
        for chunk in response.iter_content(1024):
            i.write(chunk)

    return "default_cover." + file_ext
def get_html(self, url):
    """Fetch a chapter page and parse it into an lxml tree; exit on failure."""
    response = self.requests_provider(url)
    if response == 0 or response.status_code != 200:
        self.display.exit(
            "Crawler: error trying to retrieve this page: %s (%s)\n From: %s" %
            (self.filename, self.chapter_title, url)
        )

    root = None
    try:
        root = html.fromstring(response.text, base_url=SAFARI_BASE_URL)
    except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
        self.display.error(parsing_error)
        self.display.exit(
            "Crawler: error trying to parse this page: %s (%s)\n From: %s" %
            (self.filename, self.chapter_title, url)
        )

    return root
@staticmethod
def url_is_absolute(url):
return bool(urlparse(url).netloc)
@staticmethod
def is_image_link(url: str):
return pathlib.Path(url).suffix[1:].lower() in ["jpg", "jpeg", "png", "gif"]
def link_replace(self, link):
    """lxml `rewrite_links` callback: map links to local EPUB paths.

    Returns the rewritten link, or None for empty/mailto links (returning
    None makes rewrite_links drop the attribute).
    """
    if link and not link.startswith("mailto"):
        if not self.url_is_absolute(link):
            # Relative link: anything that looks like an image goes to Images/.
            if any(x in link for x in ["cover", "images", "graphics"]) or \
                    self.is_image_link(link):
                image = link.split("/")[-1]
                return "Images/" + image

            # Other chapter files are stored with an .xhtml extension.
            return link.replace(".html", ".xhtml")

        else:
            # Absolute link into this same book: strip the prefix and recurse.
            if self.book_id in link:
                return self.link_replace(link.split(self.book_id)[-1])

            return link
    # Implicit None for empty or mailto links.
@staticmethod
def get_cover(html_root):
    """Heuristically locate the cover <img> element of the first page.

    Searches for 'cover' (case-insensitively) in common attributes of <img>
    elements first, then inside cover-like <div> and <a> containers.
    Returns the lxml element, or None when nothing matches.
    """
    # Register a lower-case() helper: XPath 1.0 has no native lower-casing.
    lowercase_ns = etree.FunctionNamespace(None)
    lowercase_ns["lower-case"] = lambda _, n: n[0].lower() if n and len(n) else ""

    images = html_root.xpath("//img[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
                             "contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover') or"
                             "contains(lower-case(@alt), 'cover')]")
    if len(images):
        return images[0]

    divs = html_root.xpath("//div[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
                           "contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
    if len(divs):
        return divs[0]

    a = html_root.xpath("//a[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
                        "contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
    if len(a):
        return a[0]

    return None
def parse_html(self, root, first_page=False):
    """Extract the chapter content from a parsed page.

    Collects referenced stylesheets and inline styles, rewrites links to
    local EPUB paths and, on the first page, converts a detected cover
    image into a dedicated full-page cover layout.

    Returns:
        (page_css, xhtml): CSS markup for the page head and the serialized
        chapter body.
    """
    if random() > 0.8:
        # Random spot-check: a page rendering account controls means the
        # session returned a site page instead of book content.
        if len(root.xpath("//div[@class='controls']/a/text()")):
            self.display.exit(self.display.api_error(" "))

    book_content = root.xpath("//div[@id='sbo-rt-content']")
    if not len(book_content):
        self.display.exit(
            "Parser: book content's corrupted or not present: %s (%s)" %
            (self.filename, self.chapter_title)
        )

    page_css = ""
    # Stylesheets declared in the chapter's API metadata.
    if len(self.chapter_stylesheets):
        for chapter_css_url in self.chapter_stylesheets:
            if chapter_css_url not in self.css:
                self.css.append(chapter_css_url)
                self.display.log("Crawler: found a new CSS at %s" % chapter_css_url)

            page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
                        "rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(chapter_css_url))

    # Stylesheets linked directly from the page markup.
    stylesheet_links = root.xpath("//link[@rel='stylesheet']")
    if len(stylesheet_links):
        for s in stylesheet_links:
            # Protocol-relative URLs (//host/...) get an explicit https scheme.
            css_url = urljoin("https:", s.attrib["href"]) if s.attrib["href"][:2] == "//" \
                else urljoin(self.base_url, s.attrib["href"])

            if css_url not in self.css:
                self.css.append(css_url)
                self.display.log("Crawler: found a new CSS at %s" % css_url)

            page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
                        "rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(css_url))

    # Inline <style> blocks are copied into the page head verbatim.
    stylesheets = root.xpath("//style")
    if len(stylesheets):
        for css in stylesheets:
            if "data-template" in css.attrib and len(css.attrib["data-template"]):
                css.text = css.attrib["data-template"]
                del css.attrib["data-template"]

            try:
                page_css += html.tostring(css, method="xml", encoding='unicode') + "\n"
            except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
                self.display.error(parsing_error)
                self.display.exit(
                    "Parser: error trying to parse one CSS found in this page: %s (%s)" %
                    (self.filename, self.chapter_title)
                )

    # TODO: add all not covered tag for `link_replace` function
    # Replace SVG <image> wrappers with plain <img> so links get rewritten too.
    svg_image_tags = root.xpath("//image")
    if len(svg_image_tags):
        for img in svg_image_tags:
            image_attr_href = [x for x in img.attrib.keys() if "href" in x]
            if len(image_attr_href):
                svg_url = img.attrib.get(image_attr_href[0])
                svg_root = img.getparent().getparent()
                new_img = svg_root.makeelement("img")
                new_img.attrib.update({"src": svg_url})
                svg_root.remove(img.getparent())
                svg_root.append(new_img)

    book_content = book_content[0]
    book_content.rewrite_links(self.link_replace)

    xhtml = None
    try:
        if first_page:
            is_cover = self.get_cover(book_content)
            if is_cover is not None:
                # Replace the whole first page with a centered full-height cover.
                page_css = "<style>" \
                           "body{display:table;position:absolute;margin:0!important;height:100%;width:100%;}" \
                           "#Cover{display:table-cell;vertical-align:middle;text-align:center;}" \
                           "img{height:90vh;margin-left:auto;margin-right:auto;}" \
                           "</style>"
                cover_html = html.fromstring("<div id=\"Cover\"></div>")
                cover_div = cover_html.xpath("//div")[0]
                cover_img = cover_div.makeelement("img")
                cover_img.attrib.update({"src": is_cover.attrib["src"]})
                cover_div.append(cover_img)
                book_content = cover_html

                self.cover = is_cover.attrib["src"]

        xhtml = html.tostring(book_content, method="xml", encoding='unicode')

    except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
        self.display.error(parsing_error)
        self.display.exit(
            "Parser: error trying to parse HTML of this page: %s (%s)" %
            (self.filename, self.chapter_title)
        )

    return page_css, xhtml
@staticmethod
def escape_dirname(dirname, clean_space=False):
if ":" in dirname:
if dirname.index(":") > 15:
dirname = dirname.split(":")[0]
elif "win" in sys.platform:
dirname = dirname.replace(":", ",")
for ch in ['~', '#', '%', '&', '*', '{', '}', '\\', '<', '>', '?', '/', '`', '\'', '"', '|', '+', ':']:
if ch in dirname:
dirname = dirname.replace(ch, "_")
return dirname if not clean_space else dirname.replace(" ", "")
def create_dirs(self):
    """Create the EPUB directory skeleton: book dir, OEBPS, Styles, Images.

    Pre-existing directories are logged; freshly created ones raise the
    display flags used later to warn about partially-downloaded content.
    """
    if not os.path.isdir(self.BOOK_PATH):
        os.makedirs(self.BOOK_PATH)
    else:
        self.display.log("Book directory already exists: %s" % self.BOOK_PATH)

    oebps = os.path.join(self.BOOK_PATH, "OEBPS")
    if not os.path.isdir(oebps):
        # A missing OEBPS means this is a fresh download.
        self.display.book_ad_info = True
        os.makedirs(oebps)

    self.css_path = os.path.join(oebps, "Styles")
    if not os.path.isdir(self.css_path):
        os.makedirs(self.css_path)
        self.display.css_ad_info.value = 1
    else:
        self.display.log("CSSs directory already exists: %s" % self.css_path)

    self.images_path = os.path.join(oebps, "Images")
    if not os.path.isdir(self.images_path):
        os.makedirs(self.images_path)
        self.display.images_ad_info.value = 1
    else:
        self.display.log("Images directory already exists: %s" % self.images_path)
def save_page_html(self, contents):
    """Render `(page_css, xhtml)` into the current chapter file under OEBPS.

    FIX: the chapter file is now written via a context manager instead of a
    dangling `open(...).write(...)`, so the handle is closed deterministically.
    """
    self.filename = self.filename.replace(".html", ".xhtml")
    with open(os.path.join(self.BOOK_PATH, "OEBPS", self.filename), "wb") as chapter_file:
        chapter_file.write(self.BASE_HTML.format(contents[0], contents[1]).encode("utf-8", 'xmlcharrefreplace'))
    self.display.log("Created: %s" % self.filename)
def get(self):
    """Download and convert every chapter in the queue.

    Skips chapters whose output file already exists (warning once), and
    records the image/stylesheet URLs each chapter references.
    """
    len_books = len(self.book_chapters)

    for _ in range(len_books):
        if not len(self.chapters_queue):
            return

        # True only for the very first chapter, which may carry the cover.
        first_page = len_books == len(self.chapters_queue)

        next_chapter = self.chapters_queue.pop(0)
        self.chapter_title = next_chapter["title"]
        self.filename = next_chapter["filename"]

        # Images
        if "images" in next_chapter and len(next_chapter["images"]):
            self.images.extend(urljoin(next_chapter['asset_base_url'], img_url)
                               for img_url in next_chapter['images'])

        # Stylesheets
        self.chapter_stylesheets = []
        if "stylesheets" in next_chapter and len(next_chapter["stylesheets"]):
            self.chapter_stylesheets.extend(x["url"] for x in next_chapter["stylesheets"])

        if "site_styles" in next_chapter and len(next_chapter["site_styles"]):
            self.chapter_stylesheets.extend(next_chapter["site_styles"])

        if os.path.isfile(os.path.join(self.BOOK_PATH, "OEBPS", self.filename.replace(".html", ".xhtml"))):
            # Warn only once, and only for the first occurrence of the file.
            if not self.display.book_ad_info and \
                    next_chapter not in self.book_chapters[:self.book_chapters.index(next_chapter)]:
                self.display.info(
                    ("File `%s` already exists.\n"
                     " If you want to download again all the book,\n"
                     " please delete the output directory '" + self.BOOK_PATH + "' and restart the program.")
                    % self.filename.replace(".html", ".xhtml")
                )
                self.display.book_ad_info = 2
        else:
            self.save_page_html(self.parse_html(self.get_html(next_chapter["content"]), first_page))

        self.display.state(len_books, len_books - len(self.chapters_queue))
def _thread_download_css(self, url):
    """Download one stylesheet into Styles/StyleNN.css and update progress.

    FIX: on a failed download the method now returns after reporting the
    error; previously it fell through and called `response.content` on the
    integer 0 sentinel, raising AttributeError. This matches the error
    handling of the sibling `_thread_download_images`.
    """
    css_file = os.path.join(self.css_path, "Style{0:0>2}.css".format(self.css.index(url)))
    if os.path.isfile(css_file):
        # Warn only once, and only for the first occurrence of this URL.
        if not self.display.css_ad_info.value and url not in self.css[:self.css.index(url)]:
            self.display.info(("File `%s` already exists.\n"
                               " If you want to download again all the CSSs,\n"
                               " please delete the output directory '" + self.BOOK_PATH + "'"
                               " and restart the program.") %
                              css_file)
            self.display.css_ad_info.value = 1
    else:
        response = self.requests_provider(url)
        if response == 0:
            self.display.error("Error trying to retrieve this CSS: %s\n From: %s" % (css_file, url))
            return

        with open(css_file, 'wb') as s:
            s.write(response.content)

    self.css_done_queue.put(1)
    self.display.state(len(self.css), self.css_done_queue.qsize())
def _thread_download_images(self, url):
    """Download one image into Images/ (streamed) and update progress."""
    image_name = url.split("/")[-1]
    image_path = os.path.join(self.images_path, image_name)
    if os.path.isfile(image_path):
        # Warn only once, and only for the first occurrence of this URL.
        if not self.display.images_ad_info.value and url not in self.images[:self.images.index(url)]:
            self.display.info(("File `%s` already exists.\n"
                               " If you want to download again all the images,\n"
                               " please delete the output directory '" + self.BOOK_PATH + "'"
                               " and restart the program.") %
                              image_name)
            self.display.images_ad_info.value = 1
    else:
        response = self.requests_provider(urljoin(SAFARI_BASE_URL, url), stream=True)
        if response == 0:
            self.display.error("Error trying to retrieve this image: %s\n From: %s" % (image_name, url))
            return

        with open(image_path, 'wb') as img:
            for chunk in response.iter_content(1024):
                img.write(chunk)

    self.images_done_queue.put(1)
    self.display.state(len(self.images), self.images_done_queue.qsize())
def _start_multiprocessing(self, operation, full_queue):
    """Run `operation` over `full_queue` in batches of at most 5 processes.

    Recurses to split large queues into 5-item batches; each batch is
    started and joined before the next. Currently unused: collect_css and
    collect_images download sequentially (see the comments there).
    """
    if len(full_queue) > 5:
        for i in range(0, len(full_queue), 5):
            self._start_multiprocessing(operation, full_queue[i:i + 5])
    else:
        process_queue = [Process(target=operation, args=(arg,)) for arg in full_queue]
        for proc in process_queue:
            proc.start()

        for proc in process_queue:
            proc.join()
def collect_css(self):
    """Download every stylesheet collected while parsing the chapters."""
    # -1 guarantees the first state() call repaints the progress bar.
    self.display.state_status.value = -1

    # "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
    for stylesheet_url in self.css:
        self._thread_download_css(stylesheet_url)
def collect_images(self):
    """Download every image collected while parsing the chapters.

    FIX: corrected the "direcotry" typo in the user-facing warning.
    """
    if self.display.book_ad_info == 2:
        # Some chapters were skipped because their files already existed,
        # so the image list may be incomplete.
        self.display.info("Some of the book contents were already downloaded.\n"
                          " If you want to be sure that all the images will be downloaded,\n"
                          " please delete the output directory '" + self.BOOK_PATH +
                          "' and restart the program.")

    # -1 guarantees the first state() call repaints the progress bar.
    self.display.state_status.value = -1

    # "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
    for image_url in self.images:
        self._thread_download_images(image_url)
def create_content_opf(self):
    """Build the EPUB OPF package document (metadata, manifest, spine, guide)."""
    # Re-scan the on-disk Styles/ and Images/ so the manifest matches what
    # was actually downloaded.
    self.css = next(os.walk(self.css_path))[2]
    self.images = next(os.walk(self.images_path))[2]

    manifest = []
    spine = []
    for c in self.book_chapters:
        c["filename"] = c["filename"].replace(".html", ".xhtml")
        item_id = escape("".join(c["filename"].split(".")[:-1]))
        manifest.append("<item id=\"{0}\" href=\"{1}\" media-type=\"application/xhtml+xml\" />".format(
            item_id, c["filename"]
        ))
        spine.append("<itemref idref=\"{0}\"/>".format(item_id))

    for i in set(self.images):
        dot_split = i.split(".")
        head = "img_" + escape("".join(dot_split[:-1]))
        extension = dot_split[-1]
        # jpg/jpeg both map to the image/jpeg media type.
        manifest.append("<item id=\"{0}\" href=\"Images/{1}\" media-type=\"image/{2}\" />".format(
            head, i, "jpeg" if "jp" in extension else extension
        ))

    for i in range(len(self.css)):
        manifest.append("<item id=\"style_{0:0>2}\" href=\"Styles/Style{0:0>2}.css\" "
                        "media-type=\"text/css\" />".format(i))

    authors = "\n".join("<dc:creator opf:file-as=\"{0}\" opf:role=\"aut\">{0}</dc:creator>".format(
        escape(aut.get("name", "n/d"))
    ) for aut in self.book_info.get("authors", []))

    subjects = "\n".join("<dc:subject>{0}</dc:subject>".format(escape(sub.get("name", "n/d")))
                         for sub in self.book_info.get("subjects", []))

    # CONTENT_OPF format slots: ID, Title, Authors, Description, Subjects,
    # Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl.
    return self.CONTENT_OPF.format(
        (self.book_info.get("isbn", self.book_id)),
        escape(self.book_title),
        authors,
        escape(self.book_info.get("description", "")),
        subjects,
        ", ".join(escape(pub.get("name", "")) for pub in self.book_info.get("publishers", [])),
        escape(self.book_info.get("rights", "")),
        self.book_info.get("issued", ""),
        self.cover,
        "\n".join(manifest),
        "\n".join(spine),
        self.book_chapters[0]["filename"].replace(".html", ".xhtml")
    )
@staticmethod
def parse_toc(l, c=0, mx=0):
    """Recursively render the API TOC tree into NCX <navPoint> markup.

    Args:
        l: list of TOC nodes (dicts with fragment/id/label/href/children/depth).
        c: running playOrder counter.
        mx: maximum depth seen so far.

    Returns:
        (navmap_markup, next_play_order, max_depth)
    """
    r = ""
    for cc in l:
        c += 1
        if int(cc["depth"]) > mx:
            mx = int(cc["depth"])

        # Prefer the fragment as the navPoint id, falling back to the node id.
        r += "<navPoint id=\"{0}\" playOrder=\"{1}\">" \
             "<navLabel><text>{2}</text></navLabel>" \
             "<content src=\"{3}\"/>".format(
                cc["fragment"] if len(cc["fragment"]) else cc["id"], c,
                escape(cc["label"]), cc["href"].replace(".html", ".xhtml").split("/")[-1]
             )

        if cc["children"]:
            sr, c, mx = SafariBooks.parse_toc(cc["children"], c, mx)
            r += sr

        r += "</navPoint>\n"

    return r, c, mx
def create_toc(self):
    """Fetch the book TOC from the API and render it as a toc.ncx document."""
    response = self.requests_provider(urljoin(self.api_url, "toc/"))
    if response == 0:
        self.display.exit("API: unable to retrieve book chapters. "
                          "Don't delete any files, just run again this program"
                          " in order to complete the `.epub` creation!")

    response = response.json()

    # A single-key dict payload is the API's error envelope.
    if not isinstance(response, list) and len(response.keys()) == 1:
        self.display.exit(
            self.display.api_error(response) +
            " Don't delete any files, just run again this program"
            " in order to complete the `.epub` creation!"
        )

    navmap, _, max_depth = self.parse_toc(response)
    # TOC_NCX format slots: ID, Depth, Title, Author, NAVMAP.
    return self.TOC_NCX.format(
        (self.book_info["isbn"] if self.book_info["isbn"] else self.book_id),
        max_depth,
        self.book_title,
        ", ".join(aut.get("name", "") for aut in self.book_info.get("authors", [])),
        navmap
    )
def create_epub(self):
    """Assemble the final .epub: mimetype, META-INF, OPF, NCX, then zip.

    FIX: all four metadata files are now written via context managers; the
    original used dangling `open(...).write(...)` expressions, leaving the
    handles open (and the data possibly unflushed) when the directory was
    zipped immediately afterwards.
    """
    with open(os.path.join(self.BOOK_PATH, "mimetype"), "w") as mimetype_file:
        mimetype_file.write("application/epub+zip")

    meta_info = os.path.join(self.BOOK_PATH, "META-INF")
    if os.path.isdir(meta_info):
        self.display.log("META-INF directory already exists: %s" % meta_info)
    else:
        os.makedirs(meta_info)

    with open(os.path.join(meta_info, "container.xml"), "wb") as container_file:
        container_file.write(self.CONTAINER_XML.encode("utf-8", "xmlcharrefreplace"))

    with open(os.path.join(self.BOOK_PATH, "OEBPS", "content.opf"), "wb") as opf_file:
        opf_file.write(self.create_content_opf().encode("utf-8", "xmlcharrefreplace"))

    with open(os.path.join(self.BOOK_PATH, "OEBPS", "toc.ncx"), "wb") as toc_file:
        toc_file.write(self.create_toc().encode("utf-8", "xmlcharrefreplace"))

    zip_file = os.path.join(PATH, "Books", self.book_id)
    if os.path.isfile(zip_file + ".zip"):
        os.remove(zip_file + ".zip")

    # An EPUB is a zip archive with a different extension.
    shutil.make_archive(zip_file, 'zip', self.BOOK_PATH)
    os.rename(zip_file + ".zip", os.path.join(self.BOOK_PATH, self.book_id) + ".epub")
# MAIN
if __name__ == "__main__":
    arguments = argparse.ArgumentParser(prog="safaribooks.py",
                                        description="Download and generate an EPUB of your favorite books"
                                                    " from Safari Books Online.",
                                        add_help=False,
                                        allow_abbrev=False)

    # --cred and --login are mutually exclusive ways to supply credentials.
    login_arg_group = arguments.add_mutually_exclusive_group()
    login_arg_group.add_argument(
        "--cred", metavar="<EMAIL:PASS>", default=False,
        help="Credentials used to perform the auth login on Safari Books Online."
             " Es. ` --cred \"account_mail@mail.com:password01\" `."
    )
    login_arg_group.add_argument(
        "--login", action='store_true',
        help="Prompt for credentials used to perform the auth login on Safari Books Online."
    )
    arguments.add_argument(
        "--no-cookies", dest="no_cookies", action='store_true',
        help="Prevent your session data to be saved into `cookies.json` file."
    )
    arguments.add_argument(
        "--kindle", dest="kindle", action='store_true',
        help="Add some CSS rules that block overflow on `table` and `pre` elements."
             " Use this option if you're going to export the EPUB to E-Readers like Amazon Kindle."
    )
    arguments.add_argument(
        "--preserve-log", dest="log", action='store_true', help="Leave the `info_XXXXXXXXXXXXX.log`"
                                                                " file even if there isn't any error."
    )
    arguments.add_argument("--help", action="help", default=argparse.SUPPRESS, help='Show this help message.')
    arguments.add_argument(
        "bookid", metavar='<BOOK ID>',
        help="Book digits ID that you want to download. You can find it in the URL (X-es):"
             " `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
    )

    args_parsed = arguments.parse_args()

    if args_parsed.cred or args_parsed.login:
        user_email = ""
        pre_cred = ""

        if args_parsed.cred:
            pre_cred = args_parsed.cred
        else:
            # Interactive prompt: the password input is hidden by getpass.
            user_email = input("Email: ")
            passwd = getpass.getpass("Password: ")
            pre_cred = user_email + ":" + passwd

        parsed_cred = SafariBooks.parse_cred(pre_cred)
        if not parsed_cred:
            # Mask the password when the credential came from the prompt.
            arguments.error("invalid credential: %s" % (
                args_parsed.cred if args_parsed.cred else (user_email + ":*******")
            ))

        args_parsed.cred = parsed_cred
    else:
        if args_parsed.no_cookies:
            arguments.error("invalid option: `--no-cookies` is valid only if you use the `--cred` option")

    SafariBooks(args_parsed)
    # Hint: to download more than one book at once, initialize more than one instance of `SafariBooks`...
    sys.exit(0)
|
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
# Values are compared against sys.platform by the fork-related tests below.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
    """Worker thread for ThreadTests.test_various_ops.

    Sleeps for a short random delay while holding a bounded semaphore and
    maintains a shared count of running workers, asserting the semaphore
    bound (3) is never exceeded.
    """

    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        # sema limits concurrency; mutex guards the shared nrunning Counter.
        self.sema = sema
        self.mutex = mutex
        self.nrunning = nrunning

    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))

        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore is created with value=3 by the test.
                self.testcase.assertLessEqual(self.nrunning.get(), 3)

            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')

            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Shared setUp/tearDown: snapshot thread state and verify nothing leaked."""

    def setUp(self):
        # Record pre-existing threads so tearDown can detect leaked ones.
        self._threads = test.support.threading_setup()

    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
if __name__ == "__main__":
unittest.main()
|
reset_job_test.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.utils import logger
import subprocess
import time
import threading
import subprocess
import sys
@parl.remote_class
class Actor(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def add_one(self, value):
value += 1
return value
def add(self, x, y):
time.sleep(3)
return x + y
def will_raise_exception_func(self):
x = 1 / 0
class TestJob(unittest.TestCase):
def tearDown(self):
disconnect()
def test_acor_exit_exceptionally(self):
port = 1337
master = Master(port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:{}'.format(port), 1)
file_path = __file__.replace('reset_job_test', 'simulate_client')
command = [sys.executable, file_path]
proc = subprocess.Popen(command, close_fds=True)
for _ in range(6):
if master.cpu_num == 0:
break
else:
time.sleep(10)
self.assertEqual(master.cpu_num, 0)
proc.kill()
parl.connect('localhost:{}'.format(port))
actor = Actor()
master.exit()
worker1.exit()
disconnect()
if __name__ == '__main__':
unittest.main()
|
test_classify_async_multinet.py | #!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os.path
import math
import sys
import timeit
import json
import multiprocessing as mp
import numpy as np
from vai.dpuv1.rt import xdnn, xdnn_io
from six import itervalues, iterkeys
def run(pid, args, resultQ):
    """Run one FPGA classification job and put (pid, result) on resultQ.

    Executes a single batch: preprocess images, run the FPGA graph
    asynchronously, then finish with CPU fully-connected + softmax layers.
    """
    fpgaRT = xdnn.XDNNFPGAOp(args)
    xdnnCPUOp = xdnn.XDNNCPUOp(args['weights'])
    # Input shape = (batch,) + per-image dims from the first input descriptor.
    args['in_shape'] = tuple((fpgaRT.getBatchSize(),) + tuple(next(itervalues(fpgaRT.getInputDescriptors()))[1:] ))
    fpgaInput = np.empty(args['in_shape'], dtype=np.float32, order='C')
    fpgaOutput = np.empty ((fpgaRT.getBatchSize(), int(args['fpgaoutsz']),), dtype=np.float32, order='C')
    labels = xdnn_io.get_labels(args['labels'])
    img_paths = xdnn_io.getFilePaths(args['images'])
    # Fill one batch worth of preprocessed images (scale/mean-subtract/resize).
    for j, p in enumerate(img_paths[:fpgaRT.getBatchSize()]):
        fpgaInput[j, ...], _ = xdnn_io.loadImageBlobFromFile(p, args['img_raw_scale'],
                                                            args['img_mean'],
                                                            args['img_input_scale'],
                                                            args['in_shape'][2],
                                                            args['in_shape'][3])
    firstInputName = next(iterkeys(fpgaRT.getInputs()))
    firstOutputName = next(iterkeys(fpgaRT.getOutputs()))
    # Kick off the FPGA execution, then block for completion.
    fpgaRT.exec_async({ firstInputName: fpgaInput },
                      { firstOutputName: fpgaOutput })
    fpgaRT.get_result()
    # Final FC + softmax layers run on the CPU.
    fcOut = np.empty((fpgaRT.getBatchSize(), args['outsz']), dtype=np.float32, order = 'C')
    xdnnCPUOp.computeFC(fpgaOutput, fcOut)
    softmaxOut = xdnnCPUOp.computeSoftmax(fcOut)
    result = xdnn_io.getClassification(softmaxOut, args['images'], labels);
    resultQ.put((pid, result))
# example for multiple executors
def main():
    """Spawn one classification worker per 'jsoncfg' entry, then gather and
    print each worker's result in pid order before joining the processes."""
    args = xdnn_io.processCommandLine()
    # One process per configured run; each reports back through the queue.
    resultQ = mp.Queue()
    procs = []
    for pid, runArgs in enumerate(args['jsoncfg']):
        worker = mp.Process(target=run, args=(pid, runArgs, resultQ,))
        worker.start()
        procs.append(worker)
    # Results arrive out of order; key them by pid.
    results = dict(resultQ.get() for _ in procs)
    # Print in pid order, joining each worker as we go.
    for pid, worker in enumerate(procs):
        print(results[pid])
        worker.join()
if __name__ == '__main__':
    # Script entry point.
    main()
|
Hiwin_RT605_ArmCommand_Socket_20190627193018.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
# Module-level shared state for the socket client and the ROS node.
Socket = 0  # placeholder until socket_client() replaces it with a client instance
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm starts out busy
NAME = 'socket_server'  # ROS node name
arm_mode_flag = False  # set True when a new arm command is pending
##------------class pos-------
##------------class pos-------
class point():
    """Mutable container for a 6-DOF pose: position (x, y, z) plus
    orientation (pitch, roll, yaw)."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
##------------class socket_cmd---------
class socket_data():
    """Container for one pending arm command received over ROS."""

    def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
        self.grip, self.setvel, self.ra = grip, setvel, ra
        self.delay, self.setboth = delay, setboth
        self.action, self.Speedmode = action, Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper.

    Usage:
        for case in switch(value):
            if case(A): ...; break
            if case(B): ...; break

    Once a case matches, ``self.fall`` is set so later case() calls return
    True, emulating fall-through.
    """

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX: `raise StopIteration` inside a generator is converted to
        # RuntimeError since Python 3.7 (PEP 479); a plain return ends the
        # generator correctly on all versions.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
##-----------client feedback arm state----------
class StateFeedback():
    """Latest arm status reported by the controller: ArmState (0=ready,
    1=busy, 6=strategy done) and SentFlag (0/1 transmit acknowledgement)."""

    def __init__(self,ArmState,SentFlag):
        self.ArmState, self.SentFlag = ArmState, SentFlag
state_feedback = StateFeedback(0,0)
class client():
    """Thin TCP client wrapper around the arm controller socket.

    Connects immediately on construction; the controller address is
    hard-coded to 192.168.0.1:8080.
    """
    def __init__(self):
        self.get_connect()
    def get_connect(self):
        # Open a TCP stream to the HIWIN arm controller.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))
    def send(self, msg):
        # Encode str as UTF-8 before sending over the wire.
        self.s.send(msg.encode('utf-8'))
    def get_recieve(self):
        # 1024 limits how many bytes a single recv may return.
        data = self.s.recv(1024)
        # NOTE(review): the decode result is discarded, so callers receive raw
        # bytes and index them as ints (see Socket_feedback); "fixing" this to
        # return str would break those callers — confirm before changing.
        data.decode('utf-8')
        return data
    def close(self):
        self.s.close()
def point_data(x,y,z,pitch,roll,yaw): ## store a target pose received from the strategy client
    """Copy the received pose components into the global `pos` object."""
    values = ((('x'), x), ('y', y), ('z', z),
              ('pitch', pitch), ('roll', roll), ('yaw', yaw))
    for name, value in values:
        setattr(pos, name, value)
##----------Arm Mode-------------###
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## store an arm-mode command from the strategy client
    """Record the command fields on the global socket_cmd, then raise the
    pending flag so Socket_command() will transmit it."""
    global arm_mode_flag
    fields = (('action', action), ('grip', grip), ('ra', ra),
              ('setvel', setvel), ('setboth', setboth))
    for name, value in fields:
        setattr(socket_cmd, name, value)
    arm_mode_flag = True
    #Socket_command()
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## store the arm speed mode sent by the strategy client
    # Only records the value; Socket_command() consumes it for Action_Type.Mode.
    socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the ROS server node
    """Publish [ArmState, SentFlag] on the 'chatter' topic at 10 Hz until
    ROS shuts down."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        # Mirror the latest controller feedback into the published message.
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Encode the pending socket_cmd as a TCP command string and send it.

    Dispatches on socket_cmd.action (PtP / Line / SetVel / Delay / Mode) and,
    for motion commands, on socket_cmd.setboth (position / euler / both).
    Does nothing unless Arm_Mode() has raised arm_mode_flag.
    """
    global Socket,arm_mode_flag,data
    if arm_mode_flag == True:
        arm_mode_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                # Inner switch chooses which control representation to send.
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #-------set arm speed--------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #-------set arm delay time--------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #-------set arm fast/safe speed mode--------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 6 ## reset action back to the idle/default state
        print(data)
        print("Socket:", Socket)
        #Socket.send(data.encode('utf-8'))
        Socket.send(data)
##-----------socket client--------
def socket_client():
    """Connect to the arm controller, then block in Socket_feedback() reading
    status until the strategy reports completion; finally close the socket.

    (A commented-out inline copy of the feedback loop was removed; see
    Socket_feedback() for the live version.)
    """
    global Socket
    try:
        Socket = client()
        print('Connection has been successful')
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    # First message from the controller is printed for diagnostics.
    print(Socket.get_recieve())
    Socket_feedback(Socket)
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Read controller status frames and mirror them into state_feedback.

    Frames are raw bytes; indexing yields the ASCII code of each byte
    ('0'=48, '1'=49, '6'=54). Loops until the arm reports shutdown.
    """
    Socket = s
    while 1:
        feedback_str = Socket.get_recieve()
        # Byte 2 encodes the arm state.
        if str(feedback_str[2]) == '48':# '0': arm is Ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# '1': arm is busy, cannot take a command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# '6': strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # Byte 4 is the transmit-acknowledged flag.
        if str(feedback_str[4]) == '48':# '0' -> false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# '1' -> true
            state_feedback.SentFlag = 1
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
    """rospy shutdown hook: announce that the node is stopping."""
    sys.stdout.write("shutdown time!\n")
if __name__ == '__main__':
    socket_cmd.action = 6## start in the idle/default mode
    ## run the TCP client in its own thread so the ROS publisher can spin
    t = threading.Thread(target=socket_client)
    t.start() # start the client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    ## threading end
|
main.py | import sys
import time
from cv2 import cv2
import numpy as np
import mss
from pynput.mouse import Button, Controller
import os
from bot import Fisher
import threading
# Some images we will use to dynamically find catch bar
#dirname = os.path.dirname(__file__)
# Project root (one level above this file); 'img' holds template images.
path = os.path.dirname(os.path.dirname(__file__))
img_path = os.path.join(path, 'img')
mouse = Controller()  # pynput mouse controller used to press/release the button
flag = True  # NOTE(review): set False on quit below but never read in this file
def Screen_Shot(left=0, top=0, width=1920, height=1080):
    """Grab a screen region with mss and return it as a BGR OpenCV image.

    mss returns BGRA pixel data; the alpha channel is dropped for OpenCV.
    """
    stc = mss.mss()
    scr = stc.grab({
        'left': left,
        'top': top,
        'width': width,
        'height': height
    })
    img = np.array(scr)
    # BUG FIX: cv2.IMREAD_COLOR is an imread() flag, not a color-conversion
    # code; it only worked here by accident because its value (1) equals
    # cv2.COLOR_BGRA2BGR. Use the correct conversion constant.
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    return img
def Throw_Line(left=800, top=800, wait=2):
    """Cast the fishing line: hold the left mouse button at (left, top) for
    `wait` seconds.

    BUG FIX: the hold duration was hard-coded to 2 seconds, silently
    ignoring the `wait` parameter; the default behavior is unchanged.
    """
    mouse.position = (left, top)
    mouse.press(Button.left)
    time.sleep(wait)
    mouse.release(Button.left)
# Need a dynamic way to find bar location.
fisher = Fisher()
fish_thread = threading.Thread(target=fisher.fish)
bar_left, bar_top = fisher.Set_Bobber()
print(bar_left, bar_top)
fish_thread.start()
while True:
stc = mss.mss()
scr = stc.grab(
{
"left": bar_left-300,
"top": bar_top,
"width": 800,
"height": 100,
}
)
frame = np.array(scr)
hsvframe = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
if fisher.fish_count >= fisher.fish_limit:
time.sleep(10)
continue
red_lower = np.array([0, 150, 150], np.uint8)
red_upper = np.array([10, 255, 255], np.uint8)
red_mask = cv2.inRange(hsvframe, red_lower, red_upper)
green_lower = np.array([40, 200, 150], np.uint8)
green_upper = np.array([70, 255, 255], np.uint8)
green_mask = cv2.inRange(hsvframe, green_lower, green_upper)
kernal = np.ones((5, 5), "uint8")
red_mask = cv2.dilate(red_mask, kernal)
res_red = cv2.bitwise_and(frame, frame, mask=red_mask)
green_mask = cv2.dilate(green_mask, kernal)
res_green = cv2.bitwise_and(frame, frame, mask=green_mask)
countours = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
for contour in countours:
area = cv2.contourArea(contour)
if area > 900:
x1, y1, w1, h1 = cv2.boundingRect(contour)
frame_red_bar = cv2.rectangle(
frame, (x1, y1), (x1 + w1, y1 + h1), (0, 0, 255), 2
)
cv2.putText(
frame,
"red bar",
(x1 + w1, y1 + h1),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 255),
)
x_red1 = int(x1 + w1 / 2)
y_red1 = int(y1 + h1 / 2)
cv2.circle(frame, (x_red1, y_red1), 3, (0, 0, 255), -1)
try:
cv2.line(frame, (x_red2, y_red2), (x_red1, y_red1), (0, 0, 255), 2)
except NameError:
pass
cv2.putText(
frame,
"red bar count: " + str(len(frame_red_bar) - 99),
(10, 72),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 255),
)
for contour in countours:
area2 = cv2.contourArea(contour)
if 600 > area2 > 100:
x1, y1, w1, h1 = cv2.boundingRect(contour)
frame_red = cv2.rectangle(
frame, (x1, y1), (x1 + w1, y1 + h1), (0, 34, 255), 2
)
x_red2 = int(x1 + w1 / 2)
y_red2 = int(y1 + h1 / 2)
cv2.circle(frame, (x_red2, y_red2), 3, (0, 34, 255), -1)
cv2.putText(
frame,
"hook",
(x1 + w1, y1 + h1),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 34, 255),
)
try:
distance = int(np.sqrt((x_red2 - x_red1) ** 2 + (y_red2 - y_red1) ** 2))
distance2 = int(np.sqrt((x_red2 - x_green) ** 2 + (y_red2 - y_green) ** 2))
if not np.array_equal(frame_red, frame_green) and distance > 65:
cv2.putText(
frame,
"red: " + str(distance),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 255),
)
if x_green > x_red2 and (x_red2 < x_red1):
if x_green > x_red2:
cv2.putText(
frame,
"red: " + str(distance),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 255),
)
mouse.press(Button.left)
elif x_green < x_red2 and (x_red2 > x_red1) and distance > 65:
if x_green < x_red1:
cv2.putText(
frame,
"red: " + str(distance),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 255),
)
mouse.release(Button.left)
else:
cv2.putText(
frame,
"green: " + str(distance2),
(10, 60),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 255, 0),
)
if distance2 <= 7 or x_red2 > x_green and x1 > x2:
mouse.release(Button.left)
elif x_red2 < x_green and distance2 > 7 and x1 < x2:
mouse.press(Button.left)
except NameError:
pass
countours2 = cv2.findContours(green_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
y_green_flag = 0
for contour in countours2:
area3 = cv2.contourArea(contour)
if area3 > 500:
x2, y2, w2, h2 = cv2.boundingRect(contour)
frame_green = cv2.rectangle(
frame, (x2, y2), (x2 + w2, y2 + h2), (0, 255, 0), 2
)
x_green = int(x2 + w2 / 2)
y_green = int(y2 + h2 / 2)
cv2.circle(frame, (x_green, y_green), 3, (0, 255, 0), -1)
cv2.putText(
frame,
"green bar",
(x2 + w2, y2 + h2),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 255, 0),
)
cv2.putText(
frame,
"green bar count: " + str(len(countours2)),
(10, 90),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 255, 0),
)
try:
cv2.line(frame, (x_red2, y_red2), (x_green, y_green), (0, 255, 0), 2)
except NameError:
pass
# np.intersect1d(frame_red, frame_green).size
try:
if np.array_equal(frame_red, frame_green):
cv2.putText(
frame, f"hooked", (320, 90), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0)
)
else:
cv2.putText(
frame,
f"not hooked",
(320, 90),
cv2.FONT_HERSHEY_SIMPLEX,
1.0,
(0, 0, 255),
)
except NameError:
cv2.putText(
frame, "not hooked", (320, 90), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255)
)
cv2.imshow("main", frame)
cv2.setWindowProperty("main", cv2.WND_PROP_TOPMOST, 1)
# Press q to quit program
if cv2.waitKey(1) & 0xFF == ord("q"):
fisher.keep_fishing = False
cv2.destroyAllWindows()
cv2.waitKey(1)
flag = False
sys.exit()
|
__init__.py | #!/usr/bin/env python
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
#
# Copy this script to /sbin/mount.efs and make sure it is executable.
#
# You will be able to mount an EFS file system by its short name, by adding it
# to /etc/fstab. The syntax of an fstab entry is:
#
# [Device] [Mount Point] [File System Type] [Options] [Dump] [Pass]
#
# Add an entry like this:
#
# fs-deadbeef /mount_point efs _netdev 0 0
#
# Using the 'efs' type will cause '/sbin/mount.efs' to be called by 'mount -a'
# for this file system. The '_netdev' option tells the init system that the
# 'efs' type is a networked file system type. This has been tested with systemd
# (Amazon Linux 2, CentOS 7, RHEL 7, Debian 9, and Ubuntu 16.04), and upstart
# (Amazon Linux 2017.09).
#
# Once there is an entry in fstab, the file system can be mounted with:
#
# sudo mount /mount_point
#
# The script will add recommended mount options, if not provided in fstab.
import json
import logging
import os
import random
import re
import socket
import subprocess
import sys
import threading
from contextlib import contextmanager
from logging.handlers import RotatingFileHandler
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
try:
from urllib2 import urlopen, URLError
except ImportError:
from urllib.error import URLError
from urllib.request import urlopen
# Version of this mount helper.
VERSION = '1.5'
CONFIG_FILE = '/etc/amazon/efs/efs-utils.conf'
CONFIG_SECTION = 'mount'
LOG_DIR = '/var/log/amazon/efs'
LOG_FILE = 'mount.log'
# Per-mount TLS tunnel state files live here (consumed by the watchdog).
STATE_FILE_DIR = '/var/run/efs'
# Matches bare EFS file system ids, e.g. 'fs-deadbeef'.
FS_ID_RE = re.compile('^(?P<fs_id>fs-[0-9a-f]+)$')
# Matches full EFS DNS names, capturing the file system id and region.
EFS_FQDN_RE = re.compile('^(?P<fs_id>fs-[0-9a-f]+)\.efs\.(?P<region>[a-z0-9-]+)\.amazonaws.com$')
INSTANCE_METADATA_SERVICE_URL = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
DEFAULT_STUNNEL_VERIFY_LEVEL = 2
DEFAULT_STUNNEL_CAFILE = '/etc/amazon/efs/efs-utils.crt'
# Options consumed by this helper and stripped before invoking mount.nfs4.
EFS_ONLY_OPTIONS = [
    'tls',
    'tlsport',
    'verify',
]
# Options warned about and dropped (the CA trust store is built in).
UNSUPPORTED_OPTIONS = [
    'cafile',
    'capath',
]
STUNNEL_GLOBAL_CONFIG = {
    'fips': 'no',
    'foreground': 'yes',
    'socket': [
        'l:SO_REUSEADDR=yes',
        'a:SO_BINDTODEVICE=lo',
    ],
}
# Per-mount stunnel service section; 'accept'/'connect' are %-filled later.
STUNNEL_EFS_CONFIG = {
    'client': 'yes',
    'accept': '127.0.0.1:%s',
    'connect': '%s:2049',
    'sslVersion': 'TLSv1.2',
    'renegotiation': 'no',
    'TIMEOUTbusy': '20',
    'TIMEOUTclose': '0',
    'libwrap': 'no',
    'delay': 'yes',
}
WATCHDOG_SERVICE = 'amazon-efs-mount-watchdog'
def fatal_error(user_message, log_message=None, exit_code=1):
    """Write *user_message* to stderr, log *log_message* (defaults to the
    user message), and terminate the process with *exit_code*."""
    log_message = log_message if log_message is not None else user_message
    sys.stderr.write('%s\n' % user_message)
    logging.error(log_message)
    sys.exit(exit_code)
def get_region():
    """Return this instance's region via the instance metadata service."""
    def _fatal_error(message):
        fatal_error('Error retrieving region', message)
    try:
        resource = urlopen(INSTANCE_METADATA_SERVICE_URL, timeout=1)
        if resource.getcode() != 200:
            _fatal_error('Unable to reach instance metadata service at %s: status=%d'
                         % (INSTANCE_METADATA_SERVICE_URL, resource.getcode()))
        data = resource.read()
        if type(data) is str:
            # Python 2: read() already returns str.
            instance_identity = json.loads(data)
        else:
            # Python 3: decode bytes using the response charset (default us-ascii).
            instance_identity = json.loads(data.decode(resource.headers.get_content_charset() or 'us-ascii'))
        return instance_identity['region']
    except URLError as e:
        _fatal_error('Unable to reach the instance metadata service at %s. If this is an on-premises instance, replace '
                     '"{region}" in the "dns_name_format" option in %s with the region of the EFS file system you are mounting.\n'
                     'See %s for more detail. %s'
                     % (INSTANCE_METADATA_SERVICE_URL, CONFIG_FILE, 'https://docs.aws.amazon.com/console/efs/direct-connect', e))
    except ValueError as e:
        _fatal_error('Error parsing json: %s' % (e,))
    except KeyError as e:
        _fatal_error('Region not present in %s: %s' % (instance_identity, e))
def parse_options(options):
    """Parse a comma-separated fstab-style option string into a dict.

    Valueless options map to None. BUG FIX: split on the first '=' only,
    so a value that itself contains '=' no longer raises ValueError from
    tuple unpacking.
    """
    opts = {}
    for o in options.split(','):
        if '=' in o:
            k, v = o.split('=', 1)
            opts[k] = v
        else:
            opts[o] = None
    return opts
def get_tls_port_range(config):
    """Read and validate the [mount] TLS port range from *config*, returning
    (lower_bound, upper_bound); dies via fatal_error() if inverted."""
    lower_bound = config.getint(CONFIG_SECTION, 'port_range_lower_bound')
    upper_bound = config.getint(CONFIG_SECTION, 'port_range_upper_bound')
    if not lower_bound < upper_bound:
        fatal_error('Configuration option "port_range_upper_bound" defined as %d '
                    'must be strictly greater than "port_range_lower_bound" defined as %d.'
                    % (upper_bound, lower_bound))
    return lower_bound, upper_bound
def choose_tls_port(config):
    """Pick an unused localhost port within the configured range.

    Ports are tried in order starting from a random midpoint so that
    concurrent mounts are unlikely to race for the same port.
    """
    lower_bound, upper_bound = get_tls_port_range(config)
    tls_ports = list(range(lower_bound, upper_bound))
    # Choose a random midpoint, and then try ports in-order from there
    mid = random.randrange(len(tls_ports))
    ports_to_try = tls_ports[mid:] + tls_ports[:mid]
    assert len(tls_ports) == len(ports_to_try)
    for tls_port in ports_to_try:
        sock = socket.socket()
        try:
            sock.bind(('localhost', tls_port))
            return tls_port
        except socket.error:
            continue
        finally:
            # BUG FIX: the socket was leaked when bind() failed; the finally
            # clause closes it on both the success and the failure path.
            sock.close()
    fatal_error('Failed to locate an available port in the range [%d, %d], '
                'try specifying a different port range in %s'
                % (lower_bound, upper_bound, CONFIG_FILE))
def get_mount_specific_filename(fs_id, mountpoint, tls_port):
    """Build the per-mount file name: '<fs_id>.<abs-mountpoint-with-dots>.<port>'."""
    mount_part = os.path.abspath(mountpoint).replace(os.sep, '.').lstrip('.')
    return '{0}.{1}.{2:d}'.format(fs_id, mount_part, tls_port)
def serialize_stunnel_config(config, header=None):
    """Render a stunnel config dict as a list of 'key = value' lines.

    A list value emits one line per item; a truthy *header* prepends
    '[header]'.
    """
    lines = ['[%s]' % header] if header else []
    for key, value in config.items():
        values = value if type(value) is list else [value]
        for item in values:
            lines.append('%s = %s' % (key, item))
    return lines
def add_stunnel_ca_options(efs_config, stunnel_cafile=DEFAULT_STUNNEL_CAFILE):
    """Point stunnel at the EFS certificate-authority bundle, dying via
    fatal_error() if the file is missing."""
    if os.path.exists(stunnel_cafile):
        efs_config['CAfile'] = stunnel_cafile
    else:
        fatal_error('Failed to find the EFS certificate authority file for verification',
                    'Failed to find the EFS CAfile "%s"' % stunnel_cafile)
def is_stunnel_option_supported(stunnel_output, stunnel_option_name):
    """Return True if any line of stunnel's help output starts with
    *stunnel_option_name*; warn when the option is unsupported."""
    supported = any(line.startswith(stunnel_option_name) for line in stunnel_output)
    if not supported:
        # FIX: logging.warn is deprecated since Python 3.3; logging.warning
        # is the supported spelling.
        logging.warning('stunnel does not support "%s"', stunnel_option_name)
    return supported
def get_version_specific_stunnel_options(config):
    """Probe `stunnel -help` (written to stderr) for optional features,
    returning (check_host_supported, ocsp_aia_supported).

    NOTE(review): the *config* parameter is unused here. Without
    universal_newlines, `err` is bytes on Python 3, so the str prefix test in
    is_stunnel_option_supported would raise TypeError — presumably this was
    written for Python 2; confirm before porting.
    """
    proc = subprocess.Popen(['stunnel', '-help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.wait()
    _, err = proc.communicate()
    stunnel_output = err.splitlines()
    check_host_supported = is_stunnel_option_supported(stunnel_output, 'checkHost')
    ocsp_aia_supported = is_stunnel_option_supported(stunnel_output, 'OCSPaia')
    return check_host_supported, ocsp_aia_supported
def write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level, log_dir=LOG_DIR):
    """
    Serializes stunnel configuration to a file. Unfortunately this does not conform to Python's config file format, so we have to
    hand-serialize it.

    Returns the path of the written config file.
    """
    mount_filename = get_mount_specific_filename(fs_id, mountpoint, tls_port)
    global_config = dict(STUNNEL_GLOBAL_CONFIG)
    if config.getboolean(CONFIG_SECTION, 'stunnel_debug_enabled'):
        # Per-mount stunnel debug log alongside the mount log.
        global_config['debug'] = 'debug'
        global_config['output'] = os.path.join(log_dir, '%s.stunnel.log' % mount_filename)
    efs_config = dict(STUNNEL_EFS_CONFIG)
    # Fill the %s placeholders: local accept port and remote EFS endpoint.
    efs_config['accept'] = efs_config['accept'] % tls_port
    efs_config['connect'] = efs_config['connect'] % dns_name
    efs_config['verify'] = verify_level
    if verify_level > 0:
        add_stunnel_ca_options(efs_config)
    check_host_supported, ocsp_aia_supported = get_version_specific_stunnel_options(config)
    tls_controls_message = 'WARNING: Your client lacks sufficient controls to properly enforce TLS. Please upgrade stunnel, ' \
                           'or disable "%%s" in %s.\nSee %s for more detail.' % (CONFIG_FILE,
                                                                                 'https://docs.aws.amazon.com/console/efs/troubleshooting-tls')
    # Enforce hostname / certificate-validity checks only if this stunnel
    # build supports them; otherwise fail rather than silently weaken TLS.
    if config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_hostname'):
        if check_host_supported:
            efs_config['checkHost'] = dns_name
        else:
            fatal_error(tls_controls_message % 'stunnel_check_cert_hostname')
    if config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_validity'):
        if ocsp_aia_supported:
            efs_config['OCSPaia'] = 'yes'
        else:
            fatal_error(tls_controls_message % 'stunnel_check_cert_validity')
    stunnel_config = '\n'.join(serialize_stunnel_config(global_config) + serialize_stunnel_config(efs_config, 'efs'))
    logging.debug('Writing stunnel configuration:\n%s', stunnel_config)
    stunnel_config_file = os.path.join(state_file_dir, 'stunnel-config.%s' % mount_filename)
    with open(stunnel_config_file, 'w') as f:
        f.write(stunnel_config)
    return stunnel_config_file
def write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_pid, command, files, state_file_dir):
    """
    Write the TLS tunnel state (pid, command, config files) as JSON and
    return the temporary file name, which is prefixed with '~'. After a
    successful mount the file must be renamed to its non-temporary form.
    """
    state_file = '~' + get_mount_specific_filename(fs_id, mountpoint, tls_port)
    with open(os.path.join(state_file_dir, state_file), 'w') as f:
        json.dump({'pid': tunnel_pid, 'cmd': command, 'files': files}, f)
    return state_file
def test_tunnel_process(tunnel_proc, fs_id):
    """Die via fatal_error() if the stunnel child process has already exited."""
    tunnel_proc.poll()
    if tunnel_proc.returncode is None:
        return
    out, err = tunnel_proc.communicate()
    fatal_error('Failed to initialize TLS tunnel for %s' % fs_id,
                'Failed to start TLS tunnel (errno=%d). stdout="%s" stderr="%s"'
                % (tunnel_proc.returncode, out.strip(), err.strip()))
def poll_tunnel_process(tunnel_proc, fs_id, mount_completed):
    """
    poll the tunnel process health every .5s during the mount attempt to fail fast if the tunnel dies - since this is not called
    from the main thread, if the tunnel fails, exit uncleanly with os._exit
    """
    while not mount_completed.is_set():
        try:
            test_tunnel_process(tunnel_proc, fs_id)
        except SystemExit as e:
            # fatal_error() raised SystemExit inside this worker thread;
            # os._exit takes the whole process down immediately.
            os._exit(e.code)
        mount_completed.wait(.5)
def get_init_system(comm_file='/proc/1/comm'):
    """Identify the init system (e.g. 'systemd') by reading PID 1's comm
    file; returns 'unknown' if the file cannot be read."""
    try:
        with open(comm_file) as f:
            detected = f.read().strip()
    except IOError:
        logging.warning('Unable to read %s', comm_file)
        detected = 'unknown'
    logging.debug('Identified init system: %s', detected)
    return detected
def check_network_status(fs_id, init_system):
    """On systemd hosts, verify network.target is active before mounting;
    a no-op on every other init system."""
    if init_system != 'systemd':
        logging.debug('Not testing network on non-systemd init systems')
        return
    with open(os.devnull, 'w') as devnull:
        rc = subprocess.call(['systemctl', 'status', 'network.target'], stdout=devnull, stderr=devnull)
    if rc != 0:
        fatal_error('Failed to mount %s because the network was not yet available, add "_netdev" to your mount options' % fs_id,
                    exit_code=0)
def start_watchdog(init_system):
    """Ensure the amazon-efs-mount-watchdog service is running, using the
    upstart ('init') or systemd tooling as appropriate."""
    if init_system == 'init':
        proc = subprocess.Popen(['/sbin/status', WATCHDOG_SERVICE], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        status, _ = proc.communicate()
        # NOTE(review): without universal_newlines, `status` is bytes on
        # Python 3, so these `in` checks with str would raise TypeError —
        # presumably Python 2 era code; confirm before porting.
        if 'stop' in status:
            with open(os.devnull, 'w') as devnull:
                subprocess.Popen(['/sbin/start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull)
        elif 'start' in status:
            logging.debug('%s is already running', WATCHDOG_SERVICE)
    elif init_system == 'systemd':
        rc = subprocess.call(['systemctl', 'is-active', '--quiet', WATCHDOG_SERVICE])
        if rc != 0:
            with open(os.devnull, 'w') as devnull:
                subprocess.Popen(['systemctl', 'start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull)
        else:
            logging.debug('%s is already running', WATCHDOG_SERVICE)
    else:
        # Unknown init system: warn but do not abort the mount.
        error_message = 'Could not start %s, unrecognized init system "%s"' % (WATCHDOG_SERVICE, init_system)
        sys.stderr.write('%s\n' % error_message)
        logging.warning(error_message)
@contextmanager
def bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options, state_file_dir=STATE_FILE_DIR):
    """Start a per-mount stunnel TLS tunnel and yield its process.

    Side effects: starts the watchdog service, chooses a local TLS port
    (stored in options['tlsport']), writes the stunnel config and a '~'-
    prefixed state file. On exit the state file is renamed to its permanent
    name, which tells the watchdog the mount was established.
    """
    start_watchdog(init_system)
    if not os.path.exists(state_file_dir):
        os.makedirs(state_file_dir)
    tls_port = choose_tls_port(config)
    options['tlsport'] = tls_port
    verify_level = int(options.get('verify', DEFAULT_STUNNEL_VERIFY_LEVEL))
    options['verify'] = verify_level
    stunnel_config_file = write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level)
    tunnel_args = ['stunnel', stunnel_config_file]
    # launch the tunnel in a process group so if it has any child processes, they can be killed easily by the mount watchdog
    logging.info('Starting TLS tunnel: "%s"', ' '.join(tunnel_args))
    tunnel_proc = subprocess.Popen(tunnel_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
    logging.info('Started TLS tunnel, pid: %d', tunnel_proc.pid)
    temp_tls_state_file = write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_proc.pid, tunnel_args,
                                                     [stunnel_config_file], state_file_dir)
    try:
        yield tunnel_proc
    finally:
        # Renaming '~file' -> 'file' marks the tunnel state as permanent.
        os.rename(os.path.join(state_file_dir, temp_tls_state_file), os.path.join(state_file_dir, temp_tls_state_file[1:]))
def get_nfs_mount_options(options):
    """Fill in recommended NFS defaults and serialize *options* to a mount
    option string.

    Mutates *options* in place. With 'tls', the NFS port is redirected to
    the local stunnel port; EFS-only options are excluded from the result.
    """
    # If you change these options, update the man page as well at man/mount.efs.8
    defaults = (
        ('nfsvers', '4.1', ('nfsvers', 'vers')),
        ('rsize', '1048576', ('rsize',)),
        ('wsize', '1048576', ('wsize',)),
        ('hard', None, ('soft', 'hard')),
        ('timeo', '600', ('timeo',)),
        ('retrans', '2', ('retrans',)),
        ('noresvport', None, ('noresvport',)),
    )
    for key, value, already_set in defaults:
        if not any(guard in options for guard in already_set):
            options[key] = value
    if 'tls' in options:
        if 'port' in options:
            fatal_error('The "port" and "tls" options are mutually exclusive')
        options['port'] = options['tlsport']
    def to_nfs_option(k, v):
        return k if v is None else '%s=%s' % (str(k), str(v))
    return ','.join(to_nfs_option(k, v) for k, v in options.items() if k not in EFS_ONLY_OPTIONS)
def mount_nfs(dns_name, path, mountpoint, options):
    """Invoke /sbin/mount.nfs4; with 'tls' the target is the local stunnel
    endpoint (127.0.0.1) instead of the EFS DNS name."""
    if 'tls' in options:
        mount_path = '127.0.0.1:%s' % path
    else:
        mount_path = '%s:%s' % (dns_name, path)
    command = ['/sbin/mount.nfs4', mount_path, mountpoint, '-o', get_nfs_mount_options(options)]
    logging.info('Executing: "%s"', ' '.join(command))
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
        logging.info('Successfully mounted %s at %s', dns_name, mountpoint)
    else:
        # Propagate mount.nfs4's exit code and stderr to the user.
        message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (dns_name, mountpoint, proc.returncode, err.strip())
        fatal_error(err.strip(), message, proc.returncode)
def usage(out, exit_code=1):
    """Write the command synopsis to *out*, then exit with *exit_code*."""
    out.write('Usage: mount.efs [--version] [-h|--help] <fsname> <mountpoint> [-o <options>]\n')
    sys.exit(exit_code)
def parse_arguments_early_exit(args=None):
    """Parse arguments, checking for early exit conditions only"""
    args = sys.argv if args is None else args
    flags = args[1:]
    if '-h' in flags or '--help' in flags:
        usage(out=sys.stdout, exit_code=0)
    if '--version' in flags:
        sys.stdout.write('%s Version: %s\n' % (args[0], VERSION))
        sys.exit(0)
def parse_arguments(config, args=None):
    """Parse arguments, return (fsid, path, mountpoint, options)"""
    args = sys.argv if args is None else args
    fsname = args[1] if len(args) > 1 else None
    mountpoint = args[2] if len(args) > 2 else None
    options = parse_options(args[4]) if len(args) > 4 and args[3] == '-o' else {}
    if not (fsname and mountpoint):
        usage(out=sys.stderr)
    fs_id, path = match_device(config, fsname)
    return fs_id, path, mountpoint, options
def assert_root():
    """Exit with an error unless the effective user is root (euid 0)."""
    if os.geteuid() == 0:
        return
    sys.stderr.write('only root can run mount.efs\n')
    sys.exit(1)
def read_config(config_file=CONFIG_FILE):
    """Load the efs-utils configuration file and return the parser.

    BUG FIX: on Python 3 the top-of-file fallback imports the ConfigParser
    *class* (not the Python 2 module), which has no SafeConfigParser
    attribute, so this always raised AttributeError. Fall back to
    instantiating ConfigParser directly.
    """
    try:
        # Python 2: ConfigParser is the module.
        p = ConfigParser.SafeConfigParser()
    except AttributeError:
        # Python 3: ConfigParser is the class itself.
        p = ConfigParser()
    p.read(config_file)
    return p
def bootstrap_logging(config, log_dir=LOG_DIR):
    """Configure the root logger with a rotating file handler using the
    logging_* values from *config*; malformed levels fall back to INFO."""
    raw_level = config.get(CONFIG_SECTION, 'logging_level')
    levels = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
    }
    level = levels.get(raw_level.lower())
    level_error = False
    if not level:
        # delay logging error about malformed log level until after logging is configured
        level_error = True
        level = logging.INFO
    max_bytes = config.getint(CONFIG_SECTION, 'logging_max_bytes')
    file_count = config.getint(CONFIG_SECTION, 'logging_file_count')
    handler = RotatingFileHandler(os.path.join(log_dir, LOG_FILE), maxBytes=max_bytes, backupCount=file_count)
    handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger()
    logger.setLevel(level)
    logger.addHandler(handler)
    if level_error:
        logging.error('Malformed logging level "%s", setting logging level to %s', raw_level, level)
def get_dns_name(config, fs_id):
    """Build the file system's DNS name from the configured dns_name_format
    and verify that it resolves; raises ValueError for a malformed format."""
    def _validate_replacement_field_count(format_str, expected_ct):
        if format_str.count('{') != expected_ct or format_str.count('}') != expected_ct:
            raise ValueError('DNS name format has an incorrect number of replacement fields')
    dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
    if '{fs_id}' not in dns_name_format:
        raise ValueError('DNS name format must include {fs_id}')
    format_args = {'fs_id': fs_id}
    if '{region}' in dns_name_format:
        _validate_replacement_field_count(dns_name_format, 2)
        # {region} requires an instance-metadata lookup.
        format_args['region'] = get_region()
    else:
        _validate_replacement_field_count(dns_name_format, 1)
    dns_name = dns_name_format.format(**format_args)
    try:
        socket.gethostbyname(dns_name)
    except socket.gaierror:
        fatal_error('Failed to resolve "%s" - check that your file system ID is correct.\nSee %s for more detail.'
                    % (dns_name, 'https://docs.aws.amazon.com/console/efs/mount-dns-name'),
                    'Failed to resolve "%s"' % dns_name)
    return dns_name
def match_device(config, device):
    """Return the EFS id and the remote path to mount"""
    try:
        remote, path = device.split(':', 1)
    except ValueError:
        # No ':' in the device spec: mount the file system root.
        remote = device
        path = '/'
    if FS_ID_RE.match(remote):
        return remote, path
    # Not a bare fs-id: treat the device as a CNAME that must resolve to an
    # EFS DNS name.
    try:
        primary, secondaries, _ = socket.gethostbyname_ex(remote)
        hostnames = filter(lambda e: e is not None, [primary] + secondaries)
    except socket.gaierror:
        fatal_error(
            'Failed to resolve "%s" - check that the specified DNS name is a CNAME record resolving to a valid EFS DNS '
            'name' % remote,
            'Failed to resolve "%s"' % remote
        )
    # NOTE(review): on Python 3 `hostnames` is a lazy filter object, so this
    # truthiness check can never be False even when empty — presumably
    # written for Python 2 (where filter returns a list); confirm.
    if not hostnames:
        fatal_error(
            'The specified domain name "%s" did not resolve to an EFS mount target' % remote
        )
    for hostname in hostnames:
        efs_fqdn_match = EFS_FQDN_RE.match(hostname)
        if efs_fqdn_match:
            fs_id = efs_fqdn_match.group('fs_id')
            expected_dns_name = get_dns_name(config, fs_id)
            # check that the DNS name of the mount target matches exactly the DNS name the CNAME resolves to
            if hostname == expected_dns_name:
                return fs_id, path
    else:
        # for-else: no resolved hostname matched an EFS mount-target DNS name.
        fatal_error('The specified CNAME "%s" did not resolve to a valid DNS name for an EFS mount target. '
                    'Please refer to the EFS documentation for mounting with DNS names for examples: %s'
                    % (remote, 'https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html'))
def mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options):
    """Mount over TLS: keep the stunnel tunnel alive while mount_nfs runs."""
    with bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options) as tunnel_proc:
        mount_completed = threading.Event()
        # Watch the tunnel process in the background until the mount finishes.
        watchdog = threading.Thread(target=poll_tunnel_process,
                                    args=(tunnel_proc, fs_id, mount_completed))
        watchdog.start()
        mount_nfs(dns_name, path, mountpoint, options)
        mount_completed.set()
        watchdog.join()
def check_unsupported_options(options):
    """Remove unsupported mount options in place, warning the user about each."""
    for unsupported_option in UNSUPPORTED_OPTIONS:
        if unsupported_option in options:
            warn_message = 'The "%s" option is not supported and has been ignored, as amazon-efs-utils relies on a built-in ' \
                           'trust store.' % unsupported_option
            sys.stderr.write('WARN: %s\n' % warn_message)
            # logging.warn is a deprecated alias of logging.warning
            logging.warning(warn_message)
            del options[unsupported_option]
def main():
    """Entry point: parse arguments, resolve the EFS DNS name and mount it."""
    parse_arguments_early_exit()

    assert_root()  # mounting requires root privileges

    config = read_config()
    bootstrap_logging(config)

    fs_id, path, mountpoint, options = parse_arguments(config)

    logging.info('version=%s options=%s', VERSION, options)

    check_unsupported_options(options)

    init_system = get_init_system()
    check_network_status(fs_id, init_system)

    dns_name = get_dns_name(config, fs_id)

    # 'tls' option routes the mount through a local stunnel tunnel.
    if 'tls' in options:
        mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options)
    else:
        mount_nfs(dns_name, path, mountpoint, options)
if __name__ == '__main__':
    main()
|
multiproc_vec.py | from .utils.shared_array import SharedArray
from .utils.space_wrapper import SpaceWrapper
import multiprocessing as mp
import numpy as np
import traceback
import gym.vector
def compress_info(infos):
    """Pair each non-empty info dict with its index, dropping empty ones."""
    packed = []
    for idx, info in enumerate(infos):
        if info:
            packed.append((idx, info))
    return packed
def decompress_info(num_envs, idx_starts, comp_infos):
    """Expand per-worker compressed infos back into one dict per env.

    Args:
        num_envs: total number of environments across all workers.
        idx_starts: start index of each worker's env slice.
        comp_infos: per-worker lists of (local_index, info) pairs as
            produced by compress_info().

    Returns:
        A list of num_envs dicts; positions with no reported info are {}.
    """
    # Use distinct dicts per slot: the previous [{}] * num_envs aliased a
    # single shared dict into every empty position, so mutating one entry
    # mutated them all. Also avoid shadowing the comp_infos parameter.
    all_info = [{} for _ in range(num_envs)]
    for idx_start, worker_infos in zip(idx_starts, comp_infos):
        for local_idx, info in worker_infos:
            all_info[idx_start + local_idx] = info
    return all_info
def async_loop(vec_env_constr, pipe, shared_obs, shared_actions, shared_rews, shared_dones):
    """Worker-process loop: build a vector env and service commands from the pipe.

    Protocol: first send this worker's env count, then receive the start
    index of its slice in the shared arrays. Afterwards every command is
    answered with a compressed info list. Any exception is shipped back to
    the parent as an (exception, traceback-string) tuple.
    """
    try:
        vec_env = vec_env_constr()
        pipe.send((vec_env.num_envs))
        # Parent assigns this worker a contiguous slice of the shared buffers.
        env_start_idx = pipe.recv()
        env_end_idx = env_start_idx + vec_env.num_envs
        while True:
            instr = pipe.recv()
            if instr == "reset":
                obs = vec_env.reset()
                shared_obs.np_arr[env_start_idx:env_end_idx] = obs
                shared_dones.np_arr[env_start_idx:env_end_idx] = False
                shared_rews.np_arr[env_start_idx:env_end_idx] = 0.0
                comp_infos = []
            elif instr == "step":
                # Actions were written into shared memory by the parent.
                actions = shared_actions.np_arr[env_start_idx:env_end_idx]
                observations, rewards, dones, infos = vec_env.step(actions)
                shared_obs.np_arr[env_start_idx:env_end_idx] = observations
                shared_dones.np_arr[env_start_idx:env_end_idx] = dones
                shared_rews.np_arr[env_start_idx:env_end_idx] = rewards
                comp_infos = compress_info(infos)
            elif isinstance(instr, tuple):
                # Tuple commands carry a payload; currently only "seed".
                name, data = instr
                if name == "seed":
                    vec_env.seed(data)
                comp_infos = []
            elif instr == "terminate":
                return
            # Acknowledge every command with the (possibly empty) infos.
            pipe.send(comp_infos)
    except BaseException as e:
        tb = traceback.format_exc()
        pipe.send((e, tb))
class ProcConcatVec(gym.vector.VectorEnv):
    """Concatenation of several vector envs, each running in its own process.

    Observations, actions, rewards and dones travel through shared-memory
    arrays; the pipes carry only control messages and compressed info dicts.
    """

    def __init__(self, vec_env_constrs, observation_space, action_space, tot_num_envs):
        self.observation_space = observation_space
        self.action_space = action_space
        self.num_envs = num_envs = tot_num_envs
        # Shared buffers sized for the concatenation of all sub-envs.
        self.shared_obs = SharedArray((num_envs,) + self.observation_space.shape, dtype=self.observation_space.dtype)
        act_space_wrap = SpaceWrapper(self.action_space)
        self.shared_act = SharedArray((num_envs,) + act_space_wrap.shape, dtype=act_space_wrap.dtype)
        self.shared_rews = SharedArray((num_envs,), dtype=np.float32)
        self.shared_dones = SharedArray((num_envs,), dtype=np.uint8)

        pipes = []
        procs = []
        for constr in vec_env_constrs:
            inpt, outpt = mp.Pipe()
            proc = mp.Process(
                target=async_loop, args=(constr, outpt, self.shared_obs, self.shared_act, self.shared_rews, self.shared_dones)
            )
            proc.start()
            pipes.append(inpt)
            procs.append(proc)

        self.pipes = pipes
        self.procs = procs

        # Workers first report their env counts; num_envs is then reused as a
        # running offset to hand each worker its slice start index.
        num_envs = 0
        env_nums = self._receive_info()
        idx_starts = []
        for pipe, cnum_env in zip(self.pipes, env_nums):
            cur_env_idx = num_envs
            num_envs += cnum_env
            pipe.send(cur_env_idx)
            idx_starts.append(cur_env_idx)

        assert num_envs == tot_num_envs
        self.idx_starts = idx_starts

    def reset(self):
        """Reset all sub-envs and return the stacked observations."""
        for pipe in self.pipes:
            pipe.send("reset")
        self._receive_info()

        observations = self.shared_obs.np_arr
        return observations

    def step_async(self, actions):
        # Publish actions via shared memory, then tell every worker to step.
        self.shared_act.np_arr[:] = actions
        for pipe in self.pipes:
            pipe.send("step")

    def _receive_info(self):
        """Collect one reply per worker; re-raise any shipped exception."""
        all_data = []
        for cin in self.pipes:
            data = cin.recv()
            if isinstance(data, tuple):
                # Worker sent (exception, traceback-string).
                e, tb = data
                print(tb)
                raise e
            all_data.append(data)
        return all_data

    def step_wait(self):
        """Wait for all workers, then return the shared-memory results."""
        compressed_infos = self._receive_info()
        infos = decompress_info(self.num_envs, self.idx_starts, compressed_infos)
        observations = self.shared_obs.np_arr
        rewards = self.shared_rews.np_arr
        dones = self.shared_dones.np_arr
        return observations, rewards, dones, infos

    def step(self, actions):
        self.step_async(actions)
        return self.step_wait()

    def seed(self, seed=None):
        # Offset the seed per slice so sub-envs do not share RNG streams.
        for i, pipe in enumerate(self.pipes):
            pipe.send(("seed", seed + self.idx_starts[i]))
        self._receive_info()

    def __del__(self):
        # Best-effort shutdown: ask workers to exit, then reap them.
        for pipe in self.pipes:
            try:
                pipe.send("terminate")
            except BrokenPipeError:
                pass
        for proc in self.procs:
            proc.join()
|
example_sync.py | #!/usr/bin/env python3
"""
This is an example of how the pytradfri-library can be used.
To run the script, do the following:
$ pip3 install pytradfri
$ Download this file (example_sync.py)
$ python3 example_sync.py <IP>
Where <IP> is the address to your IKEA gateway. The first time
running you will be asked to input the 'Security Code' found on
the back of your IKEA gateway.
"""
# Hack to allow relative import above top level package
import sys
import os
folder = os.path.dirname(os.path.abspath(__file__)) # noqa
sys.path.insert(0, os.path.normpath("%s/.." % folder)) # noqa
from pytradfri import Gateway
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.error import PytradfriError
from pytradfri.util import load_json, save_json
import uuid
import argparse
import threading
import time
# Stores one identity/PSK pair per gateway host (see run() below).
CONFIG_FILE = 'tradfri_standalone_psk.conf'

parser = argparse.ArgumentParser()
parser.add_argument('host', metavar='IP', type=str,
                    help='IP Address of your Tradfri gateway')
parser.add_argument('-K', '--key', dest='key', required=False,
                    help='Security code found on your Tradfri gateway')
args = parser.parse_args()

# First run for this gateway: prompt for the 16-character security code
# printed on the device so a PSK can be generated later.
if args.host not in load_json(CONFIG_FILE) and args.key is None:
    print("Please provide the 'Security Code' on the back of your "
          "Tradfri gateway:", end=" ")
    key = input().strip()
    if len(key) != 16:
        raise PytradfriError("Invalid 'Security Code' provided.")
    else:
        args.key = key
def observe(api, device):
    """Start observing *device* on a daemon thread and give it time to attach."""
    def on_update(updated_device):
        light = updated_device.light_control.lights[0]
        print("Received message for: %s" % light)

    def on_error(err):
        print(err)

    def observation_worker():
        api(device.observe(on_update, on_error, duration=120))

    threading.Thread(target=observation_worker, daemon=True).start()

    print('Sleeping to start observation task')
    time.sleep(1)
def run():
    """Connect to the gateway, print light info and demo a few commands."""
    # Assign configuration variables.
    # The configuration check takes care they are present.
    conf = load_json(CONFIG_FILE)

    try:
        # Reuse the identity/PSK previously stored for this gateway.
        identity = conf[args.host].get('identity')
        psk = conf[args.host].get('key')
        api_factory = APIFactory(host=args.host, psk_id=identity, psk=psk)
    except KeyError:
        # First contact: create a fresh identity, derive a PSK from the
        # security code, and persist both for next time.
        identity = uuid.uuid4().hex
        api_factory = APIFactory(host=args.host, psk_id=identity)

        try:
            psk = api_factory.generate_psk(args.key)
            print('Generated PSK: ', psk)

            conf[args.host] = {'identity': identity,
                               'key': psk}
            save_json(CONFIG_FILE, conf)
        except AttributeError:
            # args.key is None when no code was supplied.
            raise PytradfriError("Please provide the 'Security Code' on the "
                                 "back of your Tradfri gateway using the "
                                 "-K flag.")

    api = api_factory.request

    gateway = Gateway()

    devices_command = gateway.get_devices()
    devices_commands = api(devices_command)
    devices = api(devices_commands)

    lights = [dev for dev in devices if dev.has_light_control]

    # Print all lights
    print(lights)

    # Lights can be accessed by its index, so lights[1] is the second light
    if lights:
        light = lights[0]
    else:
        print("No lights found!")
        light = None

    if light:
        observe(api, light)

        # Example 1: checks state of the light (true=on)
        print("State: {}".format(light.light_control.lights[0].state))

        # Example 2: get dimmer level of the light
        print("Dimmer: {}".format(light.light_control.lights[0].dimmer))

        # Example 3: What is the name of the light
        print("Name: {}".format(light.name))

        # Example 4: Set the light level of the light
        dim_command = light.light_control.set_dimmer(254)
        api(dim_command)

        # Example 5: Change color of the light
        # f5faf6 = cold | f1e0b5 = normal | efd275 = warm
        color_command = light.light_control.set_color_temp(250)
        api(color_command)

    tasks_command = gateway.get_smart_tasks()
    tasks_commands = api(tasks_command)
    tasks = api(tasks_commands)

    # Example 6: Return the transition time (in minutes) for task#1
    if tasks:
        print(tasks[0].task_control.tasks[0].transition_time)

        # Example 7: Set the dimmer stop value to 30 for light#1 in task#1
        dim_command_2 = tasks[0].start_action.devices[0].item_controller \
            .set_dimmer(30)
        api(dim_command_2)

    if light:
        # Keep the process alive so the observation thread can report events.
        print("Sleeping for 2 min to listen for more observation events")
        print("Try altering the light (%s) in the app, and watch the events!" %
              light.name)
        time.sleep(120)
# Run the example as soon as the module is executed.
run()
|
__init__.py | import sys
import threading
import itertools
class Spinner(object):
    """Terminal spinner animated by a background thread."""

    # Class-level on purpose: the animation phase is shared across instances.
    spinner_cycle = itertools.cycle(['-', '/', '|', '\\'])

    def __init__(self, beep=False, disable=False, force=False, stream=sys.stdout):
        self.disable = disable
        self.beep = beep
        self.force = force
        self.stream = stream
        self.stop_running = None
        self.spin_thread = None

    def start(self):
        """Launch the spinner thread unless disabled or stream is not a TTY."""
        if self.disable:
            return
        if not (self.stream.isatty() or self.force):
            return
        self.stop_running = threading.Event()
        self.spin_thread = threading.Thread(target=self.init_spin)
        self.spin_thread.start()

    def stop(self):
        """Signal the spinner thread to exit and wait for it to finish."""
        if not self.spin_thread:
            return
        self.stop_running.set()
        self.spin_thread.join()

    def init_spin(self):
        """Thread body: draw a frame, pause, erase it, until told to stop."""
        write = self.stream.write
        flush = self.stream.flush
        while not self.stop_running.is_set():
            write(next(self.spinner_cycle))
            flush()
            self.stop_running.wait(0.25)
            write('\b')
            flush()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.disable:
            return False
        self.stop()
        if self.beep:
            self.stream.write('\7')
            self.stream.flush()
        return False
def spinner(beep=False, disable=False, force=False, stream=sys.stdout):
    """Create a context manager that shows a spinner while the context runs.

    The spinner is drawn only when *stream* is attached to a terminal,
    unless creation is forced with *force*.

    Parameters
    ----------
    beep : bool
        Beep when spinner finishes.
    disable : bool
        Hide spinner.
    force : bool
        Force creation of spinner even when stdout is redirected.

    Example
    -------

        with spinner():
            do_something()
            do_something_else()

    """
    return Spinner(beep=beep, disable=disable, force=force, stream=stream)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
dataset_np_ipc.py | """Data fetching with pandas
"""
# MIT License
#
# Copyright (c) 2018 Yichun Shi
# Copyright (c) 2021 Kai Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import math
import random
import shutil
from functools import wraps
from multiprocessing import Process, Queue
import numpy as np
import pandas as pd
queue_timeout = 600
class Dataset(object):
    """Image dataset backed by a pandas DataFrame.

    Rows hold at least 'abspath' and 'label' columns. Class-balanced
    batches can be drawn either synchronously or via multiprocessing
    worker processes feeding a queue.
    """

    def __init__(self, path=None, prefix=None):
        if path is not None:
            self.init_from_path(path)
        else:
            # self.data = pd.DataFrame([], columns=['path', 'abspath', 'label', 'name'])
            self.data = pd.DataFrame([], columns=['abspath', 'label'])
            self.prefix = prefix
        self.base_seed = 0
        self.batch_queue = None
        self.batch_workers = None

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value
        return self.data[key]

    def _delitem(self, key):
        self.data.__delitem__(key)

    @property
    def num_classes(self):
        # Number of distinct labels present in the data.
        return len(self.data['label'].unique())

    @property
    def classes(self):
        return self.data['label'].unique()

    @property
    def size(self):
        # Total number of rows (images).
        return self.data.shape[0]

    @property
    def loc(self):
        return self.data.loc

    @property
    def iloc(self):
        return self.data.iloc

    def init_from_path(self, path):
        """Dispatch initialization on the type of *path*: list, folder or .txt."""
        if type(path) == list:
            self.init_from_folders(path)
            return
        path = os.path.expanduser(path)
        _, ext = os.path.splitext(path)
        if os.path.isdir(path):
            self.init_from_folder(path)
        elif ext == '.txt':
            self.init_from_list(path)
        else:
            raise ValueError('Cannot initialize dataset from path: %s\n\
It should be either a folder, .txt or .hdf5 file' % path)
        # print('%d images of %d classes loaded' % (len(self.images), self.num_classes))

    def init_from_folder(self, folder):
        """Load an ImageFolder-style tree: one subdirectory per class."""
        folder = os.path.abspath(os.path.expanduser(folder))
        class_names = os.listdir(folder)
        class_names.sort()
        # class_names = class_names[:2000]
        print('num_classes', len(class_names))
        paths = []
        labels = []
        names = []
        for label, class_name in enumerate(class_names):
            classdir = os.path.join(folder, class_name)
            if os.path.isdir(classdir):
                images_class = os.listdir(classdir)
                images_class.sort()
                images_class = [os.path.join(class_name,img) for img in images_class]
                paths.extend(images_class)
                labels.extend(len(images_class) * [label])
                names.extend(len(images_class) * [class_name])
        abspaths = [os.path.join(folder,p) for p in paths]
        # self.data = pd.DataFrame({'path': paths, 'abspath': abspaths, 'label': labels, 'name': names})
        self.data = pd.DataFrame({'abspath': abspaths, 'label': labels})
        print('num_images', len(names))
        self.prefix = folder

    def init_from_folders(self, folders):
        """Load several class-per-subfolder roots, offsetting labels to keep them unique."""
        class_names_all = []
        labels_all = []
        abspaths_all = []
        for folder in folders:
            folder = os.path.abspath(os.path.expanduser(folder))
            class_names = os.listdir(folder)
            class_names.sort()
            # class_names = class_names[:30000]
            # Labels from this folder start after all previously seen classes.
            base_label = len(class_names_all)
            class_names_all += class_names
            print('num_classes', len(class_names), len(class_names_all))
            paths = []
            labels = []
            names = []
            for label, class_name in enumerate(class_names):
                classdir = os.path.join(folder, class_name)
                if os.path.isdir(classdir):
                    images_class = os.listdir(classdir)
                    images_class.sort()
                    images_class = [os.path.join(class_name,img) for img in images_class]
                    paths.extend(images_class)
                    labels.extend(len(images_class) * [label + base_label])
                    names.extend(len(images_class) * [class_name])
            abspaths = [os.path.join(folder,p) for p in paths]
            labels_all += labels
            abspaths_all += abspaths
        self.data = pd.DataFrame({'abspath': abspaths_all, 'label': labels_all})
        print('num_images', len(abspaths_all))
        self.prefix = folders

    def init_from_list(self, filename, folder_depth=2):
        """Load from a text list with one "abspath [label]" entry per line."""
        print('init_from_list', filename)
        with open(filename, 'r') as f:
            lines = f.readlines()
        lines = [line.strip().split(' ') for line in lines]
        abspaths = [os.path.abspath(line[0]) for line in lines]
        paths = ['/'.join(p.split('/')[-folder_depth:]) for p in abspaths]
        if len(lines[0]) == 2:
            labels = [int(line[1]) for line in lines]
            names = [str(lb) for lb in labels]
        elif len(lines[0]) == 1:
            # No labels supplied: derive them from the parent folder names.
            names = [p.split('/')[-folder_depth] for p in abspaths]
            _, labels = np.unique(names, return_inverse=True)
        else:
            raise ValueError('List file must be in format: "fullpath(str) \
label(int)" or just "fullpath(str)"')
        self.data = pd.DataFrame({'path': paths, 'abspath': abspaths, 'label': labels, 'name': names})
        self.prefix = abspaths[0].split('/')[:-folder_depth]
        print('num_classes', np.max(labels)+1)
        print('num_images', len(names))

    def write_datalist_to_file(self, filename):
        """Dump "abspath label" lines to *filename*, flushing every 10k rows."""
        print('write_datalist_to_file', filename)
        with open(filename, 'w') as f:
            s = ''
            for index, row in self.data.iterrows():
                s += row['abspath'] + ' ' + str(row['label']) + '\n'
                if index % 10000 == 0:
                    print(index)
                    f.write(s)
                    s = ''
            if len(s) > 0:
                f.write(s)
        # NOTE(review): terminates the whole process after writing —
        # presumably a one-off export helper; confirm before reusing.
        exit(0)

    #
    # Data Loading
    #

    def set_base_seed(self, base_seed=0):
        # Offset added to every batch worker's RNG seed.
        self.base_seed = base_seed

    def _random_samples_from_class(self, label, num_samples, exception=None):
        """Sample *num_samples* row indices of class *label*, reusing rows if needed."""
        # indices_temp = self.class_indices[label]
        indices_temp = list(np.where(self.data['label'].values == label)[0])
        if exception is not None:
            indices_temp.remove(exception)
        assert len(indices_temp) > 0
        # Sample indices multiple times when more samples are required than present.
        indices = []
        iterations = int(np.ceil(1.0*num_samples / len(indices_temp)))
        for i in range(iterations):
            sample_indices = np.random.permutation(indices_temp)
            indices.append(sample_indices)
        indices = list(np.concatenate(indices, axis=0)[:num_samples])
        return indices

    def _get_batch_indices(self, batch_format):
        ''' Get the indices from index queue and fetch the data with indices.'''
        indices_batch = []
        batch_size = batch_format['size']
        num_classes = batch_format['num_classes']
        assert batch_size % num_classes == 0
        num_samples_per_class = batch_size // num_classes
        # Pick num_classes random classes, then an equal number of rows each.
        idx_classes = np.random.permutation(self.classes)[:num_classes]
        indices_batch = []
        for c in idx_classes:
            indices_batch.extend(self._random_samples_from_class(c, num_samples_per_class))
        return indices_batch

    def _get_batch(self, batch_format):
        """Draw one class-balanced batch as a dict of column arrays."""
        indices = self._get_batch_indices(batch_format)
        batch = {}
        for column in self.data.columns:
            batch[column] = self.data[column].values[indices]
        return batch

    # Multithreading preprocessing images
    def _batch_queue_worker_t(self, seed):
        # Give each worker its own RNG stream, offset by base_seed.
        np.random.seed(seed+self.base_seed)
        while True:
            batch = self._get_batch(self.batch_format)
            if self.proc_func is not None:
                batch['image'] = self.proc_func(batch['abspath'], is_training=True)
            self.batch_queue.put(batch)

    def start_batch_queue2(self, batch_format, proc_func=None, maxsize=5, num_threads=4):
        """Start daemon worker processes that keep batch_queue filled."""
        self.proc_func = proc_func
        self.batch_format = batch_format
        self.batch_queue = Queue(maxsize=maxsize)
        self.batch_workers = []
        for i in range(num_threads):
            worker = Process(target=self._batch_queue_worker_t, args=(i,))
            worker.daemon = True
            worker.start()
            self.batch_workers.append(worker)

    def pop_batch_queue2(self, timeout=queue_timeout):
        return self.batch_queue.get(block=True, timeout=timeout)

    def start_batch_queue(self, batch_format, proc_func=None, maxsize=5, num_threads=4):
        # Synchronous variant: only records the config; batches are built on demand.
        self.proc_func = proc_func
        self.batch_format = batch_format

    def pop_batch_queue(self, timeout=queue_timeout):
        batch = self._get_batch(self.batch_format)
        if self.proc_func is not None:
            batch['image'] = self.proc_func(batch['abspath'], is_training=True)
        return batch

    def release_queue(self):
        """Tear down queues and worker processes."""
        # NOTE(review): self.index_queue and self.index_worker are never
        # assigned anywhere in this class — these branches would raise
        # AttributeError if this method is called; confirm intended usage.
        if self.index_queue is not None:
            self.index_queue.close()
        if self.batch_queue is not None:
            self.batch_queue.close()
        if self.index_worker is not None:
            self.index_worker.terminate()
            del self.index_worker
            self.index_worker = None
        if self.batch_workers is not None:
            for w in self.batch_workers:
                w.terminate()
                del w
            self.batch_workers = None
|
send fuel measure.py | # Import standard python modules
import time
import os
import sys
import threading
# Import Adafruit IO Client.
from Adafruit_IO import Client
# Import RPi.GPIO Module
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need \
superuser privileges. You can achieve \
this by using 'sudo' to run your script")
# Class for params into object for Threading call
class MessageSendControl():
    """Mutable holder for the value shared between the main loop and the sender thread."""

    def __init__(self, message):
        # Current payload; the main loop rebinds this attribute on user input.
        self.message = message
# Define Functions for Threading
def send_message(aioClient, tankMeasureFeedInstance, tankStatusFeedInstance, messageInstance, pinList):
    """Publish the tank level every 10 seconds and drive the status LEDs.

    Runs forever; intended as the target of a background thread. Reads the
    current value from messageInstance.message (set by the main loop).
    """
    while True:
        if messageInstance.message != "":
            if messageInstance.message.isdigit():
                aioClient.send(tankMeasureFeedInstance.key, int(messageInstance.message))
                # Case for trigger a normal status
                if int(messageInstance.message) >= 10 and int(messageInstance.message) <= 90:
                    aioClient.send(tankStatusFeedInstance.key, "normal")
                # LED control
                # Fixed: use the aioClient parameter instead of the module
                # global 'aio', so the thread does not depend on script state.
                tankStatusFeedData = aioClient.receive(tankStatusFeedInstance.key)
                # Light only the LED matching the current status value.
                for i in pinList:
                    if i == tankStatusFeedData.value:
                        GPIO.output(pinList.get(i), GPIO.HIGH)
                    else:
                        GPIO.output(pinList.get(i), GPIO.LOW)
                print("Capacidad en el tanque {}%".format(messageInstance.message))
            else:
                print("El dato '{}' no es apto para el envio".format(messageInstance.message))
        time.sleep(10)
if __name__ == "__main__":
    # Expect exactly four CLI arguments besides the program name.
    if(len(sys.argv)!=5):
        sys.stderr.write('Usage: "{0}" $AIOUsername $AIOKey $TankMeasureFeedKey $TankStatusFeedKey\n'.format(sys.argv[0]))
        os._exit(1)
    AIOUsername=sys.argv[1]
    AIOKey=sys.argv[2]  # Beware, your Key is Secret!
    TankMeasureFeedKey=sys.argv[3] # Feed key where tank measure data is received
    TankStatusFeedKey=sys.argv[4] # Feed key where tank status data is received
    # Connect to Adafruit IO Server
    aio=Client(username=AIOUsername, key=AIOKey)
    # Link to feeds
    tankMeasureFeedInstance=aio.feeds(TankMeasureFeedKey)
    tankStatusFeedInstance=aio.feeds(TankStatusFeedKey)
    # Create messageSendControl instance
    messageInstance=MessageSendControl("")
    # Setup GPIO mode
    GPIO.setmode(GPIO.BCM)
    # Maps tank status string -> GPIO pin number
    pinList={"bajo":10, "normal":11, "alto":12}
    # Set GPIO pin signal OUT and initial value "shutdown"
    # NOTE(review): passing the dict iterates its KEYS (status strings);
    # the pin numbers are pinList.values() — confirm this is intended.
    GPIO.setup(pinList, GPIO.OUT, initial=GPIO.LOW)
    # Setup Threading, to publish message every 10 seconds
    hilo0=threading.Thread(target=send_message, args=(aio, tankMeasureFeedInstance, tankStatusFeedInstance, messageInstance, pinList))
    hilo0.start()
    # Mod publish value
    while messageInstance.message!="x": # char 'x' to exit
        messageInstance.message=input("Ingrese nuevo valor para el tanque\n")
    os._exit(1)
|
agent.py | from abc import ABC, abstractmethod
import logging
from ROAR.utilities_module.vehicle_models import Vehicle
from ROAR.utilities_module.data_structures_models import SensorsData, IMUData, Transform
from ROAR.utilities_module.vehicle_models import VehicleControl
from typing import Optional, List
from pathlib import Path
import cv2
import numpy as np
from ROAR.utilities_module.module import Module
from ROAR.configurations.configuration import Configuration as AgentConfig
from ROAR.planning_module.local_planner.local_planner import LocalPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.mission_planner import MissionPlanner
import threading
from typing import Dict, Any
from datetime import datetime
import threading
from ROAR.utilities_module.camera_models import Camera
class Agent(ABC):
    """
    Abstract Agent class that define the minimum of a ROAR agent.

    Inherited agent can perform different duties.
    """

    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, imu: Optional[IMUData] = None,
                 should_init_default_cam=True, **kwargs):
        """
        Initialize cameras, output folder, and logging utilities

        Args:
            vehicle: Vehicle instance
            agent_settings: User specified settings for Agent
            imu: IMU data (will be deprecated to be passed in like this)
            should_init_default_cam: compute default camera intrinsics when True
            **kwargs: additional info, stored in self.kwargs
        """
        self.logger = logging.getLogger(__name__)
        self.vehicle = vehicle
        self.agent_settings = agent_settings
        self.front_rgb_camera: Optional[Camera] = agent_settings.front_rgb_cam
        self.front_depth_camera: Optional[Camera] = agent_settings.front_depth_cam
        self.rear_rgb_camera = agent_settings.rear_rgb_cam
        self.imu = imu
        self.is_done = False

        # One output folder per recorded sensor stream / signal.
        self.output_folder_path = \
            Path(self.agent_settings.output_data_folder_path)
        self.front_depth_camera_output_folder_path = \
            self.output_folder_path / "front_depth"
        self.front_rgb_camera_output_folder_path = \
            self.output_folder_path / "front_rgb"
        self.rear_rgb_camera_output_folder_path = \
            self.output_folder_path / "rear_rgb"
        self.should_save_sensor_data = self.agent_settings.save_sensor_data
        self.transform_output_folder_path = self.output_folder_path / "transform"
        self.vehicle_state_output_folder_path = self.output_folder_path / "vehicle_state"
        self.local_planner_next_waypoint_output_foler_path = self.output_folder_path / "next_waypoints"

        self.local_planner: Optional[LocalPlanner] = None
        self.behavior_planner: Optional[BehaviorPlanner] = None
        self.mission_planner: Optional[MissionPlanner] = None

        self.threaded_modules: List[Module] = []
        self.time_counter = 0

        if should_init_default_cam:
            self.init_cam()

        self.transform_file: Optional = None
        if self.should_save_sensor_data:
            self.front_depth_camera_output_folder_path.mkdir(parents=True,
                                                             exist_ok=True)
            self.front_rgb_camera_output_folder_path.mkdir(parents=True,
                                                           exist_ok=True)
            self.rear_rgb_camera_output_folder_path.mkdir(parents=True,
                                                          exist_ok=True)
            self.transform_output_folder_path.mkdir(parents=True,
                                                    exist_ok=True)
            self.vehicle_state_output_folder_path.mkdir(parents=True,
                                                        exist_ok=True)
            self.local_planner_next_waypoint_output_foler_path.mkdir(parents=True, exist_ok=True)
            self.write_meta_data()
            self.transform_file = (Path(self.transform_output_folder_path) /
                                   f"{datetime.now().strftime('%m_%d_%Y_%H')}.txt").open('w+')
        self.kwargs: Dict[str, Any] = kwargs  # additional info

    def write_meta_data(self):
        """Write the CSV header describing the vehicle-state record layout."""
        vehicle_state_file = (self.vehicle_state_output_folder_path / "meta_data.txt").open(mode='w')
        vehicle_state_file.write("x,y,z,roll,pitch,yaw,vx,vy,vz,ax,ay,az,throttle,steering")
        vehicle_state_file.close()

    def add_threaded_module(self, module: Module):
        """Register *module* to be run on its own thread; it must be marked threaded."""
        if module.threaded:
            self.threaded_modules.append(module)
        else:
            msg = f"Module {module} is not registered as threaded, but is attempting to run threaded"
            self.logger.error(msg)
            raise threading.ThreadError(msg)

    def init_cam(self) -> None:
        """
        Initialize the cameras by calculating the camera intrinsics and
        ensuring that the output folder path exists

        Returns:
            None
        """
        if self.front_rgb_camera is not None:
            self.front_rgb_camera.intrinsics_matrix = (
                self.front_rgb_camera.calculate_default_intrinsics_matrix()
            )
        if self.front_depth_camera is not None:
            self.front_depth_camera.intrinsics_matrix = (
                self.front_depth_camera.calculate_default_intrinsics_matrix()
            )
        if self.rear_rgb_camera is not None:
            self.rear_rgb_camera.intrinsics_matrix = (
                self.rear_rgb_camera.calculate_default_intrinsics_matrix()
            )

    @abstractmethod
    def run_step(self, sensors_data: SensorsData,
                 vehicle: Vehicle) -> VehicleControl:
        """
        Receive Sensor Data and vehicle state information on every step and
        return a control

        Args:
            sensors_data: sensor data on this frame
            vehicle: vehicle state on this frame

        Returns:
            Vehicle Control
        """
        self.time_counter += 1
        self.sync_data(sensors_data=sensors_data, vehicle=vehicle)
        if self.should_save_sensor_data:
            self.save_sensor_data_async()
        if self.local_planner is not None and self.local_planner.is_done():
            self.is_done = True
        else:
            self.is_done = False
        return VehicleControl()

    def sync_data(self, sensors_data: SensorsData, vehicle: Vehicle) -> None:
        """
        Sync agent's state by updating Sensor Data and vehicle information

        Args:
            sensors_data: the new frame's sensor data
            vehicle: the new frame's vehicle state

        Returns:
            None
        """
        self.vehicle = vehicle
        if self.front_rgb_camera is not None:
            self.front_rgb_camera.data = (
                sensors_data.front_rgb.data
                if sensors_data.front_rgb is not None
                else None
            )

        if self.front_depth_camera is not None:
            self.front_depth_camera.data = (
                sensors_data.front_depth.data
                if sensors_data.front_depth is not None
                else None
            )

        if self.rear_rgb_camera is not None:
            self.rear_rgb_camera.data = (
                sensors_data.rear_rgb.data
                if sensors_data.rear_rgb is not None
                else None
            )

        if self.imu is not None:
            self.imu = sensors_data.imu_data

    def save_sensor_data_async(self) -> None:
        """Persist the current frame's sensor data on a background thread."""
        x = threading.Thread(target=self.save_sensor_data, args=())
        x.start()

    def save_sensor_data(self) -> None:
        """
        Failure-safe saving function that saves all the sensor data of the
        current frame

        Returns:
            None
        """
        now = datetime.now().strftime('%m_%d_%Y_%H_%M_%S_%f')
        self.logger.info(f"Saving sensor data -> {now}")
        try:
            if self.front_rgb_camera is not None and self.front_rgb_camera.data is not None:
                cv2.imwrite((self.front_rgb_camera_output_folder_path /
                             f"frame_{now}.png").as_posix(),
                            self.front_rgb_camera.data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")

        try:
            # Fixed: guard on the DEPTH camera (previously checked
            # front_rgb_camera, which raised/logged every frame whenever the
            # depth camera was absent while the RGB camera was present).
            if self.front_depth_camera is not None and self.front_depth_camera.data is not None:
                np.save((self.front_depth_camera_output_folder_path /
                         f"frame_{now}").as_posix(),
                        self.front_depth_camera.data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")

        try:
            if self.rear_rgb_camera is not None and self.rear_rgb_camera.data is not None:
                cv2.imwrite((self.rear_rgb_camera_output_folder_path /
                             f"frame_{now}.png").as_posix(),
                            self.rear_rgb_camera.data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")

        try:
            self.transform_file.write(self.vehicle.transform.record() + "\n")
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")

        try:
            if self.vehicle is not None:
                data = self.vehicle.to_array()
                np.save((Path(self.vehicle_state_output_folder_path) /
                         f"frame_{now}").as_posix(), data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")

        try:
            if self.local_planner is not None and self.local_planner.way_points_queue is not None and len(
                    self.local_planner.way_points_queue) > 0:
                next_waypoint: Transform = self.local_planner.way_points_queue[0]
                np.save((Path(self.local_planner_next_waypoint_output_foler_path) / f"frame_{now}").as_posix(),
                        next_waypoint.location.to_array())
        except Exception as e:
            self.logger.error(f"Failed to save at Frame {self.time_counter}. Error: {e}")

    def start_module_threads(self):
        """Start every registered threaded module."""
        for module in self.threaded_modules:
            module.start()
            self.logger.debug(f"Module: {module.name} -> started")

    def shutdown_module_threads(self):
        """Shut down threaded modules and close the transform log file."""
        for module in self.threaded_modules:
            module.shutdown()
        if self.transform_file is not None and self.transform_file.closed is False and self.should_save_sensor_data:
            self.transform_file.close()
|
exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal
from bitcoin import COIN
from i18n import _
from util import PrintError, ThreadJob
from util import format_satoshis
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
    """Base class for fiat exchange-rate providers.

    Subclasses implement get_rates() and, optionally, the history API.
    Quotes and history are fetched on daemon threads and the results are
    delivered through the on_quotes / on_history callbacks.
    """

    def __init__(self, on_quotes, on_history):
        self.history = {}
        self.quotes = {}
        self.on_quotes = on_quotes
        self.on_history = on_history

    def get_json(self, site, get_string):
        """Fetch and decode a JSON document from the exchange."""
        # APIs must have https
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
        return response.json()

    def get_csv(self, site, get_string):
        """Fetch a CSV document and return its rows as dicts."""
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
        # NOTE(review): splitting response.content on a str assumes a
        # Python 2 runtime; under Python 3 content is bytes — confirm target.
        reader = csv.DictReader(response.content.split('\n'))
        return list(reader)

    def name(self):
        return self.__class__.__name__

    def update_safe(self, ccy):
        """Fetch quotes for *ccy*; errors are logged, never raised."""
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except BaseException as e:
            self.print_error("failed fx quotes:", e)
        # Callback fires even on failure so the UI can refresh.
        self.on_quotes()

    def update(self, ccy):
        # Fetch off the calling thread; daemon so shutdown is not blocked.
        t = Thread(target=self.update_safe, args=(ccy,))
        t.setDaemon(True)
        t.start()

    def get_historical_rates_safe(self, ccy):
        """Fetch historical rates for *ccy*; errors are logged, never raised."""
        try:
            self.print_error("requesting fx history for", ccy)
            self.history[ccy] = self.historical_rates(ccy)
            self.print_error("received fx history for", ccy)
            self.on_history()
        except BaseException as e:
            self.print_error("failed fx history:", e)

    def get_historical_rates(self, ccy):
        """Return cached history for *ccy*, kicking off a fetch if missing."""
        result = self.history.get(ccy)
        if not result and ccy in self.history_ccys():
            t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
            t.setDaemon(True)
            t.start()
        return result

    def history_ccys(self):
        # Overridden by exchanges that support the history API.
        return []

    def historical_rate(self, ccy, d_t):
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))

    def get_currencies(self):
        """List the 3-letter currency codes this exchange quotes."""
        # NOTE(review): dict.iteritems() is Python 2 only — confirm runtime.
        rates = self.get_rates('')
        return sorted([str(a) for (a, b) in rates.iteritems() if b is not None and len(a)==3])
class Bit2C(ExchangeBase):
    """Bit2C (Israel) -- quotes LTC in NIS."""

    def get_rates(self, ccy):
        ticker = self.get_json('www.bit2c.co.il', '/Exchanges/LTCNIS/Ticker.json')
        return {'NIS': Decimal(ticker['ll'])}
class BitcoinAverage(ExchangeBase):
    """BitcoinAverage global index; supports many history currencies."""

    def get_rates(self, ccy):
        data = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
        return {key.replace("LTC", ""): Decimal(data[key]['last'])
                for key in data if key != 'timestamp'}

    def history_ccys(self):
        return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
                'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
                'ZAR']

    def historical_rates(self, ccy):
        rows = self.get_csv('apiv2.bitcoinaverage.com',
                            "/indices/global/history/LTC%s?period=alltime&format=csv" % ccy)
        return {row['DateTime'][:10]: row['Average'] for row in rows}
class BitcoinVenezuela(ExchangeBase):
    """BitcoinVenezuela index with ARS/EUR/USD/VEF history."""

    def get_rates(self, ccy):
        data = self.get_json('api.bitcoinvenezuela.com', '/')
        # The API sometimes reports NULL for a currency; drop those entries.
        return {key: data['LTC'][key] for key in data['LTC']
                if data['LTC'][key] is not None}

    def history_ccys(self):
        return ['ARS', 'EUR', 'USD', 'VEF']

    def historical_rates(self, ccy):
        return self.get_json('api.bitcoinvenezuela.com',
                             "/historical/index.php?coin=LTC")[ccy +'_LTC']
class Bitfinex(ExchangeBase):
    """Bitfinex public ticker -- quotes LTC in USD."""

    def get_rates(self, ccy):
        ticker = self.get_json('api.bitfinex.com', '/v1/pubticker/ltcusd')
        return {'USD': Decimal(ticker['last_price'])}
class BitStamp(ExchangeBase):
    """Bitstamp -- quotes LTC in USD."""

    def get_rates(self, ccy):
        ticker = self.get_json('www.bitstamp.net', '/api/v2/ticker/ltcusd/')
        return {'USD': Decimal(ticker['last'])}
class BTCChina(ExchangeBase):
    """BTCChina -- quotes LTC in CNY."""

    def get_rates(self, ccy):
        ticker = self.get_json('data.btcchina.com', '/data/ticker?market=ltccny')
        return {'CNY': Decimal(ticker['ticker']['last'])}
class BTCe(ExchangeBase):
    """BTC-e: aggregates the EUR, RUB and USD tickers into one quote dict."""

    def get_rates(self, ccy):
        quotes = {}
        json_eur = self.get_json('btc-e.nz', '/api/3/ticker/ltc_eur')
        quotes['EUR'] = Decimal(json_eur['ltc_eur']['last'])
        json_rub = self.get_json('btc-e.nz', '/api/3/ticker/ltc_rur')
        quotes['RUB'] = Decimal(json_rub['ltc_rur']['last'])
        json_usd = self.get_json('btc-e.nz', '/api/3/ticker/ltc_usd')
        quotes['USD'] = Decimal(json_usd['ltc_usd']['last'])
        return quotes
class CaVirtEx(ExchangeBase):
    """CaVirtEx (Canada) -- quotes LTC in CAD."""

    def get_rates(self, ccy):
        data = self.get_json('www.cavirtex.com', '/api2/ticker.json?currencypair=LTCCAD')
        return {'CAD': Decimal(data['ticker']['LTCCAD']['last'])}
class CoinSpot(ExchangeBase):
    """CoinSpot (Australia) -- quotes LTC in AUD."""

    def get_rates(self, ccy):
        data = self.get_json('www.coinspot.com.au', '/pubapi/latest')
        return {'AUD': Decimal(data['prices']['ltc']['last'])}
class GoCoin(ExchangeBase):
    """GoCoin price feed -- returns every currency the API quotes for LTC."""

    def get_rates(self, ccy):
        data = self.get_json('x.g0cn.com', '/prices')
        prices = data['prices']['LTC']
        return {code: Decimal(prices[code]) for code in prices}
class HitBTC(ExchangeBase):
    """HitBTC: one ticker per currency pair; only EUR and USD supported."""

    def get_rates(self, ccy):
        supported = ['EUR', 'USD']
        ticker = self.get_json('api.hitbtc.com', '/api/1/public/LTC%s/ticker' % ccy)
        result = dict.fromkeys(supported)
        if ccy in supported:
            result[ccy] = Decimal(ticker['last'])
        return result
class Kraken(ExchangeBase):
    """Kraken: discovers the available XLTCZ* pairs, then queries the ticker.

    get_rates() returns a dict keyed by every supported currency, with a
    Decimal value for the requested one (None for the rest).
    """

    def get_rates(self, ccy):
        dicts = self.get_json('api.kraken.com', '/0/public/AssetPairs')
        pairs = [k for k in dicts['result'] if k.startswith('XLTCZ')]
        json = self.get_json('api.kraken.com',
                             '/0/public/Ticker?pair=%s' % ','.join(pairs))
        ccys = [p[5:] for p in pairs]
        result = dict.fromkeys(ccys)
        # Guard against a currency Kraken does not trade: the unguarded
        # lookup raised KeyError for unsupported ccys (cf. HitBTC.get_rates).
        if ccy in ccys:
            result[ccy] = Decimal(json['result']['XLTCZ'+ccy]['c'][0])
        return result

    def history_ccys(self):
        return ['EUR', 'USD']

    def historical_rates(self, ccy):
        # Daily OHLC candles; index 0 is the epoch time, index 4 the close.
        query = '/0/public/OHLC?pair=LTC%s&interval=1440' % ccy
        json = self.get_json('api.kraken.com', query)
        history = json['result']['XLTCZ'+ccy]
        return dict([(time.strftime('%Y-%m-%d', time.localtime(t[0])), t[4])
                     for t in history])
class OKCoin(ExchangeBase):
    """OKCoin (China) -- quotes LTC in CNY."""

    def get_rates(self, ccy):
        data = self.get_json('www.okcoin.cn', '/api/ticker.do?symbol=ltc_cny')
        return {'CNY': Decimal(data['ticker']['last'])}
class MercadoBitcoin(ExchangeBase):
    """Mercado Bitcoin (Brazil) -- quotes LTC in BRL."""

    def get_rates(self, ccy):
        data = self.get_json('mercadobitcoin.net',
                             "/api/v2/ticker_litecoin")
        return {'BRL': Decimal(data['ticker']['last'])}
class Bitcointoyou(ExchangeBase):
    """Bitcointoyou (Brazil) -- quotes LTC in BRL."""

    def get_rates(self, ccy):
        data = self.get_json('bitcointoyou.com',
                             "/API/ticker_litecoin.aspx")
        return {'BRL': Decimal(data['ticker']['last'])}
def dictinvert(d):
    """Invert a dict of lists: {k: [v, ...]} -> {v: [k, ...]}.

    Each value maps to the list of keys under which it appeared.
    """
    inv = {}
    # .items() instead of the Python-2-only .iteritems(), so this also
    # runs under Python 3 (it works identically on Python 2).
    for k, vlist in d.items():
        for v in vlist:
            inv.setdefault(v, []).append(k)
    return inv
def get_exchanges_and_currencies():
    """Return {exchange_name: [ccy, ...]}, cached in currencies.json.

    On a cache miss (file absent or unreadable) the table is rebuilt by
    querying every ExchangeBase subclass defined in this module, then
    written back to disk.
    """
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        # Close the handle deterministically instead of leaking it.
        with open(path, 'r') as f:
            return json.loads(f.read())
    except Exception:
        pass  # cache missing or corrupt: rebuild below
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
        except Exception:
            # A network/API failure for one exchange must not kill the rest.
            continue
    with open(path, 'w') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
# Built once at import time: exchange name -> supported currency codes.
CURRENCIES = get_exchanges_and_currencies()

def get_exchanges_by_ccy(history=True):
    """Map each currency code to the exchanges that quote it.

    With history=True, only history-capable support counts.
    """
    if not history:
        return dictinvert(CURRENCIES)
    hist_support = {name: globals()[name](None, None).history_ccys()
                    for name in CURRENCIES.keys()}
    return dictinvert(hist_support)
class FxThread(ThreadJob):
    """Background job that keeps fiat exchange rates up to date.

    Polls the configured exchange roughly every 150 seconds and exposes
    the conversion / formatting helpers used by the GUI.
    """

    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        # Set by history_rate() when it falls back to a spot quote.
        self.history_used_spot = False
        # GUI widgets; populated later by the settings dialog.
        self.ccy_combo = None
        self.hist_checkbox = None
        # Also initialises self.exchange and self.timeout.
        self.set_exchange(self.config_exchange())

    def get_currencies(self, h):
        """Sorted currency codes available (history-capable only if h)."""
        d = get_exchanges_by_ccy(h)
        return sorted(d.keys())

    def get_exchanges_by_ccy(self, ccy, h):
        """Exchanges quoting ccy (history-capable only if h)."""
        d = get_exchanges_by_ccy(h)
        return d.get(ccy, [])

    def ccy_amount_str(self, amount, commas):
        # Use the ISO 4217 precision for the currency; default 2 places.
        prec = CCY_PRECISIONS.get(self.ccy, 2)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        return fmt_str.format(round(amount, prec))

    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            # timeout == 0 means a refresh was forced (currency/exchange
            # change), so also refetch history if it is shown.
            if self.timeout ==0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy)
            if self.timeout <= time.time():
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)

    def is_enabled(self):
        return bool(self.config.get('use_exchange_rate'))

    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))

    def get_history_config(self):
        return bool(self.config.get('history_rates'))

    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))

    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", "EUR")

    def config_exchange(self):
        return self.config.get('use_exchange', 'BitcoinAverage')

    def show_history(self):
        return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()

    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0 # Because self.ccy changes
        self.on_quotes()

    def set_exchange(self, name):
        # Fall back to BitcoinAverage when the configured name is unknown.
        class_ = globals().get(name, BitcoinAverage)
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty.  Force
        # a quote refresh
        self.timeout = 0

    def on_quotes(self):
        self.network.trigger_callback('on_quotes')

    def on_history(self):
        self.network.trigger_callback('on_history')

    def exchange_rate(self):
        '''Returns None, or the exchange rate as a Decimal'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate:
            return Decimal(rate)

    def format_amount_and_units(self, btc_balance):
        """Balance formatted as e.g. '12.34 EUR', or '' without a rate."""
        rate = self.exchange_rate()
        return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)

    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        """Status-bar text showing the fiat value of 1 base unit."""
        rate = self.exchange_rate()
        return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
            self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)

    def value_str(self, satoshis, rate):
        if satoshis is None:  # Can happen with incomplete history
            return _("Unknown")
        if rate:
            value = Decimal(satoshis) / COIN * Decimal(rate)
            return "%s" % (self.ccy_amount_str(value, True))
        return _("No data")

    def history_rate(self, d_t):
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy)
            self.history_used_spot = True
        return rate

    def historical_value_str(self, satoshis, d_t):
        rate = self.history_rate(d_t)
        return self.value_str(satoshis, rate)
# ---- PyShell.py ----
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0  # someday pass in host, port for remote debug capability

# Override warnings module to write to warning_stream.  Initialize to send IDLE
# internal warnings to the console.  ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__  # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way."""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n" % (category.__name__, message)
return s
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    Rather than repeating the linecache code, patch it to save the
    <pyshell#...> entries, call the original linecache.checkcache()
    (skipping them), and then restore the saved entries.

    orig_checkcache is bound at definition time to the original
    method, allowing it to be patched.
    """
    cache = linecache.cache
    saved = {key: cache.pop(key) for key in list(cache)
             if key[:1] + key[-1:] == '<>'}
    orig_checkcache(filename)
    cache.update(saved)

# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        # Line numbers of active breakpoints in this window's file.
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)

        # Breakpoints are persisted per file in the user config directory.
        self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
                                           'breakpoints.lst')
        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    # Right-click context menu entries (label, virtual event, enable-check).
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.GetOption('main','Theme','name')
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        """Tag lineno as a breakpoint and notify the debugger (if any)."""
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        try:
            self.breakpoints.index(lineno)
        except ValueError:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except:  # but debugger may not be active right now....
            pass

    def set_breakpoint_here(self, event=None):
        """Set a breakpoint at the insert cursor's line."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        """Remove the breakpoint at the insert cursor's line, if any."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except:
            pass
        text.tag_remove("BREAK", "insert linestart",\
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except:
            pass

    def clear_file_breaks(self):
        """Remove every breakpoint in this window (tags, list, debugger)."""
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except:
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        try:
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
        except OSError:
            lines = []
        try:
            # Rewrite the whole file, dropping this file's old entry and
            # appending the current breakpoint list (if non-empty).
            with open(self.breakpointPath, "w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except OSError as err:
            # Report the failure once per session, not on every save.
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                                       message='Unable to update breakpoint list:\n%s'
                                               % str(err),
                                       parent=self.text)

    def restore_file_breaks(self):
        self.text.update()   # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    # NOTE(review): eval() trusts the breakpoint file's
                    # contents; it is a user-owned config file, but confirm
                    # this is acceptable before reusing the pattern.
                    breakpoint_linenumbers = eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        # ranges is a flat sequence of alternating (start, stop) Tk text
        # indices; expand each pair into its covered line numbers.
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

    # XXX 13 Dec 2002 KBK Not used currently
    #     def saved_change_hook(self):
    #         "Extend base method - clear breaks if module is modified"
    #         if not self.get_saved():
    #             self.clear_file_breaks()
    #         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # override FileList's class variable, instances return PyShellEditorWindow
    # instead of EditorWindow when new edit windows are created.
    EditorWindow = PyShellEditorWindow

    # The single shared shell window; created lazily by open_shell().
    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create and start a new one."""
        if self.pyshell:
            self.pyshell.top.wakeup()
        else:
            self.pyshell = PyShell(self)
            if self.pyshell:
                if not self.pyshell.begin():
                    return None
        return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Everything before the I/O mark is history: mark it synced so the
        # base colorizer only reprocesses the live input region.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        """Extend base tags with the shell's stdin/stdout/stderr/console."""
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.GetOption('main','Theme','name')
        self.tagdefs.update({
            "stdin": {'background':None,'foreground':None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        })

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def _blocked(self, index):
        # True (after ringing the bell) when index lies before the I/O
        # mark; a TclError from compare() means "do not block".
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return True
        except TclError:
            pass
        return False

    def insert(self, index, chars, tags=None):
        if not self._blocked(index):
            UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        if not self._blocked(index1):
            UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        # The interpreter's poll loop catches EOFError and restarts the
        # subprocess (see ModifiedInterpreter.poll_subprocess).
        raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
    """Send interpreter output to the shell's stderr stream (overrides base)."""
    stream = self.tkconsole.stderr
    return stream.write(s)
def display_port_binding_error(self):
    """Tell the user the RPC socket could not be bound and how to work around it."""
    tkMessageBox.showerror(
        "Port Binding Error",
        "IDLE can't bind to a TCP/IP port, which is necessary to "
        "communicate with its Python execution server. This might be "
        "because no networking is installed on this computer. "
        "Run IDLE with the -n command line switch to start without a "
        "subprocess and refer to Help/IDLE Help 'Running without a "
        "subprocess' for further details.",
        parent=self.tkconsole.text)
def display_no_subprocess_error(self):
    """Report that the execution subprocess never connected back."""
    tkMessageBox.showerror(
        "Subprocess Startup Error",
        "IDLE's subprocess didn't make connection. Either IDLE can't "
        "start a subprocess or personal firewall software is blocking "
        "the connection.",
        parent=self.tkconsole.text)
def display_executing_dialog(self):
    """Tell the user a command is already running and must finish first."""
    tkMessageBox.showerror(
        "Already executing",
        "The Python Shell window is already executing a command; "
        "please wait until it is finished.",
        parent=self.tkconsole.text)
class PyShell(OutputWindow):
    """The interactive Python shell window.

    An OutputWindow specialized for interactive use: keyboard events are
    rebound to submit/recall input, sys.std* are replaced by pseudo-files
    that route through this window, and user code is executed through a
    ModifiedInterpreter (normally in a subprocess).
    """

    shell_title = "Python " + python_version() + " Shell"

    # Override classes
    ColorDelegator = ModifiedColorDelegator
    UndoDelegator = ModifiedUndoDelegator

    # Override menus
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("debug", "_Debug"),
        ("options", "_Options"),
        ("windows", "_Window"),
        ("help", "_Help"),
    ]

    # New classes
    from idlelib.IdleHistory import History

    def __init__(self, flist=None):
        if use_subprocess:
            ms = self.menu_specs
            if ms[2][0] != "shell":
                # Insert the Shell menu once, class-wide, before Debug.
                ms.insert(2, ("shell", "She_ll"))
        self.interp = ModifiedInterpreter(self)
        if flist is None:
            # Stand-alone shell: create our own root and file list.
            root = Tk()
            fixwordbreaks(root)
            root.withdraw()
            flist = PyShellFileList(root)
        #
        OutputWindow.__init__(self, flist, None, None)
        #
##        self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
        self.usetabs = True
        # indentwidth must be 8 when using tabs. See note in EditorWindow:
        self.indentwidth = 8
        self.context_use_ps1 = True
        #
        text = self.text
        text.configure(wrap="char")
        # Rebind editing events to shell-specific handlers.
        text.bind("<<newline-and-indent>>", self.enter_callback)
        text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
        text.bind("<<interrupt-execution>>", self.cancel_callback)
        text.bind("<<end-of-file>>", self.eof_callback)
        text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
        text.bind("<<toggle-debugger>>", self.toggle_debugger)
        text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
        if use_subprocess:
            text.bind("<<view-restart>>", self.view_restart_mark)
            text.bind("<<restart-shell>>", self.restart_shell)
        #
        # Save the real std streams so _close() can restore them.
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.save_stdin = sys.stdin
        from idlelib import IOBinding
        self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
        self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
        self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
        self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
        if not use_subprocess:
            sys.stdout = self.stdout
            sys.stderr = self.stderr
            sys.stdin = self.stdin
        try:
            # page help() text to shell.
            import pydoc # import must be done here to capture i/o rebinding.
            # XXX KBK 27Dec07 use a textView someday, but must work w/o subproc
            pydoc.pager = pydoc.plainpager
        except:
            sys.stderr = sys.__stderr__
            raise
        #
        self.history = self.History(self.text)
        #
        self.pollinterval = 50  # millisec

    def get_standard_extension_names(self):
        return idleConf.GetExtensions(shell_only=True)

    # Shell state flags: class-level defaults, toggled per instance.
    reading = False
    executing = False
    canceled = False
    endoffile = False
    closing = False
    _stop_readline_flag = False

    def set_warning_stream(self, stream):
        global warning_stream
        warning_stream = stream

    def get_warning_stream(self):
        return warning_stream

    def toggle_debugger(self, event=None):
        """Menu callback: turn the debugger on/off (refused while executing)."""
        if self.executing:
            tkMessageBox.showerror("Don't debug now",
                "You can only toggle the debugger when idle",
                parent=self.text)
            self.set_debugger_indicator()
            return "break"
        else:
            db = self.interp.getdebugger()
            if db:
                self.close_debugger()
            else:
                self.open_debugger()

    def set_debugger_indicator(self):
        # Keep the menu checkbutton variable in sync with reality.
        db = self.interp.getdebugger()
        self.setvar("<<toggle-debugger>>", not not db)

    def toggle_jit_stack_viewer(self, event=None):
        pass # All we need is the variable

    def close_debugger(self):
        """Shut down the (possibly remote) debugger and restore the prompt."""
        db = self.interp.getdebugger()
        if db:
            self.interp.setdebugger(None)
            db.close()
            if self.interp.rpcclt:
                RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
            self.resetoutput()
            self.console.write("[DEBUG OFF]\n")
            sys.ps1 = ">>> "
            self.showprompt()
        self.set_debugger_indicator()

    def open_debugger(self):
        """Start the debugger GUI (remote when a subprocess is in use)."""
        if self.interp.rpcclt:
            dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
                                                           self)
        else:
            dbg_gui = Debugger.Debugger(self)
        self.interp.setdebugger(dbg_gui)
        dbg_gui.load_breakpoints()
        sys.ps1 = "[DEBUG ON]\n>>> "
        self.showprompt()
        self.set_debugger_indicator()

    def beginexecuting(self):
        "Helper for ModifiedInterpreter"
        self.resetoutput()
        self.executing = 1

    def endexecuting(self):
        "Helper for ModifiedInterpreter"
        self.executing = 0
        self.canceled = 0
        self.showprompt()

    def close(self):
        "Extend EditorWindow.close()"
        if self.executing:
            response = tkMessageBox.askokcancel(
                "Kill?",
                "The program is still running!\n Do you want to kill it?",
                default="ok",
                parent=self.text)
            if response is False:
                return "cancel"
        self.stop_readline()
        self.canceled = True
        self.closing = True
        return EditorWindow.close(self)

    def _close(self):
        "Extend EditorWindow._close(), shut down debugger and execution server"
        self.close_debugger()
        if use_subprocess:
            self.interp.kill_subprocess()
        # Restore std streams
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        sys.stdin = self.save_stdin
        # Break cycles
        self.interp = None
        self.console = None
        self.flist.pyshell = None
        self.history = None
        EditorWindow._close(self)

    def ispythonsource(self, filename):
        "Override EditorWindow method: never remove the colorizer"
        return True

    def short_title(self):
        return self.shell_title

    COPYRIGHT = \
          'Type "copyright", "credits" or "license()" for more information.'

    def begin(self):
        """Print the startup banner and the first prompt; start the subprocess."""
        self.text.mark_set("iomark", "insert")
        self.resetoutput()
        if use_subprocess:
            nosub = ''
            client = self.interp.start_subprocess()
            if not client:
                self.close()
                return False
        else:
            nosub = ("==== No Subprocess ====\n\n" +
                     "WARNING: Running IDLE without a Subprocess is deprecated\n" +
                     "and will be removed in a later version. See Help/IDLE Help\n" +
                     "for details.\n\n")
            sys.displayhook = rpc.displayhook
        self.write("Python %s on %s\n%s\n%s" %
                   (sys.version, sys.platform, self.COPYRIGHT, nosub))
        self.text.focus_force()
        self.showprompt()
        import tkinter
        tkinter._default_root = None # 03Jan04 KBK What's this?
        return True

    def stop_readline(self):
        if not self.reading:  # no nested mainloop to exit.
            return
        self._stop_readline_flag = True
        self.top.quit()

    def readline(self):
        """Block in a nested mainloop until the user submits a line of input."""
        save = self.reading
        try:
            self.reading = 1
            self.top.mainloop()  # nested mainloop()
        finally:
            self.reading = save
        if self._stop_readline_flag:
            self._stop_readline_flag = False
            return ""
        line = self.text.get("iomark", "end-1c")
        if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
            line = "\n"
        self.resetoutput()
        if self.canceled:
            self.canceled = 0
            if not use_subprocess:
                raise KeyboardInterrupt
        if self.endoffile:
            self.endoffile = 0
            line = ""
        return line

    def isatty(self):
        return True

    def cancel_callback(self, event=None):
        """Ctrl-C handler: interrupt a running command or cancel pending input."""
        try:
            if self.text.compare("sel.first", "!=", "sel.last"):
                return  # Active selection -- always use default binding
        except:
            pass
        if not (self.executing or self.reading):
            # Nothing running: just show the interrupt and a fresh prompt.
            self.resetoutput()
            self.interp.write("KeyboardInterrupt\n")
            self.showprompt()
            return "break"
        self.endoffile = 0
        self.canceled = 1
        if (self.executing and self.interp.rpcclt):
            if self.interp.getdebugger():
                self.interp.restart_subprocess()
            else:
                self.interp.interrupt_subprocess()
        if self.reading:
            self.top.quit()  # exit the nested mainloop() in readline()
        return "break"

    def eof_callback(self, event):
        """Ctrl-D handler: close the shell, or signal EOF to readline()."""
        if self.executing and not self.reading:
            return  # Let the default binding (delete next char) take over
        if not (self.text.compare("iomark", "==", "insert") and
                self.text.compare("insert", "==", "end-1c")):
            return  # Let the default binding (delete next char) take over
        if not self.executing:
            self.resetoutput()
            self.close()
        else:
            self.canceled = 0
            self.endoffile = 1
            self.top.quit()
        return "break"

    def linefeed_callback(self, event):
        # Insert a linefeed without entering anything (still autoindented)
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        return "break"

    def enter_callback(self, event):
        """Return-key handler: recall old input or submit the current line."""
        if self.executing and not self.reading:
            return  # Let the default binding (insert '\n') take over
        # If some text is selected, recall the selection
        # (but only if this before the I/O mark)
        try:
            sel = self.text.get("sel.first", "sel.last")
            if sel:
                if self.text.compare("sel.last", "<=", "iomark"):
                    self.recall(sel, event)
                    return "break"
        except:
            pass
        # If we're strictly before the line containing iomark, recall
        # the current line, less a leading prompt, less leading or
        # trailing whitespace
        if self.text.compare("insert", "<", "iomark linestart"):
            # Check if there's a relevant stdin range -- if so, use it
            prev = self.text.tag_prevrange("stdin", "insert")
            if prev and self.text.compare("insert", "<", prev[1]):
                self.recall(self.text.get(prev[0], prev[1]), event)
                return "break"
            next = self.text.tag_nextrange("stdin", "insert")
            if next and self.text.compare("insert lineend", ">=", next[0]):
                self.recall(self.text.get(next[0], next[1]), event)
                return "break"
            # No stdin mark -- just get the current line, less any prompt
            indices = self.text.tag_nextrange("console", "insert linestart")
            if indices and \
               self.text.compare(indices[0], "<=", "insert linestart"):
                self.recall(self.text.get(indices[1], "insert lineend"), event)
            else:
                self.recall(self.text.get("insert linestart", "insert lineend"), event)
            return "break"
        # If we're between the beginning of the line and the iomark, i.e.
        # in the prompt area, move to the end of the prompt
        if self.text.compare("insert", "<", "iomark"):
            self.text.mark_set("insert", "iomark")
        # If we're in the current input and there's only whitespace
        # beyond the cursor, erase that whitespace first
        s = self.text.get("insert", "end-1c")
        if s and not s.strip():
            self.text.delete("insert", "end-1c")
        # If we're in the current input before its last line,
        # insert a newline right at the insert point
        if self.text.compare("insert", "<", "end-1c linestart"):
            self.newline_and_indent_event(event)
            return "break"
        # We're in the last line; append a newline and submit it
        self.text.mark_set("insert", "end-1c")
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        self.text.tag_add("stdin", "iomark", "end-1c")
        self.text.update_idletasks()
        if self.reading:
            self.top.quit()  # Break out of recursive mainloop()
        else:
            self.runit()
        return "break"

    def recall(self, s, event):
        """Copy previously-entered text *s* to the input area, re-indented."""
        # remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '' , s)
        s = re.sub(r'\n\s*$', '', s)
        lines = s.split('\n')
        self.text.undo_block_start()
        try:
            self.text.tag_remove("sel", "1.0", "end")
            self.text.mark_set("insert", "end-1c")
            prefix = self.text.get("insert linestart", "insert")
            if prefix.rstrip().endswith(':'):
                self.newline_and_indent_event(event)
                prefix = self.text.get("insert linestart", "insert")
            self.text.insert("insert", lines[0].strip())
            if len(lines) > 1:
                orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
                new_base_indent  = re.search(r'^([ \t]*)', prefix).group(0)
                for line in lines[1:]:
                    if line.startswith(orig_base_indent):
                        # replace orig base indentation with new indentation
                        line = new_base_indent + line[len(orig_base_indent):]
                    self.text.insert('insert', '\n'+line.rstrip())
        finally:
            self.text.see("insert")
            self.text.undo_block_stop()

    def runit(self):
        """Submit the text between iomark and end to the interpreter."""
        line = self.text.get("iomark", "end-1c")
        # Strip off last newline and surrounding whitespace.
        # (To allow you to hit return twice to end a statement.)
        i = len(line)
        while i > 0 and line[i-1] in " \t":
            i = i-1
        if i > 0 and line[i-1] == "\n":
            i = i-1
        while i > 0 and line[i-1] in " \t":
            i = i-1
        line = line[:i]
        self.interp.runsource(line)

    def open_stack_viewer(self, event=None):
        """Open a stack browser on sys.last_traceback (local or remote)."""
        if self.interp.rpcclt:
            return self.interp.remote_stack_viewer()
        try:
            sys.last_traceback
        except:
            tkMessageBox.showerror("No stack trace",
                "There is no stack trace yet.\n"
                "(sys.last_traceback is not defined)",
                parent=self.text)
            return
        from idlelib.StackViewer import StackBrowser
        StackBrowser(self.root, self.flist)

    def view_restart_mark(self, event=None):
        self.text.see("iomark")
        self.text.see("restart")

    def restart_shell(self, event=None):
        "Callback for Run/Restart Shell Cntl-F6"
        self.interp.restart_subprocess(with_cwd=True)

    def showprompt(self):
        """Write sys.ps1 (if any) and position the insert cursor after it."""
        self.resetoutput()
        try:
            s = str(sys.ps1)
        except:
            s = ""
        self.console.write(s)
        self.text.mark_set("insert", "end-1c")
        self.set_line_and_column()
        self.io.reset_undo()

    def resetoutput(self):
        """Store pending input in history and move iomark past it."""
        source = self.text.get("iomark", "end-1c")
        if self.history:
            self.history.store(source)
        if self.text.get("end-2c") != "\n":
            self.text.insert("end-1c", "\n")
        self.text.mark_set("iomark", "end-1c")
        self.set_line_and_column()

    def write(self, s, tags=()):
        """Write *s* at iomark with the given tags; reject non-BMP text."""
        if isinstance(s, str) and len(s) and max(s) > '\uffff':
            # Tk doesn't support outputting non-BMP characters
            # Let's assume what printed string is not very long,
            # find first non-BMP character and construct informative
            # UnicodeEncodeError exception.
            for start, char in enumerate(s):
                if char > '\uffff':
                    break
            raise UnicodeEncodeError("UCS-2", char, start, start+1,
                                     'Non-BMP character not supported in Tk')
        try:
            self.text.mark_gravity("iomark", "right")
            count = OutputWindow.write(self, s, tags, "iomark")
            self.text.mark_gravity("iomark", "left")
        except:
            raise ###pass  # ### 11Aug07 KBK if we are expecting exceptions
                           # let's find out what they are and be specific.
        if self.canceled:
            self.canceled = 0
            if not use_subprocess:
                raise KeyboardInterrupt
        return count

    def rmenu_check_cut(self):
        # Cutting is only allowed in the editable input area.
        try:
            if self.text.compare('sel.first', '<', 'iomark'):
                return 'disabled'
        except TclError: # no selection, so the index 'sel.first' doesn't exist
            return 'disabled'
        return super().rmenu_check_cut()

    def rmenu_check_paste(self):
        # Pasting is only allowed at or after the I/O mark.
        if self.text.compare('insert','<','iomark'):
            return 'disabled'
        return super().rmenu_check_paste()
class PseudoFile(io.TextIOBase):
    """File-like base class that forwards text I/O to a PyShell window.

    *shell* is the owning window; *tags* is the stream name (also used
    as the text tag, e.g. "stdout"); *encoding* is reported to clients
    but never used for actual conversion.
    """

    def __init__(self, shell, tags, encoding=None):
        self.shell = shell
        self.tags = tags
        self._encoding = encoding

    def isatty(self):
        """Pretend to be a terminal so interactive prompts behave normally."""
        return True

    @property
    def encoding(self):
        return self._encoding

    @property
    def name(self):
        """Report a pseudo file name such as '<stdout>'."""
        return '<%s>' % self.tags
class PseudoOutputFile(PseudoFile):
    """Writable pseudo-file bound to one output stream of the shell."""

    def writable(self):
        return True

    def write(self, s):
        """Route *s* to the shell window; only str (and subclasses) accepted."""
        if self.closed:
            raise ValueError("write to closed file")
        if not isinstance(s, str):
            raise TypeError('must be str, not ' + type(s).__name__)
        if type(s) is not str:
            # Downcast str subclasses to plain str.  See issue #19481.
            s = str.__str__(s)
        return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
    """Readable pseudo-file: a stdin replacement that pulls lines from the
    shell's readline() and buffers any excess between calls."""

    def __init__(self, shell, tags, encoding=None):
        PseudoFile.__init__(self, shell, tags, encoding)
        self._line_buffer = ''  # text fetched from the shell but not yet consumed

    def readable(self):
        return True

    def read(self, size=-1):
        """Read up to *size* characters (everything until EOF if size < 0)."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, int):
            raise TypeError('must be int, not ' + type(size).__name__)
        result = self._line_buffer
        self._line_buffer = ''
        if size < 0:
            while True:
                line = self.shell.readline()
                if not line: break
                result += line
        else:
            while len(result) < size:
                line = self.shell.readline()
                if not line: break
                result += line
            # Keep anything beyond the requested size for the next call.
            self._line_buffer = result[size:]
            result = result[:size]
        return result

    def readline(self, size=-1):
        """Read one line, or at most *size* characters of it."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, int):
            raise TypeError('must be int, not ' + type(size).__name__)
        line = self._line_buffer or self.shell.readline()
        if size < 0:
            size = len(line)
        eol = line.find('\n', 0, size)
        if eol >= 0:
            # Never return more than a single line.
            size = eol + 1
        self._line_buffer = line[size:]
        return line[:size]

    def close(self):
        # Closing stdin closes the whole shell window.
        self.shell.close()
# Command-line help text: printed by the -h option and on option errors.
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
    """Parse the command line and start the IDLE application (editor/shell)."""
    global flist, root, use_subprocess

    capture_warnings(True)
    use_subprocess = True
    enable_shell = False
    enable_edit = False
    debug = False
    cmd = None
    script = None
    startup = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
    except getopt.error as msg:
        print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
        sys.exit(2)
    for o, a in opts:
        if o == '-c':
            cmd = a
            enable_shell = True
        if o == '-d':
            debug = True
            enable_shell = True
        if o == '-e':
            enable_edit = True
        if o == '-h':
            sys.stdout.write(usage_msg)
            sys.exit()
        if o == '-i':
            enable_shell = True
        if o == '-n':
            print(" Warning: running IDLE without a subprocess is deprecated.",
                  file=sys.stderr)
            use_subprocess = False
        if o == '-r':
            script = a
            if os.path.isfile(script):
                pass
            else:
                print("No script file: ", script)
                sys.exit()
            enable_shell = True
        if o == '-s':
            startup = True
            enable_shell = True
        if o == '-t':
            PyShell.shell_title = a
            enable_shell = True
    if args and args[0] == '-':
        # "-" means: read the script to run from stdin.
        cmd = sys.stdin.read()
        enable_shell = True
    # process sys.argv and sys.path:
    for i in range(len(sys.path)):
        sys.path[i] = os.path.abspath(sys.path[i])
    if args and args[0] == '-':
        sys.argv = [''] + args[1:]
    elif cmd:
        sys.argv = ['-c'] + args
    elif script:
        sys.argv = [script] + args
    elif args:
        enable_edit = True
        # Put the directories of the files being edited on sys.path.
        pathx = []
        for filename in args:
            pathx.append(os.path.dirname(filename))
        for dir in pathx:
            dir = os.path.abspath(dir)
            if not dir in sys.path:
                sys.path.insert(0, dir)
    else:
        dir = os.getcwd()
        if dir not in sys.path:
            sys.path.insert(0, dir)
    # check the IDLE settings configuration (but command line overrides)
    edit_start = idleConf.GetOption('main', 'General',
                                    'editor-on-startup', type='bool')
    enable_edit = enable_edit or edit_start
    enable_shell = enable_shell or not enable_edit

    # start editor and/or shell windows:
    root = Tk(className="Idle")

    # set application icon
    icondir = os.path.join(os.path.dirname(__file__), 'Icons')
    if system() == 'Windows':
        iconfile = os.path.join(icondir, 'idle.ico')
        root.wm_iconbitmap(default=iconfile)
    elif TkVersion >= 8.5:
        ext = '.png' if TkVersion >= 8.6 else '.gif'
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in (16, 32, 48)]
        icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
        root.wm_iconphoto(True, *icons)

    fixwordbreaks(root)
    root.withdraw()
    flist = PyShellFileList(root)
    macosxSupport.setupApp(root, flist)

    if macosxSupport.isAquaTk():
        # There are some screwed up <2> class bindings for text
        # widgets defined in Tk which we need to do away with.
        # See issue #24801.
        root.unbind_class('Text', '<B2>')
        root.unbind_class('Text', '<B2-Motion>')
        root.unbind_class('Text', '<<PasteSelection>>')

    if enable_edit:
        if not (cmd or script):
            for filename in args[:]:
                if flist.open(filename) is None:
                    # filename is a directory actually, disconsider it
                    args.remove(filename)
            if not args:
                flist.new()

    if enable_shell:
        shell = flist.open_shell()
        if not shell:
            return # couldn't open shell
        if macosxSupport.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched the shell window will open just in front of
            # the file she wants to see. Lower the interpreter window when
            # there are open files.
            shell.top.lower()
    else:
        shell = flist.pyshell

    # Handle remaining options. If any of these are set, enable_shell
    # was set also, so shell must be true to reach here.
    if debug:
        shell.open_debugger()
    if startup:
        filename = os.environ.get("IDLESTARTUP") or \
                   os.environ.get("PYTHONSTARTUP")
        if filename and os.path.isfile(filename):
            shell.interp.execfile(filename)
    if cmd or script:
        # Make the user's sys.argv reflect the command/script being run.
        shell.interp.runcommand("""if 1:
            import sys as _sys
            _sys.argv = %r
            del _sys
            \n""" % (sys.argv,))
        if cmd:
            shell.interp.execsource(cmd)
        elif script:
            shell.interp.prepend_syspath(script)
            shell.interp.execfile(script)
    elif shell:
        # If there is a shell window and no cmd or script in progress,
        # check for problematic OS X Tk versions and print a warning
        # message in the IDLE shell window; this is less intrusive
        # than always opening a separate window.
        tkversionwarning = macosxSupport.tkVersionWarning(root)
        if tkversionwarning:
            shell.interp.runcommand("print('%s')" % tkversionwarning)

    while flist.inversedict:  # keep IDLE running while files are open.
        root.mainloop()
    root.destroy()
    capture_warnings(False)
if __name__ == "__main__":
    # Let "import PyShell" find this module when run as a script.
    sys.modules['PyShell'] = sys.modules['__main__']
    main()

capture_warnings(False)  # Make sure turned off; see issue 18081
|
ChatRoom1.1Server.py | #!/usr/bin/env python
# -.- coding: utf-8 -.-y
import threading
import subprocess
import Queue
import socket
import time
import sys
import os
import datetime
from cmd import Cmd
#Created by Camerin Figueroa
cv = "1.1"
q = Queue.Queue()
q.put([[]])
errors = Queue.Queue()
errors.put([])
motd = Queue.Queue()
quit = Queue.Queue()
quit.put("")
mesg = Queue.Queue()
mesg.put("")
online = Queue.Queue()
online.put([])
print """\33[91m
═════════════════════════════════════════════════════════
███████ ██████ ███████
█ █ █ █ ║
█ █════╗ █ ╔═█ ║
█═════════════█ ╚█ ║█═══╝
█ ██████ ║█
█ █ █ ╚╗█ ╔═══════Server
█════════╗ █ █ ╚═█ ║
███████ ║ █ █ ███████
Chat Room Client════════╝
═════════════════════════════════════════════════════════
\33[92m"""
port = 99999
configcont = "#Replace Everything behind = sign\n#Ex before: config = edit\n#Ex after: config = configinput\n\nmotd = Hello world This is a new Chat Room Server made by Camerin Figueroa\nport = 22550\n"
if os.path.isfile('./crsconfig.txt') == True:
f = open('./crsconfig.txt', 'r')
configuration = f.read()
f.close()
configuration = configuration.split("\n")
for line in configuration:
if "motd =" in line:
motd.put(line[10:])
else:
pass
if "port = " in line:
port = int(line[7:])
else:
pass
else:
f = open('./crsconfig.txt', 'w')
f.write(configcont)
f.close()
print "Please edit crsconfig.txt"
sys.exit()
if port != 99999:
pass
else:
f = open('./crsconfig.txt', 'w')
f.write(configcont)
f.close()
print "Please edit crsconfig.txt"
sys.exit()
def console(q, errors, motd):
    """Thread target: run the interactive admin prompt.

    Only starts when the module is executed directly (the prompt reads
    from the process's stdin).
    """
    if __name__ != '__main__':
        return
    shell = consoleprompt()
    shell.prompt = '> '
    shell.cmdloop('Starting prompt...')
class consoleprompt(Cmd):
    # Interactive admin console for the running chat server.  Each do_*
    # method implements one console command; state is shared with the
    # network threads through the module-level Queue objects, used as
    # one-slot shared variables (get() then put() back).

    def do_say(self, args):
        # Broadcast a chat message as the operator ("OP").
        if args == "" or args == " ":
            print "say messagetosay\nor\nsay Message to say"
        else:
            curtime = str(int(time.time()))
            curmes = mesg.get()
            # Only one broadcast per wall-clock second: if the current
            # slot already carries this second's timestamp, drop ours.
            if curmes.split(":")[0] == curtime:
                mesg.put(curmes)
            else:
                db = q.get()
                db[1].append("OP" + ":" + args)
                q.put(db)
                mesg.put(curtime + ":" + "OP" + ":" + args)

    def do_printdb(self, args):
        # Dump the in-memory message database to the console.
        global q
        self.quit = quit  # NOTE(review): appears unused; kept as-is.
        db = q.get()
        q.put(db)
        tick = 0
        for line in db:
            for lin in line:
                if tick == 0:
                    # First db entry is the list of [name, address] records.
                    for li in lin:
                        print li
                    tick = 1
                else:
                    print lin

    def do_online(self, args):
        # List the currently connected usernames.
        global online
        on = online.get()
        online.put(on)
        print "Online:"
        for username in on:
            print username

    def do_printerrors(self, args):
        # Show the errors collected by the worker threads.
        global errors
        erlist = errors.get()
        errors.put(erlist)
        print "Errors:"
        for error in erlist:
            print error

    def do_motd(self, args):
        # Change the message of the day: "motd -c <new text>".
        if "-c" in args:
            global motd
            oldmotd = motd.get()
            motd.put(args[3:])
            print "motd changed from " + oldmotd + " to " + args[3:]
        else:
            print "add -c newcmd"

    def do_quit(self, args):
        # Signal all client threads to disconnect, then hard-exit.
        global quit
        print "Quitting.\33[97m"
        quit.get()
        quit.put("quitting:")
        time.sleep(2)  # give client threads time to send "quitting:"
        os._exit(0)
class Server(object):
    """Threaded chat-room server (protocol version ``cv``).

    All mutable state is shared with the console and logging threads
    through one-slot Queue objects: readers call get() and immediately
    put() the value back, which doubles as a crude lock.
    """

    def __init__(self, host, port, q, motd, errors, mesg, quit, online):
        self.motd = motd      # one-slot queue: message of the day
        self.quit = quit      # one-slot queue: "quitting:" on shutdown
        self.errors = errors  # one-slot queue: list of error strings
        self.host = host
        self.port = port
        self.q = q            # one-slot queue: message database
        self.mesg = mesg      # one-slot queue: last message "time:user:text"
        self.online = online  # one-slot queue: list of online usernames
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def listen(self):
        """Accept clients forever, one handler thread per connection."""
        self.sock.listen(5)
        while True:
            try:
                client, address = self.sock.accept()
                client.settimeout(60)
                threading.Thread(target = self.listenToClient,args = (client,address)).start()
            except:
                # A failed accept must not kill the accept loop.
                pass

    def listenToClient(self, client, address):
        """Serve one connection: version check, then either the read-only
        "screen" feed or a full chat session."""
        global cv
        cmd = self.motd.get()
        self.motd.put(cmd)
        rcv = client.recv(128)
        if str(cv) != rcv[3:] and "cv:" in rcv:
            # Client/server protocol version mismatch.
            client.send("comp:0:" + str(cv))
        elif rcv == "screen:":
            # Read-only viewer: stream new messages and the online list.
            online = self.online.get()
            self.online.put(online)
            client.send(str(online))
            cmessage = self.mesg.get()
            self.mesg.put(cmessage)
            lm = cmessage
            tick = 0
            qi = False
            try:
                while not qi:
                    cmessage = self.mesg.get()
                    self.mesg.put(cmessage)
                    online = self.online.get()
                    self.online.put(online)
                    if cmessage != lm:
                        csend = cmessage.split(":")
                        client.send(csend[1] + ":" + csend[2])
                        lm = cmessage
                    quit = self.quit.get()
                    self.quit.put(quit)
                    if tick == 1000:
                        # Periodic keep-alive: push the online list and
                        # expect an acknowledgement.
                        client.send("online:" + str(online))
                        onlinecheck = client.recv(1024)
                        if onlinecheck == "quitting:":
                            quit = "quitting:"
                            qi = True
                        tick = 0
                    tick = tick + 1
                    if quit == "quitting:":
                        client.send("quitting:")
                        client.close()
                        qi = True
                    time.sleep(.001)
            except:
                error = self.errors.get()
                error.append("A screen raised an error")
                self.errors.put(error)
        else:
            # Full chat client: negotiate a username, then relay messages.
            client.send("comp:1")
            name = client.recv(1024)
            if "user:" not in name:
                client.send("error:wrong type of packet received. 'user:' was not within the packet")
                erlist = errors.get()
                # BUG FIX: getpeername() returns an (addr, port) tuple; the
                # old code concatenated it to a str and raised TypeError
                # instead of recording the error.
                erlist.append(str(client.getpeername()) + ":wrong type of packet received. 'user:' was not within the packet")
                errors.put(erlist)
            else:
                name = name[5:]
                used = False
                online = self.online.get()
                self.online.put(online)
                for user in online:
                    if user == name:
                        used = True
                if used:
                    client.send("error:Username has already been used before.")
                    client.close()
                    erlist = errors.get()
                    erlist.append(name + ":" + name + ":Username has already been used before.")
                    errors.put(erlist)
                    check = False
                else:
                    client.send("user:" + name)
                    check = True
                if check:
                    # Register the user in the message database.  db[0] holds
                    # [name, address] records; db[i] (i >= 1) holds messages.
                    db = q.get()
                    q.put(db)
                    leng = 1
                    for nam in db[0]:
                        if name in nam:
                            nl = leng
                        else:
                            # NOTE(review): 'leng = leng' is a no-op; this was
                            # probably meant to be 'leng = leng + 1', so every
                            # user's messages end up in db[1].  Kept as-is to
                            # preserve the on-disk log format.
                            leng = leng
                    if 'nl' in locals():
                        # Returning user: remember the new address too.
                        db[0][nl - 1].append(address)
                    else:
                        nl = leng
                        db.append([name,])
                        db[0].append([name, address])
                    q.get()
                    q.put(db)
                    try:
                        online = self.online.get()
                        online.append(name)
                        self.online.put(online)
                        warntim = 0
                        while True:
                            rmesg = client.recv(1024)
                            if "" == rmesg:
                                pass
                            elif "/help" == rmesg:
                                pass
                            elif "quitting:" == rmesg:
                                # BUG FIX: 'online' is a plain list at this
                                # point; the old code called .get() on it and
                                # relied on the resulting AttributeError to
                                # fall into the cleanup handler below.  Do the
                                # cleanup explicitly and end the session.
                                on = self.online.get()
                                if name in on:
                                    on.remove(name)
                                self.online.put(on)
                                client.close()
                                return
                            elif "ping:" == rmesg:
                                pass
                            else:
                                curtime = str(int(time.time()))
                                curmes = self.mesg.get()
                                if curmes.split(":")[0] == curtime:
                                    # Rate limit: one broadcast per second;
                                    # persistent offenders get disconnected.
                                    self.mesg.put(curmes)
                                    warntim = warntim + 1
                                    if warntim == 100:
                                        client.close()
                                else:
                                    db = q.get()
                                    db[leng].append(name + ":" + rmesg[5:])
                                    q.put(db)
                                    self.mesg.put(curtime + ":" + name + ":" + rmesg[5:])
                    except:
                        # Connection dropped (or timed out): take the user
                        # off the online list.
                        online = self.online.get()
                        if name in online:
                            online.remove(name)
                        self.online.put(online)
def writeoutput(q, errors):
    """Background thread: every 10 seconds dump the message database and
    the error list to a timestamped log file under ./logs.

    *q* and *errors* are the one-slot Queue objects holding the message
    database and the error list respectively.
    """
    if not os.path.isdir("./logs"):
        # Create the log directory directly instead of shelling out to
        # an external 'mkdir' via subprocess.
        os.mkdir("./logs")
    tim = str(datetime.datetime.now())
    tim = tim.replace(" ", "")
    # NOTE(review): the timestamp still contains ':' characters, which is
    # not a legal file name on Windows -- confirm the target platform.
    log = "./logs/log" + tim + ".txt"
    while True:
        try:
            time.sleep(10)
            tta = q.get()
            q.put(tta)
            error = errors.get()
            errors.put(error)
            fw = "Users:\n"
            errs = ""
            for err in error:
                errs = errs + err + "\n"
            for line in tta:
                for lin in line:
                    fw = fw + str(lin) + "\n"
            fw = fw + "═════════════════════════════════════════════════════════\nErrors:\n" + errs
            f = open(log, 'w')
            f.write(fw)
            f.close()
        except:
            # Log writing is best-effort; record the failure and keep going.
            error = errors.get()
            error.append("Error while writing output\n")
            errors.put(error)
if __name__ == "__main__":
    # Start the log-writer and admin-console threads, then serve forever
    # on all interfaces at the configured port (blocks in listen()).
    threading.Thread(target = writeoutput,args = (q,errors)).start()
    threading.Thread(target = console,args = (q, errors, motd)).start()
    Server('',port,q,motd,errors,mesg, quit, online).listen()
|
sock4-local.py | #coding=utf-8
import socket
import select
import socketserver
import logging
import json
#import time
from multiprocessing import Process
#import threading
# Load proxy settings from config.json (a JSON object).
with open('config.json', 'rb') as f:
    config = json.load(f)

serverdd = config['server']           # remote proxy server address
port = int(config['port'])            # remote proxy server port
localport = int(config['localport'])  # local listening port
localserver = config['localserver']   # local bind address
# Optional keys: use dict.get with a default instead of a bare
# try/except that swallowed every exception.
sockserver = config.get('sockserver', "")
sock5 = config.get('sock5', "0")      # "1" enables username/password auth
print(sock5)
pss = config['password']
# Single-byte key derived from the second character of the password;
# presumably consumed by the xorr() cipher helper -- TODO confirm.
key1 = int.from_bytes(pss[1].encode(),byteorder='big')
if sockserver == "":
    sockserver = localserver
class UDPSocks5Server(socketserver.BaseRequestHandler):
    # UDP associate handler: wraps one client datagram in the obfuscated
    # protocol (opcode 0x02 + password header) and relays it to the
    # remote server, returning the decoded reply.

    def handle(self):
        # For a UDP server, self.request is (datagram, listening socket).
        date,sockd = self.request
        ccc = self.client_address
        # Frame: opcode 0x02, password length, password, then the payload
        # with the 4-byte SOCKS UDP request header stripped.
        con=b'\x02'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+ date[4:]
        cop = xorr(con)
        server11 = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        server11.sendto(cop,(serverdd,port))
        data,server_addr1 = server11.recvfrom(1024*100)
        cop=xorr(data)
        sockd.sendto(cop,ccc)
        try:
            # NOTE(review): this relay loop looks copy-pasted from the TCP
            # handler and cannot run as written: 'server_addr1' is an
            # address tuple (not a socket), and 'client'/'remote' are never
            # defined in this scope, so the first iteration raises
            # NameError (silently logged below).  Needs a real UDP relay
            # loop before it can be trusted.
            fds = [sockd,server_addr1]
            while True:
                r,w,e = select.select(fds,[],[],5)
                if client in r:
                    cli_data = client.recv(1024 * 100)
                    cli_data_de = xorr(cli_data)
                    if len(cli_data) <= 0:
                        break
                    result = send_all(remote, cli_data_de)
                    if result < len(cli_data):
                        logging.warn("Failed pipping all data to target!!!")
                        break
                if remote in r:
                    remote_data = remote.recv(1024 * 100)
                    #remmote_data_en=remote_data
                    remote_data_en=xorr(remote_data)
                    #print(remote_data)
                    #print(remote_data_en)
                    if len(remote_data) <= 0:
                        break
                    result = send_all(client, remote_data_en)
                    if result < len(remote_data):
                        # NOTE(review): calls the logging *module*; would
                        # raise TypeError if ever reached.
                        logging("Failed pipping all data to client!!!")
                        break
        except Exception as e:
            logging.error(e)
        finally:
            client.close()
            remote.close()
class ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that services each SOCKS connection in its own thread."""
#StreamRequestHandler
class Socks5Server(socketserver.StreamRequestHandler):
def handle_tcp(self, client, remote):
    """Pump data between the SOCKS client and the remote proxy server.

    Client-to-remote traffic is XOR-obfuscated with xorr(); remote
    traffic is de-obfuscated before being returned to the client.  Both
    sockets are always closed on exit.
    """
    try:
        fds = [client,remote]
        while True:
            r,w,e = select.select(fds,[],[],5)
            if client in r:
                cli_data = client.recv(1024)
                cli_data_de = xorr(cli_data)
                if len(cli_data) <= 0:
                    break
                result = send_all(remote, cli_data_de)
                if result < len(cli_data):
                    # logging.warn is a deprecated alias; use warning().
                    logging.warning("Failed pipping all data to target!!!")
                    break
            if remote in r:
                remote_data = remote.recv(1024)
                remote_data_en=xorr(remote_data)
                if len(remote_data) <= 0:
                    break
                result = send_all(client, remote_data_en)
                if result < len(remote_data):
                    # BUG FIX: the old code called the logging *module*
                    # itself here ('logging(...)'), which raises TypeError.
                    logging.warning("Failed pipping all data to client!!!")
                    break
    except Exception as e:
        logging.error(e)
    finally:
        client.close()
        remote.close()
def handle(self):
client = self.request
ver,methods = client.recv(1),client.recv(1)
methods = client.recv(ord(methods))
if sock5=="1":
client.send(b'\x05\x02')
ver,len2 = client.recv(1),client.recv(1)
user = client.recv(ord(len2))
len2 = client.recv(1)
passwd = client.recv(ord(len2))
if user.decode()!=pss or passwd.decode()!=pss:
client.send(b'\x01\x01')
client.close()
return
else:
client.send(b'\x01\x00')
else:
client.send(b'\x05\x00')
ver,cmd,rsv,atype = client.recv(1),client.recv(1),client.recv(1),client.recv(1)
print(ver)
#print(ord(cmd))
#if ord(cmd) is not 1:
#client.close()
#return
# 判断是否支持atype,目前不支持IPv6
# 比特流转化成整型 big表示编码为大端法,
if(ord(cmd)==1):
if ord(atype) == 1:
# IPv4
ip=client.recv(4)
pp=client.recv(2)
remote_addr = socket.inet_ntoa(ip)
remote_port = int.from_bytes(pp, 'big')
#con=b'\x01'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+b'\x02'+ip+pp
con=b'\x01'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+b'\x02'+ip+pp
elif ord(atype) == 3:
# 域名
#ip=client.recv(4)
#pp=client.recv(2)
len1=client.recv(1)
addr_len = int.from_bytes(len1, byteorder = 'big')
remote_addr = client.recv(addr_len)
print(remote_addr)
pp=client.recv(2)
remote_port = int.from_bytes(pp, byteorder = 'big')
#con=b'\x01'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+b'\x01'+len1+encode1(remote_addr,0)+pp
con=b'\x01'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+b'\x01'+len1+xorr(remote_addr)+pp
else:
#不支持则关闭连接
client.close()
return
remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logging.info('[+] %s:%dConnect to --> %s:%d' % (self.client_address[0], self.client_address[1], remote_addr, remote_port))
#remote.connect((remote_addr, remote_port))
remote.connect((serverdd,port))
print(con)
remote.send(con)
reply = b"\x05\x00\x00\x01" + socket.inet_aton(sockserver) + (2222).to_bytes(2, byteorder = 'big')
client.send(reply)
if(remote.recv(2) == b'\x03\x00'):
print("handle ok")
self.handle_tcp(client,remote)
if(ord(cmd)==3):
print("UDP-Newconnection")
remotetcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remotetcp.connect((serverdd,port))
remotetcp.send(b'\x02'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode())
bindport1=remotetcp.recv(1024*100)
bindport2=int.from_bytes(bindport1,byteorder='big')
remoteudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
remoteudp.bind(('0.0.0.0',0))
print(bindport1)
#remoteudp.sendto(xorr(b'\x01'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+bindport1+b'\x01\x02\x03\x04'),(serverdd,port))
#tlo = remoteudp.recvfrom(1024*100)
#tlo1=tlo[0]
#print(tlo)
#if tlo1==b'\x03\x01':
#print('Hanle Udp OK')
#else:
#print('No! Cheak your password')
sockudp = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sockudp.bind(('0.0.0.0',0))
#print(b'\x05\x00\x00\x01\x00\x00\x00\x00'+sockudp.getsockname()[1].to_bytes(length=2,byteorder='big'))
client.send(b'\x05\x00\x00\x01'+socket.inet_aton(sockserver)+sockudp.getsockname()[1].to_bytes(length=2,byteorder='big'))
#global tyui
try:
fds = [sockudp,remoteudp,client]
while True:
r,w,e = select.select(fds,[],[],5)
for i in r:
if i is client:
if len(client.recv(1024))==0:
print('beak connection-OUT')
remoteudp.close()
sockudp.close()
client.close()
break
if i is remoteudp:
#dateback1=remoteudp.recvfrom(1024*100)
#date1=dateback1[0]
#sockudp.sendto(,)
dateback1=remoteudp.recvfrom(1024*100)
date1=dateback1[0]
backoo=xorr(date1)
print(backoo,end=' In\n')
sockudp.sendto(backoo,user)
if i is sockudp:
dateback2=sockudp.recvfrom(1024*100)
date2=dateback2[0]
user=dateback2[1]
data111= xorr(b'\x01'+len(pss).to_bytes(length=1,byteorder='big')+pss.encode()+bindport1+date2)
print(data111,end=' Out\n')
remoteudp.sendto(data111,(serverdd,port))
#dateback1=remoteudp.recvfrom(1024*100)
#date1=dateback1[0]
#backoo=xorr(date1)
#sockudp.sendto(backoo,user)
except Exception as e:
logging.error(e)
finally:
client.close()
remoteudp.close()
sockudp.close()
def xorr(data):
    """XOR every byte of *data* with the module-level key ``key1``.

    The cipher is symmetric: applying ``xorr`` twice returns the input.
    Assumes ``key1`` is an int in 0..255 — TODO confirm.

    The original built the result with repeated ``bytes`` concatenation,
    which is O(n^2); ``bytes(generator)`` is a single linear pass.
    """
    return bytes(b ^ key1 for b in data)
def encode1(data, m):
    """Obfuscate *data* (an iterable of byte values): each byte is XORed
    with 9 then shifted up by 4.

    Returns a ``str`` when *m* == 1, otherwise the UTF-8 encoded ``bytes``.

    Fixes two defects in the original: the quadratic ``q = q + ...`` string
    build, and ``j = q.encode()`` being recomputed per iteration (and
    unbound for empty input).
    """
    q = "".join(chr((i ^ 9) + 4) for i in data)
    if m == 1:
        return q
    return q.encode()
def decode1(data,m):
q = ""
for i in data:
tt = i -4
q=q+ chr( tt ^ 9)
#q=q+chr(i^9)
j=q.encode()
if( m == 1 ):
return q
else:
return j
def send_all(sock, data):
    """Send *data* over *sock* until every byte is written.

    Returns the total number of bytes sent (== ``len(data)`` on success),
    or the negative value if ``sock.send`` ever reports one.
    """
    total = 0
    while True:
        sent = sock.send(data[total:])
        if sent < 0:
            # Propagate a send-layer error code unchanged.
            return sent
        total += sent
        if total == len(data):
            return total
#def UDP():
#print("UDPrunning")
#UDPserver = socketserver.ThreadingUDPServer((localserver, localport), UDPSocks5Server)
#UDPserver.serve_forever()
def TCP():
    """Serve SOCKS5 clients forever on (localserver, localport); each
    connection is handled on its own thread by Socks5Server.

    NOTE(review): uses ``socketserver.ThreadingTCPServer`` directly rather
    than the local ``ThreadingTCPServer`` subclass defined above — confirm
    which was intended (behavior is the same).
    """
    server=socketserver.ThreadingTCPServer((localserver,localport),Socks5Server)
    server.serve_forever()
if __name__ == '__main__':
    try:
        # Start the threaded TCP front-end; UDP ASSOCIATE requests are
        # tunnelled through the same upstream port, hence "(UDP&TCP)".
        # ("Lintening" [sic] — runtime string left unchanged.)
        print('[+] Lintening(UDP&TCP) on port:%d' % localport)
        TCP()
    except Exception as e:
        logging.error(e)
|
sim.py | from serial import Serial
import threading
import config
import time
serial = Serial(config.SERIAL_PORT_SIM, config.SERIAL_BAUD_SIM)
NODE_MAX = 2
def loop_write():
    """Emit a fake node STATUS JSON line over the serial port every 50 ms.

    Addresses in the lower half of ``[0, NODE_MAX)`` report as ANCHOR
    nodes, the rest as TAG nodes.  The simulated location drifts on every
    message and the reporting address rotates every 20 messages.  Runs
    forever; intended to be the body of a writer thread.
    """
    address = 0
    location_x = 123.456
    location_y = 99.2222
    location_z = 789.123
    cnt = 0
    # One shared template (the original duplicated the whole format string
    # for the ANCHOR and TAG branches, differing only in the type field).
    template = ('{"type": "STATUS", "msg": {"address": "%s", "type": "%s", '
                '"location": [%f, %f, %f]}}\n')
    while True:
        node_type = 'ANCHOR' if address < NODE_MAX / 2 else 'TAG'
        # ``msg`` replaces the original local name ``str``, which shadowed
        # the builtin.
        msg = template % (address, node_type, location_x, location_y, location_z)
        serial.write(bytes(msg, 'utf-8'))
        time.sleep(0.05)
        location_x += 1
        location_y += 2
        location_z += 10
        cnt += 1
        if not (cnt % 20):
            address += 1
            address = address % NODE_MAX
def loop_read():
    """Forever echo every line received on the serial port to stdout."""
    while True:
        incoming = serial.readline()
        print(incoming)
# Run the writer and the reader concurrently; both loop forever, so the
# process must be interrupted to stop (threads are non-daemon).
thread_write = threading.Thread(target=loop_write)
thread_read = threading.Thread(target=loop_read)
thread_write.start()
thread_read.start()
test_flaskparser.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import threading
from werkzeug.exceptions import HTTPException
import mock
import pytest
from flask import Flask
from webargs import fields, ValidationError, missing
from webargs.flaskparser import parser, abort
from webargs.core import MARSHMALLOW_VERSION_INFO, json
from .apps.flask_app import app
from webargs.testing import CommonTestCase
class TestFlaskParser(CommonTestCase):
    """Run the shared webargs CommonTestCase suite against the Flask test
    app, plus Flask-specific checks for view-arg parsing."""

    def create_app(self):
        # Hook used by CommonTestCase to obtain the WSGI app under test.
        return app

    def test_parsing_view_args(self, testapp):
        res = testapp.get("/echo_view_arg/42")
        assert res.json == {"view_arg": 42}

    def test_parsing_invalid_view_arg(self, testapp):
        # An unparseable view arg must produce a 422 with a field error.
        res = testapp.get("/echo_view_arg/foo", expect_errors=True)
        assert res.status_code == 422
        assert res.json == {"view_arg": ["Not a valid integer."]}

    def test_use_args_with_view_args_parsing(self, testapp):
        res = testapp.get("/echo_view_arg_use_args/42")
        assert res.json == {"view_arg": 42}

    def test_use_args_on_a_method_view(self, testapp):
        res = testapp.post("/echo_method_view_use_args", {"val": 42})
        assert res.json == {"val": 42}

    def test_use_kwargs_on_a_method_view(self, testapp):
        res = testapp.post("/echo_method_view_use_kwargs", {"val": 42})
        assert res.json == {"val": 42}

    def test_use_kwargs_with_missing_data(self, testapp):
        # Missing optional kwargs must simply be omitted from the echo.
        res = testapp.post("/echo_use_kwargs_missing", {"username": "foo"})
        assert res.json == {"username": "foo"}

    # regression test for https://github.com/marshmallow-code/webargs/issues/145
    def test_nested_many_with_data_key(self, testapp):
        res = testapp.post_json("/echo_nested_many_data_key", {"x_field": [{"id": 42}]})
        # https://github.com/marshmallow-code/marshmallow/pull/714
        # marshmallow 2 still accepts the internal field name; 3 does not.
        if MARSHMALLOW_VERSION_INFO[0] < 3:
            assert res.json == {"x_field": [{"id": 42}]}
        res = testapp.post_json("/echo_nested_many_data_key", {"X-Field": [{"id": 24}]})
        assert res.json == {"x_field": [{"id": 24}]}
        res = testapp.post_json("/echo_nested_many_data_key", {})
        assert res.json == {}
@mock.patch("webargs.flaskparser.abort")
def test_abort_called_on_validation_error(mock_abort):
    """A failed validator must route through flaskparser.abort with a 422
    status, the field messages, and the original ValidationError.

    ``mock_abort`` is injected by the ``mock.patch`` decorator.
    """
    app = Flask("testapp")

    def validate(x):
        # 41 != 42, so parsing below fails validation.
        return x == 42

    argmap = {"value": fields.Field(validate=validate)}
    with app.test_request_context(
        "/foo",
        method="post",
        data=json.dumps({"value": 41}),
        content_type="application/json",
    ):
        parser.parse(argmap)
    mock_abort.assert_called()
    abort_args, abort_kwargs = mock_abort.call_args
    assert abort_args[0] == 422
    expected_msg = "Invalid value."
    assert abort_kwargs["messages"]["value"] == [expected_msg]
    assert type(abort_kwargs["exc"]) == ValidationError
def test_parse_form_returns_missing_if_no_form():
    """When the request has no form at all (AttributeError on access),
    parse_form must return the ``missing`` sentinel, not raise."""
    req = mock.Mock()
    req.form.get.side_effect = AttributeError("no form")
    assert parser.parse_form(req, "foo", fields.Field()) is missing
def test_abort_with_message():
    """abort() must stash the custom message on the raised HTTPException."""
    with pytest.raises(HTTPException) as excinfo:
        abort(400, message="custom error message")
    data = excinfo.value.data
    assert data["message"] == "custom error message"
def test_abort_has_serializable_data():
    """The ``data`` attached by abort() must round-trip through JSON,
    both with and without an ``exc`` keyword (the exception object itself
    must not leak into the serialized payload)."""
    with pytest.raises(HTTPException) as excinfo:
        abort(400, message="custom error message")
    serialized_error = json.dumps(excinfo.value.data)
    error = json.loads(serialized_error)
    assert isinstance(error, dict)
    assert error["message"] == "custom error message"
    with pytest.raises(HTTPException) as excinfo:
        abort(
            400,
            message="custom error message",
            exc=ValidationError("custom error message"),
        )
    serialized_error = json.dumps(excinfo.value.data)
    error = json.loads(serialized_error)
    assert isinstance(error, dict)
    assert error["message"] == "custom error message"
def test_json_cache_race_condition():
    """Two concurrent requests must not contaminate each other's cached
    JSON body.

    The lock is acquired up front so both threads enter ``_deserialize``
    and block there, guaranteeing the two parses overlap in time; only
    then is the lock released to let them finish.
    """
    app = Flask("testapp")
    lock = threading.Lock()
    lock.acquire()

    class MyField(fields.Field):
        def _deserialize(self, value, attr, data):
            # Block here until the main thread releases the lock, forcing
            # both parses to be in flight simultaneously.
            with lock:
                return value

    argmap = {"value": MyField()}
    results = {}

    def thread_fn(value):
        with app.test_request_context(
            "/foo",
            method="post",
            data=json.dumps({"value": value}),
            content_type="application/json",
        ):
            results[value] = parser.parse(argmap)["value"]

    t1 = threading.Thread(target=thread_fn, args=(42,))
    t2 = threading.Thread(target=thread_fn, args=(23,))
    t1.start()
    t2.start()
    lock.release()
    t1.join()
    t2.join()
    # ensure we didn't get contaminated by a parallel request
    assert results[42] == 42
    assert results[23] == 23
|
beni_s_hammer.py | import logging
from fragile import _instrument
# Configure the root logger to emit DEBUG-level records to stderr with a
# timestamped format.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(ch)
# NOTE(review): import placed mid-file; conventionally belongs at the top.
import multiprocessing as mp
def send_errors(batch):
    """Fire *batch* test messages at the 'prd' opbeat instance, then close
    its transports so the worker process can exit cleanly.

    Runs inside a child process (see ``__main__`` below), so the opbeat
    client is created fresh per process.
    """
    opbeat = _instrument('prd')
    for j in range(batch):
        # BUG FIX: the original statement ended with a stray trailing comma,
        # turning it into a 1-tuple expression (harmless but misleading).
        opbeat.client.capture('Message', param_message={'message': 'Hello there, again 3'})
    for transport in opbeat.client._transports.values():
        transport.close()
if __name__ == '__main__':
    # Spawn 9 worker processes (range(1, 10)), each sending 20 messages.
    # NOTE(review): if 10 workers were intended, this should be range(10).
    processes = [mp.Process(target=send_errors, args=(20,)) for x in range(1, 10)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
|
safe_t.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_xzc.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_xzc.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_xzc.bip32 import BIP32Node
from electrum_xzc import constants
from electrum_xzc.i18n import _
from electrum_xzc.plugin import Device
from electrum_xzc.transaction import deserialize, Transaction
from electrum_xzc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_xzc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
    """Electrum keystore backed by a Safe-T mini hardware wallet."""
    hw_type = 'safe_t'
    device = 'Safe-T mini'

    def get_derivation(self):
        """Return this keystore's BIP32 derivation path prefix."""
        return self.derivation

    def get_client(self, force_pair=True):
        """Return a (possibly newly paired) device client via the plugin."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # The Safe-T device offers no message encryption/decryption.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* on-device with the key at derivation suffix
        *sequence* (a (change, index) pair); returns raw signature bytes."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Have the device sign *tx* in place; no-op if already complete.

        Gathers the previous transactions referenced by the inputs (needed
        by the device for legacy, non-segwit inputs) and maps our master
        xpub to its derivation path, then delegates to the plugin.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
    """Electrum hardware-wallet plugin for the Archos Safe-T mini."""
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #                         libraries_available, libraries_URL,
    #                         minimum_firmware, wallet_class, types
    firmware_URL = 'https://safe-t.io'
    libraries_URL = 'https://github.com/archos-safe-t/python-safet'
    minimum_firmware = (1, 0, 5)
    keystore_class = SafeTKeyStore
    minimum_library = (0, 1, 0)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            # Bail out early; imports below would fail without safetlib.
            return
        # Imports are deferred so the plugin can still load (and report a
        # helpful error) when the safetlib dependency is missing.
        from . import client
        from . import transport
        import safetlib.messages
        self.client_class = client.SafeTClient
        self.types = safetlib.messages
        self.DEVICE_IDS = ('Safe-T mini',)
        self.transport_handler = transport.SafeTTransport()
        self.device_manager().register_enumerate_func(self.enumerate)

    def get_library_version(self):
        """Return the installed safetlib version string, or 'unknown'."""
        import safetlib
        try:
            return safetlib.__version__
        except AttributeError:
            return 'unknown'

    def enumerate(self):
        """List currently connected Safe-T devices as Device records."""
        devices = self.transport_handler.enumerate_devices()
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key='Safe-T mini',
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]

    def create_client(self, device, handler):
        """Open a transport to *device* and wrap it in a client.

        Returns None when the transport cannot be opened, the device does
        not answer a ping, or its firmware is below ``minimum_firmware``.
        """
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = self.transport_handler.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return
        self.logger.info(f"connected to device at {device.path}")
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.logger.info(f"ping failed {e}")
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.logger.info(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None
        return client

    def get_client(self, keystore, force_pair=True):
        """Return the device client paired with *keystore* (may prompt the
        user when *force_pair* is true)."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        """Coin identifier sent to the device for address/tx operations."""
        return "Testnet" if constants.net.TESTNET else "Zcoin"

    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize a blank device and run the chosen
        method on a background thread; raises UserCancelled on failure."""
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_safe_t_init_settings(wizard, method, self.device)
            # Run the device I/O off the GUI thread; the wizard's event
            # loop is used to wait for completion.
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread target: run _initialize_device, reporting failure/cancel
        back to the wizard loop via a non-zero exit code."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(str(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Perform the selected initialization (new seed, recovery, BIP39
        mnemonic upload, or xprv upload) on the device."""
        item, label, pin_protection, passphrase_protection = settings
        if method == TIM_RECOVER:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length.  If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            u2f_counter = 0
            skip_backup = False
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language,
                                u2f_counter, skip_backup)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)

    def _make_node_path(self, xpub, address_n):
        """Convert an xpub plus derivation suffix into the safetlib
        HDNodePathType protobuf structure."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = self.types.HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return self.types.HDNodePathType(node=node, address_n=address_n)

    def setup_device(self, device_id, wizard, purpose):
        """Prepare a device for use by the wizard: attach a handler,
        initialize it if blank, and prime the xpub fetch."""
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at *derivation* from the device; raises
        ScriptTypeNotSupported for unsupported script types."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def get_safet_input_script_type(self, electrum_txin_type: str):
        """Map an Electrum txin type to the safetlib InputScriptType enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.InputScriptType.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.InputScriptType.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return self.types.InputScriptType.SPENDADDRESS
        if electrum_txin_type in ('p2sh', ):
            return self.types.InputScriptType.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def get_safet_output_script_type(self, electrum_txin_type: str):
        """Map an Electrum txin type to the safetlib OutputScriptType enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.OutputScriptType.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.OutputScriptType.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return self.types.OutputScriptType.PAYTOADDRESS
        if electrum_txin_type in ('p2sh', ):
            return self.types.OutputScriptType.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Drive the on-device signing flow for *tx* and splice the
        resulting signatures (with SIGHASH_ALL byte appended) back in."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        # Append the SIGHASH_ALL ('01') byte expected by script sigs.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)

    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen so the user can verify
        it matches what the software wallet shows."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 0):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            script_type = self.get_safet_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            def f(xpub):
                return self._make_node_path(xpub, [change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            script_type = self.get_safet_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False):
        """Convert Electrum tx inputs into safetlib TxInputType messages.

        With *for_sig* true, derivation paths / multisig data for our keys
        are attached so the device can sign; otherwise only the outpoint
        data needed to describe a previous transaction is filled in.
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype._extend_address_n(xpub_n + s)
                        txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
                    else:
                        def f(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                            return self._make_node_path(xpub, s)
                        pubkeys = list(map(f, x_pubkeys))
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.get_safet_input_script_type(txin['type'])
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype._extend_address_n(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs

    def tx_outputs(self, derivation, tx):
        """Convert Electrum tx outputs into safetlib TxOutputType messages.

        Change outputs that belong to this wallet are described by
        derivation path (so the device hides them from the confirmation
        screen); everything else is sent as a plain address/script output.
        """
        def create_output_by_derivation():
            # ``index`` is the (change, index) pair from the wallet's
            # output info, interpolated into the derivation path.
            script_type = self.get_safet_output_script_type(info.script_type)
            if len(xpubs) == 1:
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                address_n = self.client_class.expand_path("/%d/%d" % index)
                pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs

    def electrum_tx_to_txtype(self, tx):
        """Convert an Electrum Transaction into the safetlib
        TransactionType protobuf used as previous-tx data."""
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t._extend_inputs(inputs)
        for vout in d['outputs']:
            o = t._add_bin_outputs()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        """Return the previous transaction with hash *tx_hash* in safetlib
        form (looked up in the map cached by sign_transaction)."""
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
testhread.py | import time
from threading import Thread
def sleeper(i):
    """Announce thread *i*, block for five seconds, then announce wake-up."""
    sleep_notice = "thread %d sleeps for 5 seconds" % i
    print(sleep_notice)
    time.sleep(5)
    wake_notice = "thread %d woke up" % i
    print(wake_notice)
# Launch ten sleeper threads; they sleep concurrently and the interpreter
# waits for all of them to finish (non-daemon threads) before exiting.
for i in range(10):
    t = Thread(target=sleeper, args=(i,))
    t.start()
profiler_test.py |
from mail_serv_test import *
from mail_serv.profiler import *
os.environ['PROFILER_REPORT_DIR'] = f"{THIS_DIR}/tmp/profiler"
def dummy_fun1(index:int) -> None:
    """Stand-in workload: block for 10 ms (*index* is ignored)."""
    delay_seconds = 0.010
    time.sleep(delay_seconds)
def dummy_fun2(index:int) -> None:
    """Stand-in workload: block for 20 ms (*index* is ignored)."""
    delay_seconds = 0.020
    time.sleep(delay_seconds)
def test_system_profiler():
    """Exercise SystemProfiler across the main thread and three extra
    threads, then render and dump the collected frame statistics."""
    print()
    profiler = SystemProfiler()  # active in main thread
    def dummy_thread():
        profiler.interrupt_activate()  # active in extra thread
        for index in range(10):
            dummy_fun1(index)
            dummy_fun2(index)
    for index in range(3):
        thread = threading.Thread(target=dummy_thread)
        thread.setDaemon(True)
        thread.start()
    update_stat_tree(profiler.frame_stat_map)
    output = render_stat_tree(profiler.frame_stat_map)
    print(f"===")
    print(output)
    # Give the daemon threads time to run before inspecting the stats.
    time.sleep(1)
    for node_guid, node_stat in profiler.frame_stat_map.items():
        print(f"===")
        print(f"node_guid={node_guid}")
        print(f"node_stat={node_stat}")
def test_profiler_session():
    """Profile a simple workload inside a named profiler session and print
    the report file the session produced."""
    print()
    session = 'tester-session'
    with profiler_session(session):
        for index in range(50):
            dummy_fun1(index)
            dummy_fun2(index)
    report_file = profiler_report_file(session)
    with open(report_file, 'r') as report_text:
        print(report_text.read())
def test_profiler_thread():
    """Run the Profiler inside worker threads and emit a session report."""
    print()
    session = 'tester-thread'
    profiler = Profiler(interval=profiler_interval())
    def dummy_thread():
        # Each worker starts/stops the shared profiler around its workload.
        profiler.start()
        for index in range(10):
            dummy_fun1(index)
            dummy_fun2(index)
        profiler.stop()
    for index in range(5):
        thread = threading.Thread(target=dummy_thread)
        thread.setDaemon(True)
        thread.start()
        # NOTE(review): joining inside the loop runs the five workers
        # sequentially, not concurrently — confirm intended.
        thread.join()
    profiler_produce_report(profiler, session)
    report_file = profiler_report_file(session)
    with open(report_file, 'r') as report_text:
        print(report_text.read())
|
train.py | from dataloader import EvalDataset, TrainDataset, NewBidirectionalOneShotIterator
from dataloader import get_dataset
import argparse
import os
import logging
import time
# Select the training backend implementation at import time from the
# DGLBACKEND environment variable (defaults to pytorch).
backend = os.environ.get('DGLBACKEND', 'pytorch')
if backend.lower() == 'mxnet':
    import multiprocessing as mp
    from train_mxnet import load_model
    from train_mxnet import train
    from train_mxnet import test
else:
    # torch.multiprocessing is needed so tensors can be shared between
    # worker processes.
    import torch.multiprocessing as mp
    from train_pytorch import load_model
    from train_pytorch import train
    from train_pytorch import test
class ArgParser(argparse.ArgumentParser):
    """Command-line argument parser for knowledge-graph embedding training.

    Declares every option used by the train/eval pipeline.  Fixes three
    help-text bugs from the original: 'entitiy' and 'fro' typos, and
    ``--log_interval`` help that was a copy-paste of ``--eval_interval``'s.
    """
    def __init__(self):
        super(ArgParser, self).__init__()
        self.add_argument('--model_name', default='TransE',
                          choices=['TransE', 'TransE_l1', 'TransE_l2', 'TransR',
                                   'RESCAL', 'DistMult', 'ComplEx', 'RotatE'],
                          help='model to use')
        self.add_argument('--data_path', type=str, default='data',
                          help='root path of all dataset')
        self.add_argument('--dataset', type=str, default='FB15k',
                          help='dataset name, under data_path')
        self.add_argument('--format', type=str, default='1',
                          help='the format of the dataset.')
        self.add_argument('--save_path', type=str, default='ckpts',
                          help='place to save models and logs')
        self.add_argument('--save_emb', type=str, default=None,
                          help='save the embeddings in the specific location.')
        self.add_argument('--max_step', type=int, default=80000,
                          help='train xx steps')
        self.add_argument('--warm_up_step', type=int, default=None,
                          help='for learning rate decay')
        self.add_argument('--batch_size', type=int, default=1024,
                          help='batch size')
        self.add_argument('--batch_size_eval', type=int, default=8,
                          help='batch size used for eval and test')
        self.add_argument('--neg_sample_size', type=int, default=128,
                          help='negative sampling size')
        self.add_argument('--neg_chunk_size', type=int, default=-1,
                          help='chunk size of the negative edges.')
        self.add_argument('--neg_deg_sample', action='store_true',
                          help='negative sample proportional to vertex degree in the training')
        self.add_argument('--neg_deg_sample_eval', action='store_true',
                          help='negative sampling proportional to vertex degree in the evaluation')
        self.add_argument('--neg_sample_size_valid', type=int, default=1000,
                          help='negative sampling size for validation')
        self.add_argument('--neg_chunk_size_valid', type=int, default=-1,
                          help='chunk size of the negative edges.')
        self.add_argument('--neg_sample_size_test', type=int, default=-1,
                          help='negative sampling size for testing')
        self.add_argument('--neg_chunk_size_test', type=int, default=-1,
                          help='chunk size of the negative edges.')
        self.add_argument('--hidden_dim', type=int, default=256,
                          help='hidden dim used by relation and entity')
        self.add_argument('--lr', type=float, default=0.0001,
                          help='learning rate')
        self.add_argument('-g', '--gamma', type=float, default=12.0,
                          help='margin value')
        self.add_argument('--eval_percent', type=float, default=1,
                          help='sample some percentage for evaluation.')
        self.add_argument('--no_eval_filter', action='store_true',
                          help='do not filter positive edges among negative edges for evaluation')
        self.add_argument('--gpu', type=int, default=[-1], nargs='+',
                          help='a list of active gpu ids, e.g. 0 1 2 4')
        self.add_argument('--mix_cpu_gpu', action='store_true',
                          help='mix CPU and GPU training')
        self.add_argument('-de', '--double_ent', action='store_true',
                          help='double entity dim for complex number')
        self.add_argument('-dr', '--double_rel', action='store_true',
                          help='double relation dim for complex number')
        self.add_argument('--seed', type=int, default=0,
                          help='set random seed for reproducibility')
        self.add_argument('-log', '--log_interval', type=int, default=1000,
                          help='print training logs after every x steps')
        self.add_argument('--eval_interval', type=int, default=10000,
                          help='do evaluation after every x steps')
        self.add_argument('-adv', '--neg_adversarial_sampling', action='store_true',
                          help='if use negative adversarial sampling')
        self.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
        self.add_argument('--valid', action='store_true',
                          help='if valid a model')
        self.add_argument('--test', action='store_true',
                          help='if test a model')
        self.add_argument('-rc', '--regularization_coef', type=float, default=0.000002,
                          help='set value > 0.0 if regularization is used')
        self.add_argument('-rn', '--regularization_norm', type=int, default=3,
                          help='norm used in regularization')
        self.add_argument('--num_worker', type=int, default=16,
                          help='number of workers used for loading data')
        self.add_argument('--non_uni_weight', action='store_true',
                          help='if use uniform weight when computing loss')
        self.add_argument('--init_step', type=int, default=0,
                          help='DONT SET MANUALLY, used for resume')
        self.add_argument('--step', type=int, default=0,
                          help='DONT SET MANUALLY, track current step')
        self.add_argument('--pickle_graph', action='store_true',
                          help='pickle built graph, building a huge graph is slow.')
        self.add_argument('--num_proc', type=int, default=1,
                          help='number of process used')
        self.add_argument('--rel_part', action='store_true',
                          help='enable relation partitioning')
def get_logger(args):
    """Create a fresh per-run output folder and return a file logger.

    A folder named '<model_name>_<dataset>_<n>' is created under
    args.save_path, where n is chosen so the name is unique, and
    args.save_path is rebased to that folder (side effect callers rely on).
    Logging is configured to write to <folder>/train.log.

    Args:
        args: parsed arguments; reads save_path, model_name, dataset and
            rewrites save_path.

    Returns:
        logging.Logger: module logger writing to the run's train.log.
    """
    # makedirs(exist_ok=True) also creates missing parents and avoids the
    # exists()/mkdir() race of the original code.
    os.makedirs(args.save_path, exist_ok=True)

    folder = '{}_{}_'.format(args.model_name, args.dataset)
    # Count existing runs with the same prefix to pick a unique suffix.
    n = len([x for x in os.listdir(args.save_path) if x.startswith(folder)])
    folder += str(n)
    args.save_path = os.path.join(args.save_path, folder)
    os.makedirs(args.save_path, exist_ok=True)

    log_file = os.path.join(args.save_path, 'train.log')
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_file,
        filemode='w'
    )
    logger = logging.getLogger(__name__)
    print("Logs are being recorded at: {}".format(log_file))
    return logger
def run(args, logger):
    """Train (and optionally validate/test) a knowledge-graph embedding model.

    Builds the training negative samplers, optional validation/test samplers,
    loads the model, trains (multi-process when args.num_proc > 1), optionally
    saves embeddings, and finally runs test evaluation.

    Args:
        args: parsed command-line arguments (see ArgParser).
        logger: logger from get_logger(); forwarded to load_model().
    """
    # load dataset and samplers
    dataset = get_dataset(args.data_path, args.dataset, args.format)
    n_entities = dataset.n_entities
    n_relations = dataset.n_relations
    # A negative test sample size < 0 means "corrupt against all entities".
    if args.neg_sample_size_test < 0:
        args.neg_sample_size_test = n_entities
    args.eval_filter = not args.no_eval_filter
    if args.neg_deg_sample_eval:
        assert not args.eval_filter, "if negative sampling based on degree, we can't filter positive edges."

    # When we generate a batch of negative edges from a set of positive edges,
    # we first divide the positive edges into chunks and corrupt the edges in a chunk
    # together. By default, the chunk size is equal to the negative sample size.
    # Usually, this works well. But we also allow users to specify the chunk size themselves.
    if args.neg_chunk_size < 0:
        args.neg_chunk_size = args.neg_sample_size
    if args.neg_chunk_size_valid < 0:
        args.neg_chunk_size_valid = args.neg_sample_size_valid
    if args.neg_chunk_size_test < 0:
        args.neg_chunk_size_test = args.neg_sample_size_test

    train_data = TrainDataset(dataset, args, ranks=args.num_proc)
    if args.num_proc > 1:
        # One bidirectional (head/tail corruption) iterator per worker process.
        train_samplers = []
        for i in range(args.num_proc):
            train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size,
                                                           args.neg_chunk_size,
                                                           mode='chunk-head',
                                                           num_workers=args.num_worker,
                                                           shuffle=True,
                                                           exclude_positive=True,
                                                           rank=i)
            train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size,
                                                           args.neg_chunk_size,
                                                           mode='chunk-tail',
                                                           num_workers=args.num_worker,
                                                           shuffle=True,
                                                           exclude_positive=True,
                                                           rank=i)
            train_samplers.append(NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail,
                                                                  args.neg_chunk_size,
                                                                  True, n_entities))
    else:
        train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size,
                                                       args.neg_chunk_size,
                                                       mode='chunk-head',
                                                       num_workers=args.num_worker,
                                                       shuffle=True,
                                                       exclude_positive=True)
        train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size,
                                                       args.neg_chunk_size,
                                                       mode='chunk-tail',
                                                       num_workers=args.num_worker,
                                                       shuffle=True,
                                                       exclude_positive=True)
        train_sampler = NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail,
                                                        args.neg_chunk_size,
                                                        True, n_entities)

    # for multiprocessing evaluation, we don't need to sample multiple batches at a time
    # in each process.
    num_workers = args.num_worker
    if args.num_proc > 1:
        num_workers = 1
    if args.valid or args.test:
        eval_dataset = EvalDataset(dataset, args)
    if args.valid:
        # Here we want to use the regular negative sampler because we need to ensure that
        # all positive edges are excluded.
        if args.num_proc > 1:
            valid_sampler_heads = []
            valid_sampler_tails = []
            for i in range(args.num_proc):
                valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval,
                                                                 args.neg_sample_size_valid,
                                                                 args.neg_chunk_size_valid,
                                                                 args.eval_filter,
                                                                 mode='chunk-head',
                                                                 num_workers=num_workers,
                                                                 rank=i, ranks=args.num_proc)
                valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval,
                                                                 args.neg_sample_size_valid,
                                                                 args.neg_chunk_size_valid,
                                                                 args.eval_filter,
                                                                 mode='chunk-tail',
                                                                 num_workers=num_workers,
                                                                 rank=i, ranks=args.num_proc)
                valid_sampler_heads.append(valid_sampler_head)
                valid_sampler_tails.append(valid_sampler_tail)
        else:
            valid_sampler_head = eval_dataset.create_sampler('valid', args.batch_size_eval,
                                                             args.neg_sample_size_valid,
                                                             args.neg_chunk_size_valid,
                                                             args.eval_filter,
                                                             mode='chunk-head',
                                                             num_workers=num_workers,
                                                             rank=0, ranks=1)
            valid_sampler_tail = eval_dataset.create_sampler('valid', args.batch_size_eval,
                                                             args.neg_sample_size_valid,
                                                             args.neg_chunk_size_valid,
                                                             args.eval_filter,
                                                             mode='chunk-tail',
                                                             num_workers=num_workers,
                                                             rank=0, ranks=1)
    if args.test:
        # Here we want to use the regular negative sampler because we need to ensure that
        # all positive edges are excluded.
        if args.num_proc > 1:
            test_sampler_tails = []
            test_sampler_heads = []
            for i in range(args.num_proc):
                test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval,
                                                                args.neg_sample_size_test,
                                                                args.neg_chunk_size_test,
                                                                args.eval_filter,
                                                                mode='chunk-head',
                                                                num_workers=num_workers,
                                                                rank=i, ranks=args.num_proc)
                test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval,
                                                                args.neg_sample_size_test,
                                                                args.neg_chunk_size_test,
                                                                args.eval_filter,
                                                                mode='chunk-tail',
                                                                num_workers=num_workers,
                                                                rank=i, ranks=args.num_proc)
                test_sampler_heads.append(test_sampler_head)
                test_sampler_tails.append(test_sampler_tail)
        else:
            test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval,
                                                            args.neg_sample_size_test,
                                                            args.neg_chunk_size_test,
                                                            args.eval_filter,
                                                            mode='chunk-head',
                                                            num_workers=num_workers,
                                                            rank=0, ranks=1)
            test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval,
                                                            args.neg_sample_size_test,
                                                            args.neg_chunk_size_test,
                                                            args.eval_filter,
                                                            mode='chunk-tail',
                                                            num_workers=num_workers,
                                                            rank=0, ranks=1)

    # We need to free all memory referenced by dataset.
    eval_dataset = None
    dataset = None

    # load model
    model = load_model(logger, args, n_entities, n_relations)
    if args.num_proc > 1:
        # Share embeddings across worker processes (Hogwild-style training).
        model.share_memory()

    # train
    start = time.time()
    if args.num_proc > 1:
        procs = []
        for i in range(args.num_proc):
            rel_parts = train_data.rel_parts if args.rel_part else None
            valid_samplers = [valid_sampler_heads[i], valid_sampler_tails[i]] if args.valid else None
            proc = mp.Process(target=train, args=(args, model, train_samplers[i], i, rel_parts, valid_samplers))
            procs.append(proc)
            proc.start()
        for proc in procs:
            proc.join()
    else:
        valid_samplers = [valid_sampler_head, valid_sampler_tail] if args.valid else None
        # NOTE(review): the multi-process branch above calls
        # train(args, model, sampler, rank, rel_parts, valid_samplers) while
        # this call passes valid_samplers as the 4th positional argument —
        # confirm train()'s signature; rank/rel_parts look like they are
        # being skipped here.
        train(args, model, train_sampler, valid_samplers)
    print('training takes {} seconds'.format(time.time() - start))

    if args.save_emb is not None:
        if not os.path.exists(args.save_emb):
            os.mkdir(args.save_emb)
        model.save_emb(args.save_emb, args.dataset)

    # test
    if args.test:
        start = time.time()
        if args.num_proc > 1:
            queue = mp.Queue(args.num_proc)
            procs = []
            for i in range(args.num_proc):
                proc = mp.Process(target=test, args=(args, model, [test_sampler_heads[i], test_sampler_tails[i]],
                                                     i, 'Test', queue))
                procs.append(proc)
                proc.start()

            # Average each metric over all worker processes.
            total_metrics = {}
            for i in range(args.num_proc):
                metrics = queue.get()
                for k, v in metrics.items():
                    if i == 0:
                        total_metrics[k] = v / args.num_proc
                    else:
                        total_metrics[k] += v / args.num_proc
            # BUG FIX: report the aggregated averages; the original iterated
            # 'metrics' (the last process's result) instead of 'total_metrics',
            # so the printed values were not averages at all.
            for k, v in total_metrics.items():
                print('Test average {} at [{}/{}]: {}'.format(k, args.step, args.max_step, v))
            for proc in procs:
                proc.join()
        else:
            # NOTE(review): same call-arity concern as the train() call above —
            # the mp target receives (args, model, samplers, rank, 'Test', queue).
            test(args, model, [test_sampler_head, test_sampler_tail])
        print('test:', time.time() - start)
if __name__ == '__main__':
    # Entry point: parse CLI arguments, create the per-run log folder and
    # logger (get_logger also rebases args.save_path), then train/evaluate.
    args = ArgParser().parse_args()
    logger = get_logger(args)
    run(args, logger)
|
vnokex.py | # encoding: UTF-8
from __future__ import print_function
import ssl
import hashlib
import json
import traceback
from threading import Thread
from time import sleep
import websocket
# Constant definitions
# Websocket endpoint of the OKEX production ("real") trading server.
OKEX_SPOT_HOST = 'wss://real.okex.com:10441/websocket'

# Currencies used for quoting/settlement on the spot market.
SPOT_CURRENCY = ["usdt",
                 "btc",
                 "ltc",
                 "eth",
                 "etc",
                 "bch"]

# Tradable spot symbols, formatted as "<base>_<quote>".
SPOT_SYMBOL = ["ltc_btc",
               "eth_btc",
               "etc_btc",
               "bch_btc",
               "btc_usdt",
               "eth_usdt",
               "ltc_usdt",
               "etc_usdt",
               "bch_usdt",
               "etc_eth",
               "bt1_btc",
               "bt2_btc",
               "btg_btc",
               "qtum_btc",
               "hsr_btc",
               "neo_btc",
               "gas_btc",
               "qtum_usdt",
               "hsr_usdt",
               "neo_usdt",
               "gas_usdt"]

# Candlestick (K-line) periods accepted by the kline subscription channels.
KLINE_PERIOD = ["1min",
                "3min",
                "5min",
                "15min",
                "30min",
                "1hour",
                "2hour",
                "4hour",
                "6hour",
                "12hour",
                "day",
                "3day",
                "week"]
########################################################################
class OkexApi(object):
    """Base websocket client for the OKEX trading API.

    Manages the websocket connection (connect/reconnect/close), runs a
    heartbeat thread that pings the server every 10 seconds, signs
    requests with the account secret, and dispatches pushed messages to
    the on* callbacks.  Subclasses (e.g. OkexSpotApi) add market-specific
    channels.
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.host = ''                  # server address
        self.apiKey = ''                # API key (account identifier)
        self.secretKey = ''             # API secret used for signing
        self.active = False             # True while the client is running
        self.ws = None                  # websocket application object
        self.wsThread = None            # websocket worker thread
        self.heartbeatCount = 0         # seconds since the last ping
        self.heartbeatThread = None     # heartbeat thread
        self.heartbeatReceived = True   # True if a reply arrived since last ping
        self.reconnecting = False       # reconnect in progress

    #----------------------------------------------------------------------
    def heartbeat(self):
        """Send a ping every 10 seconds; reconnect when no reply arrived."""
        while self.active:
            self.heartbeatCount += 1
            if self.heartbeatCount < 10:
                sleep(1)
            else:
                self.heartbeatCount = 0
                if not self.heartbeatReceived:
                    # No reply since the previous ping: the link is dead.
                    self.reconnect()
                else:
                    self.heartbeatReceived = False
                    d = {'event': 'ping'}
                    j = json.dumps(d)
                    try:
                        self.ws.send(j)
                    # FIX: narrowed from a bare 'except:', which also
                    # swallowed KeyboardInterrupt/SystemExit.
                    except Exception:
                        msg = traceback.format_exc()
                        self.onError(msg)
                        self.reconnect()

    #----------------------------------------------------------------------
    def reconnect(self):
        """Tear down the current websocket and open a fresh one."""
        if not self.reconnecting:
            self.reconnecting = True
            self.closeWebsocket()    # close the previous connection first
            self.initWebsocket()
            self.reconnecting = False

    #----------------------------------------------------------------------
    def connect(self, host, apiKey, secretKey, trace=False):
        """Connect to *host* with the given credentials.

        trace -- enable websocket-level debug tracing.
        """
        self.host = host
        self.apiKey = apiKey
        self.secretKey = secretKey
        websocket.enableTrace(trace)
        self.initWebsocket()
        self.active = True

    #----------------------------------------------------------------------
    def initWebsocket(self):
        """Create the websocket app and start its worker thread."""
        self.ws = websocket.WebSocketApp(self.host,
                                         on_message=self.onMessageCallback,
                                         on_error=self.onErrorCallback,
                                         on_close=self.onCloseCallback,
                                         on_open=self.onOpenCallback)
        # NOTE(review): certificate verification is disabled here
        # (CERT_NONE) — confirm this is intended before production use.
        kwargs = {'sslopt': {'cert_reqs': ssl.CERT_NONE}}
        self.wsThread = Thread(target=self.ws.run_forever, kwargs=kwargs)
        self.wsThread.start()

    #----------------------------------------------------------------------
    def readData(self, evt):
        """Decode a pushed JSON payload into Python objects."""
        data = json.loads(evt)
        return data

    #----------------------------------------------------------------------
    def closeHeartbeat(self):
        """Stop the heartbeat thread."""
        # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() has
        # existed since Python 2.6, so this stays backward compatible.
        if self.heartbeatThread and self.heartbeatThread.is_alive():
            self.active = False
            self.heartbeatThread.join()

    #----------------------------------------------------------------------
    def closeWebsocket(self):
        """Close the websocket and join its worker thread."""
        if self.wsThread and self.wsThread.is_alive():
            self.ws.close()
            self.wsThread.join()

    #----------------------------------------------------------------------
    def close(self):
        """Shut down both heartbeat and websocket."""
        self.closeHeartbeat()
        self.closeWebsocket()

    #----------------------------------------------------------------------
    def onMessage(self, data):
        """Data-push callback (meant to be overridden)."""
        print('onMessage')
        # BUG FIX: the original printed the undefined name 'evt', raising
        # NameError whenever a message arrived.
        print(data)

    #----------------------------------------------------------------------
    def onError(self, data):
        """Error-push callback (meant to be overridden)."""
        print('onError')
        # BUG FIX: same undefined-'evt' NameError as onMessage.
        print(data)

    #----------------------------------------------------------------------
    def onClose(self):
        """Connection-closed callback (meant to be overridden)."""
        print('onClose')

    #----------------------------------------------------------------------
    def onOpen(self):
        """Connection-opened callback (meant to be overridden)."""
        print('onOpen')

    #----------------------------------------------------------------------
    def onMessageCallback(self, ws, evt):
        """Raw websocket hook: route heartbeat replies vs. data pushes."""
        data = self.readData(evt)
        if 'event' in data:
            # Reply to our ping.
            self.heartbeatReceived = True
        else:
            self.onMessage(data[0])

    #----------------------------------------------------------------------
    def onErrorCallback(self, ws, evt):
        """Raw websocket error hook."""
        self.onError(evt)

    #----------------------------------------------------------------------
    def onCloseCallback(self, ws):
        """Raw websocket close hook."""
        self.onClose()

    #----------------------------------------------------------------------
    def onOpenCallback(self, ws):
        """Raw websocket open hook: start the heartbeat on first open."""
        if not self.heartbeatThread:
            self.heartbeatThread = Thread(target=self.heartbeat)
            self.heartbeatThread.start()
        self.onOpen()

    #----------------------------------------------------------------------
    def generateSign(self, params):
        """Return the upper-case MD5 signature for *params*."""
        pairs = []
        for key in sorted(params.keys()):
            pairs.append('%s=%s' % (key, params[key]))
        pairs.append('secret_key=%s' % self.secretKey)
        sign = '&'.join(pairs)
        return hashlib.md5(sign.encode('utf-8')).hexdigest().upper()

    #----------------------------------------------------------------------
    def sendRequest(self, channel, params=None):
        """Send an 'addChannel' request.

        Returns True on success; on a closed connection triggers a
        reconnect and returns False.
        """
        d = {}
        d['event'] = 'addChannel'
        d['channel'] = channel
        # With parameters present, add api_key and the signature.
        if params is not None:
            params['api_key'] = self.apiKey
            params['sign'] = self.generateSign(params)
            d['parameters'] = params
        j = json.dumps(d)
        # Reconnect if the send raises because the connection closed.
        try:
            self.ws.send(j)
            return True
        except websocket.WebSocketConnectionClosedException:
            self.reconnect()
            return False

    #----------------------------------------------------------------------
    def login(self):
        """Send the signed 'login' message; True on success, False on a
        closed connection (after triggering a reconnect)."""
        params = {}
        params['api_key'] = self.apiKey
        params['sign'] = self.generateSign(params)
        d = {}
        d['event'] = 'login'
        d['parameters'] = params
        j = json.dumps(d)
        try:
            self.ws.send(j)
            return True
        except websocket.WebSocketConnectionClosedException:
            self.reconnect()
            return False
########################################################################
class OkexSpotApi(OkexApi):
    """Spot-market websocket API on top of OkexApi.

    Each method builds the channel name (plus a signed parameter dict
    where required) and forwards it through OkexApi.sendRequest().
    CONSISTENCY FIX: every request method now returns sendRequest()'s
    True/False result — previously only spotOrder did, so callers could
    not detect a closed connection on the other calls.  The change is
    backward compatible (callers that ignored the implicit None are
    unaffected).
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(OkexSpotApi, self).__init__()

    #----------------------------------------------------------------------
    def subscribeSpotTicker(self, symbol):
        """Subscribe to tick data for a spot symbol."""
        channel = 'ok_sub_spot_%s_ticker' % symbol
        return self.sendRequest(channel)

    #----------------------------------------------------------------------
    def subscribeSpotDepth(self, symbol, depth=0):
        """Subscribe to order-book depth; depth=0 requests the full book."""
        channel = 'ok_sub_spot_%s_depth' % symbol
        if depth:
            channel = channel + '_' + str(depth)
        return self.sendRequest(channel)

    #----------------------------------------------------------------------
    def subscribeSpotDeals(self, symbol):
        """Subscribe to the public trades (deals) feed."""
        channel = 'ok_sub_spot_%s_deals' % symbol
        return self.sendRequest(channel)

    #----------------------------------------------------------------------
    def subscribeSpotKlines(self, symbol, period):
        """Subscribe to candlesticks; period is one of KLINE_PERIOD."""
        channel = 'ok_sub_spot_%s_kline_%s' % (symbol, period)
        return self.sendRequest(channel)

    #----------------------------------------------------------------------
    def spotOrder(self, symbol, type_, price, amount):
        """Place a spot order (signed request)."""
        params = {}
        params['symbol'] = str(symbol)
        params['type'] = str(type_)
        params['price'] = str(price)
        params['amount'] = str(amount)
        channel = 'ok_spot_order'
        return self.sendRequest(channel, params)

    #----------------------------------------------------------------------
    def spotCancelOrder(self, symbol, orderid):
        """Cancel a spot order (signed request)."""
        params = {}
        params['symbol'] = str(symbol)
        params['order_id'] = str(orderid)
        channel = 'ok_spot_cancel_order'
        return self.sendRequest(channel, params)

    #----------------------------------------------------------------------
    def spotUserInfo(self):
        """Query spot account balances (signed request)."""
        channel = 'ok_spot_userinfo'
        return self.sendRequest(channel, {})

    #----------------------------------------------------------------------
    def spotOrderInfo(self, symbol, orderid):
        """Query the state of a spot order (signed request)."""
        params = {}
        params['symbol'] = str(symbol)
        params['order_id'] = str(orderid)
        channel = 'ok_spot_orderinfo'
        return self.sendRequest(channel, params)

    #----------------------------------------------------------------------
    def subSpotOrder(self, symbol):
        """Subscribe to private order updates for a symbol."""
        channel = 'ok_sub_spot_%s_order' % symbol
        return self.sendRequest(channel)

    #----------------------------------------------------------------------
    def subSpotBalance(self, symbol):
        """Subscribe to private balance updates for a symbol."""
        channel = 'ok_sub_spot_%s_balance' % symbol
        return self.sendRequest(channel)
|
lisp-itr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import pcappy
import time
import os
import commands
import struct
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
# ---------------------------------------------------------------------------
# Module-level state of the obfuscated lispers.net ITR; the identifiers are
# machine-generated.  Roles below are inferred from the startup code later in
# this file — NOTE(review): confirm against the unobfuscated source.  The
# 'if N - N:' statements are dead-code filler inserted by the obfuscator
# (their conditions are always false); they are preserved unchanged.
# ---------------------------------------------------------------------------
# Send sockets: [IPv4 send socket, IPv6 send socket, "lisp-itr" IPC socket].
II1iII1i = [ None , None , None ]
# "lisp-itr" listen socket (also assigned to lisp.lisp_ipc_socket at startup).
oO0oIIII = None
# "lispers.net-itr" listen socket.
Oo0oO0oo0oO00 = None
# Listen socket bound to the first ephemeral port (IPv4 or IPv6 by platform).
i111I = None
# Listen socket bound to the second ephemeral port (IPv4 only).
II1Ii1iI1i = None
# Two ephemeral UDP port numbers allocated at import time.
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )
OOo = lisp . lisp_get_ephemeral_port ( )
# Raw IPv4 (IP_HDRINCL) and raw IPv6 sockets, created during startup.
Ii1IIii11 = None
Oooo0000 = None
# Handle of the periodic 60-second timer; I11 is not used in this chunk.
i11 = None
I11 = None
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
# Boolean flag, False at startup (never set within this chunk).
oo0O000OoO = False
if 34 - 34: I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
# Lock shared by the worker threads started below.
i1iIIi1 = threading . Lock ( )
if 50 - 50: i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1(parameter):
    """Dispatch an ITR 'show' command to the shared ITR/RTR show handler."""
    output = lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", [])
    return output
if 41 - 41: OoOoOO00
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
def oo(parameter):
    """Handle the ITR 'show crypto' command."""
    # 'parameter' is part of the common command-handler signature; the
    # underlying call does not need it.
    crypto_listing = lispconfig.lisp_show_crypto_list("ITR")
    return crypto_listing
if 68 - 68: I11i + OOooOOo . iIii1I11I1II1 - IiII % iIii1I11I1II1 - ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
def I1i1iii(parameter):
    """Handle the ITR 'show rloc-probe' command."""
    # 'parameter' is part of the common command-handler signature; the
    # underlying call does not need it.
    probe_output = lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR")
    return probe_output
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
def oo0 ( lisp_sockets , lisp_ephem_port ) :
    """Periodic 60-second housekeeping task (obfuscated lispers.net ITR).

    Clears the nonce-keyed crypto-key cache, re-sends a broadcast-MAC
    Map-Request when the L2 overlay is enabled, times out map-cache
    entries, and re-arms itself with threading.Timer.  The 'if N - N:'
    statements are obfuscator dead-code filler, preserved unchanged.
    """
    lisp . lisp_set_exception ( )
    if 57 - 57: OOooOOo . OOooOOo
    if 95 - 95: O0 + OoO0O00 . II111iiii / O0
    if 97 - 97: ooOoO0o - OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - OoooooooOO
    if 59 - 59: O0 + I1IiiI + IiII % I1IiiI
    # Drop every cached nonce-keyed crypto key, then reset the whole map.
    for o0OOoo0OO0OOO in lisp . lisp_crypto_keys_by_nonce . values ( ) :
        for iI1iI1I1i1I in o0OOoo0OO0OOO : del ( iI1iI1I1i1I )
    if 24 - 24: I1ii11iIi11i
    lisp . lisp_crypto_keys_by_nonce = { }
    if 56 - 56: ooOoO0o
    if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
    if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
    if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
    if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
    # With an L2 overlay, periodically re-request the broadcast MAC EID
    # (0000-0000-0000/0 source, ffff-ffff-ffff/48 target).
    if ( lisp . lisp_l2_overlay ) :
        O0o0 = lisp . LISP_AFI_MAC
        OO00Oo = lisp . lisp_default_iid
        O0OOO0OOoO0O = lisp . lisp_address ( O0o0 , "0000-0000-0000" , 0 , OO00Oo )
        O0OOO0OOoO0O . mask_len = 0
        O00Oo000ooO0 = lisp . lisp_address ( O0o0 , "ffff-ffff-ffff" , 48 , OO00Oo )
        lisp . lisp_send_map_request ( lisp_sockets , lisp_ephem_port , O0OOO0OOoO0O , O00Oo000ooO0 , None )
    if 100 - 100: O0 + IiII - OOooOOo + i11iIiiIii * Ii1I
    if 30 - 30: o0oOOo0O0Ooo . Ii1I - OoooooooOO
    if 8 - 8: i1IIi - iIii1I11I1II1 * II111iiii + i11iIiiIii / I1Ii111 % OOooOOo
    if 16 - 16: I1ii11iIi11i + OoO0O00 - II111iiii
    if 85 - 85: OoOoOO00 + i1IIi
    # Age out expired map-cache entries.
    lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
    if 58 - 58: II111iiii * OOooOOo * I1ii11iIi11i / OOooOOo
    if 75 - 75: oO0o
    if 50 - 50: Ii1I / Oo0Ooo - oO0o - I11i % iII111i - oO0o
    if 91 - 91: OoO0O00 / I11i - II111iiii . I11i
    # Re-arm this task to run again in 60 seconds.
    # NOTE(review): this assigns a *local* 'i11' (no 'global' statement), so
    # the module-level timer handle is not updated here — confirm intended.
    i11 = threading . Timer ( 60 , oo0 ,
        [ lisp_sockets , lisp_ephem_port ] )
    i11 . start ( )
    return
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
def OO0oOoOO0oOO0 ( lisp_socket ) :
    """Periodic dynamic-EID timeout scan (obfuscated lispers.net ITR).

    Walks every database-mapping entry, finds dynamic-EIDs whose last
    packet is older than their timeout, notifies the lisp-etr process via
    IPC to forget them, removes them locally, and re-arms itself.  The
    'if N - N:' statements are obfuscator dead-code filler, preserved
    unchanged.
    """
    lisp . lisp_set_exception ( )
    if 86 - 86: OOooOOo
    OOoo0O = lisp . lisp_get_timestamp ( )
    # Scan each configured database-mapping entry that has dynamic-EIDs.
    for Oo0ooOo0o in lisp . lisp_db_list :
        if ( Oo0ooOo0o . dynamic_eid_configured ( ) == False ) : continue
        if 22 - 22: iIii1I11I1II1 / i11iIiiIii * iIii1I11I1II1 * II111iiii . OOooOOo / i11iIiiIii
        # Addresses collected for removal after the inner loop finishes.
        Iiii = [ ]
        for OO0OoO0o00 in Oo0ooOo0o . dynamic_eids . values ( ) :
            ooOO0O0ooOooO = OO0OoO0o00 . last_packet
            if ( ooOO0O0ooOooO == None ) : continue
            if ( ooOO0O0ooOooO + OO0OoO0o00 . timeout > OOoo0O ) : continue
            if 55 - 55: o0oOOo0O0Ooo * OoOoOO00
            if 61 - 61: I11i
            if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
            if 42 - 42: OoO0O00
            if 67 - 67: I1Ii111 . iII111i . O0
            # Before declaring the dynamic-EID idle, ask the hardware
            # whether it still sees activity for this prefix.
            if ( lisp . lisp_program_hardware ) :
                IIIIiiII111 = OO0OoO0o00 . dynamic_eid . print_prefix_no_iid ( )
                if ( lisp . lisp_arista_is_alive ( IIIIiiII111 ) ) :
                    lisp . lprint ( ( "Hardware indicates dynamic-EID {} " + "still active" ) . format ( lisp . green ( IIIIiiII111 , False ) ) )
                    if 97 - 97: I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
                    continue
                if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
            if 83 - 83: I11i / I1IiiI
            if 34 - 34: IiII
            if 57 - 57: oO0o . I11i . i1IIi
            if 42 - 42: I11i + I1ii11iIi11i % O0
            if 6 - 6: oO0o
            # Tell the lisp-etr process (via IPC) to forget this idle EID.
            oOOo0oOo0 = OO0OoO0o00 . dynamic_eid . print_address ( )
            II = "learn%{}%None" . format ( oOOo0oOo0 )
            II = lisp . lisp_command_ipc ( II , "lisp-itr" )
            lisp . lisp_ipc ( II , lisp_socket , "lisp-etr" )
            if 60 - 60: I1IiiI
            lisp . lprint ( "Dynamic-EID {}" . format ( lisp . bold ( lisp . green ( oOOo0oOo0 , False ) + " activity timeout" ,
                False ) ) )
            Iiii . append ( oOOo0oOo0 )
        if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
        if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
        if 92 - 92: iII111i
        if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
        if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
        # Remove the timed-out entries collected above (outside the
        # iteration over dynamic_eids.values()).
        for oOOo0oOo0 in Iiii : Oo0ooOo0o . dynamic_eids . pop ( oOOo0oOo0 )
    if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
    if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
    if 51 - 51: O0 + iII111i
    if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
    if 48 - 48: O0
    # Re-arm the periodic scan.
    threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
        OO0oOoOO0oOO0 , [ lisp_socket ] ) . start ( )
    return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
def oOooOo0 ( ) :
    """Return the list of local interface names (obfuscated lispers.net ITR).

    On macOS a fixed list is returned.  Otherwise the names are scraped
    from 'ifconfig' output, first assuming the Linux "Link encap" format
    and falling back to the BSD ": flags=" format.  Uses the Python 2
    'commands' module — NOTE(review): this will not run on Python 3.
    """
    if ( lisp . lisp_is_macos ( ) ) : return ( [ "en0" , "en1" , "lo0" ] )
    if 38 - 38: I1Ii111
    if 84 - 84: iIii1I11I1II1 % iII111i / iIii1I11I1II1 % I11i
    if 45 - 45: O0
    if 26 - 26: I11i - iIii1I11I1II1 - I1IiiI / OoO0O00 . OoOoOO00 % iIii1I11I1II1
    # Marker string that follows each interface name in ifconfig output.
    OO = "Link encap"
    iIiIIi1 = commands . getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
    if ( iIiIIi1 == "" ) :
        # No Linux-style output; retry with the BSD-style marker.
        OO = ": flags="
        iIiIIi1 = commands . getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
    if 7 - 7: ooOoO0o - Oo0Ooo - oO0o + ooOoO0o
    if 26 - 26: Ii1I
    iIiIIi1 = iIiIIi1 . split ( "\n" )
    if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
    # Everything before the marker (minus spaces) is the interface name.
    I1i1Iiiii = [ ]
    for OOo0oO00ooO00 in iIiIIi1 :
        oOO0O00oO0Ooo = OOo0oO00ooO00 . split ( OO ) [ 0 ] . replace ( " " , "" )
        I1i1Iiiii . append ( oOO0O00oO0Ooo )
    if 67 - 67: OoO0O00 - OOooOOo
    return ( I1i1Iiiii )
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
def I111I1Iiii1i ( ) :
    """ITR process startup (obfuscated lispers.net ITR).

    Discovers local interfaces/addresses, opens the send/listen/raw
    sockets, starts the packet-capture setup thread, restores checkpoint
    state, and arms the periodic timers.  Returns True on success, False
    when local addresses cannot be determined.  The 'if N - N:'
    statements are obfuscator dead-code filler, preserved unchanged.
    """
    global II1iII1i
    global oO0oIIII
    global Oo0oO0oo0oO00
    global i111I
    global II1Ii1iI1i
    global Ii1IIii11 , Oooo0000
    if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
    lisp . lisp_i_am ( "itr" )
    lisp . lisp_set_exception ( )
    lisp . lisp_print_banner ( "ITR starting up" )
    if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
    if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
    if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
    if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
    # Discover local interfaces/MACs; abort startup when addresses fail.
    lisp . lisp_get_local_interfaces ( )
    lisp . lisp_get_local_macs ( )
    if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
    if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
    if 63 - 63: OoOoOO00 * iII111i
    if 69 - 69: O0 . OoO0O00
    if 49 - 49: I1IiiI - I11i
    # IPv4/IPv6 send sockets plus the "lisp-itr" / "lispers.net-itr"
    # listen sockets; the IPC socket doubles as II1iII1i[2].
    II1iII1i [ 0 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV4 )
    II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
    oO0oIIII = lisp . lisp_open_listen_socket ( "" , "lisp-itr" )
    Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
    II1iII1i [ 2 ] = oO0oIIII
    # Raspbian only supports the IPv4 wildcard bind here.
    OoOOoOooooOOo = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
    i111I = lisp . lisp_open_listen_socket ( OoOOoOooooOOo ,
        str ( iiI1iIiI ) )
    if 87 - 87: I1IiiI
    if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
    if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
    if 97 - 97: O0 + OoOoOO00
    # Second ephemeral-port listen socket, IPv4 only.
    II1Ii1iI1i = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
        str ( OOo ) )
    if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
    if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
    if 77 - 77: OOooOOo * iIii1I11I1II1
    if 98 - 98: I1IiiI % Ii1I * OoooooooOO
    # Raw IPv4 socket with caller-supplied IP headers (IP_HDRINCL).
    Ii1IIii11 = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
        socket . IPPROTO_RAW )
    Ii1IIii11 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
    if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
    # Raw IPv6 UDP socket is unavailable on Raspbian.
    if ( lisp . lisp_is_raspbian ( ) == False ) :
        Oooo0000 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
            socket . IPPROTO_UDP )
    if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
    if 71 - 71: Oo0Ooo % OOooOOo
    if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
    if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
    if 69 - 69: I1Ii111
    if 11 - 11: I1IiiI
    if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
    if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
    lisp . lisp_ipc_socket = oO0oIIII
    if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
    if 71 - 71: I1Ii111 + Ii1I
    if 28 - 28: OOooOOo
    if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
    # Start the capture/setup thread (OoOOo0OOoO defined below).
    threading . Thread ( target = OoOOo0OOoO ) . start ( )
    if 72 - 72: Ii1I
    if 1 - 1: OoO0O00 * IiII * OoooooooOO + ooOoO0o
    if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
    if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
    # Restore persisted map-cache state.
    lisp . lisp_load_checkpoint ( )
    if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
    if 26 - 26: Ii1I % I1ii11iIi11i
    if 76 - 76: IiII * iII111i
    if 52 - 52: OOooOOo
    lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
    if 19 - 19: I1IiiI
    if 25 - 25: Ii1I / ooOoO0o
    if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
    if 71 - 71: I1Ii111 . II111iiii
    # Arm the 60-second housekeeping timer (oo0 above).
    # NOTE(review): 'i11' is not declared global in this function, so this
    # assignment is local — confirm intended.
    i11 = threading . Timer ( 60 , oo0 ,
        [ II1iII1i , iiI1iIiI ] )
    i11 . start ( )
    if 62 - 62: OoooooooOO . I11i
    if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
    if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
    if 58 - 58: I1IiiI
    # Arm the dynamic-EID timeout scan (OO0oOoOO0oOO0 above).
    threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
        OO0oOoOO0oOO0 , [ oO0oIIII ] ) . start ( )
    return ( True )
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
def o0ooooO0o0O():
    """Count 'prefix {' sub-clauses inside 'lisp database-mapping' clauses.

    Scans ./lisp.config and returns the number of configured
    database-mapping EID-prefixes, so startup can wait until the
    configuration has been fully processed.
    """
    prefix_count = 0
    inside_clause = False
    with open("./lisp.config", "r") as config_file:
        for config_line in config_file:
            # Track whether we are inside a 'lisp database-mapping { ... }'
            # clause (the open/close lines are matched exactly).
            if config_line == "lisp database-mapping {\n":
                inside_clause = True
            elif config_line == "}\n":
                inside_clause = False
            if not inside_clause:
                continue
            # Indented lines containing 'prefix {' open one EID-prefix.
            if config_line.startswith(" ") and "prefix {" in config_line:
                prefix_count += 1
    return prefix_count
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
def O00oOOooo ( ) :
    """Wait for all configured database-mappings, then list the EIDs.

    Polls until lisp.lisp_db_list contains as many entries as
    ./lisp.config declares (counted by o0ooooO0o0O above), sleeping
    LISP_ITR_WAIT_TIME seconds (default 1) between checks.  Returns a
    tuple (all EID prefixes, dynamic-EID prefixes) for IPv4/IPv6/MAC
    EIDs.  The 'if N - N:' statements are obfuscator dead-code filler,
    preserved unchanged.
    """
    if 50 - 50: I1ii11iIi11i % O0 * o0oOOo0O0Ooo
    if 5 - 5: IiII * OoOoOO00
    if 5 - 5: I1Ii111
    if 90 - 90: I1Ii111 . ooOoO0o / Ii1I - I11i
    if 40 - 40: OoooooooOO
    # Number of EID-prefixes the configuration file says to expect.
    iI1i111I1Ii = o0ooooO0o0O ( )
    if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
    if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
    if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
    if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
    if 19 - 19: OoO0O00 - Oo0Ooo . O0
    if 60 - 60: II111iiii + Oo0Ooo
    # Poll interval override; defaults to 1 second.
    I1IiIiiIiIII = os . getenv ( "LISP_ITR_WAIT_TIME" )
    I1IiIiiIiIII = 1 if ( I1IiIiiIiIII == None ) else int ( I1IiIiiIiIII )
    if 8 - 8: oO0o / I1ii11iIi11i
    if 20 - 20: I1IiiI
    if 95 - 95: iII111i - I1IiiI
    if 34 - 34: ooOoO0o * I1IiiI . i1IIi * ooOoO0o / ooOoO0o
    if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
    # Block until the config processor has populated lisp_db_list.
    while ( iI1i111I1Ii != len ( lisp . lisp_db_list ) ) :
        lisp . lprint ( ( "Waiting {} second(s) for {} database-mapping EID-" + "prefixes, {} processed so far ..." ) . format ( I1IiIiiIiIII , iI1i111I1Ii ,
            len ( lisp . lisp_db_list ) ) )
        time . sleep ( I1IiIiiIiIII )
    if 70 - 70: OoO0O00 % oO0o + OOooOOo / Ii1I % O0
    if 100 - 100: o0oOOo0O0Ooo + OOooOOo * o0oOOo0O0Ooo
    if 80 - 80: o0oOOo0O0Ooo * O0 - Ii1I
    if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
    if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
    if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
    # Collect printable prefixes; dynamic-EIDs are reported separately.
    oOoOO = [ ]
    Ii1i1 = [ ]
    for Oo0ooOo0o in lisp . lisp_db_list :
        if ( Oo0ooOo0o . eid . is_ipv4 ( ) or Oo0ooOo0o . eid . is_ipv6 ( ) or Oo0ooOo0o . eid . is_mac ( ) ) :
            oOOo0oOo0 = Oo0ooOo0o . eid . print_prefix_no_iid ( )
            if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) : Ii1i1 . append ( oOOo0oOo0 )
            oOoOO . append ( oOOo0oOo0 )
    if 65 - 65: ooOoO0o . OoooooooOO / I1ii11iIi11i . i1IIi * OoO0O00
    if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
    return ( oOoOO , Ii1i1 )
def OoOOo0OOoO():
    """
    Start the ITR packet-capture threads.

    Builds the pcap filter (control-plane-only when an external data-plane
    is handling data packets), installs Linux kernel filters, and spawns one
    capture thread per selected interface, plus RLOC-probe-reply capture
    threads on the remaining RLOC-facing interfaces.
    """
    global i1iIIi1

    lisp.lisp_set_exception()

    # EID-prefixes from the database-mappings; the second list holds the
    # dynamic-EID prefixes.
    sources, dyn_eids = O00oOOooo()

    # When another data-plane is running we only need control-plane traffic:
    # RLOC-probe replies and decent multicast Map-Requests.
    cp_pfilter = None
    if (lisp.lisp_ipc_data_plane):
        lisp.lprint(lisp.bold("Data-plane packet capture disabled", False))
        cp_pfilter = "(udp src port 4342 and ip[28] == 0x28)" + \
            " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
        lisp.lprint("Control-plane capture: '{}'".format(cp_pfilter))
    else:
        lisp.lprint("Capturing packets for source-EIDs {}".format(
            lisp.green(str(sources), False)))
    if (lisp.lisp_pitr): lisp.lprint("Configured for PITR functionality")

    # Kernel filters only apply to L3 overlays on Linux.
    l2_overlay = lisp.lisp_l2_overlay
    if (l2_overlay == False):
        if (lisp.lisp_is_linux()): OO0oOOoo(sources, dyn_eids)

    # Choose the filter each capture thread will use.
    if (cp_pfilter == None):
        if (lisp.lisp_pitr):
            pfilter = o00oOo0oOoo(sources, [], False, True)
        else:
            pfilter = o00oOo0oOoo(sources, dyn_eids, l2_overlay, False)
    else:
        pfilter = cp_pfilter

    # Honor LISP_PCAP_LIST when the user restricted the interface set; the
    # symmetric difference is the set of RLOC-only interfaces.
    interfaces = oOooOo0()
    user_list = os.getenv("LISP_PCAP_LIST")
    if (user_list == None):
        prefix = ""
        rloc_interfaces = []
    else:
        eid_interfaces = list(set(user_list.split()) & set(interfaces))
        rloc_interfaces = list(set(user_list.split()) ^ set(interfaces))
        prefix = "user-selected "
        lisp.lprint("User pcap-list: {}, active-interfaces: {}".format(
            user_list, interfaces))
        interfaces = eid_interfaces

    # Never capture on loopback/tun devices when MAC filters are present.
    mac_filter = (pfilter.find("ether host") != -1)
    for device in interfaces:
        if (device in ["lo", "lispers.net"] and mac_filter):
            lisp.lprint(("Capturing suppressed on interface {}, " + \
                "MAC filters configured").format(device))
            continue

        thread_args = [device, pfilter, i1iIIi1]
        lisp.lprint("Capturing packets on {}interface {}".format(prefix,
            device))
        threading.Thread(target=Oo0000oOo, args=thread_args).start()

    # Control-plane-only capture needs no extra probe-reply threads.
    if (cp_pfilter): return

    # Capture RLOC-probe replies on interfaces that carry no EIDs.
    probe_filter = "(udp src port 4342 and ip[28] == 0x28)"
    for device in rloc_interfaces:
        thread_args = [device, probe_filter, i1iIIi1]
        lisp.lprint("Capture RLOC-probe replies on RLOC interface {}".format(
            device))
        threading.Thread(target=Oo0000oOo, args=thread_args).start()
    return
def II1Ii11I111I():
    """
    ITR shutdown: stop the periodic Info-Request timer and close every
    socket this process opened.
    """
    # Stop the Info-Request retransmission timer if it is running.
    if (I11): I11.cancel()

    # Close all sockets; only the last two were registered under a name.
    close_list = [
        (II1iII1i[0], ""),
        (II1iII1i[1], ""),
        (i111I, ""),
        (II1Ii1iI1i, ""),
        (oO0oIIII, "lisp-itr"),
        (Oo0oO0oo0oO00, "lispers.net-itr"),
    ]
    for sock, name in close_list:
        lisp.lisp_close_socket(sock, name)
    return
def O0oOo00o0(packet, device, input_interface, macs, my_sa):
    """
    Data-plane input processing for one captured packet.

    Demuxes RLOC-probe replies to the control-plane, decodes the inner
    header, performs source-EID checks (including dynamic-EID discovery),
    looks up the map-cache, and then natively forwards, encapsulates to the
    selected RLOC, or replicates over an RLE.

    'macs' is a display string of the frame's MAC addresses and 'my_sa'
    indicates the frame's destination MAC was multicast/ours.
    """
    global II1iII1i
    global iiI1iIiI
    global Ii1IIii11, Oooo0000
    global oO0oIIII

    # RLOC-probe replies arrive on the pcap interface; hand them to the
    # control-plane. lisp_is_rloc_probe() returns a different buffer object
    # when the packet was a probe.
    raw_frame = packet
    packet, probe_source, probe_port, probe_ttl = \
        lisp.lisp_is_rloc_probe(packet, 1)
    if (raw_frame != packet):
        if (probe_source == None): return
        lisp.lisp_parse_packet(II1iII1i, packet, probe_source, probe_port,
            probe_ttl)
        return

    packet = lisp.lisp_packet(packet)
    if (packet.decode(False, None, None) == None): return

    # For a multicast-destined frame, trust the pcap device as the input
    # interface.
    if (my_sa): input_interface = device

    # Stamp the instance-ID configured on the receiving interface.
    source_eid = packet.inner_source
    iid = lisp.lisp_get_interface_instance_id(input_interface, source_eid)
    packet.inner_dest.instance_id = iid
    packet.inner_source.instance_id = iid

    if (macs != ""): macs = ", MACs: " + macs + ","
    packet.print_packet("Receive {}{}".format(device, macs), False)

    # Frame arrived on an interface whose MAC is not ours.
    if (device != input_interface and device != "lispers.net"):
        lisp.dprint("Not our MAC address on interface {}, pcap interface {}".format(input_interface, device))
        return

    # LISP-Decent push: only locally sourced multicasts bypass the
    # source-EID database check.
    decent_push = lisp.lisp_decent_push_configured
    if (decent_push):
        to_multicast = packet.inner_dest.is_multicast_address()
        from_local = packet.inner_source.is_local()
        decent_push = (from_local and to_multicast)

    if (decent_push == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
        if (db == None):
            lisp.dprint("Packet received from non-EID source")
            return

        # Glean dynamic-EIDs when the database-mapping asks for it.
        if (db.dynamic_eid_configured()):
            allowed_interface = lisp.lisp_allow_dynamic_eid(input_interface,
                packet.inner_source)
            if (allowed_interface):
                lisp.lisp_itr_discover_eid(db, packet.inner_source,
                    input_interface, allowed_interface, oO0oIIII)
            else:
                eid_str = lisp.green(packet.inner_source.print_address(),
                    False)
                lisp.dprint("Disallow dynamic-EID {} on interface {}".format(
                    eid_str, input_interface))
                return

    # Locally sourced control messages are not for the data-plane.
    if (packet.inner_source.is_local() and
        packet.udp_dport == lisp.LISP_CTRL_PORT): return

    # Per address-family inner-header input checks and TTL decrement.
    if (packet.inner_version == 4):
        igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        packet.inner_ttl -= 1
    elif (packet.inner_version == 6):
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl -= 1
    else:
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        packet.encap_port = lisp.LISP_L2_DATA_PORT

    # Destination matching one of our own EID-prefixes is routed natively.
    # NOTE(review): 'dynamic_eid_configured' is compared to False without
    # being called, so the bound method is never == False and this branch
    # can never be taken — reproduced from the original, confirm upstream
    # intent before changing.
    if (oo0O000OoO == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db and db.dynamic_eid_configured == False):
            lisp.dprint(("Packet destined to local EID-prefix {}, " + \
                "natively forwarding").format(db.print_eid_tuple()))
            packet.send_packet(Ii1IIii11, packet.inner_dest)
            return

    mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
    if (mc): mc.add_recent_source(packet.inner_source)

    # Retry the lookup in the secondary instance-ID when the primary entry
    # says native-forward.
    secondary_iid = db.secondary_iid if (db != None) else None
    if (secondary_iid and mc and
        mc.action == lisp.LISP_NATIVE_FORWARD_ACTION):
        dest = packet.inner_dest
        dest.instance_id = secondary_iid
        mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest)
        if (mc): mc.add_recent_source(packet.inner_source)

    # Map-cache miss (or explicit send-map-request action): rate-limited
    # Map-Request and drop.
    if (mc == None or mc.action == lisp.LISP_SEND_MAP_REQUEST_ACTION):
        if (lisp.lisp_rate_limit_map_request(packet.inner_source,
            packet.inner_dest)): return
        lisp.lisp_send_map_request(II1iII1i, iiI1iIiI,
            packet.inner_source, packet.inner_dest, None)
        if (packet.is_trace()):
            lisp.lisp_trace_append(packet, reason="map-cache miss")
        return

    # Refresh an active entry whose TTL elapsed, but keep forwarding on the
    # stale entry in the meantime.
    if (mc and mc.is_active() and mc.has_ttl_elapsed()):
        lisp.lprint("Refresh map-cache entry {}".format(
            lisp.green(mc.print_eid_tuple(), False)))
        lisp.lisp_send_map_request(II1iII1i, iiI1iIiI,
            packet.inner_source, packet.inner_dest, None)

    mc.stats.increment(len(packet.packet))

    # Select an RLOC, or an RLE for multicast replication.
    dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
        mc.select_rloc(packet, oO0oIIII)

    if (dest_rloc == None and rle == None):
        if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
            lisp.dprint("Natively forwarding")
            packet.send_packet(Ii1IIii11, packet.inner_dest)
            if (packet.is_trace()):
                lisp.lisp_trace_append(packet, reason="not an EID")
            return

        reason = "No reachable RLOCs found"
        lisp.dprint(reason)
        if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=reason)
        return

    if (dest_rloc and dest_rloc.is_null()):
        reason = "Drop action RLOC found"
        lisp.dprint(reason)
        if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=reason)
        return

    # Outer header inherits the inner ToS; TTL pinned to 32 for IGMP.
    # NOTE(review): 'igmp' is only bound on the IPv4 path above; the IPv6
    # and MAC paths reach this line with it unbound, exactly as in the
    # original — confirm against upstream before changing.
    packet.outer_tos = packet.inner_tos
    packet.outer_ttl = 32 if (igmp) else packet.inner_ttl

    if (dest_rloc):

        # Unicast encapsulation to the selected RLOC.
        packet.outer_dest.copy_address(dest_rloc)
        version = packet.outer_dest.afi_to_version()
        packet.outer_version = version
        source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
            lisp.lisp_myrlocs[1]
        packet.outer_source.copy_address(source_rloc)

        if (packet.is_trace()):
            if (lisp.lisp_trace_append(packet,
                rloc_entry=rloc_entry) == False): return

        if (packet.encode(nonce) == None): return
        if (len(packet.packet) <= 1500): packet.print_packet("Send", True)

        # IPv6 RLOCs need the IPv6 raw socket.
        raw_socket = Oooo0000 if version == 6 else Ii1IIii11
        packet.send_packet(raw_socket, packet.outer_dest)

    elif (rle):

        # Replicate to each RLE node at the first (lowest) level only.
        level = rle.rle_nodes[0].level
        orig_len = len(packet.packet)
        for node in rle.rle_forwarding_list:
            if (node.level != level): return

            packet.outer_dest.copy_address(node.address)
            if (decent_push): packet.inner_dest.instance_id = 0xffffff
            version = packet.outer_dest.afi_to_version()
            packet.outer_version = version
            source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
                lisp.lisp_myrlocs[1]
            packet.outer_source.copy_address(source_rloc)

            if (packet.is_trace()):
                if (lisp.lisp_trace_append(packet) == False): return

            if (packet.encode(None) == None): return

            packet.print_packet("Replicate-to-L{}".format(node.level), True)
            packet.send_packet(Ii1IIii11, packet.outer_dest)

            # Strip the prepended outer headers for the next replication.
            strip_len = len(packet.packet) - orig_len
            packet.packet = packet.packet[strip_len::]

    del(packet)
    return
def ii1iIIiii1(device, not_used, packet):
    """
    pcap callback: strip the link-layer header and hand the IP packet to
    the data-plane input routine.
    """
    # Link-header length: 4 on BSD loopback, 0 on the lispers.net tun
    # device, 14 for ethernet.
    offset = 4 if device == "lo0" else 0 if device == "lispers.net" else 14

    if (lisp.lisp_frame_logging):
        title = lisp.bold("Received frame on interface '{}'".format(device),
            False)
        frame = lisp.lisp_format_packet(packet[0:64])
        lisp.lprint("{}: {}".format(title, frame))

    # For ethernet frames, resolve the logical input interface and format
    # the MAC addresses for display.
    macs = ""
    my_sa = False
    interface = device
    if (offset == 14):
        interfaces, sa, da, my_sa = lisp.lisp_get_input_interface(packet)
        interface = device if (device in interfaces) else interfaces[0]
        macs = lisp.lisp_format_macs(sa, da)
        if (interface.find("vlan") != -1): offset += 4

        # Low-order bit of the destination MAC's first byte set means a
        # multicast destination — treat as sent-to-us.
        if (int(da[1], 16) & 1): my_sa = True

    # Inspect the ethertype: step over an 802.1Q tag, drop ARP outright.
    if (offset != 0):
        ethertype = struct.unpack("H", packet[offset - 2:offset])[0]
        ethertype = socket.ntohs(ethertype)
        if (ethertype == 0x8100):
            vlan = struct.unpack("I", packet[offset:offset + 4])[0]
            vlan = socket.ntohl(vlan)
            interface = "vlan" + str(vlan >> 16)
            offset += 4
        elif (ethertype == 0x806):
            lisp.dprint("Dropping ARP packets, host should have default route")
            return

    # L2 overlays pass the whole frame to the data-plane.
    if (lisp.lisp_l2_overlay): offset = 0

    O0oOo00o0(packet[offset::], device, interface, macs, my_sa)
    return
def OO0oOOoo(sources, dyn_eids):
    """
    Install iptables/ip6tables rules in the raw table so the kernel drops
    EID-sourced packets (we capture and encapsulate them ourselves) while
    still accepting loopback, link-local, multicast, our own addresses and
    EID-to-EID traffic.
    """
    if (os.getenv("LISP_NO_IPTABLES") != None):
        lisp.lprint("User selected to suppress installing iptables rules")
        return

    # Create the 'lisp' chain and jump to it from PREROUTING.
    os.system("sudo iptables -t raw -N lisp")
    os.system("sudo iptables -t raw -A PREROUTING -j lisp")
    os.system("sudo ip6tables -t raw -N lisp")
    os.system("sudo ip6tables -t raw -A PREROUTING -j lisp")

    # Accept loopback, multicast/link-local, our EIDs and local addresses.
    accept = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
    allow_list = ["127.0.0.1", "::1", "224.0.0.0/4 -p igmp", "ff00::/8",
        "fe80::/16"]
    allow_list += sources + lisp.lisp_get_all_addresses()
    for addr in allow_list:
        if (lisp.lisp_is_mac_string(addr)): continue
        six = "" if addr.find(":") == -1 else "6"
        os.system(accept.format(six, addr))

    # Accept EID-to-EID flows (skipped for PITRs and for dynamic-EIDs),
    # matching address-families only.
    if (lisp.lisp_pitr == False):
        accept = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
        check = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
        for src in sources:
            if (lisp.lisp_is_mac_string(src)): continue
            if (src in dyn_eids): continue
            six = "" if src.find(":") == -1 else "6"
            for dst in sources:
                if (lisp.lisp_is_mac_string(dst)): continue
                if (dst in dyn_eids): continue
                if (dst.find(".") != -1 and src.find(".") == -1): continue
                if (dst.find(":") != -1 and src.find(":") == -1): continue

                # Empty '-C' output means the rule already exists.
                if (commands.getoutput(check.format(six, src, dst)) == ""):
                    continue

                os.system(accept.format(six, src, dst))

    # Every other packet sourced by an EID is dropped by the kernel.
    drop = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
    for src in sources:
        if (lisp.lisp_is_mac_string(src)): continue
        six = "" if src.find(":") == -1 else "6"
        os.system(drop.format(six, src))

    # Log the rules actually installed.
    rules = commands.getoutput("sudo iptables -t raw -S lisp").split("\n")
    rules += commands.getoutput("sudo ip6tables -t raw -S lisp").split("\n")
    lisp.lprint("Using kernel filters: {}".format(rules))

    # Workaround for the virtio checksum-offload bug: have netfilter fill
    # in TCP/UDP checksums on the way out.
    if (os.getenv("LISP_VIRTIO_BUG") != None):
        fill = ("sudo iptables -A POSTROUTING -t mangle -p tcp -j " + \
            "CHECKSUM --checksum-fill; ")
        fill += ("sudo iptables -A POSTROUTING -t mangle -p udp -j " + \
            "CHECKSUM --checksum-fill; ")
        fill += ("sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + \
            "CHECKSUM --checksum-fill; ")
        fill += ("sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + \
            "CHECKSUM --checksum-fill")
        os.system(fill)
        virtio = lisp.bold("virtio", False)
        lisp.lprint("{} bug workaround, configure '{}'".format(virtio, fill))
    return
def o00oOo0oOoo(sources, dyn_eids, l2_overlay, pitr):
    """
    Build the tcpdump-style pcap filter string for the ITR.

    Captures packets sourced by our EID-prefixes (MAC prefixes become
    'ether host' clauses), excludes packets destined to our own prefixes
    and local addresses, and always admits RLOC-probe replies and
    LISP-Decent multicast Map-Requests. A PITR drops the destination
    exclusions. Returns the filter string.
    """
    if (l2_overlay):
        pfilter = "ether[6:4] >= 0 and ether[10:2] >= 0"
        lisp.lprint("Using pcap filter: '{}'".format(pfilter))
        return (pfilter)

    no_arp = "(not ether proto 0x806)"
    probe_clause = " or (udp src port 4342 and ip[28] == 0x28)"
    decent_clause = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"

    # Source clause covers all prefixes; the destination clause excludes
    # dynamic-EID prefixes (we must still capture packets toward them).
    src_clause = ""
    dst_clause = ""
    for source in sources:
        insert = source
        if (lisp.lisp_is_mac_string(source)):
            insert = source.split("/")[0]
            insert = insert.replace("-", "")
            pairs = [insert[i:i + 2] for i in range(0, 12, 2)]
            insert = "ether host " + ":".join(pairs)

        src_clause += "{}".format(insert)
        if (source not in dyn_eids): dst_clause += "{}".format(insert)
        if (sources[-1] == source): break
        src_clause += " or "
        if (source not in dyn_eids): dst_clause += " or "

    # The last prefix may have been dynamic: trim a dangling " or ".
    if (dst_clause[-4::] == " or "): dst_clause = dst_clause[0:-4]

    # With lisp-nat enabled, keep capturing packets addressed to the
    # loopback translation address.
    nat_line = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
    lisp_nat = (nat_line != "" and nat_line[0] == " ")
    loopback = lisp.lisp_get_loopback_address() if (lisp_nat) else None

    addr_clause = ""
    addresses = lisp.lisp_get_all_addresses()
    for addr in addresses:
        if (addr == loopback): continue
        addr_clause += "{}".format(addr)
        if (addresses[-1] == addr): break
        addr_clause += " or "

    if (src_clause != ""):
        src_clause = " and (src net {})".format(src_clause)
    if (dst_clause != ""):
        dst_clause = " and not (dst net {})".format(dst_clause)
    if (addr_clause != ""):
        addr_clause = " and not (dst host {})".format(addr_clause)

    # A PITR captures everything not sourced by us: no destination
    # restrictions at all.
    if (pitr):
        dst_clause = ""
        addr_clause = addr_clause.replace("dst ", "")

    pfilter = no_arp + src_clause + dst_clause + addr_clause
    pfilter += probe_clause
    pfilter += decent_clause

    lisp.lprint("Using pcap filter: '{}'".format(pfilter))
    return (pfilter)
# Packet-capture thread entry point: open a live pcap session on `device`,
# install the BPF filter string `pfilter`, and hand every captured packet to
# the module-level callback ii1iIIiii1.  Never returns in normal operation.
def Oo0000oOo ( device , pfilter , pcap_lock ) :
    lisp . lisp_set_exception ( )
    if 68 - 68: OOooOOo + oO0o . O0 . Ii1I % i1IIi % OOooOOo
    # Serialize open_live() across capture threads; snaplen 9000, no promisc,
    # 100 ms read timeout.
    pcap_lock . acquire ( )
    i1I1iI = pcappy . open_live ( device , 9000 , 0 , 100 )
    pcap_lock . release ( )
    if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
    i1I1iI . filter = pfilter
    # Loop forever (-1); the device name is passed through as user data.
    i1I1iI . loop ( - 1 , ii1iIIiii1 , device )
    return
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
# Periodic Info-Request timer expiry.  Sends Info-Request messages (used for
# NAT traversal) and re-arms itself to fire again after LISP_INFO_INTERVAL.
def oOOoOOO0oo0 ( ) :
    global I11
    global II1Ii1iI1i
    global II1iII1i
    if 87 - 87: ooOoO0o / OoOoOO00 % o0oOOo0O0Ooo * oO0o
    lisp . lisp_set_exception ( )
    if 77 - 77: oO0o - Oo0Ooo - iIii1I11I1II1
    if 16 - 16: OoO0O00 / iII111i / i1IIi . iII111i + oO0o
    if 26 - 26: iIii1I11I1II1 + i1IIi / OoOoOO00 % I1ii11iIi11i
    if 44 - 44: OoooooooOO . II111iiii . OOooOOo % OoooooooOO
    if 86 - 86: i11iIiiIii + O0 * IiII - OoO0O00 * OOooOOo + O0
    # Socket list for sending: ephemeral socket twice plus the IPC socket
    # (presumably [v4-send, v6-send, ipc] — confirm against lisp_receive users).
    Oo0 = [ II1Ii1iI1i , II1Ii1iI1i ,
        oO0oIIII ]
    lisp . lisp_build_info_requests ( Oo0 , None , lisp . LISP_CTRL_PORT )
    if 94 - 94: I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
    if 81 - 81: Oo0Ooo - I11i
    if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
    if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
    # Re-arm: cancel any pending timer before scheduling the next expiry.
    I11 . cancel ( )
    I11 = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
        oOOoOOO0oo0 , [ ] )
    I11 . start ( )
    return
if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
if 79 - 79: I1IiiI - ooOoO0o
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
if 83 - 83: IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
# Config handler for the "lisp map-resolver" clause.  Stores the resolver via
# lispconfig, kicks off a one-shot map-resolver reachability test if one is
# not already running, and triggers an immediate Info-Request cycle.
def O0000oO0o00 ( kv_pair ) :
    global II1iII1i
    global iiI1iIiI
    global I11
    if 80 - 80: OoooooooOO + IiII
    lispconfig . lisp_map_resolver_command ( kv_pair )
    if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
    # Start the test-MR timer only when no previous instance is alive.
    if ( lisp . lisp_test_mr_timer == None or
        lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
        lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
            [ II1iII1i , iiI1iIiI ] )
        lisp . lisp_test_mr_timer . start ( )
    if 43 - 43: Oo0Ooo . I1Ii111
    if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
    if 29 - 29: IiII . ooOoO0o - II111iiii
    if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
    if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
    # Fire the Info-Request timer immediately (delay 0).
    I11 = threading . Timer ( 0 , oOOoOOO0oo0 , [ ] )
    I11 . start ( )
    return
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
if 5 - 5: IiII
if 84 - 84: II111iiii * oO0o * II111iiii % IiII / I1IiiI
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
# Config handler for the "lisp database-mapping" clause; all parsing and
# state updates are delegated to the shared lispconfig helper.
def i1ii1iiIi1II ( kv_pair ) :
    lispconfig . lisp_database_mapping_command ( kv_pair )
    return
if 98 - 98: OoO0O00 - Ii1I . IiII % i11iIiiIii
if 69 - 69: I1ii11iIi11i + iII111i * O0 . OOooOOo % OoOoOO00
if 96 - 96: ooOoO0o . ooOoO0o - I11i / I11i
if 96 - 96: i11iIiiIii / I1IiiI - O0 . ooOoO0o
if 39 - 39: ooOoO0o / O0 * IiII
if 17 - 17: Ii1I / iIii1I11I1II1 - OoO0O00 + I1IiiI % OOooOOo
if 14 - 14: o0oOOo0O0Ooo % IiII + I1ii11iIi11i + OoO0O00
if 76 - 76: OoO0O00 - i11iIiiIii + OoOoOO00 + OOooOOo / OoooooooOO
# Config handler for the "lisp xtr-parameters" clause.  Applies the generic
# xTR settings, then reacts to transitions of nat-traversal / rloc-probing by
# (re)starting the RLOC-probe timer, records the crypto ephemeral port the
# first time data-plane security is enabled, and pushes the updated logging
# parameters to the external data plane.
def IiI1Iii1 ( kv_pair ) :
    global i111I
    if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
    if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
    if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
    if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
    if 88 - 88: o0oOOo0O0Ooo
    # Snapshot the flags before the command runs so transitions are visible.
    iI11 = lisp . lisp_nat_traversal
    OO0O00O = lisp . lisp_rloc_probing
    if 31 - 31: i11iIiiIii
    if 12 - 12: ooOoO0o
    if 86 - 86: oO0o - OoO0O00
    if 63 - 63: I1IiiI / OoOoOO00 + OoooooooOO . I11i . ooOoO0o
    lispconfig . lisp_xtr_command ( kv_pair )
    if 48 - 48: i1IIi - iII111i - i11iIiiIii . I11i - iII111i * I11i
    if 60 - 60: OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
    if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
    if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
    # True when nat-traversal just turned on (with probing also enabled).
    Oo0O0000Oo00o = ( iI11 == False and lisp . lisp_nat_traversal and lisp . lisp_rloc_probing )
    if 20 - 20: OoO0O00 . I1IiiI * i11iIiiIii / i11iIiiIii
    # True when rloc-probing just turned on.
    o00 = ( OO0O00O == False and lisp . lisp_rloc_probing )
    if 4 - 4: OoO0O00
    # Probe-timer start delay: 1s for a probing transition, 5s for NAT.
    ooOO = 0
    if ( o00 ) : ooOO = 1
    if ( Oo0O0000Oo00o ) : ooOO = 5
    if 5 - 5: OoooooooOO / o0oOOo0O0Ooo % I11i % OoO0O00 * iII111i + iIii1I11I1II1
    if ( ooOO != 0 ) :
        I11iiI11iiI = [ i111I , i111I ]
        lisp . lisp_start_rloc_probe_timer ( ooOO , I11iiI11iiI )
    if 51 - 51: oO0o . iIii1I11I1II1 + OoO0O00 * Ii1I + i1IIi
    if 81 - 81: O0 - Ii1I + Oo0Ooo
    if 67 - 67: Ii1I
    if 43 - 43: OoO0O00 % OoO0O00
    if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
    if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
    if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
    # First time data-plane security comes up, latch our ephemeral port as
    # the lisp-crypto port and tell the external data plane about it.
    if ( lisp . lisp_crypto_ephem_port == None and lisp . lisp_data_plane_security ) :
        O00oooo00o0O = i111I . getsockname ( ) [ 1 ]
        lisp . lisp_crypto_ephem_port = O00oooo00o0O
        lisp . lprint ( "Use port {} for lisp-crypto packets" . format ( O00oooo00o0O ) )
        i1I111Ii = { "type" : "itr-crypto-port" , "port" : O00oooo00o0O }
        lisp . lisp_write_to_dp_socket ( i1I111Ii )
    if 31 - 31: I1IiiI
    if 73 - 73: ooOoO0o . O0 / o0oOOo0O0Ooo - OoooooooOO % i11iIiiIii
    if 80 - 80: Ii1I / ooOoO0o % O0 . Oo0Ooo
    if 63 - 63: OOooOOo . II111iiii . I11i
    if 46 - 46: ooOoO0o % IiII - o0oOOo0O0Ooo - Oo0Ooo - Ii1I / I11i
    # Propagate (possibly changed) logging settings to the data plane.
    lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
        lisp . lisp_data_plane_logging )
    return
if 68 - 68: i1IIi - I1ii11iIi11i / Oo0Ooo % I11i . iII111i
if 9 - 9: IiII
if 48 - 48: o0oOOo0O0Ooo + o0oOOo0O0Ooo - Oo0Ooo
if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o
if 83 - 83: iIii1I11I1II1
if 72 - 72: I11i
if 87 - 87: i1IIi
if 48 - 48: Oo0Ooo * oO0o * iIii1I11I1II1 + i11iIiiIii - OoooooooOO
if 38 - 38: OoOoOO00 / iIii1I11I1II1 % i11iIiiIii - IiII * iII111i / OoOoOO00
# Handle a "nonce%" IPC message of the form "<op>%<R-or-E>%<rloc>%<nonce-hex>".
# "R" records a received request-nonce and enters echo-nonce mode for the
# RLOC; "E" records a received echoed nonce and, when it matches the nonce we
# sent, leaves request-nonce mode.
def iIII11I1I1II ( ipc ) :
    ii1IIiII111I , O00OoOoO , ooO0o0oo , i1I1iIi1IiI = ipc . split ( "%" )
    i1I1iIi1IiI = int ( i1I1iIi1IiI , 16 )
    if 79 - 79: IiII % OoO0O00
    # Find (or create) the echo-nonce state for this RLOC address string.
    Oo0oOO = lisp . lisp_get_echo_nonce ( None , ooO0o0oo )
    if ( Oo0oOO == None ) : Oo0oOO = lisp . lisp_echo_nonce ( ooO0o0oo )
    if 86 - 86: iIii1I11I1II1 / O0
    if 17 - 17: II111iiii
    if 9 - 9: OoooooooOO + oO0o
    if 33 - 33: O0
    if 39 - 39: I1IiiI + Oo0Ooo
    if ( O00OoOoO == "R" ) :
        Oo0oOO . request_nonce_rcvd = i1I1iIi1IiI
        Oo0oOO . last_request_nonce_rcvd = lisp . lisp_get_timestamp ( )
        Oo0oOO . echo_nonce_sent = i1I1iIi1IiI
        Oo0oOO . last_new_echo_nonce_sent = lisp . lisp_get_timestamp ( )
        lisp . lprint ( "Start echo-nonce mode for {}, nonce 0x{}" . format ( lisp . red ( Oo0oOO . rloc_str , False ) , lisp . lisp_hex_string ( i1I1iIi1IiI ) ) )
    if 83 - 83: i1IIi
    if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
    if 49 - 49: IiII / ooOoO0o / OOooOOo
    if ( O00OoOoO == "E" ) :
        Oo0oOO . echo_nonce_rcvd = i1I1iIi1IiI
        Oo0oOO . last_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
        if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
        if ( Oo0oOO . request_nonce_sent == i1I1iIi1IiI ) :
            # The far side echoed exactly the nonce we requested — success.
            III1IiI1i1i = lisp . bold ( "echoed nonce" , False )
            lisp . lprint ( "Received {} {} from {}" . format ( III1IiI1i1i ,
                lisp . lisp_hex_string ( i1I1iIi1IiI ) ,
                lisp . red ( Oo0oOO . rloc_str , False ) ) )
            if 94 - 94: iII111i - Oo0Ooo + oO0o
            Oo0oOO . request_nonce_sent = None
            lisp . lprint ( "Stop request-nonce mode for {}" . format ( lisp . red ( Oo0oOO . rloc_str , False ) ) )
            if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
            Oo0oOO . last_good_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
        else :
            # Mismatch (or no request outstanding) — just log what we have.
            oO0o0Oo = "none"
            if ( Oo0oOO . request_nonce_sent ) :
                oO0o0Oo = lisp . lisp_hex_string ( Oo0oOO . request_nonce_sent )
            if 76 - 76: ooOoO0o / OoOoOO00 + I1ii11iIi11i
            lisp . lprint ( ( "Received echo-nonce 0x{} from {}, but request-" + "nonce is {}" ) . format ( lisp . lisp_hex_string ( i1I1iIi1IiI ) ,
                # OOooOOo
                lisp . red ( Oo0oOO . rloc_str , False ) , oO0o0Oo ) )
    if 65 - 65: OoOoOO00
    if 91 - 91: IiII + Ii1I % Ii1I - O0 - i11iIiiIii
    return
if 84 - 84: Oo0Ooo % iII111i % OoooooooOO + OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
# lisp-itr configuration/command dispatch table: maps each clause name from
# lisp.config (or the "show" CLI) to [handler-function, {keyword: spec}].
# A spec list is [takes-value-flag, <allowed keywords or numeric min/max>];
# an empty spec marks a grouping keyword (e.g. "rloc", "prefix").
# NOTE(review): exact spec semantics are defined in lispconfig — confirm there.
I1IiII1I1i1I1 = {
    "lisp xtr-parameters" : [ IiI1Iii1 , {
        "rloc-probing" : [ True , "yes" , "no" ] ,
        "nonce-echoing" : [ True , "yes" , "no" ] ,
        "data-plane-security" : [ True , "yes" , "no" ] ,
        "data-plane-logging" : [ True , "yes" , "no" ] ,
        "frame-logging" : [ True , "yes" , "no" ] ,
        "flow-logging" : [ True , "yes" , "no" ] ,
        "nat-traversal" : [ True , "yes" , "no" ] ,
        "checkpoint-map-cache" : [ True , "yes" , "no" ] ,
        "ipc-data-plane" : [ True , "yes" , "no" ] ,
        "decentralized-push-xtr" : [ True , "yes" , "no" ] ,
        "decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
        "decentralized-pull-xtr-dns-suffix" : [ True ] ,
        "register-reachable-rtrs" : [ True , "yes" , "no" ] ,
        "program-hardware" : [ True , "yes" , "no" ] } ] ,
    "lisp interface" : [ lispconfig . lisp_interface_command , {
        "interface-name" : [ True ] ,
        "device" : [ True ] ,
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "dynamic-eid" : [ True ] ,
        "multi-tenant-eid" : [ True ] ,
        "lisp-nat" : [ True , "yes" , "no" ] ,
        "dynamic-eid-device" : [ True ] ,
        "dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
    "lisp map-resolver" : [ O0000oO0o00 , {
        "mr-name" : [ True ] ,
        "ms-name" : [ True ] ,
        "dns-name" : [ True ] ,
        "address" : [ True ] } ] ,
    "lisp database-mapping" : [ i1ii1iiIi1II , {
        "prefix" : [ ] ,
        "mr-name" : [ True ] ,
        "ms-name" : [ True ] ,
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
        "eid-prefix" : [ True ] ,
        "group-prefix" : [ True ] ,
        "dynamic-eid" : [ True , "yes" , "no" ] ,
        "signature-eid" : [ True , "yes" , "no" ] ,
        "rloc" : [ ] ,
        "rloc-record-name" : [ True ] ,
        "elp-name" : [ True ] ,
        "geo-name" : [ True ] ,
        "rle-name" : [ True ] ,
        "json-name" : [ True ] ,
        "address" : [ True ] ,
        "interface" : [ True ] ,
        "priority" : [ True , 0 , 255 ] ,
        "weight" : [ True , 0 , 100 ] } ] ,
    "lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
        "prefix" : [ ] ,
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "eid-prefix" : [ True ] ,
        "group-prefix" : [ True ] ,
        "send-map-request" : [ True , "yes" , "no" ] ,
        "rloc" : [ ] ,
        "rloc-record-name" : [ True ] ,
        "rle-name" : [ True ] ,
        "elp-name" : [ True ] ,
        "address" : [ True ] ,
        "priority" : [ True , 0 , 255 ] ,
        "weight" : [ True , 0 , 100 ] } ] ,
    "lisp itr-map-cache" : [ lispconfig . lisp_map_cache_command , {
        "prefix" : [ ] ,
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "eid-prefix" : [ True ] ,
        "group-prefix" : [ True ] ,
        "rloc" : [ ] ,
        "rloc-record-name" : [ True ] ,
        "rle-name" : [ True ] ,
        "elp-name" : [ True ] ,
        "address" : [ True ] ,
        "priority" : [ True , 0 , 255 ] ,
        "weight" : [ True , 0 , 100 ] } ] ,
    "lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
        "elp-name" : [ False ] ,
        "elp-node" : [ ] ,
        "address" : [ True ] ,
        "probe" : [ True , "yes" , "no" ] ,
        "strict" : [ True , "yes" , "no" ] ,
        "eid" : [ True , "yes" , "no" ] } ] ,
    "lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
        "rle-name" : [ False ] ,
        "rle-node" : [ ] ,
        "address" : [ True ] ,
        "level" : [ True , 0 , 255 ] } ] ,
    "lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
        "geo-name" : [ False ] ,
        "geo-tag" : [ False ] } ] ,
    "show itr-map-cache" : [ IIiiIiI1 , { } ] ,
    "show itr-rloc-probing" : [ I1i1iii , { } ] ,
    "show itr-keys" : [ oo , { } ] ,
    "show itr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
if 28 - 28: Oo0Ooo + IiII % II111iiii / OoO0O00 + i11iIiiIii
if 20 - 20: I1ii11iIi11i
if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
# ---------------------------------------------------------------------------
# lisp-itr top level: start the process, then run the select() event loop
# over the IPC, command/API, ephemeral-control and punt sockets until one of
# them closes, then shut down cleanly.
# ---------------------------------------------------------------------------
if ( I111I1Iiii1i ( ) == False ) :
    lisp . lprint ( "lisp_itr_startup() failed" )
    lisp . lisp_print_banner ( "ITR abnormal exit" )
    exit ( 1 )
if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
# Sockets we wait on in the main loop.
o0Oo0oo = [ i111I , oO0oIIII ,
    II1Ii1iI1i , Oo0oO0oo0oO00 ]
if 44 - 44: I1IiiI % Ii1I * I1IiiI . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 6 - 6: IiII * OoooooooOO + I1Ii111 / Ii1I
if 35 - 35: ooOoO0o % I1IiiI - ooOoO0o - OoO0O00 - OoooooooOO
if 46 - 46: i1IIi . i1IIi . oO0o / I11i / ooOoO0o
Ii1Iiii = True
# Triplicated socket lists as expected by lisp.lisp_receive()/parse_packet().
Oo = [ i111I ] * 3
i1IIii11i1I1 = [ II1Ii1iI1i ] * 3
if 12 - 12: i1IIi / OOooOOo % ooOoO0o * IiII * O0 * iIii1I11I1II1
while ( True ) :
    # Exit the loop (and the process) if select() is interrupted.
    try : OOOO , oO , ii1IIiII111I = select . select ( o0Oo0oo , [ ] , [ ] )
    except : break
    if 19 - 19: I1IiiI % Ii1I . IiII * ooOoO0o
    if 89 - 89: OoOoOO00 . OOooOOo
    if 7 - 7: oO0o % OoOoOO00 - I1IiiI + Oo0Ooo
    if 70 - 70: II111iiii + I1Ii111 + i11iIiiIii - i1IIi / IiII
    # Punt-channel traffic from the external data plane.
    if ( lisp . lisp_ipc_data_plane and Oo0oO0oo0oO00 in OOOO ) :
        lisp . lisp_process_punt ( Oo0oO0oo0oO00 , II1iII1i ,
            iiI1iIiI )
    if 40 - 40: I1ii11iIi11i * I1Ii111
    if 38 - 38: O0 . Oo0Ooo + OoOoOO00 - oO0o
    if 43 - 43: iII111i + Oo0Ooo / OoooooooOO
    if 24 - 24: O0 + o0oOOo0O0Ooo * Ii1I - I1Ii111
    if 10 - 10: i11iIiiIii
    # Control packets arriving on the IPC listen socket.
    if ( i111I in OOOO ) :
        O00OoOoO , II11 , O00oooo00o0O , ii11iO000oo00OOOOO = lisp . lisp_receive ( Oo [ 0 ] ,
            False )
        if ( II11 == "" ) : break
        if 52 - 52: Oo0Ooo . I11i / o0oOOo0O0Ooo + Ii1I % I11i
        # RLOC-probe replies are handled via the pcap path instead.
        if ( lisp . lisp_is_rloc_probe_reply ( ii11iO000oo00OOOOO [ 0 ] ) ) :
            lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
            continue
        if 47 - 47: OoooooooOO / OOooOOo % OoO0O00 / Oo0Ooo - I1ii11iIi11i
        lisp . lisp_parse_packet ( Oo , ii11iO000oo00OOOOO , II11 , O00oooo00o0O )
    if 13 - 13: iII111i . I1IiiI * OOooOOo + Ii1I + I1IiiI - i11iIiiIii
    if 79 - 79: ooOoO0o . oO0o / oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
    if 19 - 19: I1ii11iIi11i
    if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
    if 66 - 66: O0
    # Control packets arriving on the ephemeral control socket.
    if ( II1Ii1iI1i in OOOO ) :
        O00OoOoO , II11 , O00oooo00o0O , ii11iO000oo00OOOOO = lisp . lisp_receive ( i1IIii11i1I1 [ 0 ] ,
            False )
        if ( II11 == "" ) : break
        if 52 - 52: OoO0O00 * OoooooooOO
        if ( lisp . lisp_is_rloc_probe_reply ( ii11iO000oo00OOOOO [ 0 ] ) ) :
            lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
            continue
        if 12 - 12: O0 + IiII * i1IIi . OoO0O00
        o0OO0oooo = lisp . lisp_parse_packet ( i1IIii11i1I1 , ii11iO000oo00OOOOO , II11 , O00oooo00o0O )
        if 40 - 40: I1Ii111 - OoOoOO00 * I11i - IiII / OoOoOO00
        if 71 - 71: oO0o / OoooooooOO % IiII / OoOoOO00 % I1Ii111
        if 19 - 19: I1Ii111 + IiII / oO0o / II111iiii
        if 92 - 92: i1IIi % ooOoO0o + ooOoO0o - iIii1I11I1II1 . Ii1I
        if 33 - 33: o0oOOo0O0Ooo / O0 + OOooOOo
        # A returned truthy value asks for an immediate RLOC-probe pass.
        if ( o0OO0oooo ) :
            I11iiI11iiI = [ i111I , i111I ]
            lisp . lisp_start_rloc_probe_timer ( 0 , I11iiI11iiI )
    if 75 - 75: IiII % i11iIiiIii + iIii1I11I1II1
    if 92 - 92: OoOoOO00 % O0
    if 55 - 55: iIii1I11I1II1 * iII111i
    if 85 - 85: iIii1I11I1II1 . II111iiii
    if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
    if 22 - 22: OOooOOo
    if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
    # Commands / API requests / punted data packets on the command socket.
    if ( oO0oIIII in OOOO ) :
        O00OoOoO , II11 , O00oooo00o0O , ii11iO000oo00OOOOO = lisp . lisp_receive ( oO0oIIII , True )
        if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
        if ( II11 == "" ) : break
        if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
        if ( O00OoOoO == "command" ) :
            if ( ii11iO000oo00OOOOO == "clear" ) :
                lisp . lisp_clear_map_cache ( )
                continue
            if 94 - 94: i1IIi
            if ( ii11iO000oo00OOOOO . find ( "nonce%" ) != - 1 ) :
                iIII11I1I1II ( ii11iO000oo00OOOOO )
                continue
            if 36 - 36: I1IiiI + Oo0Ooo
            lispconfig . lisp_process_command ( oO0oIIII , O00OoOoO ,
                ii11iO000oo00OOOOO , "lisp-itr" , [ I1IiII1I1i1I1 ] )
        elif ( O00OoOoO == "api" ) :
            lisp . lisp_process_api ( "lisp-itr" , oO0oIIII , ii11iO000oo00OOOOO )
        elif ( O00OoOoO == "data-packet" ) :
            O0oOo00o0 ( ii11iO000oo00OOOOO , "ipc" )
        else :
            if ( lisp . lisp_is_rloc_probe_reply ( ii11iO000oo00OOOOO [ 0 ] ) ) :
                lisp . lprint ( "ITR ignoring RLOC-probe request, using pcap" )
                continue
            if 46 - 46: iII111i
            lisp . lisp_parse_packet ( II1iII1i , ii11iO000oo00OOOOO , II11 , O00oooo00o0O )
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if 65 - 65: ooOoO0o - i1IIi
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
# Normal shutdown path.
II1Ii11I111I ( )
lisp . lisp_print_banner ( "ITR normal exit" )
exit ( 0 )
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
interactive.py | import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cliutils
from rasa.core import constants, run, train, utils
from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names
from rasa.core.channels.channel import UserMessage
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT,
DEFAULT_SERVER_PORT,
DEFAULT_SERVER_URL,
REQUESTED_SLOT,
UTTER_PREFIX,
)
from rasa.core.domain import Domain
import rasa.core.events
from rasa.core.events import (
ActionExecuted,
ActionReverted,
BotUttered,
Event,
Restarted,
UserUttered,
UserUtteranceReverted,
)
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter
from rasa.core.trackers import EventVerbosity, DialogueStateTracker
from rasa.core.training import visualization
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH,
visualize_neighborhood,
)
from rasa.core.utils import AvailableEndpoints
from rasa.utils.common import update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
# noinspection PyProtectedMember
from rasa.nlu.training_data import loading
from rasa.nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)

# Maximum number of turns kept in the story-graph visualization neighborhood
# (used when rendering training stories — TODO confirm against visualization).
MAX_VISUAL_HISTORY = 3

# Default export locations offered when the interactive session is saved.
PATHS = {
    "stories": "data/stories.md",
    "nlu": "data/nlu.md",
    "backup": "data/nlu_interactive.md",
    "domain": "domain.yml",
}

# Whether exported stories use the end-to-end evaluation format.
SAVE_IN_E2E = False

# choose other intent, making sure this doesn't clash with an existing intent
OTHER_INTENT = uuid.uuid4().hex
# Sentinel menu values for "pick another action" / "create a new action".
OTHER_ACTION = uuid.uuid4().hex
NEW_ACTION = uuid.uuid4().hex

# Utterance templates created during this session; written to the domain on exit.
NEW_TEMPLATES = {}
class RestartConversation(Exception):
    """Raised to abandon the current flow and start the conversation over."""
class ForkTracker(Exception):
    """Raised to stop the current flow and fork at an earlier step.

    The tracker gets rewound to the point the user picks, and the
    conversation resumes from there."""
class UndoLastStep(Exception):
    """Raised to stop the current flow and revert the last step.

    The last step is the most recent user message or the most recent
    bot action."""
class Abort(Exception):
    """Raised to leave interactive learning and terminate the session."""
async def send_message(
    endpoint: EndpointConfig,
    sender_id: Text,
    message: Text,
    parse_data: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
    """Post a user utterance to the conversation identified by `sender_id`."""
    subpath = "/conversations/{}/messages".format(sender_id)
    body = {
        "sender": UserUttered.type_name,
        "text": message,
        "parse_data": parse_data,
    }
    return await endpoint.request(json=body, method="post", subpath=subpath)
async def request_prediction(
    endpoint: EndpointConfig, sender_id: Text
) -> Dict[Text, Any]:
    """Ask core which action it would predict next for this conversation."""
    subpath = "/conversations/{}/predict".format(sender_id)
    return await endpoint.request(method="post", subpath=subpath)
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
    """Fetch the domain definition from core as JSON."""
    headers = {"Accept": "application/json"}
    return await endpoint.request(method="get", subpath="/domain", headers=headers)
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
    """Fetch core's current status."""
    return await endpoint.request(subpath="/status", method="get")
async def retrieve_tracker(
    endpoint: EndpointConfig,
    sender_id: Text,
    verbosity: EventVerbosity = EventVerbosity.ALL,
) -> Dict[Text, Any]:
    """Fetch a conversation tracker, including events per `verbosity`."""
    subpath = "/conversations/{}/tracker?include_events={}".format(
        sender_id, verbosity.name
    )
    headers = {"Accept": "application/json"}
    return await endpoint.request(method="get", subpath=subpath, headers=headers)
async def send_action(
    endpoint: EndpointConfig,
    sender_id: Text,
    action_name: Text,
    policy: Optional[Text] = None,
    confidence: Optional[float] = None,
    is_new_action: bool = False,
) -> Dict[Text, Any]:
    """Log an action to a conversation.

    Asks core to execute the action; if execution fails and the action was
    just created in this session, warn the user and fall back to logging a
    bare ActionExecuted event instead.  Re-raises on failure otherwise.
    """
    payload = ActionExecuted(action_name, policy, confidence).as_dict()

    subpath = "/conversations/{}/execute".format(sender_id)

    try:
        return await endpoint.request(json=payload, method="post", subpath=subpath)
    except ClientError:
        if is_new_action:
            if action_name in NEW_TEMPLATES:
                # The new action has a matching utterance template created in
                # this session; it will be persisted on save.
                warning_questions = questionary.confirm(
                    "WARNING: You have created a new action: '{0}', "
                    "with matching template: '{1}'. "
                    "This action will not return its message in this session, "
                    "but the new utterance will be saved to your domain file "
                    "when you exit and save this session. "
                    "You do not need to do anything further. "
                    "".format(action_name, [*NEW_TEMPLATES[action_name]][0])
                )
                await _ask_questions(warning_questions, sender_id, endpoint)
            else:
                # New custom action with no template — the user may need to
                # implement it in their action server.
                warning_questions = questionary.confirm(
                    "WARNING: You have created a new action: '{}', "
                    "which was not successfully executed. "
                    "If this action does not return any events, "
                    "you do not need to do anything. "
                    "If this is a custom action which returns events, "
                    "you are recommended to implement this action "
                    "in your action server and try again."
                    "".format(action_name)
                )
                await _ask_questions(warning_questions, sender_id, endpoint)

            # Record the action on the tracker anyway so the story stays intact.
            payload = ActionExecuted(action_name).as_dict()
            return await send_event(endpoint, sender_id, payload)
        else:
            logger.error("failed to execute action!")
            raise
async def send_event(
    endpoint: EndpointConfig,
    sender_id: Text,
    evt: Union[List[Dict[Text, Any]], Dict[Text, Any]],
) -> Dict[Text, Any]:
    """Append an event (or a list of events) to the conversation tracker."""
    return await endpoint.request(
        json=evt,
        method="post",
        subpath="/conversations/{}/tracker/events".format(sender_id),
    )
def format_bot_output(message: BotUttered) -> Text:
    """Render a bot message (text plus any attachments) for the history table."""
    data = message.data or {}
    text = message.text or ""

    # A plain text message needs no further decoration.
    if not data:
        return text

    # Collect one line per additional payload item, then join.
    pieces = [text]

    if data.get("image"):
        pieces.append("Image: " + data.get("image"))

    if data.get("attachment"):
        pieces.append("Attachment: " + data.get("attachment"))

    if data.get("buttons"):
        pieces.append("Buttons:")
        choices = cliutils.button_choices_from_message_data(
            data, allow_free_text_input=True
        )
        pieces.extend(choices)

    if data.get("elements"):
        pieces.append("Elements:")
        pieces.extend(
            cliutils.element_to_string(element, idx)
            for idx, element in enumerate(data.get("elements"))
        )

    if data.get("quick_replies"):
        pieces.append("Quick replies:")
        pieces.extend(
            cliutils.element_to_string(element, idx)
            for idx, element in enumerate(data.get("quick_replies"))
        )

    return "\n".join(pieces)
def latest_user_message(events: List[Dict[Text, Any]]) -> Optional[Dict[Text, Any]]:
    """Return the most recent user message event, or None if there is none."""
    # Fix: the original wrapped reversed() in enumerate() and never used the
    # index — iterate the reversed sequence directly.
    for event in reversed(events):
        if event.get("event") == UserUttered.type_name:
            return event
    return None
def all_events_before_latest_user_msg(
    events: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
    """Return all events that happened before the most recent user message."""
    # Walk backwards; `offset` counts how many trailing events to drop.
    for offset, event in enumerate(reversed(events), start=1):
        if event.get("event") == UserUttered.type_name:
            return events[:-offset]
    # No user message at all — keep everything.
    return events
async def _ask_questions(
    questions: Union[Form, Question],
    sender_id: Text,
    endpoint: EndpointConfig,
    is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False,
) -> Any:
    """Pose `questions` to the user; on Ctrl-C offer the exit menu instead."""
    answers = {}
    retry = True

    while retry:
        answers = questions.ask()
        if answers is not None and not is_abort(answers):
            # Got a usable answer — stop looping.
            retry = False
        else:
            # Cancelled (Ctrl-C) or aborted; exit menu decides whether to retry.
            retry = await _ask_if_quit(sender_id, endpoint)

    return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
""""Given a list of ML predictions create a UI choice list."""
sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"]))
choices = []
for p in sorted_intents:
name_with_confidence = "{:03.2f} {:40}".format(
p.get("confidence"), p.get("name")
)
choice = {"name": name_with_confidence, "value": p.get("name")}
choices.append(choice)
return choices
async def _request_free_text_intent(sender_id: Text, endpoint: EndpointConfig) -> Text:
    """Prompt the user to type a brand-new intent name."""
    prompt = questionary.text(
        message="Please type the intent name:",
        validate=io_utils.not_empty_validator("Please enter an intent name"),
    )
    return await _ask_questions(prompt, sender_id, endpoint)
async def _request_free_text_action(sender_id: Text, endpoint: EndpointConfig) -> Text:
    """Prompt the user to type a brand-new action name."""
    prompt = questionary.text(
        message="Please type the action name:",
        validate=io_utils.not_empty_validator("Please enter an action name"),
    )
    return await _ask_questions(prompt, sender_id, endpoint)
async def _request_free_text_utterance(
    sender_id: Text, endpoint: EndpointConfig, action: Text
) -> Text:
    """Prompt the user for the message text of a new utterance template."""
    prompt = questionary.text(
        message=(
            "Please type the message for your new utterance "
            "template '{}':".format(action)
        ),
        validate=io_utils.not_empty_validator("Please enter a template message"),
    )
    return await _ask_questions(prompt, sender_id, endpoint)
async def _request_selection_from_intents(
    intents: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
    """Let the user pick the correct intent from a choice list."""
    selection = questionary.select("What intent is it?", choices=intents)
    return await _ask_questions(selection, sender_id, endpoint)
async def _request_fork_point_from_list(
    forks: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
    """Let the user pick the user message at which to fork the conversation."""
    selection = questionary.select(
        "Before which user message do you want to fork?", choices=forks
    )
    return await _ask_questions(selection, sender_id, endpoint)
async def _request_fork_from_user(
    sender_id, endpoint
) -> Optional[List[Dict[Text, Any]]]:
    """Ask at which past user message the conversation should be forked.

    Returns the list of events to keep; the tracker is reset and the
    conversation continues from that earlier point.  None if no fork
    point was chosen."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
    events = tracker.get("events", [])

    # One choice per user utterance, most recent first.
    candidates = [
        {"name": e.get("text"), "value": i}
        for i, e in enumerate(events)
        if e.get("event") == UserUttered.type_name
    ]

    fork_idx = await _request_fork_point_from_list(
        list(reversed(candidates)), sender_id, endpoint
    )

    if fork_idx is None:
        return None
    return events[: int(fork_idx)]
async def _request_intent_from_user(
    latest_message, intents, sender_id, endpoint
) -> Dict[Text, Any]:
    """Ask which intent the latest user message should have had.

    Returns the intent dict the user selected."""
    predictions = latest_message.get("parse_data", {}).get("intent_ranking", [])

    # Make sure every known intent appears, even ones NLU did not rank.
    ranked_names = {p["name"] for p in predictions}
    for intent in intents:
        if intent not in ranked_names:
            predictions.append({"name": intent, "confidence": 0.0})

    # Offer a free-text "create new intent" entry on top of the ranked list.
    choices = [
        {"name": "<create_new_intent>", "value": OTHER_INTENT}
    ] + _selection_choices_from_intent_prediction(predictions)

    intent_name = await _request_selection_from_intents(choices, sender_id, endpoint)

    if intent_name != OTHER_INTENT:
        # Keep the originally predicted probability of the chosen intent.
        return next(
            (x for x in predictions if x["name"] == intent_name), {"name": None}
        )

    new_name = await _request_free_text_intent(sender_id, endpoint)
    return {"name": new_name, "confidence": 1.0}
async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None:
    """Show the conversation so far (chat table plus current slots)."""
    dump = await retrieve_tracker(
        endpoint, sender_id, EventVerbosity.AFTER_RESTART
    )

    history_table = _chat_history_table(dump.get("events", []))
    slot_strs = _slot_history(dump)

    print("------")
    print("Chat History\n")
    print(history_table)

    if slot_strs:
        print("\n")
        print("Current slots: \n\t{}\n".format(", ".join(slot_strs)))

    print("------")
def _chat_history_table(events: List[Dict[Text, Any]]) -> Text:
    """Create a table containing bot and user messages.

    Also includes additional information, like any events and
    prediction probabilities."""

    def wrap(txt: Text, max_width: int) -> Text:
        # Wrap to the column width, keeping embedded newlines intact.
        return "\n".join(textwrap.wrap(txt, max_width, replace_whitespace=False))

    def colored(txt: Text, color: Text) -> Text:
        # colorclass markup: {color}text{/color}.
        return "{" + color + "}" + txt + "{/" + color + "}"

    def format_user_msg(user_event: UserUttered, max_width: int) -> Text:
        # Render the user's message plus the detected intent and confidence.
        intent = user_event.intent or {}
        intent_name = intent.get("name", "")
        _confidence = intent.get("confidence", 1.0)
        _md = _as_md_message(user_event.parse_data)
        _lines = [
            colored(wrap(_md, max_width), "hired"),
            "intent: {} {:03.2f}".format(intent_name, _confidence),
        ]
        return "\n".join(_lines)

    def bot_width(_table: AsciiTable) -> int:
        # Available width of the bot column (index 1).
        return _table.column_max_width(1)

    def user_width(_table: AsciiTable) -> int:
        # Available width of the user column (index 3).
        return _table.column_max_width(3)

    def add_bot_cell(data, cell):
        data.append([len(data), Color(cell), "", ""])

    def add_user_cell(data, cell):
        data.append([len(data), "", "", Color(cell)])

    # prints the historical interactions between the bot and the user,
    # to help with correctly identifying the action
    table_data = [
        [
            "# ",
            Color(colored("Bot ", "autoblue")),
            " ",
            Color(colored("You ", "hired")),
        ]
    ]

    table = SingleTable(table_data, "Chat History")

    # Bot output is buffered until the next user turn, then flushed as one cell.
    bot_column = []

    tracker = DialogueStateTracker.from_dict("any", events)
    applied_events = tracker.applied_events()

    for idx, event in enumerate(applied_events):
        if isinstance(event, ActionExecuted):
            bot_column.append(colored(event.action_name, "autocyan"))
            if event.confidence is not None:
                bot_column[-1] += colored(
                    " {:03.2f}".format(event.confidence), "autowhite"
                )
        elif isinstance(event, UserUttered):
            # Flush any buffered bot output before starting the user's row.
            if bot_column:
                text = "\n".join(bot_column)
                add_bot_cell(table_data, text)
                bot_column = []

            msg = format_user_msg(event, user_width(table))
            add_user_cell(table_data, msg)
        elif isinstance(event, BotUttered):
            wrapped = wrap(format_bot_output(event), bot_width(table))
            bot_column.append(colored(wrapped, "autoblue"))
        else:
            # Other events (slots, restarts, ...) show their story string.
            if event.as_story_string():
                bot_column.append(wrap(event.as_story_string(), bot_width(table)))

    # Flush trailing bot output after the final user turn.
    if bot_column:
        text = "\n".join(bot_column)
        add_bot_cell(table_data, text)

    table.inner_heading_row_border = False
    table.inner_row_border = True
    table.inner_column_border = False
    table.outer_border = False
    table.justify_columns = {0: "left", 1: "left", 2: "center", 3: "right"}

    return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
    """Create an array of slot representations to be displayed."""
    return [
        "{}: {}".format(
            slot_name,
            cliutils.wrap_with_color(
                str(slot_value), color=rasa.cli.utils.bcolors.WARNING
            ),
        )
        for slot_name, slot_value in tracker_dump.get("slots", {}).items()
    ]
async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig):
    """Export stories, NLU data and the domain of one conversation to disk."""
    story_path, nlu_path, domain_path = _request_export_info()

    tracker = await retrieve_tracker(endpoint, sender_id)
    tracker_events = tracker.get("events", [])

    serialised_domain = await retrieve_domain(endpoint)
    domain = Domain.from_dict(serialised_domain)

    await _write_stories_to_file(story_path, tracker_events, domain)
    await _write_nlu_to_file(nlu_path, tracker_events)
    await _write_domain_to_file(domain_path, tracker_events, domain)

    logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool:
    """Display the exit menu and act on the selection.

    Returns `True` when the interrupted question should simply be
    asked again; otherwise raises the control-flow exception that
    matches the choice."""
    selection = questionary.select(
        message="Do you want to stop?",
        choices=[
            Choice("Continue", "continue"),
            Choice("Undo Last", "undo"),
            Choice("Fork", "fork"),
            Choice("Start Fresh", "restart"),
            Choice("Export & Quit", "quit"),
        ],
    ).ask()

    if not selection or selection == "quit":
        # no answer (e.g. Ctrl-C) defaults to exporting and quitting
        await _write_data_to_file(sender_id, endpoint)
        raise Abort()
    if selection == "continue":
        # just return; the original question will get asked again
        return True
    if selection == "undo":
        raise UndoLastStep()
    if selection == "fork":
        raise ForkTracker()
    if selection == "restart":
        raise RestartConversation()
async def _request_action_from_user(
    predictions: List[Dict[Text, Any]], sender_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
    """Ask the user to correct an action prediction.

    Returns the chosen action name and a flag indicating whether that
    action was newly created during this session."""
    await _print_history(sender_id, endpoint)

    choices = [
        {
            "name": "{:03.2f} {:40}".format(a.get("score"), a.get("action")),
            "value": a.get("action"),
        }
        for a in predictions
    ]

    tracker = await retrieve_tracker(endpoint, sender_id)
    events = tracker.get("events", [])

    session_actions_all = [a["name"] for a in _collect_actions(events)]
    session_actions_unique = list(set(session_actions_all))
    old_actions = [action["value"] for action in choices]
    new_actions = [
        {"name": action, "value": OTHER_ACTION + action}
        for action in session_actions_unique
        if action not in old_actions
    ]
    choices = (
        [{"name": "<create new action>", "value": NEW_ACTION}] + new_actions + choices
    )
    question = questionary.select("What is the next action of the bot?", choices)

    action_name = await _ask_questions(question, sender_id, endpoint)
    is_new_action = action_name == NEW_ACTION

    if is_new_action:
        # create new action
        action_name = await _request_free_text_action(sender_id, endpoint)
        if action_name.startswith(UTTER_PREFIX):
            utter_message = await _request_free_text_utterance(
                sender_id, endpoint, action_name
            )
            NEW_TEMPLATES[action_name] = {utter_message: ""}
    elif action_name.startswith(OTHER_ACTION):
        # action was newly created in the session, but not this turn.
        # (was `action_name[:32] == OTHER_ACTION`, a hard-coded prefix
        # length that silently breaks if OTHER_ACTION ever changes)
        is_new_action = True
        action_name = action_name[len(OTHER_ACTION) :]

    print("Thanks! The bot will now run {}.\n".format(action_name))
    return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
    """Ask the user where stories, NLU data and the domain should be exported."""
    stories_question = questionary.text(
        message="Export stories to (if file exists, this "
        "will append the stories)",
        default=PATHS["stories"],
        validate=io_utils.file_type_validator(
            [".md"],
            "Please provide a valid export path for the stories, e.g. 'stories.md'.",
        ),
    )
    nlu_question = questionary.text(
        message="Export NLU data to (if file exists, this will "
        "merge learned data with previous training examples)",
        default=PATHS["nlu"],
        validate=io_utils.file_type_validator(
            [".md"],
            "Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
        ),
    )
    domain_question = questionary.text(
        message="Export domain file to (if file exists, this "
        "will be overwritten)",
        default=PATHS["domain"],
        validate=io_utils.file_type_validator(
            [".yml", ".yaml"],
            "Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
        ),
    )

    answers = questionary.form(
        export_stories=stories_question,
        export_nlu=nlu_question,
        export_domain=domain_question,
    ).ask()

    if not answers:
        # the user aborted the form (e.g. Ctrl-C)
        raise Abort()

    return answers["export_stories"], answers["export_nlu"], answers["export_domain"]
def _split_conversation_at_restarts(
events: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in events:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]:
    """Collect the message text and parsed data from the UserMessage events
    into a list.

    Entities marked by purely-automatic extractors are stripped before the
    message is written back out as training data."""
    from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
    from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
    from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor

    excluded_extractors = [
        DucklingHTTPExtractor.__name__,
        SpacyEntityExtractor.__name__,
        MitieEntityExtractor.__name__,
    ]
    # log once instead of once per entity as before
    logger.debug(
        "Exclude entity marking of following extractors"
        " {} when writing nlu data "
        "to file.".format(excluded_extractors)
    )

    msgs = []
    for event in events:
        if event.get("event") == UserUttered.type_name:
            data = event.get("parse_data", {})
            # Rebuild the entity list instead of calling `list.remove` while
            # iterating over the same list, which skipped any excluded entity
            # that immediately followed another excluded one.
            data["entities"] = [
                entity
                for entity in data.get("entities", [])
                if entity.get("extractor") not in excluded_extractors
            ]
            msg = Message.build(data["text"], data["intent"]["name"], data["entities"])
            msgs.append(msg)
        elif event.get("event") == UserUtteranceReverted.type_name and msgs:
            msgs.pop()  # user corrected the nlu, remove incorrect example

    return msgs
def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
    """Collect all the `ActionExecuted` events into a list."""
    action_type = ActionExecuted.type_name
    return [event for event in events if event.get("event") == action_type]
async def _write_stories_to_file(
    export_story_path: Text, events: List[Dict[Text, Any]], domain: Domain
) -> None:
    """Write the conversation of the sender_id to the story export path."""
    sub_conversations = _split_conversation_at_restarts(events)

    io_utils.create_path(export_story_path)

    # append to an existing export, start fresh otherwise
    mode = "a" if os.path.exists(export_story_path) else "w"

    with open(export_story_path, mode, encoding=io_utils.DEFAULT_ENCODING) as story_file:
        story_number = 1
        for conversation in sub_conversations:
            parsed_events = rasa.core.events.deserialise_events(conversation)
            tracker = DialogueStateTracker.from_events(
                "interactive_story_{}".format(story_number),
                evts=parsed_events,
                slots=domain.slots,
            )

            # only export segments that contain at least one user message
            if any(
                isinstance(event, UserUttered) for event in tracker.applied_events()
            ):
                story_number += 1
                story_file.write("\n" + tracker.export_stories(SAVE_IN_E2E))
def _filter_messages(msgs: List[Message]) -> List[Message]:
    """Filter messages removing those that start with INTENT_MESSAGE_PREFIX"""
    return [msg for msg in msgs if not msg.text.startswith(INTENT_MESSAGE_PREFIX)]
async def _write_nlu_to_file(
    export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
    """Write the nlu data of the sender_id to the export path."""
    from rasa.nlu.training_data import TrainingData

    collected = _filter_messages(_collect_messages(events))

    # noinspection PyBroadException
    try:
        previous_examples = loading.load_data(export_nlu_path)
    except Exception as e:
        logger.debug(
            "An exception occurred while trying to load the NLU data. {}".format(str(e))
        )
        # No previous file exists, use empty training data as replacement.
        previous_examples = TrainingData()

    nlu_data = previous_examples.merge(TrainingData(collected))

    # need to guess the format of the file before opening it to avoid a read
    # in a write
    if loading.guess_format(export_nlu_path) in {"md", "unk"}:
        stringified_training_data = nlu_data.nlu_as_markdown()
    else:
        stringified_training_data = nlu_data.nlu_as_json()

    io_utils.write_text_file(stringified_training_data, export_nlu_path)
def _entities_from_messages(messages):
"""Return all entities that occur in atleast one of the messages."""
return list({e["entity"] for m in messages for e in m.data.get("entities", [])})
def _intents_from_messages(messages):
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data}
return distinct_intents
async def _write_domain_to_file(
    domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain
) -> None:
    """Write an updated domain file to the file path."""
    io_utils.create_path(domain_path)

    messages = _collect_messages(events)
    actions = _collect_actions(events)
    templates = NEW_TEMPLATES

    # TODO for now there is no way to distinguish between action and form
    collected_actions = list(
        {
            action["name"]
            for action in actions
            if action["name"] not in default_action_names()
        }
    )

    new_domain = Domain(
        intents=_intents_from_messages(messages),
        entities=_entities_from_messages(messages),
        slots=[],
        templates=templates,
        action_names=collected_actions,
        form_names=[],
    )

    old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(
    endpoint: EndpointConfig,
    sender_id: Text,
    sender_ids: List[Text],
    plot_file: Optional[Text],
) -> None:
    """Predict and validate actions until we need to wait for a user message."""
    listen = False
    while not listen:
        # ask the server for the next action scores and pick the best one
        result = await request_prediction(endpoint, sender_id)
        predictions = result.get("scores")
        probabilities = [prediction["score"] for prediction in predictions]
        pred_out = int(np.argmax(probabilities))
        action_name = predictions[pred_out].get("action")
        policy = result.get("policy")
        confidence = result.get("confidence")

        # show the conversation plus the not-yet-confirmed prediction
        await _print_history(sender_id, endpoint)
        await _plot_trackers(
            sender_ids, plot_file, endpoint, unconfirmed=[ActionExecuted(action_name)]
        )

        # `_validate_action` returns True once the (possibly corrected)
        # action is `action_listen`, which ends this prediction loop
        listen = await _validate_action(
            action_name, policy, confidence, predictions, endpoint, sender_id
        )

        # re-plot now that the action has been confirmed/corrected
        await _plot_trackers(sender_ids, plot_file, endpoint)

    tracker_dump = await retrieve_tracker(
        endpoint, sender_id, EventVerbosity.AFTER_RESTART
    )
    events = tracker_dump.get("events", [])
    if len(events) >= 2:
        last_event = events[-2]  # last event before action_listen

        # if bot message includes buttons the user will get a list choice to reply
        # the list choice is displayed in place of action listen
        if last_event.get("event") == BotUttered.type_name and last_event["data"].get(
            "buttons", None
        ):
            response = _get_button_choice(last_event)
            if response != cliutils.FREE_TEXT_INPUT_PROMPT:
                await send_message(endpoint, sender_id, response)
def _get_button_choice(last_event: Dict[Text, Any]) -> Text:
    """Let the user answer a bot message with buttons via a list selection."""
    message_data = last_event["data"]
    prompt = last_event.get("text", "")

    button_choices = cliutils.button_choices_from_message_data(
        message_data, allow_free_text_input=True
    )
    selection = questionary.select(prompt, button_choices)
    return cliutils.payload_from_button_question(selection)
async def _correct_wrong_nlu(
    corrected_nlu: Dict[Text, Any],
    events: List[Dict[Text, Any]],
    endpoint: EndpointConfig,
    sender_id: Text,
) -> None:
    """A wrong NLU prediction got corrected, update core's tracker."""
    corrected_message = latest_user_message(events)
    if corrected_message is None:
        raise Exception("Failed to correct NLU data. User message not found.")
    corrected_message["parse_data"] = corrected_nlu

    # `UserUtteranceReverted` also removes the `ACTION_LISTEN` event before,
    # hence we have to replay it before re-sending the corrected message.
    replacement_events = [
        UserUtteranceReverted().as_dict(),
        ActionExecuted(ACTION_LISTEN_NAME).as_dict(),
        corrected_message,
    ]
    await send_event(endpoint, sender_id, replacement_events)
async def _correct_wrong_action(
    corrected_action: Text,
    endpoint: EndpointConfig,
    sender_id: Text,
    is_new_action: bool = False,
) -> None:
    """Push a corrected action prediction to core's tracker."""
    await send_action(
        endpoint, sender_id, corrected_action, is_new_action=is_new_action
    )
def _form_is_rejected(action_name, tracker):
    """Check if the form got rejected with the most recent action name."""
    active_form_name = tracker.get("active_form", {}).get("name")
    return (
        active_form_name
        and action_name != active_form_name
        and action_name != ACTION_LISTEN_NAME
    )
def _form_is_restored(action_name, tracker):
    """Check whether the form is called again after it was rejected."""
    active_form = tracker.get("active_form", {})
    return (
        active_form.get("rejected")
        and tracker.get("latest_action_name") == ACTION_LISTEN_NAME
        and action_name == active_form.get("name")
    )
async def _confirm_form_validation(action_name, tracker, endpoint, sender_id):
    """Ask a user whether an input for a form should be validated.

    Previous to this call, the active form was chosen after it was rejected."""
    # slot the form is currently trying to fill
    requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)

    validation_questions = questionary.confirm(
        "Should '{}' validate user input to fill "
        "the slot '{}'?".format(action_name, requested_slot)
    )
    validate_input = await _ask_questions(validation_questions, sender_id, endpoint)

    if not validate_input:
        # notify form action to skip validation
        await send_event(
            endpoint, sender_id, {"event": "form_validation", "validate": False}
        )

    elif not tracker.get("active_form", {}).get("validate"):
        # handle contradiction with learned behaviour: the user wants
        # validation, but the trained policy predicted none
        warning_question = questionary.confirm(
            "ERROR: FormPolicy predicted no form validation "
            "based on previous training stories. "
            "Make sure to remove contradictory stories "
            "from training data. "
            "Otherwise predicting no form validation "
            "will not work as expected."
        )

        await _ask_questions(warning_question, sender_id, endpoint)
        # notify form action to validate an input
        await send_event(
            endpoint, sender_id, {"event": "form_validation", "validate": True}
        )
async def _validate_action(
    action_name: Text,
    policy: Text,
    confidence: float,
    predictions: List[Dict[Text, Any]],
    endpoint: EndpointConfig,
    sender_id: Text,
) -> bool:
    """Query the user to validate if an action prediction is correct.

    Returns `True` when the final (possibly corrected) action is
    `action_listen`, i.e. the conversation now waits for user input.
    (Note: the return value does NOT indicate whether the prediction
    was correct.)"""
    question = questionary.confirm(
        "The bot wants to run '{}', correct?".format(action_name)
    )

    is_correct = await _ask_questions(question, sender_id, endpoint)

    if not is_correct:
        # let the user pick the right action instead
        action_name, is_new_action = await _request_action_from_user(
            predictions, sender_id, endpoint
        )
    else:
        is_new_action = False

    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)

    if _form_is_rejected(action_name, tracker):
        # notify the tracker that form was rejected
        await send_event(
            endpoint,
            sender_id,
            {
                "event": "action_execution_rejected",
                "name": tracker["active_form"]["name"],
            },
        )

    elif _form_is_restored(action_name, tracker):
        # the form is running again after a rejection - possibly skip validation
        await _confirm_form_validation(action_name, tracker, endpoint, sender_id)

    if not is_correct:
        await _correct_wrong_action(
            action_name, endpoint, sender_id, is_new_action=is_new_action
        )
    else:
        await send_action(endpoint, sender_id, action_name, policy, confidence)

    return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
    """Display the parse data of a message in markdown format."""
    from rasa.nlu.training_data.formats import MarkdownWriter

    if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
        # intent-style input ("/greet") is returned verbatim
        return parse_data["text"]

    # the markdown writer expects an entity list to be present
    if not parse_data.get("entities"):
        parse_data["entities"] = []

    # noinspection PyProtectedMember
    return MarkdownWriter()._generate_message_md(parse_data)
def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool:
"""Validate if a users message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
if intent in intents:
return True
else:
return False
async def _validate_user_text(
    latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> bool:
    """Validate a user message input as free text.

    This assumes the user message is a text message (so NOT `/greet`)."""
    parse_data = latest_message.get("parse_data", {})
    text = _as_md_message(parse_data)
    intent = parse_data.get("intent", {}).get("name")
    entities = parse_data.get("entities", [])

    if intent is None:
        # nothing to confirm without a classified intent
        print("The NLU classification for '{}' returned '{}'".format(text, intent))
        return False

    if entities:
        prompt = (
            "Is the intent '{}' correct for '{}' and are "
            "all entities labeled correctly?".format(intent, text)
        )
    else:
        prompt = (
            "Your NLU model classified '{}' with intent '{}'"
            " and there are no entities, is this correct?".format(text, intent)
        )

    return await _ask_questions(questionary.confirm(prompt), sender_id, endpoint)
async def _validate_nlu(
intents: List[Text], endpoint: EndpointConfig, sender_id: Text
) -> None:
"""Validate if a user message, either text or intent is correct.
If the prediction of the latest user message is incorrect,
the tracker will be corrected with the correct intent / entities."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
latest_message = latest_user_message(tracker.get("events", [])) or {}
if latest_message.get("text", "").startswith( # pytype: disable=attribute-error
INTENT_MESSAGE_PREFIX
):
valid = _validate_user_regex(latest_message, intents)
else:
valid = await _validate_user_text(latest_message, endpoint, sender_id)
if not valid:
corrected_intent = await _request_intent_from_user(
latest_message, intents, sender_id, endpoint
)
# corrected intents have confidence 1.0
corrected_intent["confidence"] = 1.0
events = tracker.get("events", [])
entities = await _correct_entities(latest_message, endpoint, sender_id)
corrected_nlu = {
"intent": corrected_intent,
"entities": entities,
"text": latest_message.get("text"),
}
await _correct_wrong_nlu(corrected_nlu, events, endpoint, sender_id)
async def _correct_entities(
    latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> List[Dict[Text, Any]]:
    """Validate the entities of a user message.

    Returns the corrected entities"""
    from rasa.nlu.training_data.formats import MarkdownReader

    parse_original = latest_message.get("parse_data", {})
    annotation_question = questionary.text(
        "Please mark the entities using [value](type) notation",
        default=_as_md_message(parse_original),
    )
    annotation = await _ask_questions(annotation_question, sender_id, endpoint)

    # noinspection PyProtectedMember
    parse_annotated = MarkdownReader()._parse_training_example(annotation)

    return _merge_annotated_and_original_entities(parse_annotated, parse_original)
def _merge_annotated_and_original_entities(parse_annotated, parse_original):
    """Replace annotated entities with their original counterpart when one
    matches, preserving additional entity parser information."""
    original_entities = parse_original.get("entities", [])
    merged = []
    for annotated_entity in parse_annotated.get("entities", []):
        replacement = next(
            (
                original
                for original in original_entities
                if _is_same_entity_annotation(annotated_entity, original)
            ),
            annotated_entity,
        )
        merged.append(replacement)
    return merged
def _is_same_entity_annotation(entity, other):
return entity["value"] == other["value"] and entity["entity"] == other["entity"]
async def _enter_user_message(sender_id: Text, endpoint: EndpointConfig) -> None:
    """Request a new message from the user."""
    question = questionary.text("Your input ->")

    # re-ask until the user enters something non-empty
    message = await _ask_questions(question, sender_id, endpoint, lambda a: not a)

    if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART):
        raise RestartConversation()

    await send_message(endpoint, sender_id, message)
async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> bool:
    """Check if the conversation is in need for a user message.

    Walks the applied events backwards: a user utterance means the bot
    is still processing, while the most recent executed action tells us
    whether it was `action_listen`."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.APPLIED)

    # the index from the original `enumerate` loop was unused
    for event in reversed(tracker.get("events", [])):
        if event.get("event") == UserUttered.type_name:
            return False
        if event.get("event") == ActionExecuted.type_name:
            return event.get("name") == ACTION_LISTEN_NAME
    return False
async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None:
    """Undo either the latest bot action or user message, whatever is last."""
    tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL)

    # Find the latest `ActionExecuted`, `UserUttered` or `Restarted` event.
    # (The unused `enumerate` index is gone and the two identical `break`
    # branches of the original loop are merged into one membership test.)
    last_event_type = None
    for event in reversed(tracker.get("events", [])):
        last_event_type = event.get("event")
        if last_event_type in {
            ActionExecuted.type_name,
            UserUttered.type_name,
            Restarted.type_name,
        }:
            break

    if last_event_type == ActionExecuted.type_name:
        await send_event(endpoint, sender_id, ActionReverted().as_dict())
    elif last_event_type == UserUttered.type_name:
        # reverting the user message also removes the preceding
        # `action_listen`, so it has to be replayed
        await send_event(
            endpoint,
            sender_id,
            [
                UserUtteranceReverted().as_dict(),
                ActionExecuted(ACTION_LISTEN_NAME).as_dict(),
            ],
        )
    # a `Restarted` event (or no event at all) means there is nothing to undo
async def _fetch_events(
sender_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig
) -> List[List[Event]]:
"""Retrieve all event trackers from the endpoint for all sender ids."""
event_sequences = []
for sender_id in sender_ids:
if isinstance(sender_id, str):
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
for conversation in _split_conversation_at_restarts(events):
parsed_events = rasa.core.events.deserialise_events(conversation)
event_sequences.append(parsed_events)
else:
event_sequences.append(sender_id)
return event_sequences
async def _plot_trackers(
sender_ids: List[Union[Text, List[Event]]],
output_file: Optional[Text],
endpoint: EndpointConfig,
unconfirmed: Optional[List[Event]] = None,
):
"""Create a plot of the trackers of the passed sender ids.
This assumes that the last sender id is the conversation we are currently
working on. If there are events that are not part of this active tracker
yet, they can be passed as part of `unconfirmed`. They will be appended
to the currently active conversation."""
if not output_file or not sender_ids:
# if there is no output file provided, we are going to skip plotting
# same happens if there are no sender ids
return None
event_sequences = await _fetch_events(sender_ids, endpoint)
if unconfirmed:
event_sequences[-1].extend(unconfirmed)
graph = await visualize_neighborhood(
event_sequences[-1], event_sequences, output_file=None, max_history=2
)
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
    """Print some initial help message for the user."""
    if not skip_visualization:
        # the visualization server runs one port above the main server
        visualization_url = DEFAULT_SERVER_FORMAT.format(
            "http", DEFAULT_SERVER_PORT + 1
        )
        visualization_help = "Visualisation at {}/visualization.html.".format(
            visualization_url
        )
    else:
        visualization_help = ""

    rasa.cli.utils.print_success(
        "Bot loaded. {}\n"
        "Type a message and press enter "
        # fixed user-facing typo: was "Ctr-c"
        "(press 'Ctrl-c' to exit). "
        "".format(visualization_help)
    )
async def record_messages(
    endpoint: EndpointConfig,
    sender_id: Text = UserMessage.DEFAULT_SENDER_ID,
    max_message_limit: Optional[int] = None,
    stories: Optional[Text] = None,
    skip_visualization: bool = False,
):
    """Read messages from the command line and print bot responses.

    Runs the main interactive-learning loop: waits for user input when the
    bot is listening, validates NLU and action predictions, and handles the
    restart/undo/fork control-flow exceptions raised by the exit menu."""
    from rasa.core import training

    try:
        _print_help(skip_visualization)

        try:
            domain = await retrieve_domain(endpoint)
        except ClientError:
            logger.exception(
                "Failed to connect to Rasa Core server at '{}'. "
                "Is the server running?".format(endpoint.url)
            )
            return

        # load the training stories without augmentation so their trackers
        # can be plotted next to the live conversation
        trackers = await training.load_data(
            stories,
            Domain.from_dict(domain),
            augmentation_factor=0,
            use_story_concatenation=False,
        )

        # domain intents are serialised as single-key dicts; take the names
        intents = [next(iter(i)) for i in (domain.get("intents") or [])]

        num_messages = 0
        sender_ids = [t.events for t in trackers] + [sender_id]

        if not skip_visualization:
            plot_file = "story_graph.dot"
            await _plot_trackers(sender_ids, plot_file, endpoint)
        else:
            plot_file = None

        while not utils.is_limit_reached(num_messages, max_message_limit):
            try:
                if await is_listening_for_message(sender_id, endpoint):
                    await _enter_user_message(sender_id, endpoint)
                    await _validate_nlu(intents, endpoint, sender_id)

                await _predict_till_next_listen(
                    endpoint, sender_id, sender_ids, plot_file
                )

                num_messages += 1
            except RestartConversation:
                # reset the tracker and immediately listen again
                await send_event(endpoint, sender_id, Restarted().as_dict())
                await send_event(
                    endpoint, sender_id, ActionExecuted(ACTION_LISTEN_NAME).as_dict()
                )
                logger.info("Restarted conversation, starting a new one.")
            except UndoLastStep:
                await _undo_latest(sender_id, endpoint)
                await _print_history(sender_id, endpoint)
            except ForkTracker:
                # restart, then replay the events chosen as the fork point
                await _print_history(sender_id, endpoint)

                events_fork = await _request_fork_from_user(sender_id, endpoint)

                await send_event(endpoint, sender_id, Restarted().as_dict())

                if events_fork:
                    for evt in events_fork:
                        await send_event(endpoint, sender_id, evt)
                logger.info("Restarted conversation at fork.")

                await _print_history(sender_id, endpoint)
                await _plot_trackers(sender_ids, plot_file, endpoint)
    except Abort:
        # user chose "Export & Quit" (or Ctrl-C'd the exit menu)
        return
    except Exception:
        logger.exception("An exception occurred while recording messages.")
        raise
def _serve_application(app, stories, skip_visualization):
    """Start a core server and attach the interactive learning IO.

    Blocks until the interactive session is finished and the Sanic
    server has been stopped."""
    endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)

    async def run_interactive_io(running_app: Sanic):
        """Small wrapper to shut down the server once cmd io is done."""
        await record_messages(
            endpoint=endpoint,
            stories=stories,
            skip_visualization=skip_visualization,
            # a fresh random conversation id per session
            sender_id=uuid.uuid4().hex,
        )

        logger.info("Killing Sanic server now.")

        running_app.stop()  # kill the sanic server

    # run the interactive IO as a background task of the server
    app.add_task(run_interactive_io)

    update_sanic_log_level()

    # blocking call; returns once `running_app.stop()` is invoked above
    app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT)

    return app
def start_visualization(image_path: Text = None) -> None:
    """Add routes to serve the conversation visualization files.

    Runs a small Sanic app (blocking) on the port above the main server
    that serves the visualization HTML and the generated dot file."""
    app = Sanic(__name__)

    # noinspection PyUnusedLocal
    @app.exception(NotFound)
    async def ignore_404s(request, exception):
        return response.text("Not found", status=404)

    # noinspection PyUnusedLocal
    @app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
    def visualisation_html(request):
        return response.file(visualization.visualization_html_path())

    # noinspection PyUnusedLocal
    @app.route("/visualization.dot", methods=["GET"])
    def visualisation_png(request):
        try:
            # the browser polls this file; disable caching so it sees updates
            headers = {"Cache-Control": "no-cache"}
            return response.file(os.path.abspath(image_path), headers=headers)
        except FileNotFoundError:
            # the dot file may not have been generated yet
            return response.text("", 404)

    update_sanic_log_level()

    app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(args, endpoints, additional_arguments, app, loop):
    """Train a Core agent before the server starts and attach it to the app."""
    _interpreter = NaturalLanguageInterpreter.create(args.get("nlu"), endpoints.nlu)

    # fall back to a temporary directory when no output path was given
    model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))

    app.agent = await train(
        args.get("domain"),
        args.get("stories"),
        model_directory,
        _interpreter,
        endpoints,
        args.get("dump_stories"),
        args.get("config")[0],
        None,
        additional_arguments,
    )
async def wait_til_server_is_running(endpoint, max_retries=30, sleep_between_retries=1):
    """Try to reach the server, retry a couple of times and sleep in between."""
    while max_retries:
        try:
            status = await retrieve_status(endpoint)
        except ClientError:
            # server unreachable - this burns one retry
            max_retries -= 1
            if max_retries:
                await asyncio.sleep(sleep_between_retries)
            continue

        logger.info("Reached core: {}".format(status))
        if status.get("is_ready"):
            # server is ready to go
            return True

        # server did not finish loading the agent yet
        # in this case, we need to wait till the model trained
        # so we might be sleeping for a while...
        await asyncio.sleep(sleep_between_retries)

    return False
def run_interactive_learning(
    stories: Text = None,
    skip_visualization: bool = False,
    server_args: Dict[Text, Any] = None,
    additional_arguments: Dict[Text, Any] = None,
):
    """Start the interactive learning with the model of the agent.

    Spawns the visualization server in a daemon process (unless skipped),
    configures the core server app and blocks until the interactive
    session ends."""
    global SAVE_IN_E2E
    server_args = server_args or {}

    # override the default export paths with the CLI-provided ones
    if server_args.get("nlu_data"):
        PATHS["nlu"] = server_args["nlu_data"]

    if server_args.get("stories"):
        PATHS["stories"] = server_args["stories"]

    if server_args.get("domain"):
        PATHS["domain"] = server_args["domain"]

    # NOTE(review): unlike the settings above this is a direct key access and
    # raises KeyError when "e2e" is missing - confirm callers always set it.
    SAVE_IN_E2E = server_args["e2e"]

    if not skip_visualization:
        p = Process(target=start_visualization, args=("story_graph.dot",))
        # daemonize so the visualization dies with the main process
        p.daemon = True
        p.start()
    else:
        p = None

    app = run.configure_app(enable_api=True)
    endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))

    # before_server_start handlers make sure the agent is loaded before the
    # interactive learning IO starts
    if server_args.get("model"):
        app.register_listener(
            partial(run.load_agent_on_start, server_args.get("model"), endpoints, None),
            "before_server_start",
        )
    else:
        app.register_listener(
            partial(train_agent_on_start, server_args, endpoints, additional_arguments),
            "before_server_start",
        )

    # blocks until the interactive session is finished
    _serve_application(app, stories, skip_visualization)

    if not skip_visualization and p is not None:
        p.terminate()  # pytype: disable=attribute-error
        p.join()  # pytype: disable=attribute-error
|
Backend.py | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QPixmap
from utils.Design_Files.MainFrontEnd import *
from utils.Design_Files import download_dialog
import music_tag
import os, sys, time, shutil, re
import threading, webbrowser, zipfile, shutil
class DownloadWidget(QDialog):
def __init__(self, parent):
    """Set up the download dialog: UI, platform defaults, QProcess wiring,
    and a sanity check that the youtube-dl binary is callable."""
    super().__init__(parent=parent)
    self.downloadUI = download_dialog.Ui_downloadForm()
    self.downloadUI.setupUi(self)

    ## variables
    self.default_options = ["--newline", "--extract-audio", "--abort-on-error", "--verbose"]  # Removed "--ignore-errors", "--rm-cache-dir"
    self.download_options = []

    # Save all downloads under Music directory in your home directory
    # NOTE(review): only win32/cygwin and linux are handled - on macOS
    # neither `destination_path` nor `output_format` is set here; confirm
    # intended platform support.
    if (sys.platform == "win32") or (sys.platform == "cygwin"):
        self.destination_path = f"C:\\Users\\{os.getlogin()}\\Music"
        self.output_format = f"-o \"{self.destination_path}\\%(title)s.%(ext)s\""
    elif sys.platform == "linux":
        self.destination_path = "~/Music"
        self.output_format = f"-o \"{self.destination_path}/%(title)s.%(ext)s\""

    ## Setups
    self.downloadUI.textBrowser.setOpenExternalLinks(True)
    self.cursor = self.downloadUI.loggingTextEdit.textCursor()

    # QProcess object for external app
    self.process = QProcess(self)

    # QProcess emits `readyRead` when there is data to be read
    self.process.readyRead.connect(self.dataReady)
    self.process.finished.connect(self.finisedProcess)

    ## Connections
    self.downloadUI.downloadButton.clicked.connect(self.downloadAudio)
    self.downloadUI.viewLogsButton.clicked.connect(self.viewLogs)
    self.downloadUI.seeAvailFormatsButton.clicked.connect(self.seeAvailFormats)
    self.downloadUI.setDestButton.clicked.connect(self.findDestination)
    self.downloadUI.stopButton.clicked.connect(self.stopExecution)

    ## Platform specific variables
    if sys.platform == "win32":
        self.yt_dl = "utils\\yt_dl-exes\\youtube-dl.exe"
    else:
        self.yt_dl = "youtube-dl"

    ## Check if the youtube downloader files are there
    # NOTE(review): os.system's return value is platform dependent; on POSIX
    # the exit status is encoded in the high byte, so `x == 1` presumably
    # only behaves as intended on Windows - confirm.
    x = os.system(f"{self.yt_dl} --version")
    if x == 1:
        parent_path = os.getcwd()
        git_link = "https://github.com/ayieko168/Audio-Metadata-Editor/tree/master/utils/Design_Files"
        QMessageBox.critical(self,
                             "ERROR!!!",
                             f"""There seems to be a problem with the downloader files. Try Fixing By:
1.) On the program, Going to 'File -> Audio Downloader' and Click the 'Initialize Downloader Files' Options.
2.) If (1) does not work, visit this link '{git_link}' and download the zip file and extract it on here {parent_path}.
3.) If all these don't work, visit the main program's download site and download it once again, Sorry!
""",
                             QMessageBox.Ok
                             )
def stopExecution(self):
    """Kill the running external youtube-dl process and re-enable the action buttons."""
    self.process.kill()
    ui = self.downloadUI
    ui.downloadButton.setEnabled(True)
    ui.seeAvailFormatsButton.setEnabled(True)
    ui.loggingTextEdit.insertPlainText("[Application] Process Killed Successfully\n")
def findDestination(self):
    """Ask the user for a download destination directory and rebuild the
    youtube-dl output template accordingly.

    Fixes: QFileDialog.getExistingDirectory() returns "" when the user
    cancels; the old code overwrote the destination with that empty string.
    The non-Windows branch now covers every POSIX platform, not just
    ``linux`` (macOS previously left ``output_format`` stale).
    """
    chosen = QFileDialog.getExistingDirectory()
    if not chosen:
        # User cancelled the dialog — keep the previous destination.
        return
    self.destination_path = chosen
    if (sys.platform == "win32") or (sys.platform == "cygwin"):
        self.output_format = f"-o \"{self.destination_path}\\%(title)s.%(ext)s\""
    else:
        self.output_format = f"-o \"{self.destination_path}/%(title)s.%(ext)s\""
    self.downloadUI.loggingTextEdit.insertPlainText(f"[Application] Destination directory set to :: {self.destination_path}")
def seeAvailFormats(self):
    """List the formats youtube-dl reports for the entered URL in the log widget.

    Fixes: the URL guard used ``and`` instead of ``or`` — a non-empty,
    non-http string (e.g. "abc") slipped through validation and was handed
    straight to the subprocess.
    """
    self.url = self.downloadUI.urlEntry.text()
    # Reject empty input OR anything that is not an http(s) URL.
    if (self.url == "") or (not self.url.startswith("http")):
        return
    # -F asks youtube-dl to print available formats without downloading.
    command = f"{self.yt_dl} -F \"{self.url}\""
    self.downloadUI.loggingTextEdit.moveCursor(QtGui.QTextCursor.End)
    self.downloadUI.loggingTextEdit.insertPlainText("[Application] Started Formats Aquisition Process...\n")
    self.process.start(command)
    # Disable actions until the QProcess `finished` signal re-enables them.
    self.downloadUI.downloadButton.setEnabled(False)
    self.downloadUI.seeAvailFormatsButton.setEnabled(False)
def downloadAudio(self):
    """Build a youtube-dl command line from the UI options and start the download.

    Fixes: the URL guard used ``and`` instead of ``or``, letting invalid
    non-http input through to the subprocess.
    """
    # Reset the per-download options list.
    self.download_options = []
    self.url = self.downloadUI.urlEntry.text()
    # Reject empty input OR anything that is not an http(s) URL.
    if (self.url == "") or (not self.url.startswith("http")):
        return
    ## Collect the current download options from the widgets.
    self.audio_typeOP = self.downloadUI.typeComboBox.currentText()
    self.download_options.append(f"--audio-format {self.audio_typeOP.lower()}")
    if self.downloadUI.playlistCheck.isChecked():
        self.download_options.append("--yes-playlist")
        # Playlist downloads get a per-playlist sub-folder in the destination.
        if (sys.platform == "win32") or (sys.platform == "cygwin"):
            self.output_format = f"-o \"{self.destination_path}\\%(playlist)s\\%(playlist_index)s - %(title)s.%(ext)s\""
        elif sys.platform == "linux":
            self.output_format = f"-o \"{self.destination_path}/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s\""
    else:
        self.download_options.append("--no-playlist")
    if self.downloadUI.getThumbCheck.isChecked():
        self.download_options.append("--write-thumbnail")
    if self.downloadUI.embedThumbCheck.isChecked():
        self.download_options.append("--embed-thumbnail")
    if self.downloadUI.geoBypassCheck.isChecked():
        self.download_options.append("--geo-bypass")
    if self.downloadUI.getDescriptionCheck.isChecked():
        self.download_options.append("--write-description")
    if self.downloadUI.writeMetaCheck.isChecked():
        self.download_options.append("--add-metadata")
    ## Assemble the full command string and log what will be run.
    command = f"{self.yt_dl} {' '.join(self.default_options)} {' '.join(self.download_options)} {self.output_format} \"{self.url}\""
    self.downloadUI.loggingTextEdit.moveCursor(QtGui.QTextCursor.End)
    self.downloadUI.loggingTextEdit.insertPlainText(f"[Application] Options Selected For Downloading are :: {self.download_options}\n")
    self.downloadUI.loggingTextEdit.insertPlainText(f"[Application] The Destination Path is Set to :: {self.output_format}\n")
    self.downloadUI.loggingTextEdit.insertPlainText(f"[Application] The Command to be run is :: {command}\n")
    ## Start the external process; `finished` signal restores button state.
    self.downloadUI.loggingTextEdit.moveCursor(QtGui.QTextCursor.End)
    self.downloadUI.loggingTextEdit.insertPlainText("[Application] Started Download Process...\n")
    self.process.start(command)
    self.downloadUI.downloadButton.setEnabled(False)
    self.downloadUI.stopButton.setEnabled(True)
def viewLogs(self):
    """Toggle the dialog height to show or hide the logging pane.

    Fixes: removed the unused ``log_file_path`` local (computed, never read).
    """
    if self.downloadUI.viewLogsButton.isChecked():
        # Grow the window so the log widget becomes visible.
        self.setMaximumHeight(600)
        self.setMinimumHeight(600)
    else:
        # Shrink back to the compact layout.
        self.setMinimumHeight(200)
        self.setMaximumHeight(200)
def show_dialog(self):
    """Show this widget and run it as a modal dialog (blocks until closed)."""
    # NOTE(review): exec_() implies this widget is a QDialog subclass — the
    # class header is outside this view; confirm before refactoring.
    self.show()
    self.exec_()
def dataReady(self):
    """Append freshly emitted process output to the on-disk log and the log widget."""
    output = str(self.process.readAll(), encoding="utf-8")
    # Persist every chunk with a timestamp for later inspection.
    with open("logging.md", "a") as log_file:
        log_file.write(f">> [{time.time()}] :: {output} ")
    # Mirror the chunk into the UI, always appending at the end.
    log_widget = self.downloadUI.loggingTextEdit
    log_widget.moveCursor(QtGui.QTextCursor.End)
    log_widget.insertPlainText(output)
def finisedProcess(self):
    """Restore the button states and log completion once the external process exits."""
    ui = self.downloadUI
    ui.downloadButton.setEnabled(True)
    ui.seeAvailFormatsButton.setEnabled(True)
    ui.stopButton.setEnabled(False)
    ui.loggingTextEdit.moveCursor(QtGui.QTextCursor.End)
    ui.loggingTextEdit.insertPlainText("[Application] Download Finished Successfully\n")
class Application(QMainWindow):
def __init__(self):
    """Build the main window, wire all UI signals, and initialise editing state.

    Fixes: the Linux default paths were ``f"/{os.getlogin()}/Music"`` —
    i.e. a directory directly under the filesystem root — instead of the
    user's home directory; they now use os.path.expanduser.
    """
    super().__init__()
    self.MainUi = Ui_MainWindow()
    self.MainUi.setupUi(self)
    ## CONNECTIONS
    self.MainUi.openButtonSingle.clicked.connect(self.openButtonSingleCMD)
    self.MainUi.addFileAlbum.clicked.connect(self.addFileAlbumCMD)
    self.MainUi.removeFileButtonAlbum.clicked.connect(self.removeFileCMD)
    self.MainUi.removeAllAlbum.clicked.connect(self.removeAllCMD)
    self.MainUi.destuttonAlbum.clicked.connect(self.destinationAlbumCMD)
    self.MainUi.findArtButtonAlbum.clicked.connect(self.findArtAlbumCMD)
    self.MainUi.saveButtonAlbum.clicked.connect(self.saveAlbumCMD)
    self.MainUi.saveAllButtonAlbum.clicked.connect(self.saveAllAlbumCMD)
    self.MainUi.exitButtonAlbum.clicked.connect(lambda: print("exit"))
    self.MainUi.tableWidgetAlbum.currentItemChanged.connect(self.updateDataAlbumCMD)
    self.MainUi.saveButtonSingle.clicked.connect(self.saveSingleCMD)
    self.MainUi.findArtButtonSingle.clicked.connect(self.findArtSingleCMD)
    ## MENUBAR CONNECTIONS
    self.MainUi.menuFile.triggered[QAction].connect(self.menuFileCMD)
    ## CONSTANTS
    self.FILE_FILTERS = """ Audio Files (*.aac *.aiff *.dsf *.flac *.m4a *.mp3 *.ogg *.opus *.wav *.wv);;
AAC Files (*.aac);;
AIFF Files (*.aiff);;
DSF Files (*.dsf);;
FLAC Files (*.flac);;
M4A Files (*.m4a);;
MP3 Files (*.mp3);;
OGG Files (*.ogg);;
OPUS Files (*.opus);;
WAV Files (*.wav);;
WV Files (*.wv)
"""
    self.IMAGE_FILTERS = """Jpeg Files (*.jpg *.jpeg );;
All Files (*)"""
    ## SETUPS — poll every 100 ms to push Album-tab edits into the metadata cache.
    self.timer = QtCore.QTimer()
    self.timer.timeout.connect(self.updateAlbumMetas)
    self.timer.start(100)
    ## Editing state: current values of every editable tag field.
    self.album = "" ;self.albumartist = "" ;self.artist = "" ;self.artwork = "" ;self.comment = "" ;self.compilation = ""
    self.composer = "" ;self.discnumber = 0 ;self.genre = "" ;self.lyrics = "" ;self.totaldiscs = 0 ;self.totaltracks = 0
    self.tracknumber = 0 ;self.tracktitle = "" ;self.year = 0
    self.metadata_dictinary = {}  # keys = music file names, values = music_tag metadata objects
    ## Platform-specific default paths
    if (sys.platform == "win32") or (sys.platform == "cygwin"):
        self.defaultMusicPath = f"C:\\Users\\{os.getlogin()}\\Music"
        self.defaultPicturesPath = f"C:\\Users\\{os.getlogin()}\\Pictures"
        self.youtube_path = "utils\\yt_dl-exes\\youtube-dl.exe"
    elif sys.platform == "linux":
        # BUG FIX: previously "/<user>/Music" (filesystem root), not the home dir.
        self.defaultMusicPath = os.path.expanduser("~/Music")
        self.defaultPicturesPath = os.path.expanduser("~/Pictures")
        self.youtube_path = "utils/yt_dl-exes/youtube-dl.exe"
def updateAlbumMetas(self):
    """Timer callback (every 100 ms): push the Album-tab field values into the
    cached metadata object of the currently selected table row."""
    # Only act while the Album tab (index 1) is visible.
    if self.MainUi.tabWidget.currentIndex() == 1:
        # Refresh the instance tag attributes from the Album-tab widgets first.
        self.getCurrentVarData("album")
        # A populated row must be selected before we can map it to a file.
        if self.MainUi.tableWidgetAlbum.item(self.MainUi.tableWidgetAlbum.currentRow(), 0) is not None:
            # print("Update Meta divt ", self.metadata_dictinary)
            # Column 1 holds the full path; its basename keys the metadata cache.
            curent_music_path = self.MainUi.tableWidgetAlbum.item(self.MainUi.tableWidgetAlbum.currentRow(), 1).text()
            curent_music_name = os.path.basename(curent_music_path)
            current_music_meta_object = self.metadata_dictinary[curent_music_name]
            # print(f"Editing Values of {curent_music_name}, Self Value = {self.totaltracks}, meta Value = {current_music_meta_object['totaltracks']}")
            self.writeMusicInfo(current_music_meta_object)
def openButtonSingleCMD(self):
    """Let the user pick one audio file and populate the Single tab with its tags."""
    chosen_path = QFileDialog.getOpenFileName(filter=self.FILE_FILTERS, directory=self.defaultMusicPath)[0]
    print(chosen_path)
    if not chosen_path:
        print("Invalid File Path!")
        return
    self.MainUi.filePathEntrySingle.setText(chosen_path)
    meta = self.getMusicInfo(chosen_path)
    self.populateFields(meta)
    print(meta)
def getMusicInfo(self, music_path):
    """Load *music_path* with music_tag and return its metadata object."""
    return music_tag.load_file(music_path)
def findArtSingleCMD(self):
    """Pick an artwork image for the single file, preview it, and remember its path."""
    chosen_image = QFileDialog.getOpenFileName(filter=self.IMAGE_FILTERS, directory=self.defaultPicturesPath)[0]
    print(chosen_image)
    if not chosen_image:
        print("Invalid File Path!")
        return
    preview = QPixmap(chosen_image).scaled(200, 200)
    self.MainUi.artLabelSingle.setPixmap(preview)
    self.MainUi.artLabelSingle.setToolTip(chosen_image)
    # writeMusicInfo() later reads this path to embed the artwork bytes.
    self.artwork = chosen_image
def writeMusicInfo(self, musicMetaObJ_Write):
    """Copy the currently-edited tag values onto *musicMetaObJ_Write* (a
    music_tag metadata object), skipping fields the user left unset.

    Refactor: the original was nine near-identical copy-pasted guard blocks;
    this version drives the same writes from two small tables. Semantics are
    unchanged: text tags are skipped when empty, numeric tags when None,
    year when 0, and artwork is embedded from the selected image file.
    """
    # Text tags: only written when the user entered something.
    text_tags = {
        'album': self.album, 'albumartist': self.albumartist, 'artist': self.artist,
        'comment': self.comment, 'compilation': self.compilation, 'composer': self.composer,
        'genre': self.genre, 'lyrics': self.lyrics, 'tracktitle': self.tracktitle,
    }
    for tag, value in text_tags.items():
        if value != "":
            try:
                musicMetaObJ_Write[tag] = str(value)
            except Exception as e:
                print(f"{tag} ERROR", e)
    # Numeric tags are stored as strings; skipped only when explicitly None.
    number_tags = {
        'discnumber': self.discnumber, 'totaldiscs': self.totaldiscs,
        'totaltracks': self.totaltracks, 'tracknumber': self.tracknumber,
    }
    for tag, value in number_tags.items():
        if value is not None:
            try:
                musicMetaObJ_Write[tag] = str(value)
            except Exception as e:
                print(f"{tag} ERROR", e)
    # Year uses 0 (the spin-box default) as its "unset" sentinel, not None.
    if self.year != 0:
        try:
            musicMetaObJ_Write['year'] = str(self.year)
        except Exception as e:
            print("year write ERROR", e)
    # Artwork is replaced with the raw bytes of the selected image file.
    if self.artwork != "" and os.path.exists(self.artwork):
        with open(self.artwork, 'rb') as img_in:
            musicMetaObJ_Write['artwork'] = img_in.read()
def populateFields(self, musicMetaObJ_Pop):
    """Fill the edit widgets of the active tab from *musicMetaObJ_Pop*
    (a music_tag metadata object), blanking them first.

    Fixes: on the Album tab, 'totaltracks' was written into the
    total-DISCS spin box (then immediately overwritten by 'totaldiscs'),
    so the total-tracks widget was never populated; it now goes to
    ``totalTracksAlbum`` (the widget name used by refreshFields and
    getCurrentVarData). The 27 copy-pasted try/except blocks are driven
    by two small helpers instead.
    """
    self.refreshFields()

    def _fill(setter, tag):
        # music_tag lookups/Qt setters raise for missing or unsupported tags;
        # mirror the original behaviour of logging and moving on.
        try:
            setter(musicMetaObJ_Pop[tag].first)
        except Exception as e:
            print(e)

    def _fill_art(label):
        try:
            art = musicMetaObJ_Pop["artwork"]
            if art.first is not None:
                raw_img = art.first.raw_thumbnail([200, 200])
                # Round-trip through a temp file because QPixmap loads from disk here.
                with open("test_img.jpg", 'wb') as img_out:
                    img_out.write(raw_img)
                label.setPixmap(QPixmap("test_img.jpg"))
                self.cleanImages()
            else:
                label.clear()
        except Exception as e:
            print("art ERROR ", e)

    ui = self.MainUi
    if ui.tabWidget.currentIndex() == 0:  # The Single tab
        _fill(ui.titleEditSingle.setText, "tracktitle")
        _fill(ui.albumEditSingle.setText, "album")
        _fill(ui.artistEditSingle.setText, "albumartist")
        _fill(ui.contributorsEditSingle.setText, "artist")
        _fill(ui.GenreEditSingle.setText, "genre")
        _fill(ui.composerEditSingle.setText, "composer")
        _fill(ui.compilationEditSingle.setText, "compilation")
        _fill(ui.commentTextSingle.setText, "comment")
        _fill(ui.yearSpinSingle.setValue, "year")
        _fill(ui.trackNumSpinSingle.setValue, "tracknumber")
        _fill(ui.totalTracksSpinSingle.setValue, "totaltracks")
        _fill(ui.diskNumSpinSingle.setValue, "discnumber")
        _fill(ui.totalDiskSpinSingle.setValue, "totaldiscs")
        _fill_art(ui.artLabelSingle)
    elif ui.tabWidget.currentIndex() == 1:  # The Album tab
        _fill(ui.titleEditAlbum.setText, "tracktitle")
        _fill(ui.albumEntryAlbum.setText, "album")
        _fill(ui.artistEntryAlbum.setText, "albumartist")
        _fill(ui.contributorsEntryAlbum.setText, "artist")
        _fill(ui.GenreEntryAlbum.setText, "genre")
        _fill(ui.composerEntryAlbum.setText, "composer")
        _fill(ui.compilationEntryAlbum.setText, "compilation")
        _fill(ui.commentTextAlbum.setText, "comment")
        _fill(ui.yearSpinAlbum.setValue, "year")
        _fill(ui.trackNumSpinAlbum.setValue, "tracknumber")
        # BUG FIX: 'totaltracks' previously targeted totalDiskSpinAlbum.
        _fill(ui.totalTracksAlbum.setValue, "totaltracks")
        _fill(ui.diskNumSpinAlbum.setValue, "discnumber")
        _fill(ui.totalDiskSpinAlbum.setValue, "totaldiscs")
        _fill_art(ui.artLabelAlbum)
def addFileAlbumCMD(self):
    """Append one or more chosen audio files to the album table and cache their metadata."""
    chosen_files = QFileDialog.getOpenFileNames(filter=self.FILE_FILTERS, directory=self.defaultMusicPath)[0]
    table = self.MainUi.tableWidgetAlbum
    for file_path in chosen_files:
        base_name = os.path.basename(file_path)
        row = table.rowCount()
        table.insertRow(row)
        # Column 0: display name, column 1: full path.
        table.setItem(row, 0, QTableWidgetItem(base_name))
        table.setItem(row, 1, QTableWidgetItem(file_path))
        self.metadata_dictinary[base_name] = self.getMusicInfo(file_path)
    print(self.metadata_dictinary)
def removeFileCMD(self):
    """Remove the currently selected row (and its cached metadata) from the album table.

    Fixes: ``currentIndex().row()`` returns -1 (never None) when nothing is
    selected, so the old ``!= None`` test always passed; test for -1 instead.
    """
    item_row = self.MainUi.tableWidgetAlbum.currentIndex().row()
    if item_row < 0:
        print("Nothing to remove")
        return
    try:
        row_data = self.MainUi.tableWidgetAlbum.currentItem().text()
        music_name = os.path.basename(row_data)
        del self.metadata_dictinary[music_name]
        print(self.metadata_dictinary)
    except Exception as e:
        # Best-effort: the cache entry may already be gone (e.g. after a rename).
        print("Remove ERROR ", e)
    self.MainUi.tableWidgetAlbum.removeRow(item_row)
def removeAllCMD(self):
    """Clear every row from the album table, reset the metadata cache, and blank the fields.

    Fixes: the old loop ran ``rowCount() + 1`` times, issuing one extra
    ``removeRow(-1)`` call per invocation; it now runs exactly rowCount() times.
    """
    row_count = self.MainUi.tableWidgetAlbum.rowCount()
    print("row count is ", row_count)
    if row_count == 0:
        print("Nothing to remove")
        return
    # Always delete the current last row so indices stay valid while shrinking.
    for _ in range(row_count):
        print("removing row ", self.MainUi.tableWidgetAlbum.rowCount() - 1)
        self.MainUi.tableWidgetAlbum.removeRow(self.MainUi.tableWidgetAlbum.rowCount() - 1)
    self.metadata_dictinary = {}
    self.refreshFields()
def destinationAlbumCMD(self):
    """Let the user choose the album output directory and show it in the entry."""
    chosen_dir = QFileDialog.getExistingDirectory(self, directory=self.defaultMusicPath)
    if not chosen_dir:
        print("Invalid Directory!")
        return
    self.MainUi.destntryAlbum.setText(chosen_dir)
    print(chosen_dir)
def findArtAlbumCMD(self):
    """Pick an artwork image for the album, preview it, and remember its path."""
    chosen_image = QFileDialog.getOpenFileName(filter=self.IMAGE_FILTERS, directory=self.defaultPicturesPath)[0]
    if not chosen_image:
        print("Invalid File Path!")
        return
    preview = QPixmap(chosen_image).scaled(200, 200)
    self.MainUi.artLabelAlbum.setPixmap(preview)
    self.MainUi.artLabelAlbum.setToolTip(chosen_image)
    # writeMusicInfo() later reads this path to embed the artwork bytes.
    self.artwork = chosen_image
def saveAlbumCMD(self):
    """similar to save all but moves the edited music files to <DESTINATION> Hence creating an album with <ALBUM NAME>

    Flow: validate destination and album name, copy the listed files into
    <destination>/<AlbumName>/, rebuild the table from that directory,
    recreate the metadata cache, then save every metadata object to disk.
    """
    # verify that the created path exists else, exit
    if not os.path.exists(self.MainUi.destntryAlbum.text()):
        print("Enter valid Storage Path!")
        return
    album_destination_path = self.MainUi.destntryAlbum.text()
    print(f"Destination set to {album_destination_path}")
    # ask for the album name
    album_name, resp = QInputDialog.getText(self, "Whats The Album Name?", "Album Name: ")
    # Verify that the user agreed to the name.
    if not resp:
        print("Fatal! You exited without chosing an Album Name.")
        return
    elif album_name == "":
        print("Fatal! You havent entered a valid name.")
        return
    # Name must be composed only of word chars, '-', '.', and spaces.
    elif len([x for x in re.compile(r"^[\w\-. ]+$").finditer(album_name)]) <= 0:
        print("Fatal! You havent entered a valid name.")
        return
    print(f"Album Name set to >> {album_name.title()}")
    # Verify that There are music files selected
    if self.MainUi.tableWidgetAlbum.rowCount() <= 0:
        print("Exiting! You havent selected any music files for the Album.")
        return
    # Create The Album
    print("Creating the album...")
    itemCount = self.MainUi.tableWidgetAlbum.rowCount()
    musicPathList = list(set([self.MainUi.tableWidgetAlbum.item(row, 1).text() for row in range(0, itemCount)]))
    musicNameList = list(set([os.path.basename(musicpath) for musicpath in musicPathList]))
    edited_musicNames = list(set([self.MainUi.tableWidgetAlbum.item(row, 0).text() for row in range(0, itemCount)]))
    # NOTE(review): these are cross-products (every path x every edited name)
    # de-duplicated through set(); with more than one file this does not pair
    # old paths with their own edited names — verify intent.
    edited_musicPaths = list(set([os.path.dirname(new_path) + os.path.sep + ".".join(new_name.split(".")[:-1]) + os.path.splitext(new_path)[-1] for new_path in musicPathList for new_name in edited_musicNames]))
    edited_musicPaths_Album = list(set([album_destination_path + os.path.sep + album_name.title() + os.path.sep + ".".join(new_name.split(".")[:-1]) + os.path.splitext(new_path)[-1] for new_path in musicPathList for new_name in edited_musicNames]))
    new_name = os.path.basename(edited_musicPaths_Album[-1])
    album_path = os.path.dirname(edited_musicPaths_Album[-1])
    # Save the new files to the album directory
    print("All cnanges made to renamed file will be lost, try renaming the files and saving them then editing the metadata after")
    self.metadata_dictinary = {}
    for old, new in list(zip(musicPathList, edited_musicPaths_Album)):
        print(f"Old >> {old}, New >> {new}")
        # create the album directory if non existant
        if not os.path.exists(os.path.dirname(new)):
            print("making dest dirrs")
            os.makedirs(os.path.dirname(new))
        # copy files
        print(f"copy file, old>>{old}, new>>{new}")
        shutil.copy2(old, new)
    # update table widget with new name and path
    print("update table widget with new names and paths")
    # NOTE(review): os.listdir yields bare names, so column 1 gets a name, not
    # a full path, unlike addFileAlbumCMD — confirm downstream lookups cope.
    for _file in os.listdir(album_path):
        fileName = os.path.basename(_file)
        rowPosition = self.MainUi.tableWidgetAlbum.rowCount()
        self.MainUi.tableWidgetAlbum.insertRow(rowPosition)
        self.MainUi.tableWidgetAlbum.setItem(rowPosition, 0, QTableWidgetItem(fileName))
        self.MainUi.tableWidgetAlbum.setItem(rowPosition, 1, QTableWidgetItem(_file))
    print("Done refresh table widget Ops.")
    ## refreshing the operation lists
    itemCount = self.MainUi.tableWidgetAlbum.rowCount()
    musicPathList = list(set([self.MainUi.tableWidgetAlbum.item(row, 1).text() for row in range(0, itemCount)]))
    musicNameList = list(set([os.path.basename(musicpath) for musicpath in musicPathList]))
    edited_musicNames = list(set([self.MainUi.tableWidgetAlbum.item(row, 0).text() for row in range(0, itemCount)]))
    edited_musicPaths = list(set([os.path.dirname(new_path) + os.path.sep + ".".join(new_name.split(".")[:-1]) + os.path.splitext(new_path)[-1] for new_path in musicPathList for new_name in edited_musicNames]))
    edited_musicPaths_Album = list(set([album_destination_path + os.path.sep + album_name.title() + os.path.sep + ".".join(new_name.split(".")[:-1]) + os.path.splitext(new_path)[-1] for new_path in musicPathList for new_name in edited_musicNames]))
    ## refresh changed filename meta objects
    print("creating new meta objects and meta dict")
    for new in musicPathList:
        # NOTE(review): `old` here is the stale value left over from the copy
        # loop above, so old_name is constant across iterations — likely a bug.
        old_name = os.path.basename(old)
        new_name = os.path.basename(new)
        self.metadata_dictinary[new_name] = self.getMusicInfo(new)
        print("Added Meta divt ", self.metadata_dictinary)
    ## Save the Edited metadata To disk
    print("Done Refreshing, Now Saving the edited metadatas")
    for pth, metaObj in self.metadata_dictinary.items():
        metaObj.save()
        pass
    print("Done Saving metadata\nDone All Ops.")
def saveAllAlbumCMD(self):
    """Save the edited metadata (and any renamed file names) for every file
    in the album table, in place — no new folders are created.

    Fixes: a stray argument-less ``QMessageBox.question()`` call raised
    TypeError before the prompt could appear, and the real prompt used an
    invalid signature (string "button" arguments); both replaced with the
    standard ``question(parent, title, text, buttons)`` form. The unused
    ``old_meta`` local was removed.
    """
    itemCount = self.MainUi.tableWidgetAlbum.rowCount()
    musicPathList = [self.MainUi.tableWidgetAlbum.item(row, 1).text() for row in range(0, itemCount)]
    musicNameList = [os.path.basename(musicpath) for musicpath in musicPathList]
    edited_musicNames = [self.MainUi.tableWidgetAlbum.item(row, 0).text() for row in range(0, itemCount)]
    # NOTE(review): cross-product of paths x edited names; only lines up with
    # the zip() below when a single name changed — verify intent.
    edited_musicPaths = [os.path.dirname(new_path)+os.path.sep+".".join(new_name.split(".")[:-1])+os.path.splitext(new_path)[-1] for new_path in musicPathList for new_name in edited_musicNames]
    ## Rename changed file names on disk (after asking the user)
    if not musicNameList == edited_musicNames:
        print("A Name changed")
        msg = QMessageBox.question(self, "QUESTION",
                                   "I seams you have renamed some file names, do you want to rename the file?",
                                   QMessageBox.Yes | QMessageBox.No)
        if msg == QMessageBox.Yes:
            print("yesss")
            print("\aAll cnanges made to renamed file will be lost, try renaming the files and saving them then editing the metadata after")
            for old, new in list(zip(musicPathList, edited_musicPaths)):
                if (old != new):
                    print(f"Old >> {old}, New >> {new}")
                    new_name = os.path.basename(new)
                    # rename file on disk
                    print("rename file")
                    os.rename(old, new)
                    # re-read the operation lists from the (updated) table
                    print("update lists with new file")
                    itemCount = self.MainUi.tableWidgetAlbum.rowCount()
                    musicPathList = [self.MainUi.tableWidgetAlbum.item(row, 1).text() for row in range(0, itemCount)]
                    edited_musicNames = [self.MainUi.tableWidgetAlbum.item(row, 0).text() for row in range(0, itemCount)]
                    edited_musicPaths = [os.path.dirname(new_path)+os.path.sep+".".join(new_name.split(".")[:-1])+os.path.splitext(new_path)[-1] for new_path in musicPathList for new_name in edited_musicNames]
                    # swap the selected row for one carrying the new name/path
                    print("update table widget with new names and paths")
                    self.removeFileCMD()
                    rowPosition = self.MainUi.tableWidgetAlbum.rowCount()
                    self.MainUi.tableWidgetAlbum.insertRow(rowPosition)
                    self.MainUi.tableWidgetAlbum.setItem(rowPosition, 0, QTableWidgetItem(new_name))
                    self.MainUi.tableWidgetAlbum.setItem(rowPosition, 1, QTableWidgetItem(new))
            print("Done Rename Ops.")
        else:
            print("noooooooo")
    ## refresh changed filename meta objects
    print("Now Refreshing metadata dict with new re-named file objects")
    for old, new in list(zip(musicPathList, edited_musicPaths)):
        if old != new:
            old_name = os.path.basename(old)
            new_name = os.path.basename(new)
            print(f"Removing old>{old_name} and adding new>{new_name} to the meta dict")
            self.metadata_dictinary[new_name] = self.metadata_dictinary.pop(old_name)
            self.metadata_dictinary[new_name] = self.getMusicInfo(new)
            print("Edited Meta divt ", self.metadata_dictinary)
    ## Save the Edited metadata To disk
    print("Done Refreshing, Now Saving the edited metadatas")
    for pth, metaObj in self.metadata_dictinary.items():
        metaObj.save()
    print("Done Saving metadata\nDone All Ops.")
def saveSingleCMD(self):
    """Write the Single-tab field values into the opened file's tags and save them to disk."""
    print("\n Setting Paths")
    target_path = self.MainUi.filePathEntrySingle.text()
    meta = self.getMusicInfo(target_path)
    # Pull widget values into the instance attributes, then onto the meta object.
    self.getCurrentVarData()
    self.writeMusicInfo(meta)
    meta.save()
    # Re-display what was actually written.
    self.populateFields(meta)
    print("done all ops\n")
def updateDataAlbumCMD(self):
    """Show the cached metadata of the album-table row that just gained focus.

    Fixes: ``!= None`` replaced with the identity check ``is None`` plus an
    early return (PEP 8: comparisons to None use ``is``/``is not``).
    """
    # currentItem() is None until a row has been selected.
    if self.MainUi.tableWidgetAlbum.currentItem() is None:
        return
    current_row = self.MainUi.tableWidgetAlbum.currentRow()
    music_path = self.MainUi.tableWidgetAlbum.item(current_row, 1).text()
    music_name = os.path.basename(music_path)
    print(f"change>> File Name == {music_name}, path == {music_path}")
    # The basename keys the metadata cache built by addFileAlbumCMD.
    meta_obj = self.metadata_dictinary[music_name]
    self.populateFields(meta_obj)
def refreshFields(self):
    """Blank every edit widget (text, spin boxes, artwork) on the active tab."""
    ui = self.MainUi
    tab = ui.tabWidget.currentIndex()
    if tab == 0:  # Single tab
        for text_widget in (ui.titleEditSingle, ui.albumEditSingle, ui.artistEditSingle,
                            ui.contributorsEditSingle, ui.GenreEditSingle, ui.composerEditSingle,
                            ui.compilationEditSingle, ui.commentTextSingle):
            text_widget.setText("")
        for spin_widget in (ui.yearSpinSingle, ui.trackNumSpinSingle, ui.totalTracksSpinSingle,
                            ui.diskNumSpinSingle, ui.totalDiskSpinSingle):
            spin_widget.setValue(0)
        ui.artLabelSingle.clear()
    elif tab == 1:  # Album tab
        for text_widget in (ui.titleEditAlbum, ui.albumEntryAlbum, ui.artistEntryAlbum,
                            ui.contributorsEntryAlbum, ui.GenreEntryAlbum, ui.composerEntryAlbum,
                            ui.compilationEntryAlbum, ui.commentTextAlbum):
            text_widget.setText("")
        for spin_widget in (ui.yearSpinAlbum, ui.trackNumSpinAlbum, ui.totalTracksAlbum,
                            ui.diskNumSpinAlbum, ui.totalDiskSpinAlbum):
            spin_widget.setValue(0)
        ui.artLabelAlbum.clear()
def getCurrentVarData(self, _from="single"):
    """Pull the widget values of the requested tab ("single" or "album")
    into the instance tag attributes used by writeMusicInfo."""
    ui = self.MainUi
    if _from == "single":
        self.tracktitle = ui.titleEditSingle.text()
        self.album = ui.albumEditSingle.text()
        self.albumartist = ui.artistEditSingle.text()
        self.artist = ui.contributorsEditSingle.text()
        self.genre = ui.GenreEditSingle.text()
        self.composer = ui.composerEditSingle.text()
        self.compilation = ui.compilationEditSingle.text()
        self.comment = ui.commentTextSingle.toPlainText()
        self.year = ui.yearSpinSingle.value()
        self.tracknumber = ui.trackNumSpinSingle.value()
        self.totaltracks = ui.totalTracksSpinSingle.value()
        self.discnumber = ui.diskNumSpinSingle.value()
        self.totaldiscs = ui.totalDiskSpinSingle.value()
    elif _from == "album":
        self.tracktitle = ui.titleEditAlbum.text()
        self.album = ui.albumEntryAlbum.text()
        self.albumartist = ui.artistEntryAlbum.text()
        self.artist = ui.contributorsEntryAlbum.text()
        self.genre = ui.GenreEntryAlbum.text()
        self.composer = ui.composerEntryAlbum.text()
        self.compilation = ui.compilationEntryAlbum.text()
        self.comment = ui.commentTextAlbum.toPlainText()
        self.year = ui.yearSpinAlbum.value()
        self.tracknumber = ui.trackNumSpinAlbum.value()
        self.totaltracks = ui.totalTracksAlbum.value()
        self.discnumber = ui.diskNumSpinAlbum.value()
        self.totaldiscs = ui.totalDiskSpinAlbum.value()
def cleanImages(self):
    """Delete the temporary artwork preview file ("test_img.jpg") if present.

    Fixes: os.remove raised FileNotFoundError when the preview had already
    been cleaned up; a missing file is now treated as already-done.
    """
    try:
        os.remove("test_img.jpg")
    except FileNotFoundError:
        pass
def menuFileCMD(self, q):
    """Dispatch the triggered File-menu action *q* to its handler.

    Fixes: ``th1.daemon`` was a no-op attribute READ (the flag was never
    set); it is now assigned before start(). The reset confirmation compared
    against the magic numbers 16384/65536 instead of the QMessageBox
    constants, used a bare ``except:``, and left the log file handle to the
    garbage collector.
    """
    selection = q.text()
    print("You have selected .. ", selection)
    if selection == "Download File":
        download_obj = DownloadWidget(self)
        download_obj.show_dialog()
    elif selection == "Update Downloader":
        if (sys.platform == "win32") or (sys.platform == "cygwin"):
            updater_cmd = f"{self.youtube_path} -U && pause"
        elif sys.platform == "linux":
            updater_cmd = "youtube-dl -U && pause"
        else:
            return
        th1 = threading.Thread(target=lambda: os.system(updater_cmd))
        th1.daemon = True  # BUG FIX: was the bare expression `th1.daemon`
        th1.start()
    elif selection == "Check Out A Real Downloader":
        # Open the author's stand-alone downloader project page.
        webbrowser.open_new_tab("https://github.com/ayieko168/New-YouTube-Downloader")
    elif selection == "Initialize Downloader Files":
        ## Extract the bundled youtube-dl executables.
        shutil.unpack_archive("utils/yt_dl-exes.tar.xz", "utils/yt_dl-exes")
        print("Done unpacking")
    elif selection == "Reset everything":
        msg = QMessageBox.question(self, "QUESTION", "ARE YOU SURE YOU WANT TO RESET EVERY THING?", QMessageBox.Yes | QMessageBox.No)
        if msg == QMessageBox.Yes:
            ## Remove the extracted downloader files (best-effort).
            try:
                shutil.rmtree("utils/yt_dl-exes")
            except OSError:
                pass
            ## Truncate the logging file.
            with open("logging.md", "w") as fo:
                fo.write("")
if __name__ == "__main__":
    # Build the Qt application, show the main window, and enter the event loop.
    qt_app = QApplication([])
    main_window = Application()
    main_window.show()
    qt_app.exec_()
video_demo.py | # -*- coding: utf-8 -*-
"""
This module performs PPE (personal protective equipment) detection on video.
Example:
$python video_demo.py
Author: Ming'en Zheng
"""
import os
import time
from multiprocessing import Process, Queue, Value
import queue
import numpy as np
import tensorflow as tf
import cv2
import argparse
import requests
from distutils.version import StrictVersion
import visualization_utils as vis_utils
import config
import base64
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Fail fast: the frozen-graph APIs used below require TensorFlow >= 1.12.
if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.12.*')
def load_model(inference_model_path):
    """Load a frozen TensorFlow inference graph from *inference_model_path*
    and return it as a tf.Graph."""
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(inference_model_path, 'rb') as model_file:
            graph_def.ParseFromString(model_file.read())
        # Import with an empty name scope so tensor names stay unprefixed.
        tf.import_graph_def(graph_def, name='')
    return graph
def load_image_into_numpy_array(image):
    """Convert a PIL-style image (exposing .size and .getdata()) into an
    (height, width, 3) uint8 numpy array."""
    im_width, im_height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, sess, tensor_dict):
    """Run one detection pass for *image* and strip the batch dimension from
    each fetched output."""
    input_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
    outputs = sess.run(tensor_dict, feed_dict={input_tensor: image})
    # All outputs are batched; keep only the first (and only) entry.
    outputs['num_detections'] = int(outputs['num_detections'][0])
    outputs['detection_classes'] = outputs['detection_classes'][0].astype(np.int64)
    outputs['detection_boxes'] = outputs['detection_boxes'][0]
    outputs['detection_scores'] = outputs['detection_scores'][0]
    return outputs
def is_wearing_hardhat(person_box, hardhat_box, intersection_ratio):
    """Return True when the hardhat box overlaps the person box by more than
    *intersection_ratio* of the hardhat box's own area.

    Boxes are 4-tuples indexed (0, 1) = one corner, (2, 3) = the opposite
    corner, in the same coordinate system for both arguments.

    Fixes: a degenerate (zero-area) hardhat box previously raised
    ZeroDivisionError; it now simply counts as "not worn".
    """
    xA = max(person_box[0], hardhat_box[0])
    yA = max(person_box[1], hardhat_box[1])
    xB = min(person_box[2], hardhat_box[2])
    yB = min(person_box[3], hardhat_box[3])
    # Clamp to 0 so disjoint boxes contribute no intersection area.
    inter_area = max(0, xB - xA) * max(0, yB - yA)
    hardhat_area = (hardhat_box[2] - hardhat_box[0]) * (hardhat_box[3] - hardhat_box[1])
    if hardhat_area <= 0:
        return False
    return inter_area / hardhat_area > intersection_ratio
def is_wearing_vest(person_box, vest_box, vest_intersection_ratio):
    """Return True when the vest box overlaps the person box by more than
    *vest_intersection_ratio* of the vest box's own area.

    Same box convention as is_wearing_hardhat. Fixes the same defect: a
    zero-area vest box previously raised ZeroDivisionError.
    """
    xA = max(person_box[0], vest_box[0])
    yA = max(person_box[1], vest_box[1])
    xB = min(person_box[2], vest_box[2])
    yB = min(person_box[3], vest_box[3])
    # Clamp to 0 so disjoint boxes contribute no intersection area.
    inter_area = max(0, xB - xA) * max(0, yB - yA)
    vest_area = (vest_box[2] - vest_box[0]) * (vest_box[3] - vest_box[1])
    if vest_area <= 0:
        return False
    return inter_area / vest_area > vest_intersection_ratio
def is_wearing_hardhat_vest(hardhat_boxes, vest_boxes, person_box):
    """Return (wearing_hardhat, wearing_vest) for one detected person.

    A person counts as wearing an item when any candidate box overlaps the
    person box by more than the fixed intersection ratio.
    """
    hardhat_ratio = 0.6
    vest_ratio = 0.6
    # any() short-circuits on the first match, just like the original
    # break-on-True loops did.
    wearing_hardhat = any(
        is_wearing_hardhat(person_box, box, hardhat_ratio)
        for box in hardhat_boxes)
    wearing_vest = any(
        is_wearing_vest(person_box, box, vest_ratio)
        for box in vest_boxes)
    return wearing_hardhat, wearing_vest
def post_message_process(run_flag, message_queue):
    """Worker-process loop: forward queued detections to the backend.

    Blocks on the queue (5s timeout so the run_flag is re-checked) and hands
    each (camera_id, output_dict, image, min_score_thresh) tuple to
    post_message(). Exits when run_flag is cleared.
    """
    while run_flag.value:
        try:
            item = message_queue.get(block=True, timeout=5)
        except queue.Empty:
            continue
        camera_id, output_dict, image, min_score_thresh = item
        post_message(camera_id, output_dict, image, min_score_thresh)
def post_message(camera_id, output_dict, image, min_score_thresh):
    """Build and POST one detection message to the backend.

    Filters detections by score, pairs every detected person with any
    overlapping hardhat/vest boxes, embeds the JPEG-encoded frame as base64,
    and posts the resulting JSON to config.detection_api.

    Returns True when the message was posted, False when there were no
    persons, JPEG encoding failed, or the backend was unreachable.
    """
    # Keep only detections above the confidence threshold.
    keep = output_dict["detection_scores"] > min_score_thresh
    detection_boxes = output_dict["detection_boxes"][keep]
    detection_classes = output_dict["detection_classes"][keep]
    # Class ids: 1=hardhat, 2=vest, 3=person (see category_index in main()).
    hardhat_boxes = detection_boxes[detection_classes == 1]
    vest_boxes = detection_boxes[detection_classes == 2]
    person_boxes = detection_boxes[detection_classes == 3]
    persons = []
    for person_box in person_boxes:
        person = dict()
        person["hardhat"], person["vest"] = is_wearing_hardhat_vest(hardhat_boxes, vest_boxes, person_box)
        persons.append(person)
    # Nothing to report: skip the (relatively expensive) JPEG encoding.
    if not persons:
        return False
    message = dict()
    message["timestamp"] = int(time.time() * 1000)
    message["cameraId"] = camera_id
    success, encoded_image = cv2.imencode('.jpg', image)
    if not success:
        # Previously the encode result flag was ignored; bail out instead of
        # posting a corrupt payload.
        return False
    image_info = {}
    image_info["height"] = image.shape[0]
    image_info["width"] = image.shape[1]
    image_info["format"] = "jpeg"
    image_info["raw"] = base64.b64encode(encoded_image.tobytes()).decode('utf-8')
    message["image"] = image_info
    message["persons"] = persons
    print(message["persons"])
    try:
        headers = {'Content-type': 'application/json'}
        result = requests.post(config.detection_api, json=message, headers=headers)
        print(result)
        return True
    except requests.exceptions.ConnectionError:
        print("Connect to backend failed")
        return False
def image_processing(graph, category_index, image_file_name, show_video_window):
    """Run the detector once on a single image file and optionally display
    the annotated result for 5 seconds.

    graph: tf.Graph from load_model(); category_index: {id: {'id','name'}}
    label map used for box labels.
    """
    img = cv2.imread(image_file_name)
    # Model expects a batch dimension: (1, H, W, 3).
    image_expanded = np.expand_dims(img, axis=0)
    with graph.as_default():
        # Collect the output tensors that actually exist in this graph.
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)
        with tf.Session() as sess:
            output_dict = run_inference_for_single_image(image_expanded, sess, tensor_dict)
            # Draws boxes/labels in place on img.
            vis_utils.visualize_boxes_and_labels_on_image_array(
                img,
                output_dict['detection_boxes'],
                output_dict['detection_classes'],
                output_dict['detection_scores'],
                category_index,
                instance_masks=output_dict.get('detection_masks'),
                use_normalized_coordinates=True,
                line_thickness=4)
            if show_video_window:
                cv2.imshow('ppe', img)
                cv2.waitKey(5000)
def video_processing(graph, category_index, video_file_name, show_video_window, camera_id, run_flag, message_queue):
    """Main detection loop: read frames, run the detector, draw boxes, and
    periodically enqueue results for the message-posting worker process.

    Parameters
    ----------
    graph : tf.Graph loaded by load_model().
    category_index : {class id: {'id', 'name'}} label map.
    video_file_name : video file path or camera device.
    show_video_window : truthy to display an OpenCV window.
    camera_id : identifier forwarded with each posted message.
    run_flag : multiprocessing.Value; cleared here when the user presses 'q'.
    message_queue : multiprocessing.Queue consumed by post_message_process().
    """
    cap = cv2.VideoCapture(video_file_name)
    if show_video_window:
        cv2.namedWindow('ppe', cv2.WINDOW_NORMAL)
        if config.display_full_screen:
            cv2.setWindowProperty('ppe', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        else:
            cv2.setWindowProperty('ppe', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
    # Only request a capture resolution the camera is known to support.
    if (config.capture_image_width, config.capture_image_height) in config.supported_video_resolution:
        print("video_processing:", "supported video resoulution")
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, config.capture_image_width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, config.capture_image_height)
    with graph.as_default():
        print("video_processing:", "default tensorflow graph")
        # Collect the output tensors that actually exist in this graph.
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)
        with tf.Session() as sess:
            print("video_processing:", "tensorflow session")
            send_message_time = time.time()
            frame_counter = 0
            while True:
                ret, frame = cap.read()
                # When reading from a file, loop back to the first frame at EOF.
                if config.input_type.lower() == "file":
                    frame_counter += 1
                    if frame_counter == int(cap.get(cv2.CAP_PROP_FRAME_COUNT)):
                        frame_counter = 0
                        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        continue
                if frame is None:
                    print("video_processing:", "null frame")
                    break
                image_expanded = np.expand_dims(frame, axis=0)
                output_dict = run_inference_for_single_image(image_expanded, sess, tensor_dict)
                # Draws boxes/labels in place on frame.
                vis_utils.visualize_boxes_and_labels_on_image_array(
                    frame,
                    output_dict['detection_boxes'],
                    output_dict['detection_classes'],
                    output_dict['detection_scores'],
                    category_index,
                    instance_masks=output_dict.get('detection_masks'),
                    use_normalized_coordinates=True,
                    line_thickness=4)
                # Throttle outgoing messages to one per message_send_interval ms.
                if time.time() - send_message_time > config.message_send_interval / 1000.0:
                    resized_frame = cv2.resize(frame, dsize=(config.storage_image_width, config.storage_image_height))
                    try:
                        message_queue.put_nowait((camera_id, output_dict, resized_frame, config.object_confidence_threshold))
                    except queue.Full:
                        print("message queue is full")
                    else:
                        # Only reset the timer when the message was enqueued,
                        # so a full queue retries on the next frame.
                        send_message_time = time.time()
                if show_video_window:
                    resized_frame = cv2.resize(frame, dsize=(config.display_window_width, config.display_window_height))
                    cv2.imshow('ppe', resized_frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        # 'q' stops both this loop and the worker process.
                        run_flag.value = 0
                        break
    print("video_processing:", "releasing video capture")
    cap.release()
    cv2.destroyAllWindows()
def main():
    """CLI entry point: load the frozen model, start the message-posting
    worker process, and run detection on the configured video source."""
    parser = argparse.ArgumentParser(description="Hardhat and Vest Detection", add_help=True)
    parser.add_argument("--model_dir", type=str, required=True, help="path to model directory")
    parser.add_argument("--video_file_name", type=str, required=True, help="path to video file, or camera device, i.e /dev/video1")
    parser.add_argument("--show_video_window", type=int, required=True, help="the flag for showing the video window, 0 is not display, 1 display")
    parser.add_argument("--camera_id", type=str, required=True, help="camera identifier")
    args = parser.parse_args()
    frozen_model_path = os.path.join(args.model_dir, "frozen_inference_graph.pb")
    if not os.path.exists(frozen_model_path):
        # Fixed message: the file checked is .pb, not .db.
        print("frozen_inference_graph.pb file does not exist in model directory")
        exit(-1)
    print("loading model")
    graph = load_model(frozen_model_path)
    # Label map for the class ids emitted by the detector.
    category_index = {1: {'id': 1, 'name': 'hardhat'},
                      2: {'id': 2, 'name': 'vest'},
                      3: {'id': 3, 'name': 'person'}}
    print("start message queue")
    run_flag = Value('i', 1)
    # Queue of size 1: drop frames rather than letting messages back up.
    message_queue = Queue(1)
    p = Process(target=post_message_process, args=(run_flag, message_queue))
    p.start()
    print("video processing")
    video_processing(graph, category_index, args.video_file_name,
                     args.show_video_window, args.camera_id, run_flag, message_queue)
    p.join()
|
pid.py | '''
Created on 11/04/2015
@author: david
'''
import logging
from threading import Thread
import time
class PID(object):
    """
    Proportional Integrative Derivative stabilizer.

    Runs a background thread that periodically reads the process variables
    through readInputDelegate, computes one PID output per channel and
    pushes the results through setOutputDelegate.
    """

    # Fraction of the nominal period accepted as a "correct" loop rate.
    PERIOD_RANGE_MARGIN = 0.1

    def __init__(self, period, kpMatrix, kiMatrix, kdMatrix,
                 readInputDelegate, setOutputDelegate, pidName=""):
        """
        Constructor.

        @param period: nominal loop period in seconds.
        @param kpMatrix, kiMatrix, kdMatrix: per-channel PID gain lists.
        @param readInputDelegate: callable returning the current value list.
        @param setOutputDelegate: callable receiving the output list.
        @param pidName: label used in log messages.
        """
        self._pidName = pidName
        length = len(kpMatrix)
        self._targets = [0.0] * length
        self._integrals = [0.0] * length
        self._lastErrors = [0.0] * length
        self._period = period
        # Acceptable period window and the value the sleep correction aims for.
        self._minPeriod = period * (1.0 - PID.PERIOD_RANGE_MARGIN)
        self._maxPeriod = period * (1.0 + PID.PERIOD_RANGE_MARGIN)
        self._periodTarget = (self._minPeriod + self._period) / 2.0
        self._lastTime = time.time()
        self._currentPeriod = period
        self._kp = kpMatrix
        self._ki = kiMatrix
        self._kd = kdMatrix
        self._readInput = readInputDelegate
        self._setOutput = setOutputDelegate
        self._isRunning = False
        self._thread = None
        self._length = length
        # Accumulated statistics for the average-rate report in stop().
        self._deltaTimeSum = 0.0
        self._iterationCount = 0

    def _calculate(self):
        """Perform one PID iteration across all channels."""
        outputArray = [0.0] * self._length
        currentValues = self._readInput()
        currentTime = time.time()
        dt = currentTime - self._lastTime
        for i in range(self._length):
            error = self._targets[i] - currentValues[i]
            self._integrals[i] += error * dt
            result = \
                self._kp[i] * error \
                + self._ki[i] * self._integrals[i] \
                + (self._kd[i] * (error - self._lastErrors[i]) / dt)
            self._lastErrors[i] = error
            outputArray[i] = result
        self._lastTime = currentTime
        self._setOutput(outputArray)
        self._currentPeriod = dt
        self._deltaTimeSum += dt
        self._iterationCount += 1

    def setTarget(self, target, index):
        """Set the target for one channel."""
        self._targets[index] = target

    def setTargets(self, targets):
        """Replace the whole target list."""
        self._targets = targets

    def getTarget(self, index):
        """Return the target for one channel."""
        return self._targets[index]

    def getTargets(self):
        """Return the whole target list."""
        return self._targets

    def getCurrentPeriod(self):
        """Return the most recently measured loop period (seconds)."""
        return self._currentPeriod

    def _do(self):
        """Loop executed by the worker thread until stop() is requested."""
        iterCount = 0
        underFreq = 0
        overFreq = 0
        rightFreq = 0
        acceptableFreq = 0
        diff = 0.0
        minFreq = 1.0 / self._maxPeriod
        message = "Minimal freq. is {0:.3f}Hz.".format(minFreq)
        print(message)
        logging.info(message)
        self._lastTime = time.time()
        time.sleep(self._period)
        while self._isRunning:
            t0 = time.time()
            self._calculate()
            calculationTime = time.time() - t0
            iterCount += 1
            # Classify the measured loop period against the accepted window.
            if self._currentPeriod < self._minPeriod:
                overFreq += 1
            elif self._currentPeriod >= self._minPeriod and self._currentPeriod <= self._period:
                rightFreq += 1
            elif self._currentPeriod > self._period and self._currentPeriod <= self._maxPeriod:
                acceptableFreq += 1
            else:
                underFreq += 1
            # Accumulated drift nudges the sleep time towards the target period.
            diff += self._periodTarget - self._currentPeriod
            sleepTime = self._period - calculationTime + 0.1 * diff
            if sleepTime > 0.0:
                time.sleep(sleepTime)
            else:
                time.sleep(0.001)
        # Report the achieved loop-rate distribution. Guard against division
        # by zero when the PID was stopped before completing any iteration.
        if iterCount:
            underFreqPerc = underFreq * 100.0 / iterCount
            overFreqPerc = overFreq * 100.0 / iterCount
            rightFreqPerc = rightFreq * 100.0 / iterCount
            acceptableFreqPerc = acceptableFreq * 100.0 / iterCount
            message = "In freq: {0:.3f}%; Acceptable: {1:.3f}%; Under f.: {2:.3f}%; Over f.: {3:.3f}%"\
                .format(rightFreqPerc, acceptableFreqPerc, underFreqPerc, overFreqPerc)
            logging.info(message)
            print(message)

    def start(self):
        """Start the stabilization thread if it is not already running."""
        # Thread.is_alive() replaces isAlive(), which was removed in Py 3.9.
        if self._thread is None or not self._thread.is_alive():
            logging.info("Starting PID-\"{0}\"".format(self._pidName))
            self._deltaTimeSum = 0.0
            self._iterationCount = 0
            # Reset PID state so old integrals don't leak into a new run.
            length = len(self._kp)
            self._integrals = [0.0] * length
            self._lastErrors = [0.0] * length
            self._isRunning = True
            self._thread = Thread(target=self._do)
            self._thread.start()

    def stop(self):
        """Stop the stabilization thread and log average rate statistics."""
        self._isRunning = False
        if self._thread is not None and self._thread.is_alive():
            self._thread.join()
        if self._iterationCount != 0 and self._deltaTimeSum:
            averageDeltaTime = self._deltaTimeSum * 1000.0 / self._iterationCount
            averageFrequency = self._iterationCount / self._deltaTimeSum
        else:
            averageDeltaTime = 0.0
            averageFrequency = float("inf")
        message = "PID-\"{0}\" - Avg. time: {1:.3f}ms - Avg. freq: {2:.3f}Hz".format(
            self._pidName, averageDeltaTime, averageFrequency)
        print(message)
        logging.info(message)

    def isRunning(self):
        """Return True while the stabilization thread is active."""
        return self._isRunning
|
lrf_node.py | #!/usr/bin/env python
"""
Controls and publishes readings from the laser range finder.
"""
from __future__ import print_function
import os
import time
import sys
from math import pi
from threading import Thread
import io
import yaml
from PIL import Image as PilImage
import cv2
# from scipy.signal import medfilt2d
import rospy
from sensor_msgs.msg import CompressedImage, Image, LaserScan
from std_srvs.srv import Empty as EmptySrv, EmptyResponse
from cv_bridge import CvBridge
import numpy as np
from rpi_gpio.srv import DigitalWrite
from rpi_gpio import msg as msgs
from ros_homebot_msgs.srv import *
from ros_homebot_msgs import msg as msgs
from ros_homebot_lrf.lrf import LaserRangeFinder, calibrate
from ros_homebot_lrf.utils import compress_list
from ros_homebot_lrf import constants as c
from ros_homebot_python.utils import fail, success
CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../config')
# DEFAULT_LOG_LEVEL = rospy.DEBUG
DEFAULT_LOG_LEVEL = rospy.ERROR
# DEFAULT_LOG_LEVEL = rospy.FATAL
class CalibrationManager(object):
    """
    Walks the user through manually identifying calibration markers.
    """

    def __init__(self, node, calibration_fn):
        # node: owning LRF node, used to control the laser and grab frames.
        self.node = node
        # NOTE(review): yaml.load() without an explicit Loader is unsafe on
        # untrusted input (deprecated since PyYAML 5.1) and the file handle
        # is never closed — confirm acceptable for this interactive tool.
        self.calibration_data = yaml.load(open(calibration_fn))
        print('calibration_data:', self.calibration_data)
        self.distances = self.calibration_data['distances'] # {pixel column: distance in mm}
        assert len(self.distances) >= 3, \
            'At least three or more calibration distances are needed. Only %i found in %s.' \
            % (len(self.distances), calibration_fn)
        # Distances still waiting to be matched to a pixel column by the user.
        self.pending_distances = sorted(self.distances.values())
        self.pixel_readings = None
        self.markers = {} # {pixel column: distance}
        # Distance currently being identified (None when none pending).
        self.current_distance = None
        self._last_distance = None

    def on_mouse(self, event, x, y, flags, param):
        # OpenCV mouse callback: record the clicked column for the distance
        # currently being identified.
        if event == cv2.EVENT_LBUTTONDOWN:
            # Columns where no laser was detected read -1 and can't be used.
            if self.pixel_readings[x] == -1:
                print('No laser was detected in this column. Please select again.')
            else:
                self.markers[x] = self.current_distance
                self.current_distance = None

    def announce_next_marker(self):
        # Prompt the user for the next pending distance, if any.
        if self.current_distance:
            pass
        elif self.pending_distances:
            self.current_distance = self.pending_distances.pop(0)
            print('Please click on the center of the marker for distance %i.' \
                % self.current_distance)

    def run(self):
        """Interactive wizard: capture laser-off/on frames, let the user
        click each distance marker, then write the calibration YAML."""
        print('Collecting readings...')
        self.node.turn_laser_off()
        off_img = self.node.get_image_pil()
        self.node.turn_laser_on()
        on_img = self.node.get_image_pil()
        self.dist_readings, self.pixel_readings = self.node._lrf.get_distance(
            off_img,
            on_img,
            save_images_dir=os.path.expanduser('~'),#TODO
            as_pfc=True,
        )
        # Warn when too many columns have no laser reading (value < 0).
        missing_count = sum(1 for _ in self.pixel_readings if _ < 0)
        missing_ratio = missing_count/float(len(self.pixel_readings))
        missing_percent = missing_ratio*100
        print('readings:', self.pixel_readings)
        print('missing_ratio:', missing_ratio)
        if missing_ratio > .2:
            print(('\nWarning! %.0f%% columns contain no laser readings. '
                'This may hinder manual classification. '
                'If you have problems locating markers, abort, remove any obstacles '
                'or bright lights that may interfere with measurements, and then re-run.'
                ) % missing_percent)
        print((
            '\nYou will now be asked to identify where %i distance markers are located. '
            'Press <enter> to begin.') % len(self.distances))
        # NOTE(review): raw_input is Python 2 only — confirm target runtime.
        raw_input()
        # Collect manual marker positions.
        img = self.node.get_image_cv2()
        height, width, channels = img.shape
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', self.on_mouse)
        self.announce_next_marker()
        while self.pending_distances or self.current_distance is not None:
            self.announce_next_marker()
            cv2.imshow('image', img)
            k = cv2.waitKey(20) & 0xFF
            if k == 27:
                print('Escape key pressed. Aborting.')
                return
        cv2.destroyAllWindows()
        # Save updated calibration file.
        calibration_fn = os.path.join(CONFIG_DIR, 'calibration_%i.yaml' % width)
        write_to_file = False
        if raw_input('\nWrite calibration to %s? ' % calibration_fn).startswith('y'):
            fout = open(calibration_fn, 'w')
            write_to_file = True
        else:
            # Dry run: echo the YAML to stdout instead.
            fout = sys.stdout
        print('readings:', self.pixel_readings, file=fout)
        print('distances:', file=fout)
        for k in sorted(self.markers):
            print(' %i: %i' % (k, self.markers[k]), file=fout)
        print('h: %s' % self.calibration_data['h'], file=fout)
        print('image_width: %i' % width, file=fout)
        print('image_height: %i' % height, file=fout)
        print('laser_position: %s' % self.calibration_data['laser_position'], file=fout)
        if write_to_file:
            print('\nCalibration file written to %s.' % calibration_fn)
class NoiseFilter(object):
    """Temporal + spatial smoothing for per-column distance readings.

    Keeps a short history per column, takes the median over that history,
    then applies a scipy.signal filter (median filter by default) across
    the columns.
    """

    def __init__(self, name='medfilt', func_kwargs=None, history_size=3):
        """
        name: scipy.signal function name to apply across columns.
        func_kwargs: keyword arguments for that function
            (defaults to kernel_size=3).
        history_size: number of past frames kept per column.
        """
        from scipy import signal
        self.func = getattr(signal, name)
        # Bug fix: the func_kwargs argument used to be ignored and always
        # overwritten with kernel_size=3.
        self.func_kwargs = dict(kernel_size=3) if func_kwargs is None else func_kwargs
        self.history_size = history_size
        # Lazily sized to the number of columns on the first add().
        self.history_buckets = None

    def add(self, distances):
        """Record one frame of per-column distance readings."""
        # Initialize buckets.
        if self.history_buckets is None:
            self.history_buckets = [[] for _ in distances]
        for i, v in enumerate(distances):
            # Ignore missing values (negative) once we have real history.
            if v < 0 and self.history_buckets[i]:
                continue
            # Add value to history.
            self.history_buckets[i].append(v)
            # Forget old values.
            if len(self.history_buckets[i]) > self.history_size:
                self.history_buckets[i].pop(0)

    def get(self):
        """Return the filtered distances: per-column history median, then
        the configured scipy filter across columns."""
        lst = [
            np.median(bucket)
            for bucket in self.history_buckets
        ]
        lst = self.func(lst, **self.func_kwargs)
        return lst
class LRF():
    """ROS node driving the laser range finder.

    Toggles the line laser, captures camera frames with and without the
    laser, computes per-column distances via LaserRangeFinder, and publishes
    the results as a LaserScan plus several debug topics.
    """

    def __init__(self):
        rospy.init_node('homebot_lrf', log_level=DEFAULT_LOG_LEVEL)
        # Cleanup when termniating the node
        rospy.on_shutdown(self.shutdown)
        # Overall loop rate: should be faster than fastest sensor rate
        self.rate = int(rospy.get_param("~rate", 50))
        r = rospy.Rate(self.rate)
        self._started = False
        self._state = c.OFF
        self._state_change_ts = None
        self._image_without_laser = None
        self._image_with_laser = None
        self.noise_filter = NoiseFilter()
        # The time when we started measuring distances.
        self._t0 = None
        # Default calibration constants; overridden below when a calibration
        # file is found.
        ro = float(rospy.get_param('~ro', -0.0563705005565))
        rpc = float(rospy.get_param('~rpc', 0.00298408515511))
        h = float(rospy.get_param('~h', 22.5))
        laser_position = rospy.get_param('~laser_position', 'bottom')
        self.show_line_image = int(rospy.get_param('~show_line_image', 1))
        self.show_straightening = int(rospy.get_param("~straightening", 0))
        calibration_fn = rospy.get_param(
            "~calibration", os.path.join(CONFIG_DIR, 'calibration.yaml'))
        # Fall back to resolving the file name inside the config directory.
        if not os.path.isfile(calibration_fn):
            tmp_fn = os.path.join(CONFIG_DIR, calibration_fn)
            if os.path.isfile(tmp_fn):
                calibration_fn = tmp_fn
        if self.show_straightening:
            # When testing the line laser level, use the non-calibrated LRF.
            print('rpc, ro, h, laser_position:', rpc, ro, h, laser_position)
            self._lrf = LaserRangeFinder(
                ro=ro,
                rpc=rpc,
                h=h,
                laser_position=laser_position,
                track_progress_images=self.show_line_image,
                filter_outliers=int(rospy.get_param('~filter_outliers', 1)),
            )
        else:
            # Otherwise, load full calibration.
            # NOTE(review): if the calibration file is missing here, self._lrf
            # is never assigned and process() would fail with AttributeError —
            # confirm whether that is intended.
            if os.path.isfile(calibration_fn):
                print('Loading calibration file...')
                rpc, ro, h, laser_position = calibrate(calibration_fn)
                print('rpc, ro, h, laser_position:', rpc, ro, h, laser_position)
                self._lrf = LaserRangeFinder(
                    ro=ro,
                    rpc=rpc,
                    h=h,
                    laser_position=laser_position,
                    track_progress_images=self.show_line_image,
                    filter_outliers=int(rospy.get_param('~filter_outliers', 1)),
                )
        self.verbose = int(rospy.get_param("~verbose", 0))
        self.laser_pin = int(rospy.get_param("~laser_pin", c.LASER_PIN))
        self.distances_pub = rospy.Publisher('~scan', LaserScan, queue_size=1)
        self.line_image_pub = rospy.Publisher('~line/image', Image, queue_size=1)
        # This republishes images which don't have the laser line in them.
        self.image_off_pub = rospy.Publisher('~image/off', CompressedImage, queue_size=1)
        # This republishes images which do have the laser line in them.
        self.image_on_pub = rospy.Publisher('~image/on', CompressedImage, queue_size=1)
        self.line_columns_pub = rospy.Publisher(
            '~line/columns',
            msgs.LaserLineColumns,
            queue_size=1)
        self.rpi_gpio_set = rospy.ServiceProxy('/rpi_gpio/set_pin', DigitalWrite)
        self.camera_topic = rospy.get_param('~topic', '/raspicam/image')
        self.bridge = CvBridge()
        rospy.Service('~start', EmptySrv, self.start)
        rospy.Service('~stop', EmptySrv, self.stop)
        rospy.Service('~capture', EmptySrv, self.capture)
        self.show_marker = int(rospy.get_param("~marker", 0))
        markers = [] # [column]  NOTE(review): appears unused.
        if self.show_marker:
            # Interactive calibration mode: run the wizard and exit.
            manager = CalibrationManager(self, calibration_fn)
            manager.run()
            return
        if int(rospy.get_param("~start", 0)):
            self.start()
        # Start polling the sensors and base controller
        while not rospy.is_shutdown():
            # Publish all sensor values on a single topic for convenience
            now = rospy.Time.now()
            #TODO:laser off, capture image, laser on, capture image, produce distance
            r.sleep()

    def log(self, *msg):
        """Print *msg* only when the ~verbose param is set."""
        if self.verbose:
            print(' '.join(map(str, msg)))

    def turn_laser_on(self):
        """Drive the laser GPIO pin high."""
        self.rpi_gpio_set(self.laser_pin, 1)

    def turn_laser_off(self):
        """Drive the laser GPIO pin low."""
        self.rpi_gpio_set(self.laser_pin, 0)

    # def on_gpio_pin_change(self, msg):
    #     print('GPIO pin change:', msg

    def start(self, msg=None):
        """
        Launches a thread that infinitely publishes distance measurements.
        """
        if self._started:
            return
        self._started = True
        self._processing_thread = Thread(target=self.process)
        self._processing_thread.daemon = True
        self._processing_thread.start()
        return EmptyResponse()

    def stop(self, msg=None):
        """
        Stops the thread publishing distance measurements.
        """
        if not self._started:
            return
        self._started = False
        self._image_without_laser = None
        self._image_with_laser = None
        return EmptyResponse()

    def capture(self, msg=None):
        """
        Essentially a blocking version of start(), except only runs one iteration, then exits.
        """
        self._started = True
        self.process(iterations=1)

    def normalize_image_cv2(self, msg):
        """Convert a ROS image message into an RGB numpy array."""
        if isinstance(msg, CompressedImage):
            pil_image = self.normalize_compressed_image(msg)
            cv_image = np.array(pil_image)
            return cv_image
        else:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgra8")
            return cv2.cvtColor(cv_image, cv2.COLOR_BGRA2RGB)

    def normalize_image_pil(self, msg):
        """Convert a ROS image message into a PIL image."""
        if isinstance(msg, CompressedImage):
            return self.normalize_compressed_image(msg)
        else:
            cv_image = self.normalize_image_cv2(msg)
            return PilImage.fromarray(cv_image)

    def normalize_compressed_image(self, msg):
        """Decode a CompressedImage payload into a PIL image."""
        return PilImage.open(io.BytesIO(bytearray(msg.data)))

    def get_image(self):
        """Block until the next frame arrives on the camera topic."""
        if 'compressed' in self.camera_topic:
            # print('waiting for compressed image from %s' % self.camera_topic)
            return rospy.wait_for_message(self.camera_topic, CompressedImage)
        else:
            # print('waiting for raw image from %s' % self.camera_topic)
            return rospy.wait_for_message(self.camera_topic, Image)

    def get_image_pil(self):
        """Capture one frame as a PIL image."""
        return self.normalize_image_pil(self.get_image())

    def get_image_cv2(self):
        """Capture one frame as a numpy/OpenCV image."""
        return self.normalize_image_cv2(self.get_image())

    def process(self, iterations=0):
        """
        Continually captures distance measurements and publishes the data
        via standard ROS messages.

        Parameters
        ----------
        iterations : int
            If positive, the number of measurement loops to perform before exiting.
            Otherwise, an infinite loop will be performed.
        """
        max_straight_readings = 10
        straight_readings = []
        self.log('Processing thread started.')
        count = 0
        while self._started:
            count += 1
            t00 = time.time()
            # Ensure laser starts off.
            t0 = time.time()
            self.turn_laser_off()
            self.log('laser off:', time.time() - t0)
            # Save camera image.
            t0 = time.time()
            off_img = self.get_image_pil()
            #off_img.save(os.path.expanduser('~/off_img.jpeg'))#TODO
            image_message = self.bridge.cv2_to_compressed_imgmsg(
                np.array(off_img), dst_format='jpg')
            self.image_off_pub.publish(image_message)
            self.log('image capture:', time.time() - t0)
            # Ensure laser is on.
            t0 = time.time()
            self.turn_laser_on()
            self.log('laser on:', time.time() - t0)
            # Save camera image.
            on_img = self.get_image_pil()
            # on_img.save(os.path.expanduser('~/on_img.jpeg'))#TODO
            image_message = self.bridge.cv2_to_compressed_imgmsg(
                np.array(on_img), dst_format='jpg')
            self.image_on_pub.publish(image_message)
            self.log('image capture:', time.time() - t0)
            # Turn laser off again while we process.
            self.turn_laser_off()
            # Calculate distance
            t0 = time.time()
            distances, pfc = self._lrf.get_distance(
                off_img,#self._image_without_laser,
                on_img,#self._image_with_laser,
                save_images_dir=os.path.expanduser('~'),#TODO
                as_pfc=self.show_straightening,
            )
            assert len(distances) == len(pfc)
            self.log('dist calc:', time.time() - t0)
            # Filter distances to remove noise.
            self.noise_filter.add(distances)
            distances = self.noise_filter.get()
            if self.show_straightening:
                # Laser-leveling mode: report variance instead of publishing,
                # then shut down once enough samples are averaged.
                level_variance = np.var([_ for _ in distances if _ >= 0])
                print(
                    'raw pixels:',
                    ' '.join(str(int(v)) for v in compress_list(distances)),
                    'level variance (should be close to 0):', level_variance)
                sys.stdout.flush()
                straight_readings.append(level_variance)
                if len(straight_readings) >= max_straight_readings:
                    avg_level_offset = sum(straight_readings)/float(len(straight_readings))
                    is_good = avg_level_offset <= c.MAX_LEVEL_VARIANCE
                    color_func = success if is_good else fail
                    print('average level variance:', color_func('%.02f' % avg_level_offset))
                    if is_good:
                        print('Laser is level. Good job!')
                    else:
                        print(
                            'This is not good. Ensure your line laser is level and that '
                            'it is projecting against a flat wall about 50 cm away.')
                    rospy.signal_shutdown('complete')
                    return
            # else:
            #     # print('distances0:', distances)
            #     # print('distances2:', compress_list(distances))
            #     pass
            # break
            # Publish line image.
            #http://stackoverflow.com/a/14140796/247542
            if self.show_line_image:
                t0 = time.time()
                pil_image = self._lrf.out3.convert('RGB')
                cv_image = np.array(pil_image)
                #cv2.cvtColor(cv_image, cv2.COLOR_BGRA2RGB)
                # cv_image = cv_image[:, :, ::-1].copy()
                image_message = self.bridge.cv2_to_imgmsg(cv_image, encoding='bgr8')#outputs pipe?
                self.line_image_pub.publish(image_message)
                self.log('line pub:', time.time() - t0)
            #
            # #http://docs.ros.org/api/sensor_msgs/html/msg/LaserScan.html
            #http://answers.ros.org/question/198843/need-explanation-on-sensor_msgslaserscanmsg/
            width, height = on_img.size
            detection_angle = self._lrf.horz_fov_deg
            line_msg = msgs.LaserLineColumns()
            line_msg.width = width
            line_msg.height = height
            line_msg.line = pfc
            self.line_columns_pub.publish(line_msg)
            msg = LaserScan()
            msg.angle_max = (detection_angle/2.)*pi/180.
            msg.angle_min = -msg.angle_max
            msg.angle_increment = detection_angle*pi/180./width
            #msg.time_increment = 0#time.time() - self._t0
            #msg.scan_time = None
            msg.range_min = -1 # 0mm
            msg.range_max = 10000 # 10meters
            # NOTE(review): ranges are scaled by 1000 — confirm the intended
            # units match range_min/range_max above.
            msg.ranges = [_*1000 for _ in distances]
            # msg.intensities = None
            self.distances_pub.publish(msg)
            tdd = time.time() - t00
            self.log('full tdd:', tdd)
            if iterations > 0 and count >= iterations:
                break
        self._started = False
        self.log('Processing thread stopped.')

    def shutdown(self):
        """Node shutdown hook: make sure the laser is left off."""
        rospy.loginfo("Shutting down the node...")
        self.turn_laser_off()
if __name__ == '__main__':
    # Constructing the node runs its internal rospy loop until shutdown.
    LRF()
|
testCfsVersusVfs.py | #!/usr/bin/python3
import os
import re
import string
import sys
import subprocess
import time
import traceback
from queue import Queue
from queue import Empty
import random
from threading import Thread
# %N% -> substitute the return value recorded for input line N.
macro_var_regex = re.compile("%(?P<var>[-0-9]+)%")
# %RANDUINT(N)% -> substitute a random unsigned integer of at most N bits.
macro_rand_uint_regex = re.compile("%RANDUINT\((?P<length>[-0-9A-Z()]+)\)%")
# %RANDSTRING(N)% -> substitute a random letter string of length N.
macro_rand_string_regex = re.compile("%RANDSTRING\((?P<length>[-0-9A-Z()]+)\)%")
# Matches "return <value>" lines echoed by the test clients.
return_regex = re.compile("return +(?P<retval>[-0-9]+).*")
# Matches any readdir entry other than inode 1 (the root), i.e. CFS not empty.
cfs_not_empty_regex = re.compile("readdir result -- ino:(?!1 )[0-9]+ name:.*")
def queue_output(output, q):
    """Pump lines from a subprocess stdout stream into a queue.

    Each line is stripped before being enqueued. Bug fix: the original
    looped forever after the stream hit EOF, flooding the queue with empty
    strings; we now return at EOF.
    """
    while True:
        line = output.readline()
        if not line:
            # readline() returns '' only at EOF (a blank line is '\n').
            break
        q.put(line.strip())
def get_random_string(length):
    """Return a random string of ASCII letters with the given length."""
    chars = []
    for _ in range(length):
        chars.append(random.choice(string.ascii_letters))
    return ''.join(chars)
def get_random_uint(length_in_bits):
    """Return a uniform random integer in [0, 2**length_in_bits)."""
    upper_bound = 1 << length_in_bits
    return random.randrange(upper_bound)
class ProcessManager:
    """Drives the CLI test clients (VFS and/or CFS) as subprocesses.

    Each client's stdout is drained by a daemon thread into a queue so reads
    never block. Return values echoed by the clients are recorded per input
    line in a per-client "fdtable" so later commands can reference them via
    the %N% macro.
    """

    def __init__(self, test_bin, test_output_directory, test_set, fsp_shm_id):
        """Spawn the requested clients and start their stdout pump threads."""
        self.data = {
            "instance": {},
            "fdtable": {},  # {vfs: {line: return value}, cfs: {line: return value}}
            "output_queue": {},
            "output_worker_thread": {},
            "line_no": 1
        }
        if "vfs" in test_set:
            self.data["instance"]['vfs'] = subprocess.Popen(
                [test_bin['vfs'], test_output_directory['vfs']],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                bufsize=1,
                encoding="utf-8"
            )
        if "cfs" in test_set:
            self.data["instance"]['cfs'] = subprocess.Popen(
                [test_bin['cfs'], repr(fsp_shm_id)],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                bufsize=1,
                encoding="utf-8"
            )
        for key in self.data["instance"].keys():
            instance = self.data["instance"][key]
            q = Queue()
            t = Thread(target=queue_output, args=(instance.stdout, q))
            t.daemon = True
            t.start()
            self.data["output_queue"][key] = q
            self.data["output_worker_thread"][key] = t
            # Give the client a moment to print its banner, then drain it.
            time.sleep(0.1)
            while True:
                try:
                    line = q.get_nowait()
                except Empty:
                    break
                else:
                    print("%s -> %s" % (key, line))
            # Fixed: this table was initialized twice; once is enough.
            self.data["fdtable"][key] = {}

    def check_live(self):
        """Return False (after reporting) when any client has exited."""
        # Verify that the clients are still alive
        for key in self.data["instance"].keys():
            instance = self.data["instance"][key]
            ret = instance.poll()
            if ret is not None:
                print("%s has exited unexpectedly with status %i" % (key, ret))
                return False
        return True

    def terminus(self):
        """Close every client's stdin so the clients can terminate."""
        for key in self.data["instance"].keys():
            instance = self.data["instance"][key]
            instance.stdin.close()

    def run_command(self, command):
        """Expand macros in *command*, send it to every client, and record
        any 'return <n>' responses under the current line number."""
        line_no = self.data["line_no"]
        self.data["line_no"] = line_no + 1
        # Prepare randomness: generated at most once per input line so every
        # client receives identical random values.
        rand_uint = 0
        rand_string = ""
        print("====== Line %d ======" % line_no)
        for key in self.data["instance"].keys():
            instance = self.data["instance"][key]
            output = self.data["output_queue"][key]
            # Renamed from 'input', which shadowed the builtin.
            proc_stdin = instance.stdin
            # Tokenize and replace macro in input command
            command_token = command.strip().split(" ")
            for index in range(0, len(command_token)):
                token = command_token[index]
                # Check existence of variable
                matched = macro_var_regex.match(token)
                if matched:
                    # Regular expression lexer only returns string,
                    # so we require explicit casting here.
                    param = int(matched.groupdict()["var"])
                    command_token[index] = self.data["fdtable"][key][param]
                    continue
                # Check if we need to generate a random string
                matched = macro_rand_string_regex.match(token)
                if matched:
                    param = int(matched.groupdict()["length"])
                    if not param:
                        print("Cannot generate a 0-character string")
                        sys.exit(1)
                    if not rand_string:
                        rand_string = get_random_string(param)
                    command_token[index] = rand_string
                    continue
                # Check if we need to generate a random uint
                matched = macro_rand_uint_regex.match(token)
                if matched:
                    param = int(matched.groupdict()["length"])
                    if not rand_uint:
                        rand_uint = get_random_uint(param)
                    command_token[index] = repr(rand_uint)
            real_command = " ".join(command_token)
            print("%s <- %s" % (key, real_command.strip()))
            print(real_command.strip(), file=proc_stdin, flush=True)
            time.sleep(0.1)
            try:
                # Echo client output until it goes quiet for 0.4s.
                while True:
                    line = output.get(timeout=0.4)
                    if not self.check_live():
                        sys.exit(1)
                    print("%s -> %s" % (key, line), flush=True)
                    matched = return_regex.match(line)
                    # Be aware that line_no here is an integer
                    if matched:
                        self.data["fdtable"][key][line_no] = matched.groupdict()["retval"]
            except Empty:
                pass
def sanity_check(test_bin, test_output_directory, test_set, fsp_shm):
    """Verify the test targets start from a clean state.

    Checks that the VFS output directories exist and are empty, and that the
    CFS instance contains no entries besides the root. Returns True when it
    is safe to run the batch.
    """
    # Check output directories
    # If we are testing on CFS only, test_output_directory isn't really used
    if "vfs" in test_set:
        for key in test_set:
            check_dir = test_output_directory[key]
            if not os.path.isdir(check_dir):
                print("%s is not a directory. Cannot continue." % check_dir)
                return False
            if os.listdir(check_dir):
                print("%s is not empty. Cannot continue." % check_dir)
                return False
    # Check CFS content
    if "cfs" in test_set:
        cfs = subprocess.Popen([test_bin['cfs'], repr(fsp_shm)],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            bufsize=1,
            encoding="utf-8"
        )
        # List the root directory; any entry other than inode 1 (the root
        # itself) means CFS already has content.
        (out, err) = cfs.communicate(input="lsdir .\n")
        for line in out.split("\n"):
            matched = cfs_not_empty_regex.match(line)
            if matched:
                print("CFS is not empty. Cannot continue.")
                return False
    return True
def usage_and_exit(e):
    """Print command-line usage and exit with status *e* (via SystemExit)."""
    # Fixed typo in the script name (was "testCfsVersysVfs.py").
    print("Usage: testCfsVersusVfs.py -c <fsp shm> -v -i <batch command file>")
    print(" -c <fsp shm>")
    print(" If specified, the testing sequence will run on CFS with this shm id.")
    print(" -v")
    print(" If specified, the testing sequence will run on Linux VFS.")
    print(" -e")
    print(" Allow existing files in cfs / vfs paths")
    print(" If both -c and -v are specified, resulting FS structure will be diffed.")
    print("Each line in a batch file should be a single command, where %KEYWORD% expands:")
    print(" %N% where N is a positive integer")
    print(" Return value of N-th line. Will trigger a KeyError if it refers to a")
    print(" line not encountered yet")
    print(" %RANDUINT(N)%")
    print(" Return a random unsigned integer with at most N bits")
    print(" %RANDSTRING(N)%")
    print(" Return a random string with length N")
    sys.exit(e)
def main():
    """Parse command-line options, replay the batch command file against the
    selected filesystem back ends (CFS and/or Linux VFS), and diff the
    resulting trees when both back ends were exercised.

    Exits via sys.exit: 0 on success (or nothing to do), non-zero on error
    or on a non-empty diff.
    """
    # Binary paths are resolved inside the CMake build tree; the
    # @CMAKE_CURRENT_BINARY_DIR@ placeholder is substituted at configure time.
    tests_prefix_dir = '@CMAKE_CURRENT_BINARY_DIR@/'
    test_bin = {'cfs': tests_prefix_dir + 'testAppCli', 'vfs': tests_prefix_dir + 'testAppCliVfs'}
    test_set = []
    fsp_shm = -1
    input_file = ""
    skip_sanity_check = False
    # Manual argv scan (instead of argparse): -c and -i consume a following
    # value, while -v and -e are bare flags.
    arg_index = 1
    while arg_index < len(sys.argv):
        current_arg = sys.argv[arg_index]
        if current_arg == "-c":
            test_set.append("cfs")
            if arg_index + 1 >= len(sys.argv):
                usage_and_exit(1)
            arg_index += 1
            try:
                fsp_shm = int(sys.argv[arg_index])
            except ValueError:
                print(traceback.format_exc())
                usage_and_exit(1)
        elif current_arg == "-v":
            test_set.append("vfs")
        elif current_arg == "-i":
            if arg_index + 1 >= len(sys.argv):
                usage_and_exit(1)
            arg_index += 1
            input_file = sys.argv[arg_index]
        elif current_arg == "-e":
            skip_sanity_check = True
        arg_index += 1
    if not test_set:
        print("Neither VFS or CFS is specified. Nothing to do.")
        usage_and_exit(0)
    if not input_file:
        print("No input testing sequence specified. Nothing to do.")
        usage_and_exit(0)
    test_output_directory = {'vfs': "/tmp/vfs",
                             'cfs': "/tmp/cfs"}
    # NOTE(review): the sanity check always runs; -e only downgrades a
    # failure to a warning instead of skipping the check itself.
    if not sanity_check(test_bin, test_output_directory, test_set, fsp_shm):
        if not skip_sanity_check:
            print("Sanity check failed")
            sys.exit(1)
        else:
            print("Warning: Sanity check skipped")
    # Use a context manager so the batch file is closed even if a command
    # raises (the original leaked the file descriptor).
    try:
        batch_fd = open(input_file, "r")
    except IOError:
        print("Cannot open batch file")
        sys.exit(1)
    with batch_fd:
        # Feed every batch line through the process manager, then shut the
        # child processes down.
        mgr = ProcessManager(test_bin, test_output_directory, test_set, fsp_shm)
        for line in batch_fd:
            mgr.run_command(line)
        mgr.terminus()
    if "vfs" in test_set and "cfs" in test_set:
        # Now dump CFS content out to test_output_directory
        print("===== Dumping CFS =====")
        completed = subprocess.run(["@CMAKE_CURRENT_BINARY_DIR@/testDumpToVfs", repr(fsp_shm), test_output_directory['cfs']])
        if completed.returncode != 0:
            print("===== Dumper returned %d indicated error =====" % completed.returncode)
            sys.exit(completed.returncode)
        # Diff the two trees; a non-zero diff status becomes our exit status.
        print("===== Diff start =====")
        completed = subprocess.run(["diff", "-aur", test_output_directory['cfs'], test_output_directory['vfs']])
        print("===== Diff returned %d =====" % completed.returncode)
        sys.exit(completed.returncode)


if __name__ == '__main__':
    main()
|
simple_sample.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : Max_Pengjb
@ date : 2018/9/23 22:37
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
-------------------------------------------------
"""
import time
import multiprocessing
import os
__author__ = 'Max_Pengjb'
start = time.time()
# 下面写上代码块
"""
怎么实现多进程
Python是跨平台的,自然也应该提供一个跨平台的多进程支持。multiprocessing模块就是跨平台版本的多进程模块。
实现多进程可以使用python官方提供的一个类Process。
Process类用来描述一个进程对象。创建子进程的时候,只需要传入一个执行函数和函数的参数即可完成Process示例的创建。
•star()方法启动进程;
•join()方法实现进程间的同步,等待所有进程退出;(等所有子进程执行完了,在执行后面的代码)
•close()用来阻止多余的进程涌入进程池 Pool造成进程阻塞;
multiprocessing.Process(group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None)
•target是函数名字,需要调用的函数;
•args函数需要的参数,以tuple的形式传入。
首先我们来写一个简单的多进程程序!
"""
def run_proc(name):
    """Child-process entry point: announce the worker's name and its PID."""
    message = 'Child process {0} {1} Running '.format(name, os.getpid())
    print(message)
if __name__ == '__main__':
    # Demo driver: spawn five child processes, waiting for each one before
    # continuing (join() blocks until the child exits).
    print('Parent process {0} is Running'.format(os.getpid()))
    for i in range(5):
        p = multiprocessing.Process(target=run_proc, args=(str(i),))
        print('process start')
        p.start()
        p.join()  # wait for this child to finish before starting the next
    print('Process close')
# End of the demo code block; report total wall-clock running time.
end = time.time()
print('Running time: %s Seconds' % (end - start))
|
test_pdb.py | # A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
    """Context manager that makes testing Pdb in doctests easier."""

    def __init__(self, input):
        self.input = input

    def __enter__(self):
        # Swap stdin for a fake that replays the canned command list,
        # remembering the real stdin and any active trace function.
        self.real_stdin = sys.stdin
        sys.stdin = _FakeInput(self.input)
        if hasattr(sys, 'gettrace'):
            self.orig_trace = sys.gettrace()
        else:
            self.orig_trace = None

    def __exit__(self, *exc):
        # Put the real stdin back; reinstate a saved trace function, if any.
        sys.stdin = self.real_stdin
        if self.orig_trace:
            sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
# NOTE: deliberately left without a docstring or body comments -- the
# ``longlist``/``source`` doctests in test_list_commands display this
# function's exact source, so any added line would change expected output.
def do_nothing():
    pass
# NOTE: body must stay exactly two lines -- ``source do_something`` in
# test_list_commands prints this function's verbatim source.
def do_something():
    print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exit
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
# A synthetic module object whose only function calls back into user code;
# test_pdb_skip_modules_with_callback uses it to exercise Pdb(skip=...).
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
    """Run pdb.method(arg)."""
    debugger = pdb.Pdb(nosigint=True, readrc=False)
    bound_method = getattr(debugger, method)
    bound_method(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
    # This test depends on undefined behaviour of C-Python: the value
    # of frame.f_lineno is only valid, if frame.f_trace is not None.
    # Stackless Python behaves exactly like C-Python, if soft-switching
    # is disabled.
    """Test for reference leaks when the SIGINT handler is set.

    NOTE(review): executable doctest — the expected output below must match
    pdb's output exactly.  The stackless import branch only matters on
    Stackless Python; on C-Python the ImportError path runs test_function()
    directly.  ``pdb`` and ``sys`` are presumably imported at module level
    of this test file — confirm against the file header (outside this view).

    >>> def test_function():
    ...     i = 1
    ...     while i <= 2:
    ...         sess = pdb.Pdb()
    ...         sess.set_trace(sys._getframe())
    ...         print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    ...         i += 1

    >>> with PdbTestInput(['continue',
    ...                    'continue']):
    ...     try:
    ...         import stackless
    ...         softswitch_state = stackless.enable_softswitch(False)
    ...     except ImportError:
    ...         test_function()
    ...     else:
    ...         try:
    ...             test_function()
    ...         finally:
    ...             softswitch_state = stackless.enable_softswitch(softswitch_state)
    > <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
    -> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    (Pdb) continue
    pdb 1: <built-in function default_int_handler>
    > <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
    -> sess.set_trace(sys._getframe())
    (Pdb) continue
    pdb 2: <built-in function default_int_handler>
    """
class PdbTestCase(unittest.TestCase):
    """Drive pdb as a subprocess (or through its public helpers) and check
    its output.

    NOTE(review): several tests depend on exact line numbers inside the
    triple-quoted script literals (breakpoint targets like ``b 3``,
    ``('bar', 4)``, ``main.py(5)``), so blank lines inside those literals
    are significant — do not reflow them.
    """

    def tearDown(self):
        # Several tests write a scratch script to support.TESTFN.
        support.unlink(support.TESTFN)

    def _run_pdb(self, pdb_args, commands):
        """Run ``python -m pdb`` with *pdb_args*, feeding *commands* to stdin.

        Returns a ``(stdout, stderr)`` pair of decoded strings.  stderr is
        redirected into stdout, so the second item is effectively empty.
        """
        # pdb compiles the debuggee; drop any bytecode cache it leaves behind.
        self.addCleanup(support.rmtree, '__pycache__')
        cmd = [sys.executable, '-m', 'pdb'] + pdb_args
        with subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as proc:
            stdout, stderr = proc.communicate(str.encode(commands))
        # communicate() may return None for a closed stream; decode only
        # when bytes are present.
        stdout = stdout and bytes.decode(stdout)
        stderr = stderr and bytes.decode(stderr)
        return stdout, stderr

    def run_pdb_script(self, script, commands):
        """Run 'script' lines with pdb and the pdb 'commands'."""
        filename = 'main.py'
        with open(filename, 'w') as f:
            f.write(textwrap.dedent(script))
        self.addCleanup(support.unlink, filename)
        return self._run_pdb([filename], commands)

    def run_pdb_module(self, script, commands):
        """Runs the script code as part of a module"""
        # Build a throwaway package t_main with the script as __main__.py,
        # then debug it via ``python -m pdb -m t_main``.
        self.module_name = 't_main'
        support.rmtree(self.module_name)
        main_file = self.module_name + '/__main__.py'
        init_file = self.module_name + '/__init__.py'
        os.mkdir(self.module_name)
        with open(init_file, 'w') as f:
            pass
        with open(main_file, 'w') as f:
            f.write(textwrap.dedent(script))
        self.addCleanup(support.rmtree, self.module_name)
        return self._run_pdb(['-m', self.module_name], commands)

    def _assert_find_function(self, file_content, func_name, expected):
        """Write *file_content* to TESTFN and check pdb.find_function().

        *expected* is either None (not found) or a ``(name, lineno)`` pair;
        the filename component is filled in here.
        """
        file_content = textwrap.dedent(file_content)
        with open(support.TESTFN, 'w') as f:
            f.write(file_content)
        expected = None if not expected else (
            expected[0], support.TESTFN, expected[1])
        self.assertEqual(
            expected, pdb.find_function(func_name, support.TESTFN))

    def test_find_function_empty_file(self):
        self._assert_find_function('', 'foo', None)

    def test_find_function_found(self):
        # 'bar' is defined on line 4 of the dedented content.
        self._assert_find_function(
            """\
            def foo():
                pass

            def bar():
                pass

            def quux():
                pass
            """,
            'bar',
            ('bar', 4),
        )

    def test_issue7964(self):
        # open the file as binary so we can force \r\n newline
        with open(support.TESTFN, 'wb') as f:
            f.write(b'print("testing my pdb")\r\n')
        cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
        proc = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            )
        self.addCleanup(proc.stdout.close)
        stdout, stderr = proc.communicate(b'quit\n')
        self.assertNotIn(b'SyntaxError', stdout,
                         "Got a syntax error running test script under PDB")

    def test_issue13183(self):
        # Stepping out of a nested call must land on the caller's frame;
        # the assertion checks the return event of foo() on script line 5.
        script = """
            from bar import bar

            def foo():
                bar()

            def nope():
                pass

            def foobar():
                foo()
                nope()

            foobar()
        """
        commands = """
            from bar import bar
            break bar
            continue
            step
            step
            quit
        """
        bar = """
            def bar():
                pass
        """
        with open('bar.py', 'w') as f:
            f.write(textwrap.dedent(bar))
        self.addCleanup(support.unlink, 'bar.py')
        stdout, stderr = self.run_pdb_script(script, commands)
        self.assertTrue(
            any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
            'Fail to step into the caller after a return')

    def test_issue13210(self):
        # invoking "continue" on a non-main thread triggered an exception
        # inside signal.signal
        with open(support.TESTFN, 'wb') as f:
            f.write(textwrap.dedent("""
                import threading
                import pdb

                def start_pdb():
                    pdb.Pdb(readrc=False).set_trace()
                    x = 1
                    y = 1

                t = threading.Thread(target=start_pdb)
                t.start()""").encode('ascii'))
        cmd = [sys.executable, '-u', support.TESTFN]
        proc = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            )
        self.addCleanup(proc.stdout.close)
        stdout, stderr = proc.communicate(b'cont\n')
        self.assertNotIn('Error', stdout.decode(),
                         "Got an error running test script under PDB")

    def test_issue16180(self):
        # A syntax error in the debuggee.
        script = "def f: pass\n"
        commands = ''
        expected = "SyntaxError:"
        stdout, stderr = self.run_pdb_script(script, commands)
        self.assertIn(expected, stdout,
            '\n\nExpected:\n{}\nGot:\n{}\n'
            'Fail to handle a syntax error in the debuggee.'
            .format(expected, stdout))

    def test_readrc_kwarg(self):
        # readrc=False must prevent .pdbrc from being executed; the bogus
        # 'invalid' command in it would otherwise raise a NameError.
        script = textwrap.dedent("""
            import pdb; pdb.Pdb(readrc=False).set_trace()

            print('hello')
        """)

        # HOME is removed so pdb cannot pick up a real user .pdbrc either.
        save_home = os.environ.pop('HOME', None)
        try:
            with support.temp_cwd():
                with open('.pdbrc', 'w') as f:
                    f.write("invalid\n")

                with open('main.py', 'w') as f:
                    f.write(script)

                cmd = [sys.executable, 'main.py']
                proc = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                with proc:
                    stdout, stderr = proc.communicate(b'q\n')
                    self.assertNotIn("NameError: name 'invalid' is not defined",
                                     stdout.decode())
        finally:
            if save_home is not None:
                os.environ['HOME'] = save_home

    def test_header(self):
        # set_trace(header=...) must print the header before prompting.
        stdout = StringIO()
        header = 'Nobody expects... blah, blah, blah'
        with ExitStack() as resources:
            resources.enter_context(patch('sys.stdout', stdout))
            resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
            pdb.set_trace(header=header)
        self.assertEqual(stdout.getvalue(), header + '\n')

    def test_run_module(self):
        script = """print("SUCCESS")"""
        commands = """
            continue
            quit
        """
        stdout, stderr = self.run_pdb_module(script, commands)
        self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)

    def test_module_is_run_as_main(self):
        # Under ``pdb -m`` the module must still see __name__ == '__main__'.
        script = """
            if __name__ == '__main__':
                print("SUCCESS")
        """
        commands = """
            continue
            quit
        """
        stdout, stderr = self.run_pdb_module(script, commands)
        self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)

    def test_breakpoint(self):
        # Set a breakpoint on line 3 and quit before it is hit, so the
        # print on the following line must never run.
        script = """
            if __name__ == '__main__':
                pass
                print("SUCCESS")
                pass
        """
        commands = """
            b 3
            quit
        """
        stdout, stderr = self.run_pdb_module(script, commands)
        self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
        self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)

    def test_run_pdb_with_pdb(self):
        # Debugging pdb itself must at least print the usage banner.
        commands = """
            c
            quit
        """
        stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
        self.assertIn(
            pdb._usage,
            stdout.replace('\r', '')  # remove \r for windows
        )

    def test_module_without_a_main(self):
        # A package lacking __main__.py must produce a clean ImportError.
        module_name = 't_main'
        support.rmtree(module_name)
        init_file = module_name + '/__init__.py'
        os.mkdir(module_name)
        with open(init_file, 'w') as f:
            pass
        self.addCleanup(support.rmtree, module_name)
        stdout, stderr = self._run_pdb(['-m', module_name], "")
        self.assertIn("ImportError: No module named t_main.__main__",
                      stdout.splitlines())

    def test_blocks_at_first_code_line(self):
        # pdb must stop on the first executable line (4), skipping the
        # comment and blank lines above it.
        script = """
            #This is a comment, on line 2

            print("SUCCESS")
        """
        commands = """
            quit
        """
        stdout, stderr = self.run_pdb_module(script, commands)
        self.assertTrue(any("__main__.py(4)<module>()"
                            in l for l in stdout.splitlines()), stdout)

    def test_relative_imports(self):
        # Relative imports inside a package debugged with ``pdb -m`` must
        # resolve; stop on line 5 of __main__.py and inspect the values.
        self.module_name = 't_main'
        support.rmtree(self.module_name)
        main_file = self.module_name + '/__main__.py'
        init_file = self.module_name + '/__init__.py'
        module_file = self.module_name + '/module.py'
        self.addCleanup(support.rmtree, self.module_name)
        os.mkdir(self.module_name)
        with open(init_file, 'w') as f:
            f.write(textwrap.dedent("""
                top_var = "VAR from top"
            """))
        with open(main_file, 'w') as f:
            f.write(textwrap.dedent("""
                from . import top_var
                from .module import var
                from . import module
                pass # We'll stop here and print the vars
            """))
        with open(module_file, 'w') as f:
            f.write(textwrap.dedent("""
                var = "VAR from module"
                var2 = "second var"
            """))
        commands = """
            b 5
            c
            p top_var
            p var
            p module.var2
            quit
        """
        stdout, _ = self._run_pdb(['-m', self.module_name], commands)
        self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()))
        self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
        self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def load_tests(*args):
    """unittest load_tests protocol hook.

    Collect both the ``PdbTestCase`` unit tests and the module-level
    doctests of this file into a single suite.
    """
    from test import test_pdb
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(PdbTestCase))
    suite.addTest(doctest.DocTestSuite(test_pdb))
    return suite
# Allow running this test file directly; unittest.main() will invoke
# load_tests() above to build the suite.
if __name__ == '__main__':
    unittest.main()
|
process.py | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************* Process ***************************************************************
# ***************************************** PROCESS CLASS ********************************************************
"""
..
Sections:
* :ref:`Process_Overview`
* :ref:`Process_Creation`
* :ref:`Process_Structure`
* :ref:`Process_Pathway`
* :ref:`Process_Mechanisms`
* :ref:`Process_Projections`
* :ref:`Process_Input_And_Output`
* :ref:`Process_Learning_Sequence`
* :ref:`Process_Execution`
* :ref:`Process_Execution_Learning`
* :ref:`Process_Class_Reference`
.. _Process_Overview:
Overview
--------
A Process is the simplest form of `Composition <Composition>`, made up of a `lineal <Process_Footnotes>` sequence of
`Mechanisms <Mechanism>` linked by `Projections <Projection>`. Processes can be executed on their own, but most
commonly they are used to compose a `System`, which is the most powerful form of Composition in PsyNeuLink. Processes
are nevertheless useful, as they define a simpler unit of processing than a System (e.g., for debugging, or for use in
multiple Systems), and are used as the unit of `learning <System_Learning>` within a System. The general features of
Processes are summarized below, followed by a more detailed description in the sections that follow.
Mechanisms and Projections are composed into a Process by assigning them to the Process' `pathway
<Process.pathway>` attribute. Executing a Process executes all of its Mechanisms in the order in which they are
listed in its `pathway <Process.pathway>`. Projections can be specified among any Mechanisms in a Process,
including to themselves, however they must compose a `lineal <Process_Footnotes>` sequence. A Process cannot involve
any "branching" (beyond what may be produced by recurrent loops within the Process); that must be done by using a
Process to compose each branch, and then composing the Processes into a `System`. Mechanisms in a Process can project
to and receive Projections from Mechanisms in other Processes, however these will not have any effect when the Process
is executed; these will only have an effect if all of the Processes involved are members of the same System and the
`System is executed <System_Execution_Processing>`.
Projections between Mechanisms can be trained by `specifying them for learning
<MappingProjection_Learning_Specification>`. Learning can also be `specified for the entire Process
<Process_Learning_Specification>`, in which case all of the Projections among Mechanisms in the Process are trained
(see `Process_Learning_Sequence` below).
.. _Process_Creation:
Creating a Process
------------------
A Process is created by instantiating the `Process` class. The Mechanisms to be included are specified in a list in its
**pathway** argument, in the order in which they should be executed by the Process. The Mechanism entries can be
separated by `Projections <Projection>` used to connect them. If no arguments are provided to the **pathway** argument,
a Process with an empty pathway is created.
.. _Process_Structure:
Structure
---------
.. _Process_Pathway:
*Pathway*
~~~~~~~~~
A Process is defined by its `pathway <Process.pathway>` attribute, which is a list of `Mechanisms <Mechanism>` and
`Projections <Projection>`, that are executed in the order in which they are specified in the list. Each Mechanism in
the `pathway <Process.pathway>` must project at least to the next one in the `pathway <Process.pathway>`,
though it can project to others, and receive recurrent (feedback) Projections from them. However, a `pathway
<Process.pathway>` cannot include branching patterns beyond any produced by recurrent loops (see `Examples
<Process_Examples>` below); that is, a Mechanism cannot project to another Mechanism that falls outside the `lineal
<Process_Footnotes>` sequence of the `pathway <Process.pathway>`. To compose more complex, branched, structures,
a Process should be created for each "branch", and these used to compose a `System <System_Creation>`.
The Mechanisms specified in the `pathway` for a Process must be `ProcessingMechanisms <ProcessingMechanism>`, and
the projections between Mechanisms in a Process must be `MappingProjections <MappingProjection>`. These transmit the
output of a Mechanism (the Projection's `sender <MappingProjection.MappingProjection.sender>`) to the input of
another Mechanism (the Projection's `receiver <MappingProjection.MappingProjection.receiver>`). Specification of a
`pathway` requires, at the least, a list of Mechanisms. Each of these can be specified directly, or using a **tuple**
that also contains a set of `runtime parameters <Mechanism_Runtime_Parameters>` (see `below
<Process_Mechanism_Specification>`). A Projection between a pair of Mechanisms can be specified by interposing it in
the list between the pair. If no Projection is specified between two adjacent Mechanisms in the `pathway
<Process.pathway>`, and there is no otherwise specified Projection between them, a default MappingProjection is
automatically created when the Process is created that projects from the first to the second member of the pair.
Specifying the Components of a pathway is described in detail below.
.. _Process_Mechanisms:
*Mechanisms*
~~~~~~~~~~~~
The `Mechanisms <Mechanism>` of a Process must be listed explicitly in the **pathway** argument of the `Process`
class, in the order they are to be executed when the Process (or any System to which it belongs) is `executed
<Process_Execution>`. The first Mechanism in a Process is designated as its `ORIGIN` Mechanism, and is assigned to its
`origin_mechanism <Process.origin_mechanism>` attribute; it receives as its input any `input
<Process_Input_And_Output>` provided to the Process' `execute <Process.execute>` or `run <Process.run>`
methods. The last Mechanism listed in the `pathway <Process.pathway>` is designated as the `TERMINAL` Mechanism,
and is assigned to its `terminal_mechanism <Process.terminal_mechanism>` attribute; its `output_values
<Mechanism_Base.output_values>` is assigned as the `output <Process_Output>` of the Process.
.. _Process_Mechanism_Initialize_Cycle:
Any Mechanism that sends a Projection that closes a recurrent loop within the `pathway <Process.pathway>` is
designated as `INITIALIZE_CYCLE`; whenever that Mechanism is `initialized <Process_Execution_Initialization>`,
it is assigned the value specified for it in the **initial_values** argument of the Process' `execute
<Process.execute>` or `run <Process.run>` methods. Mechanisms that receive a Projection from one designated
`INITIALIZE_CYCLE` are themselves designated as `CYCLE`. All other Mechanisms in the `pathway <Process.pathway>`
are designated as `INTERNAL`.
.. note::
The `origin_mechanism <Process.origin_mechanism>` and `terminal_mechanism <Process.terminal_mechanism>`
of a Process are not necessarily the `ORIGIN` and/or `TERMINAL` Mechanisms of the System(s) to which the Process
belongs (see `example <LearningProjection_Target_vs_Terminal_Figure>`). The designations of a Mechanism's status
in the Process(es) to which it belongs are listed in its `processes <Mechanism_Base.processes>` attribute.
.. _Process_Mechanism_Specification:
Mechanisms can be specified in the **pathway** argument of the `Process` class in one of two ways:
* **Directly** -- using any of the ways used to `specify a Mechanism <Mechanism_Creation>`.
..
* **MechanismTuple** -- the first item must be a specification for the Mechanism using any of the ways used to
`specify a Mechanism <Mechanism_Creation>`; the second must be a set of `runtime parameters
<Mechanism_Runtime_Parameters>`. Runtime parameters are used for that Mechanism when the Process (or a System
to which it belongs) is executed; otherwise they do not remain associated with the Mechanism.
The same Mechanism can appear more than once in a `pathway <Process.pathway>`, as one means of generating a
recurrent processing loop (another is to specify this in the Projections -- see below).
.. _Process_Projections:
*Projections*
~~~~~~~~~~~~~
`MappingProjections <MappingProjection>` between Mechanisms in the `pathway <Process.pathway>` of a Process can be
specified in any of the following ways:
* **Inline specification** -- a MappingProjection specification can be interposed between any two Mechanisms in the
`pathway <Process.pathway>` list. This creates a Projection from the preceding Mechanism in the list to the
one that follows it. It can be specified using any of the ways used to `specify a Projection
<Projection_Specification>` or the `matrix parameter <Mapping_Matrix_Specification>` of one.
..
.. _Process_Tuple_Specification:
* **Tuple learning specification** -- this can be used in the same way as an inline specification; the first item
must be a MappingProjection specification that takes the same form as an inline specification, and the second must be
a `learning specification <MappingProjection_Learning_Tuple_Specification>`.
..
* **Stand-alone MappingProjection** -- when a Projection is `created <Projection_Creation>` on its own,
it can be assigned a `sender <Projection_Sender>` and/or a `receiver <Projection_Receiver>`
Mechanism. If both are in the Process, then that Projection will be used when creating the Process. Stand-alone
specification of a MappingProjection between two Mechanisms in a Process takes precedence over any other
form of specification; that is, the stand-alone Projection will be used in place of any that is specified between
the Mechanisms in a `pathway <Process.pathway>`. Stand-alone specification is required to implement
MappingProjections between Mechanisms that are not adjacent to one another in the `pathway <Process.pathway>`.
..
* **Default assignment** -- for any Mechanism that does not receive a MappingProjection from another Mechanism in the
Process (specified using one of the methods above), a `MappingProjection` is automatically created from the
Mechanism that precedes it in the `pathway <Process.pathway>`. If the format of the `value <OutputState.value>`
of the preceding Mechanism's `primary OutputState <OutputState_Primary>` matches that of the next Mechanism, then an
`IDENTITY_MATRIX` is used for the Projection's `matrix <MappingProjection.matrix>` parameter; if the formats do not
match, or `learning has been specified <Process_Learning_Sequence>` either for the Projection or the Process, then a
`FULL_CONNECTIVITY_MATRIX` is used. If the Mechanism is the `origin_mechanism <Process.origin_mechanism>`
(i.e., first in the `pathway <Process.pathway>`), a `ProcessInputState <Process_Input_And_Output>` is used
as the `sender <MappingProjection.sender>`, and an `IDENTITY_MATRIX` is used for the MappingProjection.
.. _Process_Input_And_Output:
*Process input and output*
~~~~~~~~~~~~~~~~~~~~~~~~~~
The `input <Process.input>` of a Process is a list or 2d np.array provided as the **input** argument in its
`execute <Process.execute>` method, or the **inputs** argument of its `run <Process.run>` method. When a
Process is created, a set of `ProcessInputStates <ProcessInputState>` (listed in its `process_input_states` attribute)
and `MappingProjections <MappingProjection>` are automatically created to transmit the Process' `input
<Process.input>` to its `origin_mechanism <Process.origin_mechanism>`, as follows:
* if the number of items in the **input** is the same as the number of `InputStates <InputState>` for the
`origin_mechanism <Process.origin_mechanism>`, a MappingProjection is created for each item of the input to a
distinct InputState of the `origin_mechanism <Process.origin_mechanism>`;
..
* if the **input** has only one item but the `origin_mechanism <Process.origin_mechanism>` has more than one
InputState, a single `ProcessInputState <ProcessInputState>` is created with Projections to each of the
`origin_mechanism <Process.origin_mechanism>`'s InputStates;
..
* if the **input** has more than one item but the `origin_mechanism <Process.origin_mechanism>` has only one
InputState, a `ProcessInputState <ProcessInputState>` is created for each item of the input, and all project to
the `origin_mechanism <Process.origin_mechanism>`'s InputState;
..
* otherwise, if the **input** has more than one item and the `origin_mechanism <Process.origin_mechanism>` has
more than one InputState, but the numbers are not equal, an error message is generated indicating that there is an
ambiguous mapping from the Process' **input** value to `origin_mechanism <Process.origin_mechanism>`'s
InputStates.
.. _Process_Output:
The output of a Process is assigned as the `output_values <Mechanism_Base.output_values>` attribute of its `TERMINAL`
Mechanism.
.. _Process_Learning_Sequence:
*Learning*
~~~~~~~~~~
Learning operates over a *learning sequence*: a contiguous sequence of `ProcessingMechanisms <ProcessingMechanism>` in
a Process `pathway <Process.pathway>`, and the `MappingProjections <MappingProjection>` between them, that have
been specified for learning. Learning modifies the `matrix <MappingProjection.matrix>` parameter of the
MappingProjections in the sequence, so that the input to the first ProcessingMechanism in the sequence generates an
output from the last ProcessingMechanism that matches as closely as possible the target specified for the sequence
(see `Process_Execution_Learning` below for a more detailed description).
.. _Process_Learning_Specification:
Learning can be `specified for individual (or subsets of) MappingProjections
<MappingProjection_Learning_Specification>`, or for the entire Process. It is specified for the entire process by
assigning a specification for a `LearningProjection <LearningProjection_Creation>` or `LearningSignal
<LearningSignal_Specification>` specification, or the keyword *ENABLED*, to the **learning** argument of the
Process' constructor. Specifying learning for a Process implements it for all MappingProjections in the Process (except
those that project from the `process_input_states` to the `origin_mechanism <Process.origin_mechanism>`), which
are treated as a single learning sequence. Mechanisms that receive MappingProjections for which learning has been
specified must be compatible with learning (that is, their `function <Mechanism_Base.function>` must be compatible with
the `function <LearningMechanism.function>` of the `LearningMechanism` for the MappingProjections they receive (see
`LearningMechanism_Function`).
.. _Process_Learning_Components:
The following Components are created for each learning sequence specified for a Process (see figure below):
* a `TARGET` `ComparatorMechanism` (assigned to the Process' `target_nodes <Process.target_nodes>`
attribute), that is used to `calculate an error signal <ComparatorMechanism_Function>` for the sequence, by
comparing `a specified output <LearningMechanism_Activation_Output>` of the last Mechanism in the learning
sequence (received in the ComparatorMechanism's *SAMPLE* `InputState <ComparatorMechanism_Structure>`) with the
item of the **target** argument in Process' `execute <Process.execute>` or `run <Process.run>` method
corresponding to the learning sequence (received in the ComparatorMechanism's *TARGET* `InputState
<ComparatorMechanism_Structure>`).
..
* a MappingProjection that projects from the last ProcessingMechanism in the sequence to the *SAMPLE* `InputState
<ComparatorMechanism_Structure>` of the `TARGET` Mechanism;
..
* a ProcessInputState to represent the corresponding item of the **target** argument of the Process' `execute
<Process.execute>` and `run <Process.run>` methods;
..
* a MappingProjection that projects from the `ProcessInputState <ProcessInputState>` for the **target** item to the
*TARGET* `InputState <ComparatorMechanism_Structure>` of the `TARGET` Mechanism;
..
* a `LearningMechanism` for each MappingProjection in the sequence that calculates the `learning_signal
<LearningMechanism.learning_signal>` used to modify the `matrix <MappingProjection.matrix>` parameter for that
MappingProjection, along with a `LearningSignal` and `LearningProjection` that convey the `learning_signal
<LearningMechanism.learning_signal>` to the MappingProjection's *MATRIX* `ParameterState
<Mapping_Matrix_ParameterState>` (additional MappingProjections are created for the LearningMechanism -- see
`LearningMechanism_Learning_Configurations` for details).
.. note::
The Components created when learning is specified for individual MappingProjections of a Process (or subsets of
them) take effect only if the Process is executed on its own (i.e., using its `execute <Process.execute>`
or `run <Process.run>` methods). For learning to occur in a Process when it is `executed as part of a System
<System_Execution_Learning>`, learning must be specified for the *entire Process*, as described above.
COMMENT:
XXX ?HOW:
Different learning algorithms can be specified (e.g., `Reinforcement` or `BackPropagation`), that implement the
Mechanisms and LearningSignals required for the specified type of learning. However, as noted above,
all Mechanisms that receive Projections being learned must be compatible with learning.
COMMENT
.. _Process_Learning_Figure:
**Figure: Learning Components in a Process**
.. figure:: _static/Process_Learning_fig.svg
:alt: Schematic of LearningMechanism and LearningProjections in a Process
Learning using the `BackPropagation` learning algorithm in a three-layered network, using a `TransferMechanism` for
each layer (capitalized labels in Mechanism components are their `designated roles
<Mechanism_Role_In_Processes_And_Systems>` in the Process -- see also `Process_Mechanisms` and `Keywords`).
.. _Process_Execution:
Execution
---------
A Process can be executed as part of a `System <System_Execution>` or on its own. On its own, it is executed by calling
either its `execute <Process.execute>` or `run <Process.run>` method. `execute <Process.execute>` executes
the Process once (that is, it executes a single `TRIAL`); `run <Process.run>` allows a series of `TRIAL`\\s to be
executed.
.. _Process_Processing:
*Processing*
~~~~~~~~~~~~
When a Process is executed, its `input` is conveyed to the `origin_mechanism <Process.origin_mechanism>`
(the first Mechanism in the `pathway <Process.pathway>`). By default, the input is presented only once. If
the `origin_mechanism <Process.origin_mechanism>` is executed again in the same `PASS` of execution (e.g., if it
appears again in the pathway, or receives recurrent projections), the input is not presented again. However, the input
can be "clamped" on using the **clamp_input** argument of `execute <Process.execute>` or `run <Process.run>`.
After the `origin_mechanism <Process.origin_mechanism>` is executed, each subsequent Mechanism in the `pathway` is
executed in sequence. If a Mechanism is specified in the pathway using a `MechanismTuple
<Process_Mechanism_Specification>`, then the `runtime parameters <Mechanism_Runtime_Parameters>` are applied and the
Mechanism is executed using them (see `Mechanism <Mechanism_ParameterStates>` for parameter specification). Finally the
output of the `terminal_mechanism <Process.terminal_mechanism>` (the last one in the pathway) is assigned as the
`output <Process_Output>` of the Process.
.. note::
Processes do not use a `Scheduler`; each Mechanism is executed once, in the order listed in its `pathway` attribute.
To more precisely control the order of, and/or any dependencies in, the sequence of executions, the Process
should be used to construct a `System`, together with `Conditions <Condition>` to implement a custom schedule.
.. _Process_Execution_Initialization:
The `input <Process_Input_And_Output>` to a Process is specified in the **input** argument of either its `execute
<Process.execute>` or `run <Process.run>` method. In both cases, the input for a single `TRIAL` must be a
number, list or ndarray of values that is compatible with the `variable <Mechanism_Base.variable>` of the
`origin_mechanism <Process.origin_mechanism>`. If the `execute <Process.execute>` method is used, input for
only a single `TRIAL` is provided, and only a single `TRIAL` is executed. The `run <System.run>` method can be
used for a sequence of `TRIAL`\\s, by providing it with a list or ndarray of inputs, one for each `TRIAL`. In both
cases, two other types of input can be provided in corresponding arguments of the `execute <Process.execute>`
and `run <Process.run>` methods: a list or ndarray of **initial_values**, and a list or ndarray of **target**
values. The **initial_values** are assigned as input to Mechanisms that close recurrent loops (designated as
`INITIALIZE_CYCLE`) at the start of a `TRIAL` (if **initialize** is set to `True`), and/or whenever the Process'
`initialize <Process.initialize>` method is called; **target** values are assigned as the *TARGET* input of the
`target_nodes <Process.target_nodes>` in each `TRIAL` of execution, if `learning
<Process_Learning_Sequence>` has been specified (see the next section for how Learning is executed; also,
see `Run` documentation for additional details of formatting `Run_Input` and `Run_Target` specifications of the
`run <Process.run>` method).
.. _Process_Execution_Learning:
*Learning*
~~~~~~~~~~
If `learning <Process_Learning_Sequence>` has been specified for the Process or any of the projections in its `pathway
<Process.pathway>`, then the learning Components described `above <Process_Learning_Components>` are executed after
all of the ProcessingMechanisms in the `pathway <Process.pathway>` have executed. The learning Components
calculate changes that will be made to `matrix <MappingProjection.matrix>` of the MappingProjections involved. This
requires that a set of `target values <Run_Targets>` be provided (along with the **inputs**) in the **targets**
argument of the Process' `execute <Process.execute>` or `run <Process.run>` method, one for each `learning
sequence <Process_Learning_Sequence>`. These are used to calculate a `learning_signal
<LearningMechanism.learning_signal>` for each MappingProjection in a learning sequence. This is conveyed by a
`LearningProjection` as a `weight_change_matrix <LearningProjection.weight_change_matrix>` to the MappingProjection's
*MATRIX* `ParameterState <Mapping_Matrix_ParameterState>`, that is used to modify the MappingProjection's `matrix
<MappingProjection.matrix>` parameter when it executes.
.. note::
The changes to a Projection induced by learning are not applied until the Mechanisms that receive those
projections are next executed (see :ref:`Lazy Evaluation <LINK>` for an explanation of "lazy" updating).
The `learning_signal <LearningMechanism>`\\s for a learning sequence are calculated, for each sequence, so as to reduce
the difference between the value received by the *TARGET* Mechanism in its *SAMPLE* `InputState
<ComparatorMechanism_Structure>` (see `above <Process_Learning_Sequence>`) and the target value for the sequence
specified in the corresponding item of the **target** argument of the Process' `execute <Process.execute>` or
`run <Process.run>` method.
.. _Process_Examples:
Examples
--------
*Specification of Mechanisms in a pathway:* The first Mechanism in the example below is specified as a reference to an
instance, the second as a default instance of a Mechanism type, and the third in `MechanismTuple format
<Process_Mechanism_Specification>`, specifying a reference to a Mechanism that should receive my_params at runtime::
mechanism_1 = TransferMechanism()
mechanism_2 = DDM()
my_params = {PARAMETER_STATE_PARAMS:{THRESHOLD:2,NOISE:0.1}}
my_process = Process(pathway=[mechanism_1, TransferMechanism, (mechanism_2, my_params)])
*Default Projection specification:* The `pathway` for this Process uses default Projection specifications; as a
result, a `MappingProjection` is automatically instantiated between each of the Mechanisms listed::
my_process = Process(pathway=[mechanism_1, mechanism_2, mechanism_3])
*Inline Projection specification using an existing Projection:* In this `pathway <Process.pathway>`,
``projection_A`` is specified as the Projection between the first and second Mechanisms; a default Projection is
created between ``mechanism_2`` and ``mechanism_3``::
projection_A = MappingProjection()
my_process = Process(pathway=[mechanism_1, projection_A, mechanism_2, mechanism_3])
*Inline Projection specification using a keyword:* In this `pathway <Process.pathway>`, a
`RANDOM_CONNECTIVITY_MATRIX` is used to specify the Projection between the first and second Mechanisms::
my_process = Process(pathway=[mechanism_1, RANDOM_CONNECTIVITY_MATRIX, mechanism_2, mechanism_3])
*Stand-alone Projection specification:* In this `pathway <Process.pathway>`, ``projection_A`` is explicitly
specified as a Projection between ``mechanism_1`` and ``mechanism_2``, and so is used as the Projection between them
in ``my_process``; a default Projection is created between ``mechanism_2`` and ``mechanism_3``::
projection_A = MappingProjection(sender=mechanism_1, receiver=mechanism_2)
my_process = Process(pathway=[mechanism_1, mechanism_2, mechanism_3])
*Process that implements learning:* This `pathway <Process.pathway>` implements a series of Mechanisms with
Projections between them, all of which will be learned using `BackPropagation` (the default learning algorithm).
Note that it uses the `Logistic` function, which is compatible with BackPropagation::
mechanism_1 = TransferMechanism(function=Logistic)
mechanism_2 = TransferMechanism(function=Logistic)
mechanism_3 = TransferMechanism(function=Logistic)
my_process = Process(pathway=[mechanism_1, mechanism_2, mechanism_3],
learning=ENABLED,
target=[0])
*Process with individual Projections that implement learning:* This `pathway <Process.pathway>` implements learning
for two MappingProjections (between ``mechanism_1`` and ``mechanism_2``, and ``mechanism_3`` and ``mechanism_4``).
Since they are not contiguous, two `learning sequences <Process_Learning_Sequence>` are created, with `TARGET`
Mechanisms assigned to ``mechanism_2`` and ``mechanism_4`` (that will be listed in ``my_process.target_nodes``)::
mechanism_1 = TransferMechanism(function=Logistic)
mechanism_2 = TransferMechanism(function=Logistic)
mechanism_3 = TransferMechanism(function=Logistic)
mechanism_4 = TransferMechanism(function=Logistic)
my_process = Process(pathway=[mechanism_1,
MappingProjection(matrix=(RANDOM_CONNECTIVITY_MATRIX, LEARNING)),
mechanism_2,
mechanism_3,
MappingProjection(matrix=(RANDOM_CONNECTIVITY_MATRIX, LEARNING)),
mechanism_4])
.. _Process_Footnotes:
Footnotes
---------
*lineal*: this term is used rather than "linear" to refer to the flow of processing -- i.e., the graph structure
of the Process -- rather than the (potentially non-linear) processing characteristics of its individual Components.
.. _Process_Class_Reference:
Class Reference
---------------
"""
import inspect
import itertools
import numbers
import re
import warnings
from collections import UserList, namedtuple
import numpy as np
import typecheck as tc
from psyneulink.core.components.component import Component, function_type
from psyneulink.core.components.mechanisms.adaptive.control.controlmechanism import ControlMechanism
from psyneulink.core.components.mechanisms.mechanism import MechanismList, Mechanism_Base
from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.core.components.projections.modulatory.learningprojection import LearningProjection
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.components.projections.projection import _add_projection_to, _is_projection_spec
from psyneulink.core.components.shellclasses import Mechanism, Process_Base, Projection, System_Base
from psyneulink.core.components.states.modulatorysignals.learningsignal import LearningSignal
from psyneulink.core.components.states.parameterstate import ParameterState
from psyneulink.core.components.states.state import _instantiate_state, _instantiate_state_list
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.globals.keywords import AUTO_ASSIGN_MATRIX, ENABLED, EXECUTING, FUNCTION, FUNCTION_PARAMS, INITIALIZING, INITIAL_VALUES, INTERNAL, LEARNING, LEARNING_PROJECTION, MAPPING_PROJECTION, MATRIX, NAME, OBJECTIVE_MECHANISM, ORIGIN, PARAMETER_STATE, PATHWAY, PROCESS, PROCESS_INIT, SENDER, SINGLETON, TARGET, TERMINAL, kwProcessComponentCategory, kwReceiverArg, kwSeparator
from psyneulink.core.globals.parameters import Defaults, Parameter
from psyneulink.core.globals.preferences.componentpreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.registry import register_category
from psyneulink.core.globals.utilities import append_type_to_name, convert_to_np_array, iscompatible
# Public API of this module.
__all__ = [
    'DEFAULT_PHASE_SPEC', 'DEFAULT_PROJECTION_MATRIX', 'defaultInstanceCount', 'kwProcessInputState', 'kwTarget',
    'Process', 'proc', 'ProcessError', 'ProcessInputState', 'ProcessList', 'ProcessRegistry', 'ProcessTuple',
]

# ***************************************** PROCESS CLASS ********************************************************

# ProcessRegistry ------------------------------------------------------------------------------------------------------

defaultInstanceCount = 0  # Number of default instances (used to index name)

DEFAULT_PHASE_SPEC = 0  # default phase assigned to a Process' Mechanisms

# FIX: NOT WORKING WHEN ACCESSED AS DEFAULT:
# Matrix specification used when a pathway entry omits an explicit Projection.
DEFAULT_PROJECTION_MATRIX = AUTO_ASSIGN_MATRIX
# DEFAULT_PROJECTION_MATRIX = IDENTITY_MATRIX

# Category registry for all Process instances (populated via register_category).
ProcessRegistry = {}
class ProcessError(Exception):
    """Exception raised for errors in the construction or execution of a Process.

    The offending condition is recorded in ``error_value``; ``str()`` of the
    exception yields its ``repr``.
    """

    def __init__(self, error_value):
        self.error_value = error_value

    def __str__(self):
        return repr(self.error_value)
kwProcessInputState = 'ProcessInputState'  # name used for the States that relay Process input
kwTarget = 'target'  # keyword for the target attribute used in learning

from psyneulink.core.components.states.outputstate import OutputState

# DOCUMENT: HOW DO MULTIPLE PROCESS INPUTS RELATE TO # OF INPUTSTATES IN FIRST MECHANISM
# WHAT HAPPENS IF LENGTH OF INPUT TO PROCESS DOESN'T MATCH LENGTH OF VARIABLE FOR FIRST MECHANISM??
def proc(*args, **kwargs):
    """Factory function that constructs a `Process` from its arguments.

    **args** can be `Mechanisms <Mechanism>` with or without `Projections <Projection>`, or a list of them,
    that conform to the format for the `pathway <Process.pathway>` argument of a `Process`.

    **kwargs** can be any arguments of the `Process` constructor.
    """
    pathway_spec = list(args)
    return Process(pathway=pathway_spec, **kwargs)
class Process(Process_Base):
"""
Process(process_spec=None, \
default_variable=None, \
pathway=None, \
initial_values={}, \
clamp_input=None, \
default_projection_matrix=DEFAULT_PROJECTION_MATRIX, \
learning=None, \
learning_rate=None, \
target=None, \
params=None, \
name=None, \
prefs=None)
Base class for Process.
COMMENT:
Description
-----------
Process is a Category of the Component class.
It implements a Process that is used to execute a sequence of Mechanisms connected by projections.
NOTES:
* if no pathway is provided:
no mechanism is used
* the input to the Process is assigned as the input to its ORIGIN Mechanism
* the output of the Process is taken as the value of the primary OutputState of its TERMINAL Mechanism
Class attributes
----------------
componentCategory : str : default kwProcessFunctionCategory
className : str : default kwProcessFunctionCategory
suffix : str : default "<kwMechanismFunctionCategory>"
registry : dict : default ProcessRegistry
classPreference : PreferenceSet : default ProcessPreferenceSet instantiated in __init__()
classPreferenceLevel (PreferenceLevel): PreferenceLevel.CATEGORY
+ class_defaults.variable = inputValueSystemDefault # Used as default input value to Process)
+ paramClassDefaults = {PATHWAY: []}
Class methods
-------------
- execute(input, control_signal_allocations):
executes the Process by calling execute_functions of the Mechanisms (in order) in the pathway list
assigns input to sender.output (and passed through mapping) of first Mechanism in the pathway list
assigns output of last Mechanism in the pathway list to self.output
- register_process(): registers Process with ProcessRegistry
[TBI: - adjust(control_signal_allocations=NotImplemented):
modifies the control_signal_allocations while the Process is executing;
calling it without control_signal_allocations functions like interrogate
returns (responseState, accuracy)
[TBI: - interrogate(): returns (responseState, accuracy)
[TBI: - terminate(): terminates the Process and returns output
[TBI: - accuracy(target):
a function that uses target together with the pathway's output.value(s)
and its accuracyFunction to return an accuracy measure;
the target must be in a pathway-appropriate format (checked with call)
ProcessRegistry
---------------
All Processes are registered in ProcessRegistry, which maintains a dict for the subclass,
a count for all instances of it, and a dictionary of those instances
COMMENT
Attributes
----------
componentType : "Process"
pathway : List[ProcessingMechanism, MappingProjection, ProcessingMechanism...]
the `ProcessingMechanisms <ProcessingMechanism>` and `MappingProjections <MappingProjection>` between them that
are executed in the order listed when the Process `executes <Process_Execution>`.
process_input_states : List[ProcessInputState]
represent the input to the Process when it is executed. Each `ProcessInputState <ProcessInputState>` represents
an item of the `input <Process.base>` to a corresponding `InputState` of the Process' `origin_mechanism
<Process.origin_mechanism>` (see `Process_Input_And_Output` for details).
input : List[value] or ndarray
input to the Process for each `TRIAL` of execution; it is assigned the value of the **input** argument
in a call to the Process' `execute <Process.execute>` or `run <Process.run>` method. Each of its
items is assigned as the `value <InputState.value>` of the corresponding `ProcessInputState <ProcessInputState>`
in `process_input_states`, and each must match the format of the corresponding item of the `variable
<Mechanism_Base.variable>` for the Process' `origin_mechanism <Process.origin_mechanism>`
(see `Process_Input_And_Output` for details).
.. note::
The `input <Process.input>` attribute of a Process preserves its value throughout the execution of the
Process. It's value is assigned to the `variable <Mechanism_Base.variable>` attribute of the
`origin_mechanism <Process.origin_mechanism>` at the start of execution. After that, by default, that
Mechanism's `variable <Mechanism_Base.variable>` attribute is zeroed. This is so that if the
`origin_mechanism <Process.origin_mechanism>` is executed again in the same `TRIAL` of execution
(e.g., if it is part of a recurrent loop) it does not continue to receive the initial input to the
Process. However, this behavior can be modified with the Process' `clamp_input <Process.clamp_input>`
attribute.
COMMENT
input_value : 2d np.array : default ``defaults.variable``
same as the `variable <Process.variable>` attribute of the Process; contains the `value
<InputState.value>` of each ProcessInputState in its `process_input_states` attribute.
COMMENT
clamp_input : Optional[keyword]
determines whether the Process' `input <Process.input>` continues to be applied to the `origin_mechanism
<Process.origin_mechanism>` if it is executed again within the same `TRIAL`. It can take the following
values:
* `None`: applies the Process' `input <Process.input>` to the `origin_mechanism
<Process.origin_mechanism>` only once (the first time it is executed) in a given `TRIAL` of execution.
* `SOFT_CLAMP`: combines the Process' `input <Process.input>` with input from any other Projections to the
`origin_mechanism <Process.origin_mechanism>` every time the latter is executed within a `TRIAL` of
execution.
* `HARD_CLAMP`: applies the Process' `input <Process.input>` to the `origin_mechanism
<Process.origin_mechanism>` to the exclusion of any other source(s) of input every time the Process is
executed.
initial_values : Dict[ProcessingMechanism, param value]
values used to initialize ProcessingMechanisms designated as `INITIALIZE_CYCLE` whenever the Process'
`initialize <Process.initialize>` method is called. The key for each entry is a ProcessingMechanism, and
the value is a number, list or np.array that is assigned to that Mechanism's `value <Mechanism_Base.value>`
attribute whenever it is initialized. `ProcessingMechanisms <ProcessingMechanism>` that are designated as
`INITIALIZE_CYCLE` but not included in the **initial_values** specification are initialized with the value of
their `variable <Mechanism_Base.variable>` attribute (i.e., the default input for that Mechanism).
value: 2d np.array
same as the `value <OutputState.value>` of the `primary OutputState <OutputState_Primary>` of
`terminal_mechanism <Process.terminal_mechanism>`.
output_state : State
the `primary OutputState <OutputState_Primary>` of `terminal_mechanism <Process.terminal_mechanism>`.
output : list
same as the `output_values <Mechanism_Base.output_values>` attribute of `terminal_mechanism
<Process.terminal_mechanism>`.
COMMENT
.. _mechs : List[MechanismTuple]
:class:`MechanismTuple` for all Mechanisms in the Process, listed in the order specified in pathway.
MechanismTuples are of the form: (Mechanism, runtime_params, phase) where runtime_params is dictionary
of {argument keyword: argument values} entries and phase is an int.
Note: the list includes ComparatorMechanisms and LearningMechanism.
.. _all_mechanisms : MechanismList
Contains all Mechanisms in the System (based on _mechs).
.. _origin_mechs : List[MechanismTuple]
Contains a tuple for the `ORIGIN` Mechanism of the Process.
(Note: the use of a list is for compatibility with the MechanismList object)
.. _terminal_mechs : List[MechanismTuple]
Contains a tuple for the `TERMINAL` Mechanism of the Process.
(Note: the use of a list is for compatibility with the MechanismList object)
.. _target_mechs : List[MechanismTuple]
Contains a tuple for the `TARGET` Mechanism of the Process.
(Note: the use of a list is for compatibility with the MechanismList object)
.. _learning_mechs : List[MechanismTuple]
`MechanismTuple`\\s for all `LearningMechanisms <LearningMechanism>` in the
Process (used for learning).
.. mechanisms : List[Mechanism]
List of all Mechanisms in the Process.
property that points to _all_mechanisms.mechanisms (see below).
COMMENT
mechanism_names : List[str]
the names of the Mechanisms listed in the `Mechanisms <Process.mechanisms>` attribute.
.. property that points to _all_mechanisms.names (see below).
mechanisms : List[Mechanism]
*all* of the Mechanisms in the Process, including those in the `pathway <Process.pathway>`
and those created for `learning <Process_Learning_Sequence>`.
origin_mechanism : Mechanism
the `ORIGIN` Mechanism of the Process (see `Process Mechanisms <Process_Mechanisms>` for a description).
COMMENT
.. origin_mechanisms : MechanismList
a list with the `ORIGIN` Mechanism of the Process.
.. note:: A Process can have only one `ORIGIN` Mechanism; the use of a list is for compatibility with
methods that are also used for Systems.
COMMENT
terminal_mechanism : Mechanism
the `TERMINAL` Mechanism of the Process (see `Process Mechanisms <Process_Mechanisms>` for a description).
COMMENT
.. terminalMechanisms : MechanismList
a list with the `TERMINAL` Mechanism of the Process.
.. note:: A Process can have only one `TERMINAL` Mechanism; the use of a list is for compatibility with
methods that are also used for Systems.
COMMENT
learning_mechanisms : MechanismList
all of the `LearningMechanism in the Process <Process_Learning_Sequence>`, listed in
``learning_mechanisms.data``.
.. based on _learning_mechs
target_mechanisms : MechanismList
the `TARGET` Mechanisms for the Process, listed in ``target_nodes.data``; each is a `ComparatorMechanism`
associated with the last ProcessingMechanism of a `learning sequence <Process_Learning_Sequence>` in the
Process;
COMMENT:
.. note:: A Process can have only one `TARGET` Mechanism; the use of a list is for compatibility with
methods that are also used for Systems.
COMMENT
COMMENT:
based on _target_mechs
COMMENT
systems : List[System]
the `Systems <System>` to which the Process belongs.
.. _phaseSpecMax : int : default 0
phase of last (set of) ProcessingMechanism(s) to be executed in the Process.
It is assigned to the ``phaseSpec`` for the Mechanism in the pathway with the largest ``phaseSpec`` value.
.. numPhases : int : default 1
the number of :ref:`phases <System_Execution_Phase>` for the Process.
COMMENT:
It is assigned as ``_phaseSpecMax + 1``.
COMMENT
.. _isControllerProcess : bool : :keyword:`False`
identifies whether the Process is an internal one created by a ControlMechanism.
learning : Optional[LearningProjection]
indicates whether the Process is configured for learning. If it has a value other than `None`, then `learning
has been configured <Process_Learning_Specification>` for one or more `MappingProjections <MappingProjection>`
in the Process; if it is `None`, none of MappingProjections in the Process has been configured for learning.
.. note::
The `learning <Process.learning>` attribute of a Process may have a value other than `None` even
if no assignment is made to the **learning** argument of the `process` command; this occurs if one or more
MappingProjections in the Process are `specified individually for learning
<Process_Learning_Specification>`.
COMMENT:
.. note:: If an existing `LearningProjection` or a call to the constructor is used for the specification,
the object itself will **not** be used as the LearningProjection for the Process. Rather it
will be used as a template (including any parameters that are specified) for creating
LearningProjections for all of the `MappingProjections <MappingProjection>` in the Process.
.. _learning_enabled : bool
indicates whether or not learning is enabled. This only has effect if the ``learning`` parameter
has been specified (see above).
COMMENT
learning_rate : float : default None
determines the `learning_rate <LearningMechanism.learning_rate>` used for `MappingProjections
<MappingProjection>` `specified for learning <Process_Learning_Sequence>` in the Process that do not have their
`learning_rate <LearningProjection.learning_rate>` otherwise specified. If is `None`, and the Process is
executed as part of a `System`, and the System has a `learning_rate <System.learning_rate>` specified,
then that is the value used. Otherwise, the default value of the :keyword:`learning_rate` parameter for the
`function <LearningMechanism.function>` of the `LearningMechanism associated with each MappingProjection
<Process_Learning_Sequence>` is used. If a :keyword:`learning_rate` is specified for the `LearningSignal
<LearningSignal_Learning_Rate>` or `LearningProjection <LearningProjection_Function_and_Learning_Rate>`
associated with a MappingProjection, that is applied in addition to any specified for the Process or the
relevant LearningMechanism.
results : List[OutputState.value]
the return values from a sequence of executions of the Process; its value is `None` if the Process has not
been executed.
name : str
the name of the Process; if it is not specified in the **name** argument of the constructor, a
default is assigned by ProcessRegistry (see `Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict
the `PreferenceSet` for the Process; if it is not specified in the **prefs** argument of the
constructor, a default is assigned using `classPreferences` defined in __init__.py (see :doc:`PreferenceSet
<LINK>` for details).
"""
# Category bookkeeping used by the Component registry machinery.
componentCategory = kwProcessComponentCategory
className = componentCategory
suffix = " " + className
componentType = "Process"

registry = ProcessRegistry

classPreferenceLevel = PreferenceLevel.CATEGORY
# These will override those specified in TypeDefaultPreferences
# classPreferences = {
#     kwPreferenceSetName: 'ProcessCustomClassPreferences',
#     kpReportOutputPref: PreferenceEntry(False, PreferenceLevel.INSTANCE)}
# Use inputValueSystemDefault as default input to process
class Parameters(Process_Base.Parameters):
    """
    Attributes
    ----------

    variable
        see `variable <Process.variable>`

        :default value: None
        :type:

    input
        see `input <Process.input>`

        :default value: None
        :type:
    """
    # Declared with None defaults; actual values are assigned during
    # initialization and execution of the Process.
    variable = None
    input = None
# Legacy parameter-dictionary defaults (pre-Parameters API); inherited entries
# from Component are copied and then extended with Process-specific ones.
paramClassDefaults = Component.paramClassDefaults.copy()
paramClassDefaults.update({
    '_execution_id': None,
    PATHWAY: None,
    'input':[],
    'process_input_states': [],
    'targets': None,
    'target_input_states': [],
    'systems': [],
    '_phaseSpecMax': 0,
    '_isControllerProcess': False
})
@tc.typecheck
def __init__(self,
             default_variable=None,
             size=None,
             pathway=None,
             initial_values=None,
             clamp_input=None,
             default_projection_matrix=DEFAULT_PROJECTION_MATRIX,
             learning=None,
             learning_rate=None,
             target=None,
             params=None,
             name=None,
             prefs:is_pref_set=None,
             context=None):
    """Register the Process, collect constructor args into a params dict, and
    delegate the remaining initialization to Process_Base.__init__.

    If **default_variable** is not provided, it is derived from the first
    (ORIGIN) Mechanism in **pathway**.
    """

    pathway = pathway or []
    self.projections = []

    # Assign args to params and functionParams dicts
    params = self._assign_args_to_param_dicts(pathway=pathway,
                                              initial_values=initial_values,
                                              clamp_input=clamp_input,
                                              default_projection_matrix=default_projection_matrix,
                                              learning=learning,
                                              learning_rate=learning_rate,
                                              target=target,
                                              params=params)

    # Registering assigns self.name (including a default name if none given).
    register_category(entry=self,
                      base_class=Process,
                      name=name,
                      registry=ProcessRegistry,
                      context=context)

    # No context => this is a top-level construction; mark as INITIALIZING.
    if not context:
        self.context.initialization_status = ContextFlags.INITIALIZING
        self.context.string = INITIALIZING + self.name + kwSeparator + PROCESS_INIT

    # If input was not provided, generate defaults to match format of ORIGIN mechanisms for process
    # NOTE(review): assumes pathway[0] is a Mechanism (not a Projection spec);
    # a Projection as first entry is rejected later in
    # _parse_and_instantiate_mechanism_entries — confirm ordering.
    if default_variable is None and len(pathway) > 0:
        default_variable = pathway[0].defaults.variable

    self.default_execution_id = self.name

    super(Process, self).__init__(default_variable=default_variable,
                                  size=size,
                                  param_defaults=params,
                                  name=self.name,
                                  prefs=prefs)
def _parse_arg_variable(self, variable):
    """Coerce *variable* to a 2d np.array before standard parsing; pass None through unchanged."""
    if variable is not None:
        return super()._parse_arg_variable(convert_to_np_array(variable, dimension=2))
    return None
def _validate_params(self, request_set, target_set=None, context=None):
    """Validate the initial_values arg.

    Each key of the INITIAL_VALUES dict must be a `Mechanism`; raises
    `ProcessError` otherwise.

    Note: validation of target (for learning) is deferred until _instantiate_target since,
          if it doesn't have a TARGET Mechanism (see _check_for_target_mechanisms),
          it will not need a target.
    """
    super()._validate_params(request_set=request_set, target_set=target_set, context=context)

    # Note: target_set (argument of _validate_params) should not be confused with
    #       self.target (Process attribute used for learning)
    if INITIAL_VALUES in target_set and target_set[INITIAL_VALUES]:
        for mech, value in target_set[INITIAL_VALUES].items():
            if not isinstance(mech, Mechanism):
                # BUGFIX/consistency: was `raise SystemError`, which denotes an
                # internal interpreter error in Python; this is a user
                # configuration error, for which this module defines ProcessError.
                raise ProcessError("{} (key for entry in initial_values arg for \'{}\') "
                                   "is not a Mechanism object".format(mech, self.name))
def _instantiate_attributes_before_function(self, function=None, context=None):
    """Build the pathway before the function is instantiated.

    The Mechanisms in the pathway must already exist when
    _instantiate_function runs, since the input Projection and
    self.outputState are assigned from the first and last Mechanisms,
    respectively.

    :param function:
    :param context:
    :return:
    """
    self._instantiate_pathway(context=context)
def _instantiate_function(self, function, function_params=None, context=None):
    """Override Function._instantiate_function:

    This is necessary to:
    - insure there is no FUNCTION specified (not allowed for a Process object)
    - suppress validation (and attendant execution) of Process execute method (unless VALIDATE_PROCESS is set)
      since generally there is no need, as all of the mechanisms in the pathway have already been validated;
      Note: this means learning is not validated either
    """
    if self.paramsCurrent[FUNCTION] != self.execute:
        # BUGFIX: the original code called `.format(...)` on the return value of
        # print() (which is None), raising AttributeError instead of reporting
        # the problem; format the message first, then print it.
        print("Process object ({0}) should not have a specification ({1}) for a {2} param; "
              "it will be ignored".format(self.name, self.paramsCurrent[FUNCTION], FUNCTION))
        self.paramsCurrent[FUNCTION] = self.execute
# DOCUMENTATION:
# 1) ITERATE THROUGH CONFIG LIST TO PARSE AND INSTANTIATE EACH MECHANISM ITEM
# - RAISE EXCEPTION IF TWO PROJECTIONS IN A ROW
# 2) ITERATE THROUGH CONFIG LIST AND ASSIGN PROJECTIONS (NOW THAT ALL MECHANISMS ARE INSTANTIATED)
#
#
# DOCUMENT: Projections SPECIFIED IN A PATHWAY MUST BE A MappingProjection
# DOCUMENT:
#           Each item in Pathway can be a Mechanism or Projection object, class ref, or specification dict,
#               str as name for a default Mechanism,
#               keyword (IDENTITY_MATRIX or FULL_CONNECTIVITY_MATRIX) as specification for a default Projection,
#               or a tuple with any of the above as the first item and a param dict as the second
def _instantiate_pathway(self, context):
    """Construct pathway list of Mechanisms and Projections used to execute process

    Iterate through Pathway, parsing and instantiating each Mechanism item;
        - raise exception if two Projections are found in a row;
        - for last Mechanism in Pathway, assign outputState to Process.outputState
    Iterate through Pathway, assigning Projections to Mechanisms:
        - first Mechanism in Pathway:
            if it does NOT already have any projections:
                assign Projection(s) from ProcessInputState(s) to corresponding Mechanism.input_state(s):
            if it DOES already have a Projection, and it is from:
                (A) the current Process input, leave intact
                (B) another Process input, if verbose warn
                (C) another Mechanism in the current process, if verbose warn about recurrence
                (D) a Mechanism not in the current Process or System, if verbose warn
                (E) another Mechanism in the current System, OK so ignore
                (F) from something other than a Mechanism in the System, so warn (irrespective of verbose)
                (G) a Process in something other than a System, so warn (irrespective of verbose)
        - subsequent Mechanisms:
            assign projections from each Mechanism to the next one in the list:
            - if Projection is explicitly specified as item between them in the list, use that;
            - if Projection is NOT explicitly specified,
                but the next Mechanism already has a Projection from the previous one, use that;
            - otherwise, instantiate a default MappingProjection from previous Mechanism to next:
                use kwIdentity (identity matrix) if len(sender.value) == len(receiver.defaults.variable)
                use FULL_CONNECTIVITY_MATRIX (full connectivity matrix with unit weights) if the lengths are not equal
                use FULL_CONNECTIVITY_MATRIX (full connectivity matrix with unit weights) if LEARNING has been set

    :param context:
    :return:
    """
    pathway = self.paramsCurrent[PATHWAY]
    self._mechs = []
    self._learning_mechs = []
    self._target_mechs = []

    # VALIDATE PATHWAY THEN PARSE AND INSTANTIATE MECHANISM ENTRIES ------------------------------------
    self._parse_and_instantiate_mechanism_entries(pathway=pathway, context=context)

    # Identify ORIGIN and TERMINAL Mechanisms in the Process and
    # assign the Mechanism's status in the Process to its entry in the Mechanism's processes dict

    # Move any ControlMechanisms in the pathway to the end
    # NOTE(review): this mutates `pathway` while enumerating it; appears to work
    # because each pop/append shortens the remaining scan, but confirm behavior
    # when two ControlMechanisms are adjacent.
    from psyneulink.core.components.mechanisms.adaptive.control.controlmechanism import ControlMechanism
    for i, item in enumerate(pathway):
        if len(pathway)>1 and isinstance(item, ControlMechanism):
            pathway += [pathway.pop(i)]

    # Identify and assign first Mechanism as first_mechanism and ORIGIN
    self.first_mechanism = pathway[0]
    self.first_mechanism._add_process(self, ORIGIN)
    self._origin_mechs = [pathway[0]]
    self.origin_mechanisms = MechanismList(self, self._origin_mechs)

    # Identify and assign last Mechanism as last_mechanism and TERMINAL
    # (scan backwards past non-Mechanism entries and trailing ControlMechanisms)
    i = -1
    while (not isinstance(pathway[i],Mechanism_Base) or
           (isinstance(pathway[i], ControlMechanism) and len(pathway)>1)):
        i -=1
    self.last_mechanism = pathway[i]

    # A one-Mechanism Process is both ORIGIN and TERMINAL => SINGLETON
    if self.last_mechanism is self.first_mechanism:
        self.last_mechanism._add_process(self, SINGLETON)
    else:
        self.last_mechanism._add_process(self, TERMINAL)
    # NOTE(review): _terminal_mechs uses pathway[-1], which may differ from
    # self.last_mechanism when a ControlMechanism was moved to the end above —
    # confirm whether that asymmetry is intentional.
    self._terminal_mechs = [pathway[-1]]
    self.terminal_mechanisms = MechanismList(self, self._terminal_mechs)

    # # Assign process OutputState to last mechanisms in pathway
    # self.outputState = self.last_mechanism.outputState

    # PARSE AND INSTANTIATE PROJECTION ENTRIES ------------------------------------
    self._parse_and_instantiate_projection_entries(pathway=pathway, context=context)

    self.pathway = pathway

    self._instantiate__deferred_inits(context=context)

    # Configure learning: enabled only if learning was requested AND the
    # pathway yields TARGET Mechanism(s) to supply training signals.
    if self.learning:
        if self._check_for_target_mechanisms():
            if self._target_mechs:
                self._instantiate_target_input(context=context)
            self._learning_enabled = True
        else:
            self._learning_enabled = False
    else:
        self._learning_enabled = False

    # Wrap the collected Mechanism lists for list-like access by System/Run code.
    self._all_mechanisms = MechanismList(self, self._mechs)
    self.learning_mechanisms = MechanismList(self, self._learning_mechs)
    self.target_mechanisms = MechanismList(self, self._target_mechs)
def _instantiate_value(self, context=None):
    """Assign the Process' default value.

    If the parameter-validation preference is set, defer to the superclass
    (which executes the Process to derive its value); otherwise take the
    value of the primary OutputState of the last Mechanism in the pathway.
    """
    # If validation pref is set, execute the Process to compute the value
    if self.prefs.paramValidationPref:
        super()._instantiate_value(context=context)
        return

    # Otherwise, use the output of the last mechanism in the pathway
    terminal_value = self.last_mechanism.output_state.value
    try:
        # value may be mutable (e.g., an array), so store a copy
        self.defaults.value = terminal_value.copy()
    except AttributeError:
        # immutable value; safe to assign directly
        self.defaults.value = terminal_value
def _parse_and_instantiate_mechanism_entries(self, pathway, context=None):
    """Validate placement of Projection entries and register the Mechanism entries of pathway.

    - Ensures the first entry is not a Projection, and that no two Projection entries are adjacent.
    - Validates that each non-Projection entry is a Mechanism, registers this Process with it,
      and appends it to self._mechs.
    - If the last Mechanism has a self-recurrent Projection, appends that Projection and the
      Mechanism again to pathway (hack; see IMPLEMENTATION NOTE below).
    - Validates self.initial_values against the Mechanisms in the Process.

    Raises:
        ProcessError: for an invalid pathway or initial_values specification.
    """
    # FIX: SHOULD MOVE VALIDATION COMPONENTS BELOW TO Process._validate_params
    # - make sure first entry is not a Projection
    # - make sure Projection entries do NOT occur back-to-back (i.e., no two in a row)
    # - instantiate Mechanism entries

    previous_item_was_projection = False

    for i in range(len(pathway)):
        item = pathway[i]

        # VALIDATE PLACEMENT OF PROJECTION ENTRIES ----------------------------------------------------------

        # Can't be first entry, and can never have two in a row

        # Config entry is a Projection
        if _is_projection_spec(item, proj_type=Projection):
            # Projection not allowed as first entry
            if i==0:
                raise ProcessError("Projection cannot be first entry in pathway ({0})".format(self.name))
            # Projections not allowed back-to-back
            if previous_item_was_projection:
                raise ProcessError("Illegal sequence of two adjacent projections ({0}:{1} and {1}:{2})"
                                   " in pathway for {3}".
                                   format(i-1, pathway[i-1], i, pathway[i], self.name))
            previous_item_was_projection = True
            continue

        previous_item_was_projection = False
        mech = item

        # INSTANTIATE MECHANISM -----------------------------------------------------------------------------

        # Must do this before assigning projections (below)
        # Mechanism entry must be a Mechanism object, class, specification dict, str, or (Mechanism, params) tuple
        # Don't use params item of tuple (if present) to instantiate Mechanism, as they are runtime only params

        # Entry is NOT already a Mechanism object
        if not isinstance(mech, Mechanism):
            raise ProcessError("Entry {0} ({1}) is not a recognized form of Mechanism specification".format(i, mech))
            # Params in mech tuple must be a dict or None
            # if params and not isinstance(params, dict):
            #     raise ProcessError("Params entry ({0}) of tuple in item {1} of pathway for {2} is not a dict".
            #                        format(params, i, self.name))
        # Replace Pathway entry with new tuple containing instantiated Mechanism object and params
        pathway[i] = mech

        # Entry IS already a Mechanism object
        # Add entry to _mechs and name to mechanism_names list
        # Add Process to the mechanism's list of processes to which it belongs
        if not self in mech.processes:
            mech._add_process(self, INTERNAL)
            self._mechs.append(pathway[i])
            # self.mechanism_names.append(mech.name)

        try:
            # previously this was only getting called for ControlMechanisms, but GatingMechanisms
            # need to activate their projections too! This is not being tested for anywhere
            mech._activate_projections_for_compositions(self)
        except AttributeError:
            # mech does not implement _activate_projections_for_compositions; nothing to activate
            pass

        # FIX: ADD RECURRENT PROJECTION AND MECHANISM
        # IMPLEMENTATION NOTE: THIS IS A TOTAL HACK TO ALLOW SELF-RECURRENT MECHANISMS IN THE CURRENT SYSTEM
        #                      SHOULD BE HANDLED MORE APPROPRIATELY IN COMPOSITION
        # If this is the last mechanism in the pathway, and it has a self-recurrent Projection,
        #    add that to the pathway so that it can be identified and assigned for learning if so specified
        if i+1 == len(pathway):
            if mech.output_states and any(any(proj.receiver.owner is mech
                                              for proj in state.efferents)
                                          for state in mech.output_states):
                for state in mech.output_states:
                    for proj in state.efferents:
                        if proj.receiver.owner is mech:
                            pathway.append(proj)
                            pathway.append(pathway[i])

    # Validate initial values
    # FIX: CHECK WHETHER ALL MECHANISMS DESIGNATED AS INITIALIZE HAVE AN INITIAL_VALUES ENTRY
    if self.initial_values:
        for mech, value in self.initial_values.items():
            if not mech in self.mechanisms:
                # NOTE: previously raised builtin SystemError, which is reserved for
                #       interpreter-internal failures; this is a Process validation error
                raise ProcessError("{} (entry in initial_values arg) is not a Mechanism in pathway for \'{}\'".
                                   format(mech.name, self.name))
            if not iscompatible(value, mech.defaults.variable):
                raise ProcessError("{} (in initial_values arg for {}) is not a valid value for {}".
                                   format(value,
                                          append_type_to_name(self),
                                          append_type_to_name(mech)))
def _parse_and_instantiate_projection_entries(self, pathway, context=None):
    """Validate and instantiate the Projection entries of pathway, creating missing Projections.

    - First entry (a Mechanism): assign Process input Projection(s) to it.
    - Each subsequent Mechanism entry: ensure a MappingProjection from the preceding entry exists
      (instantiate one if not); when self.learning is specified, ensure the Projection's MATRIX
      ParameterState has a LearningProjection.
    - Each Projection entry: validate (or complete deferred initialization of) its sender and
      receiver against the Mechanisms that precede and follow it in the pathway, and replace the
      spec in pathway with the instantiated Projection.

    Raises:
        ProcessError: for an invalid Projection specification or a sender/receiver mismatch.
    """
    from psyneulink.core.components.mechanisms.adaptive.control.controlmechanism import ControlMechanism

    # ASSIGN DEFAULT PROJECTION PARAMS

    # If learning is specified for the Process, add learning specification to default Projection params
    #    and store any learning-related specifications
    if self.learning:

        # if spec is LEARNING or ENABLED (convenience spec),
        #    change to Projection version of keyword for consistency below
        if self.learning in {LEARNING, LEARNING_PROJECTION, ENABLED}:
            self.learning = LEARNING

        # FIX: IF self.learning IS AN ACTUAL LearningProjection OBJECT, NEED TO RESPECIFY AS CLASS + PARAMS
        # FIX:     OR CAN THE SAME LearningProjection OBJECT BE SHARED BY MULTIPLE PROJECTIONS?
        # FIX:     DOES IT HAVE ANY INTERNAL STATE VARIABLES OR PARAMS THAT NEED TO BE PROJECTIONS-SPECIFIC?
        # FIX:     MAKE IT A COPY?
        matrix_spec = (self.default_projection_matrix, self.learning)
    else:
        matrix_spec = self.default_projection_matrix

    projection_params = {FUNCTION_PARAMS: {MATRIX: matrix_spec}}

    for i in range(len(pathway)):
        item = pathway[i]
        learning_projection_specified = False

        # FIRST ENTRY

        # Must be a Mechanism (enforced above)
        # Assign input(s) from Process to it if it doesn't already have any
        # Note: does not include learning (even if specified for the process)
        if i == 0:
            # Relabel for clarity
            mech = item

            # Check if first Mechanism already has any projections and, if so, issue appropriate warning
            if mech.input_state.path_afferents:
                self._warn_about_existing_projections_to_first_mechanism(mech, context)

            # Assign input Projection from Process
            self._assign_process_input_projections(mech, context=context)
            continue

        # SUBSEQUENT ENTRIES

        # Item is a Mechanism
        if isinstance(item, Mechanism):

            preceding_item = pathway[i - 1]

            # PRECEDING ITEM IS A PROJECTION
            if isinstance(preceding_item, Projection):
                if self.learning:

                    # Check if preceding_item has a matrix ParameterState and, if so, it has any learningSignals
                    # If it does, assign them to learning_projections
                    try:
                        learning_projections = list(
                            projection for projection in preceding_item._parameter_states[MATRIX].mod_afferents
                            if isinstance(projection, LearningProjection)
                        )

                    # FIX: 10/3/17: USE OF TUPLE AS ITEM IN state_list ARGS BELOW IS NO LONGER SUPPORTED
                    #               NEED TO REFORMAT SPECS FOR state_list BELOW
                    # (NOTE: THESE EXCEPTIONS ARE NOT BEING CALLED IN CURRENT TEST SUITES)
                    # preceding_item doesn't have a _parameter_states attrib, so assign one with self.learning
                    except AttributeError:
                        # Instantiate _parameter_states Ordered dict with ParameterState and self.learning
                        preceding_item._parameter_states = _instantiate_state_list(
                            owner=preceding_item,
                            state_list=[(MATRIX, self.learning)],
                            state_types=ParameterState,
                            state_param_identifier=PARAMETER_STATE,
                            reference_value=self.learning,
                            reference_value_name=LEARNING_PROJECTION,
                            context=context
                        )

                    # preceding_item has _parameter_states but not (yet!) one for MATRIX, so instantiate it
                    except KeyError:
                        # Instantiate ParameterState for MATRIX
                        preceding_item._parameter_states[MATRIX] = _instantiate_state(
                            owner=preceding_item,
                            state_type=ParameterState,
                            name=MATRIX,
                            # # FIX: NOT SURE IF THIS IS CORRECT:
                            # state_spec=PARAMETER_STATE,
                            reference_value=self.learning,
                            reference_value_name=LEARNING_PROJECTION,
                            params=self.learning,
                            context=context
                        )
                    # preceding_item has ParameterState for MATRIX,
                    else:
                        if not learning_projections:
                            # Add learningProjection to Projection if it doesn't have one
                            projs = _add_projection_to(
                                preceding_item,
                                preceding_item._parameter_states[MATRIX],
                                projection_spec=self.learning
                            )
                            for proj in projs:
                                proj._activate_for_compositions(self)
                                self._add_projection(proj)
                continue

            # Preceding item was a Mechanism, so check if a Projection needs to be instantiated between them
            # Check if Mechanism already has a Projection from the preceding Mechanism, by testing whether the
            #    preceding mechanism is the sender of any projections received by the current one's inputState
            # FIX: THIS SHOULD BE DONE FOR ALL INPUTSTATES
            # FIX: POTENTIAL PROBLEM - EVC *CAN* HAVE MULTIPLE PROJECTIONS FROM (DIFFERENT OutputStates OF)
            #                          THE SAME MECHANISM

            # PRECEDING ITEM IS A MECHANISM
            projection_list = item.input_state.path_afferents
            projection_found = False
            for projection in projection_list:
                # Current mechanism DOES receive a Projection from the preceding item
                # DEPRECATED: this allows any projection existing between A->B to automatically be added
                # to this process
                if preceding_item == projection.sender.owner:
                    projection_found = True
                    if self.learning:
                        # Make sure Projection includes a learningSignal and add one if it doesn't
                        try:
                            matrix_param_state = projection._parameter_states[MATRIX]

                        # Projection doesn't have a _parameter_states attrib, so assign one with self.learning
                        except AttributeError:
                            # Instantiate _parameter_states Ordered dict with ParameterState for self.learning
                            projection._parameter_states = _instantiate_state_list(
                                owner=preceding_item,
                                state_list=[(MATRIX, self.learning)],
                                state_types=ParameterState,
                                state_param_identifier=PARAMETER_STATE,
                                reference_value=self.learning,
                                reference_value_name=LEARNING_PROJECTION,
                                context=context
                            )

                        # Projection has _parameter_states but not (yet!) one for MATRIX,
                        #    so instantiate it with self.learning
                        except KeyError:
                            # Instantiate ParameterState for MATRIX
                            projection._parameter_states[MATRIX] = _instantiate_state(
                                owner=preceding_item,
                                state_type=ParameterState,
                                name=MATRIX,
                                # state_spec=PARAMETER_STATE,
                                reference_value=self.learning,
                                reference_value_name=LEARNING_PROJECTION,
                                params=self.learning,
                                context=context
                            )

                        # Check if Projection's matrix param has a learningSignal
                        else:
                            if not (
                                any(
                                    isinstance(projection, LearningProjection)
                                    for projection in matrix_param_state.mod_afferents
                                )
                            ):
                                projs = _add_projection_to(
                                    projection,
                                    matrix_param_state,
                                    projection_spec=self.learning
                                )
                                for p in projs:
                                    p._activate_for_compositions(self)

                                if self.prefs.verbosePref:
                                    print("LearningProjection added to Projection from Mechanism {0} to Mechanism {1} "
                                          "in pathway of {2}".format(preceding_item.name, item.name, self.name))

                    # remove this to enforce that projections need to be explicitly added to Compositions
                    # left in for backwards compatibility
                    # DEPRECATED
                    projection._activate_for_compositions(self)
                    # warnings.warn(
                    #     'The ability for Process to associate with itself all projections between '
                    #     'subsequent mechanisms implicitly is deprecated. In the future, you must '
                    #     'explicitly state the projections you want included in any Composition.',
                    #     FutureWarning
                    # )
                    break

            if not projection_found:
                # No Projection found, so instantiate MappingProjection from preceding mech to current one;
                # Note:  if self.learning arg is specified, it has already been added to projection_params above

                # MODIFIED 9/19/17 NEW:
                #    [ALLOWS ControlMechanism AND ASSOCIATED ObjectiveMechanism TO BE ADDED TO PATHWAY)
                # If it is a ControlMechanism with an associated ObjectiveMechanism, try projecting to that
                if isinstance(item, ControlMechanism) and item.objective_mechanism is not None:

                    # If it already has an associated ObjectiveMechanism, make sure it has been implemented
                    if not isinstance(item.objective_mechanism, Mechanism):
                        raise ProcessError(
                            "{} included in {} for {} ({}) has an {} argument, but it is not an {}".format(
                                ControlMechanism.__name__,
                                PATHWAY,
                                self.name,
                                item.objective_mechanism,
                                OBJECTIVE_MECHANISM,
                                ObjectiveMechanism.__name__
                            )
                        )

                    # Check whether ObjectiveMechanism already receives a projection
                    #     from the preceding Mechanism in the pathway
                    # if not any(projection.sender.owner is preceding_item
                    #            for projection in item.objective_mechanism.input_state.path_afferents):
                    item._objective_projection._activate_for_compositions(self)
                    if (
                        not any(
                            any(
                                projection.sender.owner is preceding_item
                                for projection in input_state.path_afferents
                            )
                            for input_state in item.objective_mechanism.input_states
                        )
                    ):
                        # Assign projection from preceding Mechanism in pathway to ObjectiveMechanism
                        receiver = item.objective_mechanism
                    else:
                        # Ignore (ObjectiveMechanism already has a projection from the Mechanism),
                        #    but activate the existing projection(s) for this Process
                        for input_state in item.objective_mechanism.input_states:
                            for projection in input_state.path_afferents:
                                if projection.sender.owner is preceding_item:
                                    projection._activate_for_compositions(self)
                        continue
                else:
                    receiver = item
                # MODIFIED 9/19/17 END

                projection = MappingProjection(
                    sender=preceding_item,
                    receiver=receiver,
                    params=projection_params,
                    name='{} from {} to {}'.format(MAPPING_PROJECTION, preceding_item.name, item.name)
                )
                projection._activate_for_compositions(self)
                for mod_proj in itertools.chain.from_iterable([p.mod_afferents for p in projection.parameter_states]):
                    mod_proj._activate_for_compositions(self)
                if self.prefs.verbosePref:
                    print("MappingProjection added from Mechanism {0} to Mechanism {1}"
                          " in pathway of {2}".format(preceding_item.name, item.name, self.name))

        # Item is a Projection or specification for one
        else:
            # Instantiate Projection, assigning Mechanism in previous entry as sender and next one as receiver
            # IMPLEMENTATION NOTE:  FOR NOW:
            #    - ASSUME THAT PROJECTION SPECIFICATION (IN item) IS ONE OF THE FOLLOWING:
            #        + Projection object
            #        + Matrix object
            #        # +  Matrix keyword (IDENTITY_MATRIX or FULL_CONNECTIVITY_MATRIX)
            #        +  Matrix keyword (use "is_projection" to validate)
            #    - params IS IGNORED
            # 9/5/16:
            # FIX: IMPLEMENT _validate_params TO VALIDATE PROJECTION SPEC USING Projection.is_projection
            # FIX: ADD SPECIFICATION OF PROJECTION BY KEYWORD:
            # FIX: ADD learningSignal spec if specified at Process level (overrided individual projection spec?)
            # FIX: PARSE/VALIDATE ALL FORMS OF PROJECTION SPEC (ITEM PART OF TUPLE) HERE:
            # FIX:                                                          CLASS, OBJECT, DICT, STR, TUPLE??
            # IMPLEMENT: MOVE State._instantiate_projections_to_state(), _check_projection_receiver()
            #            and _parse_projection_keyword() all to Projection_Base.__init__() and call that
            #           VALIDATION OF PROJECTION OBJECT:
            #                MAKE SURE IT IS A MappingProjection
            #                CHECK THAT SENDER IS pathway[i-1][OBJECT_ITEM]
            #                CHECK THAT RECEVIER IS pathway[i+1][OBJECT_ITEM]

            # Get sender for Projection
            sender_mech = pathway[i - 1]

            # Get receiver for Projection
            try:
                receiver_mech = pathway[i + 1]
            except IndexError:
                # There are no more entries in the pathway
                #    so the Projection had better project to a mechanism already in the pathway;
                #    otherwise, raise an exception
                try:
                    receiver_mech = item.receiver.owner
                    if receiver_mech not in [object_item for object_item in pathway]:
                        raise AttributeError
                except AttributeError:
                    raise ProcessError(
                        "The last entry in the pathway for {} is a projection specification {}, "
                        "so its receiver must be a Mechanism in the pathway".format(self.name, item)
                    )

            # # Check if there is already a projection between the sender and receiver
            # if self._check_for_duplicate_projection(sender_mech, receiver_mech, item, i):
            #     continue

            # Projection spec is an instance of a MappingProjection
            if isinstance(item, MappingProjection):
                # Check that Projection's sender and receiver are to the mech before and after it in the list
                # IMPLEMENT: CONSIDER ADDING LEARNING TO ITS SPECIFICATION?
                # FIX: SHOULD MOVE VALIDATION COMPONENTS BELOW TO Process._validate_params

                # If initialization of MappingProjection has been deferred,
                #    check sender and receiver, assign them if they have not been assigned, and initialize it
                if item.context.initialization_status == ContextFlags.DEFERRED_INIT:

                    # Check sender arg
                    try:
                        sender_arg = item.init_args[SENDER]
                    except AttributeError:
                        raise ProcessError(
                            "PROGRAM ERROR: initialization_status of {} is {} but "
                            "it does not have init_args".format(
                                item, ContextFlags.DEFERRED_INIT.name
                            )
                        )
                    except KeyError:
                        raise ProcessError(
                            "PROGRAM ERROR: Value of {} is {} but "
                            "init_args does not have entry for {}".format(
                                item.init_args[NAME], ContextFlags.DEFERRED_INIT.name, SENDER
                            )
                        )
                    else:
                        # If sender is not specified for the Projection,
                        #    assign mechanism that precedes in pathway
                        if sender_arg is None:
                            item.init_args[SENDER] = sender_mech
                        elif sender_arg is not sender_mech:
                            raise ProcessError(
                                "Sender of Projection ({}) specified in item {} of"
                                " pathway for {} is not the Mechanism ({}) "
                                "that precedes it in the pathway".format(
                                    item.init_args[NAME], i, self.name, sender_mech.name
                                )
                            )

                    # Check receiver arg
                    try:
                        receiver_arg = item.init_args[kwReceiverArg]
                    except AttributeError:
                        raise ProcessError(
                            "PROGRAM ERROR: initialization_status of {} is {} "
                            "but it does not have init_args".format(
                                item, ContextFlags.DEFERRED_INIT
                            )
                        )
                    except KeyError:
                        raise ProcessError(
                            "PROGRAM ERROR: initialization_status of {} is {} "
                            "but init_args does not have entry for {}".format(
                                item.init_args[NAME], ContextFlags.DEFERRED_INIT, kwReceiverArg
                            )
                        )
                    else:
                        # If receiver is not specified for the Projection,
                        #    assign mechanism that follows it in the pathway
                        if receiver_arg is None:
                            item.init_args[kwReceiverArg] = receiver_mech
                        elif receiver_arg is not receiver_mech:
                            raise ProcessError(
                                "Receiver of Projection ({}) specified in item {} of"
                                " pathway for {} is not the Mechanism ({}) "
                                "that follows it in the pathway".format(
                                    item.init_args[NAME], i, self.name, receiver_mech.name
                                )
                            )

                    # Check if it is specified for learning
                    matrix_spec = item.function_params[MATRIX]
                    if (
                        isinstance(matrix_spec, tuple)
                        and (
                            matrix_spec[1] in {LEARNING, LEARNING_PROJECTION}
                            or isinstance(matrix_spec[1], (LearningProjection, LearningSignal))
                        )
                    ):
                        self.learning = True

                    # Complete initialization of Projection
                    item._deferred_init()

                if item.sender.owner is not sender_mech:
                    raise ProcessError("Sender of Projection ({}) specified in item {} of pathway for {} "
                                       "is not the Mechanism ({}) that precedes it in the pathway".
                                       format(item.name, i, self.name, sender_mech.name))
                if item.receiver.owner is not receiver_mech:
                    # NOTE: previously reported sender_mech.name here, which named the wrong Mechanism
                    raise ProcessError("Receiver of Projection ({}) specified in item {} of pathway for "
                                       "{} is not the Mechanism ({}) that follows it in the pathway".
                                       format(item.name, i, self.name, receiver_mech.name))
                projection = item

                if projection.has_learning_projection:
                    self.learning = True

                # TEST
                # if params:
                #     projection.matrix = params

            # Projection spec is a MappingProjection class reference
            elif inspect.isclass(item) and issubclass(item, MappingProjection):
                # if params:
                #     # Note:  If self.learning is specified, it has already been added to projection_params above
                #     projection_params = params
                projection = MappingProjection(
                    sender=sender_mech,
                    receiver=receiver_mech,
                    # params=projection_params
                )

            # Projection spec is a matrix spec, a keyword for one, or a (matrix, LearningProjection) tuple
            # Note: this is tested above by call to _is_projection_spec()
            elif (
                isinstance(item, (np.matrix, str, tuple))
                or (isinstance(item, np.ndarray) and item.ndim == 2)
            ):
                # If a LearningProjection is explicitly specified for this Projection, use it
                if isinstance(item, tuple):
                    matrix_spec = item
                    learning_projection_specified = True
                # If a LearningProjection is not specified for this Projection but self.learning is, use that
                elif self.learning:
                    matrix_spec = (item, self.learning)
                # Otherwise, do not include any LearningProjection
                else:
                    matrix_spec = item
                projection = MappingProjection(
                    sender=sender_mech,
                    receiver=receiver_mech,
                    matrix=matrix_spec
                )
            else:
                raise ProcessError(
                    "Item {0} ({1}) of pathway for {2} is not "
                    "a valid Mechanism or Projection specification".format(i, item, self.name)
                )
            # Reassign Pathway entry
            #    with Projection as OBJECT item and original params as PARAMS item of the tuple
            # IMPLEMENTATION NOTE:  params is currently ignored
            pathway[i] = projection
            projection._activate_for_compositions(self)

        if learning_projection_specified:
            self.learning = LEARNING
def _check_for_duplicate_projection(self, sndr_mech, rcvr_mech, proj_spec, pathway_index):
'''Check if there is already a projection between sndr_mech and rcvr_mech
If so:
- if it has just found the same project (e.g., as in case of AutoAssociativeProjection), let pass
- otherwise:
- if verbosePref, warn
- replace proj_spec with existing projection
'''
for input_state in rcvr_mech.input_states:
for proj in input_state.path_afferents:
if proj.sender.owner is sndr_mech:
# Skip recurrent projections
try:
if self.pathway[pathway_index] == proj:
continue
except:
pass
if self.prefs.verbosePref:
print("WARNING: Duplicate {} specified between {} and {} ({}) in {}; it will be ignored".
format(Projection.__name__, sndr_mech.name, rcvr_mech.name, proj_spec, self.name))
self.pathway[pathway_index] = proj
return True
return False
def _warn_about_existing_projections_to_first_mechanism(self, mechanism, context=None):
    """Warn (mostly gated on verbose pref) about pre-existing afferent Projections to the first
    Mechanism in the pathway, classifying each by where its sender comes from (cases A-F below).

    Only prints warnings / returns early; does not modify any Projections.
    """
    # Check where the Projection(s) is/are from and, if verbose pref is set, issue appropriate warnings
    for projection in mechanism.input_state.all_afferents:

        # Projection to first Mechanism in Pathway comes from a Process input
        if isinstance(projection.sender, ProcessInputState):
            # If it is:
            # (A) from self, ignore
            # (B) from another Process, warn if verbose pref is set
            if not projection.sender.owner is self:
                if self.prefs.verbosePref:
                    print("WARNING: {0} in pathway for {1} already has an input from {2} that will be used".
                          format(mechanism.name, self.name, projection.sender.owner.name))
            # A Process input was found; remaining afferents need not be checked
            return

        # (C) Projection to first Mechanism in Pathway comes from one in the Process' _mechs;
        #     so warn if verbose pref is set
        if projection.sender.owner in self._mechs:
            if self.prefs.verbosePref:
                print("WARNING: first Mechanism ({0}) in pathway for {1} receives "
                      "a (recurrent) Projection from another Mechanism {2} in {1}".
                      format(mechanism.name, self.name, projection.sender.owner.name))

        # Projection to first Mechanism in Pathway comes from a Mechanism not in the Process;
        #    check if Process is in a System, and Projection is from another Mechanism in the System
        else:
            # NOTE(review): the body of this try is unlikely to raise (isclass short-circuits
            # issubclass), so case (D) below may be unreachable in practice — confirm intent
            try:
                if (inspect.isclass(context) and issubclass(context, System_Base)):
                    # Relabel for clarity
                    system = context
                else:
                    system = None
            except:
                # Process is NOT being implemented as part of a System, so Projection is from elsewhere;
                # (D)  Issue warning if verbose
                if self.prefs.verbosePref:
                    print("WARNING: first Mechanism ({0}) in pathway for {1} receives a "
                          "Projection ({2}) that is not part of {1} or the System it is in".
                          format(mechanism.name, self.name, projection.sender.owner.name))
            else:
                # Process IS being implemented as part of a System,
                if system:
                    # (E) Projection is from another Mechanism in the System
                    #     (most likely the last in a previous Process)
                    if mechanism in system.mechanisms:
                        pass
                    # (F) Projection is from something other than a mechanism,
                    #     so warn irrespective of verbose (since can't be a Process input
                    #     which was checked above)
                    else:
                        print("First Mechanism ({0}) in pathway for {1}"
                              " receives a Projection {2} that is not in {1} "
                              "or its System ({3}); it will be ignored and "
                              "a Projection assigned to it by {3}".
                              format(mechanism.name,
                                     self.name,
                                     projection.sender.owner.name,
                                     context.name))
                # Process is being implemented in something other than a System
                #    so warn (irrespective of verbose)
                elif self.verbosePref:
                    print("WARNING: Process ({}) is being instantiated outside of a System".
                          format(self.name))
def _assign_process_input_projections(self, mechanism, context=None):
    """Create Projection(s) for each item in Process input to InputState(s) of the specified Mechanism

    For each item in Process input:
    - create process_input_state, as sender for MappingProjection to the ORIGIN Mechanism.input_state
    - create the MappingProjection (with process_input_state as sender, and ORIGIN Mechanism as receiver)

    If number of Process inputs == len(ORIGIN Mechanism.defaults.variable):
        - create one Projection for each of the ORIGIN Mechanism.input_state(s)
    If number of Process inputs == 1 but len(ORIGIN Mechanism.defaults.variable) > 1:
        - create a Projection for each of the ORIGIN Mechanism.input_states, and provide Process' input to each
    If number of Process inputs > 1 but len(ORIGIN Mechanism.defaults.variable) == 1:
        - create one Projection for each Process input and assign all to ORIGIN Mechanism.input_state
    Otherwise, if number of Process inputs != len(ORIGIN Mechanism.defaults.variable) and both > 1:
        - raise exception:  ambiguous mapping from Process input values to ORIGIN Mechanism's input_states

    :param Mechanism:
    :return:
    """
    # FIX: LENGTH OF EACH PROCESS INPUT STATE SHOULD BE MATCHED TO LENGTH OF INPUT STATE FOR CORRESPONDING ORIGIN MECHANISM
    process_input = self.defaults.variable

    # Get number of Process inputs
    num_process_inputs = len(process_input)

    # Get number of mechanism.input_states
    #    - assume mechanism.defaults.variable is a 2D np.array, and that
    #    - there is one inputState for each item (1D array) in mechanism.defaults.variable
    num_mechanism_input_states = len(mechanism.defaults.variable)

    # There is a mismatch between number of Process inputs and number of mechanism.input_states:
    if num_process_inputs > 1 and num_mechanism_input_states > 1 and num_process_inputs != num_mechanism_input_states:
        raise ProcessError("Mismatch between number of input values ({0}) for {1} and "
                           "number of input_states ({2}) for {3}".format(num_process_inputs,
                                                                         self.name,
                                                                         num_mechanism_input_states,
                                                                         mechanism.name))

    # Create input state for each item of Process input, and assign to list
    for i in range(num_process_inputs):
        process_input_state = ProcessInputState(owner=self,
                                                variable=process_input[i],
                                                prefs=self.prefs)
        self.process_input_states.append(process_input_state)

    from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection

    # If there is the same number of Process input values and mechanism.input_states, assign one to each
    if num_process_inputs == num_mechanism_input_states:
        for i in range(num_mechanism_input_states):
            # internal_only input_states do not receive Process input
            if mechanism.input_states[i].internal_only:
                continue
            # Insure that each Process input value is compatible with corresponding variable of mechanism.input_state
            input_state_variable = mechanism.input_states[i].socket_template
            if not iscompatible(process_input[i], input_state_variable):
                raise ProcessError("Input value {0} ({1}) for {2} is not compatible with "
                                   "variable for corresponding inputState of {3} (format: {4})".
                                   format(i, process_input[i], self.name, mechanism.name, input_state_variable))
            # Create MappingProjection from Process input state to corresponding mechanism.input_state
            proj = MappingProjection(sender=self.process_input_states[i],
                                     receiver=mechanism.input_states[i],
                                     name=self.name+'_Input Projection')
            proj._activate_for_compositions(self)
            if self.prefs.verbosePref:
                print("Assigned input value {0} ({1}) of {2} to corresponding inputState of {3}".
                      format(i, process_input[i], self.name, mechanism.name))

    # If the number of Process inputs and mechanism.input_states is unequal, but only a single of one or the other:
    # - if there is a single Process input value and multiple mechanism.input_states,
    #     instantiate a single Process input state with projections to each of the mechanism.input_states
    # - if there are multiple Process input values and a single mechanism.input_state,
    #     instantiate multiple Process input states each with a Projection to the single mechanism.input_state
    else:
        for i in range(num_mechanism_input_states):
            if mechanism.input_states[i].internal_only:
                continue
            for j in range(num_process_inputs):
                if not iscompatible(process_input[j], mechanism.defaults.variable[i]):
                    raise ProcessError("Input value {0} ({1}) for {2} is not compatible with "
                                       "variable ({3}) for inputState {4} of {5}".
                                       format(j, process_input[j], self.name,
                                              mechanism.defaults.variable[i], i, mechanism.name))
                # Create MappingProjection from Process input state to corresponding mechanism.input_state
                proj = MappingProjection(sender=self.process_input_states[j],
                                         receiver=mechanism.input_states[i],
                                         name=self.name+'_Input Projection')
                proj._activate_for_compositions(self)
                if self.prefs.verbosePref:
                    print("Assigned input value {0} ({1}) of {2} to inputState {3} of {4}".
                          format(j, process_input[j], self.name, i, mechanism.name))

    # Mark the ORIGIN Mechanism as receiving its input from this Process
    mechanism._receivesProcessInput = True
def _assign_input_values(self, input, execution_id=None, context=None):
"""Validate input, assign each item (1D array) in input to corresponding process_input_state
Returns converted version of input
Args:
input:
Returns:
"""
if isinstance(input, dict):
input = list(input.values())[0]
# Validate input
if input is None:
input = self.first_mechanism.defaults.variable
if (self.prefs.verbosePref and not (context == ContextFlags.COMMAND_LINE or
self.parameters.context.get(execution_id).initializaton_status == ContextFlags.INITIALIZING)):
print("- No input provided; default will be used: {0}")
else:
# Insure that input is a list of 1D array items, one for each processInputState
# If input is a single number, wrap in a list
from numpy import ndarray
if isinstance(input, numbers.Number) or (isinstance(input, ndarray) and input.ndim == 0):
input = [input]
# If input is a list of numbers, wrap in an outer list (for processing below)
if all(isinstance(i, numbers.Number) for i in input):
input = [input]
if len(self.process_input_states) != len(input):
raise ProcessError("Length ({}) of input to {} does not match the number "
"required for the inputs of its origin Mechanisms ({}) ".
format(len(input), self.name, len(self.process_input_states)))
# Assign items in input to value of each process_input_state
for i in range(len(self.process_input_states)):
self.process_input_states[i].parameters.value.set(input[i], execution_id, override=True)
return input
def _update_input(self):
for s, i in zip(self.process_input_states, range(len(self.process_input_states))):
self.input = s.value
def _instantiate__deferred_inits(self, context=None):
    """Instantiate any objects in the Process that have deferred their initialization

    Description:
        For learning:
            go through _mechs in reverse order of pathway since
                LearningProjections are processed from the output (where the training signal is provided) backwards
            exhaustively check all of Components of each Mechanism,
                including all projections to its input_states and _parameter_states
            initialize all items that specified deferred initialization
            construct a _learning_mechs of Mechanism tuples (mech, params):
            add _learning_mechs to the Process' _mechs
            assign input Projection from Process to first Mechanism in _learning_mechs

    IMPLEMENTATION NOTE: assume that the only Projection to a Projection is a LearningProjection
                         this is implemented to be fully general, but at present may be overkill
                         since the only objects that currently use deferred initialization are LearningProjections
    """
    # For each mechanism in the Process, in backwards order through its _mechs
    for item in reversed(self._mechs):
        mech = item
        mech._deferred_init()

        # For each inputState of the mechanism
        for input_state in mech.input_states:
            input_state._deferred_init()
            # Restrict projections to those from mechanisms in the current process
            projections = []
            for projection in input_state.all_afferents:
                try:
                    # Keep only afferents whose sender's owner belongs to this Process
                    if self in projection.sender.owner.processes:
                        projections.append(projection)
                        self._add_projection(projection)
                except AttributeError:
                    # sender (or its owner) has no `processes` attribute — skip it
                    pass
            self._instantiate__deferred_init_projections(projections, context=context)

        # For each ParameterState of the mechanism
        for parameter_state in mech._parameter_states:
            parameter_state._deferred_init()
            # MODIFIED 5/2/17 OLD:
            # self._instantiate__deferred_init_projections(parameter_state.path_afferents)
            # MODIFIED 5/2/17 NEW:
            # Defer instantiation of ControlProjections to System
            #   and there should not be any other type of Projection to the ParameterState of a Mechanism
            from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection
            if not all(isinstance(proj, ControlProjection) for proj in parameter_state.mod_afferents):
                raise ProcessError("PROGRAM ERROR: non-ControlProjection found to ParameterState for a Mechanism")
            # MODIFIED 5/2/17 END

    # CHANGYAN NOTE: check this spot
    # Label monitoring mechanisms and add _learning_mechs to _mechs for execution
    if self._learning_mechs:

        # Add designations to newly created LearningMechanism:
        for object_item in self._learning_mechs:
            mech = object_item

            # If
            # - mech is a TARGET ObjectiveMechanism, and
            # - the mech that projects to mech is a TERMINAL for the current process, and
            # - current process has learning specified
            # then designate mech as a TARGET
            if (isinstance(mech, ObjectiveMechanism) and
                    # any(projection.sender.owner.processes[self] == TERMINAL
                    #     for projection in mech.input_states[SAMPLE].path_afferents) and
                    mech._learning_role is TARGET and
                    self.learning
                    ):
                object_item._add_process(self, TARGET)
            else:
                # mech must be a LearningMechanism;
                # If a learning_rate has been specified for the process, assign that to all LearningMechanism
                #    for which a mechanism-specific learning_rate has NOT been assigned
                if (self.learning_rate is not None and
                        mech.function.learning_rate is None):
                    mech.function.learning_rate = self.learning_rate

                # Assign its label
                object_item._add_process(self, LEARNING)

        # Add _learning_mechs to _mechs
        self._mechs.extend(self._learning_mechs)

        # IMPLEMENTATION NOTE:
        #   LearningMechanism are assigned _phaseSpecMax;
        #   this is so that they will run after the last ProcessingMechansisms have run
def _instantiate__deferred_init_projections(self, projection_list, context=None):
    """Complete deferred initialization for each Projection in projection_list.

    Also completes deferred initialization of any LearningProjections found on the
    Projections' ParameterStates, collecting their associated ObjectiveMechanisms
    and LearningMechanisms into self._learning_mechs, and activating the full set
    of learning-related Projections for this Process.
    """
    # For each Projection in the list
    for projection in projection_list:
        projection._deferred_init()

        # FIX: WHY DOESN'T THE PROJECTION HANDLE THIS? (I.E., IN ITS deferred_init() METHOD?)
        # For each parameter_state of the Projection
        try:
            for parameter_state in projection._parameter_states:
                # Initialize each Projection to the ParameterState (learning or control)
                # IMPLEMENTATION NOTE:  SHOULD ControlProjections BE IGNORED HERE?
                for param_projection in parameter_state.mod_afferents:
                    param_projection._deferred_init()
                    if isinstance(param_projection, LearningProjection):
                        # Get ObjectiveMechanism if there is one, and add to _learning_mechs
                        try:
                            objective_mechanism = projection.objective_mechanism
                        except AttributeError:
                            # Projection has no ObjectiveMechanism; nothing to collect
                            pass
                        else:
                            # If objective_mechanism is not already in _learning_mechs, add it
                            if objective_mechanism and not objective_mechanism in self._learning_mechs:
                                # objective_object_item = objective_mechanism
                                self._learning_mechs.append(objective_mechanism)
                        # Get LearningMechanism and add to _learning_mechs; raise exception if not found
                        try:
                            learning_mechanism = projection.learning_mechanism
                        except AttributeError:
                            raise ProcessError("{} is missing a LearningMechanism".format(param_projection.name))
                        else:
                            # If learning_mechanism is not already in _learning_mechs, add it
                            # (identity comparison, since Mechanisms may compare equal without being the same)
                            if (learning_mechanism and not any(learning_mechanism is object_item for
                                                               object_item in self._learning_mechs)):
                                self._learning_mechs.append(learning_mechanism)

                        # Activate this Process for the LearningProjection and its related components
                        # NOTE(review): AttributeError anywhere in this block is treated as
                        # "no learning components present" and silently ignored — confirm
                        try:
                            lc = param_projection._learning_components
                            for proj in [
                                param_projection,
                                lc.error_projection,
                                lc._activation_mech_input_projection,
                                lc._activation_mech_output_projection,
                            ]:
                                proj._activate_for_compositions(self)
                                self._add_projection(proj)
                            for proj in lc.learning_mechanism.projections:
                                proj._activate_for_compositions(self)
                                self._add_projection(proj)
                        except AttributeError:
                            pass
        # Not all Projection subclasses instantiate ParameterStates
        except TypeError as e:
            if 'parameterStates' in e.args[0]:
                pass
            else:
                error_msg = 'Error in attempt to initialize LearningProjection ({}) for {}: \"{}\"'.\
                    format(param_projection.name, projection.name, e.args[0])
                raise ProcessError(error_msg)
def _check_for_target_mechanisms(self):
    """Check for and assign TARGET ObjectiveMechanism to use for reporting error during learning.

     This should only be called if self.learning is specified
     Identify TARGET Mechanisms and assign to self.target_nodes,
         assign self to each TARGET Mechanism
         and report assignment if verbose

     Returns True if TARGET Mechanisms are found and/or assigned, else False
    """
    from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
    from psyneulink.core.components.mechanisms.adaptive.learning.learningmechanism \
        import ACTIVATION_INPUT

    def trace_learning_objective_mechanism_projections(mech):
        """Recursively trace projections to Objective mechanisms;
               return TARGET ObjectiveMechanism if one is found upstream;
               return None if no TARGET ObjectiveMechanism is found.
        """
        for input_state in mech.input_states:
            for projection in input_state.path_afferents:
                sender = projection.sender.owner
                # If Projection is not from another ObjectiveMechanism, ignore
                if not isinstance(sender, (ObjectiveMechanism)):
                    continue
                if isinstance(sender, ObjectiveMechanism) and sender._learning_role is TARGET:
                    return sender
                if sender.input_states:
                    # Recurse upstream through the sender's own afferents
                    target_mech = trace_learning_objective_mechanism_projections(sender)
                    if target_mech:
                        return target_mech
                    else:
                        continue
                else:
                    continue

    if not self.learning:
        raise ProcessError("PROGRAM ERROR: _check_for_target_mechanisms should only be called"
                           " for a process if it has a learning specification")

    # TARGET ObjectiveMechanisms already present in this Process' _mechs
    target_mechs = list(object_item
                        for object_item in self._mechs
                        if (isinstance(object_item, ObjectiveMechanism) and
                            object_item._learning_role is TARGET))

    if target_mechs:
        # self.target_nodes = target_mechs
        self._target_mechs = target_mechs
        if self.prefs.verbosePref:
            print("\'{}\' assigned as TARGET Mechanism(s) for \'{}\'".
                  format([mech.name for mech in self._target_mechs], self.name))
        return True

    # No target_mechs already specified, so get from learning_mechanism
    elif self._learning_mechs:
        last_learning_mech = self._learning_mechs[0]

        # Trace projections to first learning ObjectiveMechanism, which is for the last mechanism in the process,
        #    unless TERMINAL mechanism of process is part of another process that has learning implemented
        #    in which case, shouldn't assign target ObjectiveMechanism, but rather just a LearningMechanism)
        # NOTE: ignores need for ObjectiveMechanism for AutoAssociativeLearning
        try:
            target_mech = trace_learning_objective_mechanism_projections(last_learning_mech)
        except IndexError:
            raise ProcessError("Learning specified for {} but no ObjectiveMechanisms or LearningMechanism found"
                               .format(self.name))

        if target_mech:
            if self.prefs.verbosePref:
                warnings.warn("{} itself has no Target Mechanism, but its TERMINAL_MECHANISM ({}) "
                              "appears to be in one or more pathways ({}) that has one".
                              format(self.name,
                                     # list(self.terminalMechanisms)[0].name,
                                     self.last_mechanism.name,
                                     list(process.name for process in target_mech.processes)))

        # Check for AutoAssociativeLearningMechanism:
        #    its *ACTIVATION_INPUT* InputState should receive a projection from the same Mechanism
        #    that receives a MappingProjection to which its *LEARNING_SIGNAL* projects
        # NOTE(review): the generator below compares the afferents' senders against a list
        #    built from the SAME afferents (the inner comprehension's `projection` shadows the
        #    outer one), so it is True whenever ACTIVATION_INPUT has any afferent at all —
        #    looks like a shadowed-variable bug; confirm the intended comparison
        elif any(projection.sender.owner in [projection.sender.owner
                                             for projection in
                                             last_learning_mech.input_states[ACTIVATION_INPUT].path_afferents]
                 for projection in
                 last_learning_mech.input_states[ACTIVATION_INPUT].path_afferents):
            pass
        else:
            raise ProcessError("PROGRAM ERROR: {} has a learning specification ({}) "
                               "but no TARGET ObjectiveMechanism".format(self.name, self.learning))
        return True

    else:
        return False
def _instantiate_target_input(self, context=None):
    # Create a ProcessInputState for each item of self.target and wire it, via a
    # MappingProjection, to the TARGET InputState of the corresponding TARGET Mechanism.

    if self.target is None:
        # target arg was not specified in Process' constructor,
        #    so use the value of the TARGET InputState for each TARGET Mechanism as the default
        self.target = [mech.input_states[TARGET].value for mech in self._target_mechs]
        if self.verbosePref:
            warnings.warn("Learning has been specified for {} and it has TARGET Mechanism(s), but its "
                          "\'target\' argument was not specified; default value(s) will be used ({})".
                          format(self.name, self.target))
    else:
        self.target = np.atleast_2d(self.target)

    # Create ProcessInputState for each item of target and
    #    assign to TARGET inputState of each item of _target_mechs
    # NOTE(review): zip silently truncates if len(self.target) != len(self._target_mechs)
    for target_mech, target in zip(self._target_mechs, self.target):
        target_mech_target = target_mech.input_states[TARGET]

        target = np.atleast_1d(target)

        # Check that length of process' target input matches length of TARGET Mechanism's target input
        if len(target) != len(target_mech_target.value):
            raise ProcessError("Length of target ({}) does not match length of input for TARGET Mechanism {} ({})".
                               format(len(target),
                                      target_mech.name,
                                      len(target_mech_target.value)))

        target_input_state = ProcessInputState(owner=self,
                                               variable=target,
                                               prefs=self.prefs,
                                               name=TARGET)
        self.target_input_states.append(target_input_state)

        # Add MappingProjection from target_input_state to ComparatorMechanism's TARGET InputState
        from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
        proj = MappingProjection(sender=target_input_state,
                                 receiver=target_mech_target,
                                 name=self.name+'_Input Projection to '+target_mech_target.name)
        proj._activate_for_compositions(self)
        # MODIFIED 8/14/17 END
def initialize(self, execution_context=None):
    """Assign the values specified for each Mechanism in the process' `initial_values` attribute. This usually
    occurs at the beginning of one or a series of executions invoked by a call to the Process` `execute
    <Process.execute>` or `run <Process.run>` methods.
    """
    # FIX: INITIALIZE PROCESS INPUTS??
    for mechanism, initial_value in self.initial_values.items():
        mechanism.initialize(initial_value, execution_context)
def execute(
        self,
        input=None,
        target=None,
        execution_id=None,
        base_execution_id=None,
        runtime_params=None,
        termination_processing=None,
        termination_learning=None,
        context=None
):
    """Execute the Mechanisms specified in the process` `pathway` attribute.

    COMMENT:
        First check that input is provided (required) and appropriate.
        Then execute each Mechanism in the order they appear in the `pathway` list.
    COMMENT

    Arguments
    ---------

    input : List[value] or ndarray: default zeroes
        specifies the value(s) of the Process' `input <Process.input>` for the `execution <Process_Execution>`;
        it is provided as the input to the `origin_mechanism <Process.origin_mechanism>` and must be compatible
        (in number and type of items) with its `variable <Mechanism_Base.variable>` attribute (see
        `Process_Input_And_Output` for details).

    target : List[value] or ndarray: default None
        specifies the target value assigned to each of the `target_nodes <Process.target_nodes>` for
        the `execution <Process_Execution>`.  Each item is assigned to the *TARGET* `InputState
        <ComparatorMechanism_Structure>` of the corresponding `ComparatorMechanism` in `target_nodes
        <Process.target_nodes>`; the number of items must equal the number of items in
        `target_nodes <Process.target_nodes>`, and each item of **target** be compatible with the
        `variable <InputState.variable>` attribute of the *TARGET* `InputState <ComparatorMechanism_Structure>`
        for the corresponding `ComparatorMechanism` in `target_nodes <Process.target_nodes>`.

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterState_Specification>` that can include any of the parameters used
        as arguments to instantiate the object. Use parameter's name as the keyword for its entry. Values specified
        for parameters in the dictionary override any assigned to those parameters in arguments of the constructor.

    COMMENT:
        context : str : default EXECUTING + self.name
            a string used for contextualization of instantiation, hierarchical calls, executions, etc.
    COMMENT

    Returns
    -------

    output of Process : ndarray
        the `value <OutputState.value>` of the `primary OutputState <OutputState_Primary>` of the
        `terminal_mechanism <Process.terminal_mechanism>` of the Process.

    COMMENT:
        output of Process : list
            value of the Process' `output <Process.output>` attribute (same as the `output_values
            <Mechanism_Base.output_values>` attribute of the `terminal_mechanism <Process.terminal_mechanism>`.
    COMMENT

    COMMENT:
       IMPLEMENTATION NOTE:
       Still need to:
       * coordinate execution of multiple processes (in particular, Mechanisms that appear in more than one process)
       * deal with different time scales
    COMMENT
    """
    from psyneulink.core.components.mechanisms.adaptive.learning.learningmechanism import LearningMechanism

    if not context:
        # Stand-alone call (not from a System): mark context as COMPOSITION-sourced
        # and set the execution string used in reporting
        context = ContextFlags.COMPOSITION
        self._assign_context_values(
            execution_id,
            execution_phase=ContextFlags.PROCESSING,
            source=context,
            string=EXECUTING + " " + PROCESS + " " + self.name
        )

    if execution_id is None:
        execution_id = self.default_execution_id

    # Propagate this Process' execution context to all of its Mechanisms and Projections
    for mech in self.mechanisms:
        # NOTE(review): self._execution_id is not assigned in this method;
        # presumably set elsewhere on the Process — confirm
        mech._execution_id = self._execution_id
        mech._assign_context_values(execution_id, composition=self)

    for proj in self.projections:
        proj._assign_context_values(execution_id, composition=self)

    # initialize from base context but don't overwrite any values already set for this execution_id
    self._initialize_from_context(execution_id, base_execution_id, override=False)

    # Report output if reporting preference is on and this is not an initialization run
    report_output = self.prefs.reportOutputPref and self.parameters.context.get(execution_id).initialization_status == ContextFlags.INITIALIZED

    # FIX: CONSOLIDATE/REARRANGE _assign_input_values, _check_args, AND ASSIGNMENT OF input TO variable
    # FIX: (SO THAT assign_input_value DOESN'T HAVE TO RETURN input
    variable = self._assign_input_values(input=input, execution_id=execution_id, context=context)

    self._check_args(variable, runtime_params)

    # Use Process self.input as input to first Mechanism in Pathway
    self.parameters.input.set(variable, execution_id)

    # Generate header and report input
    if report_output:
        self._report_process_initiation(input=variable, separator=True)

    # Execute each Mechanism in the pathway, in the order listed, except those used for learning
    for mechanism in self._mechs:
        if (isinstance(mechanism, LearningMechanism) or
                (isinstance(mechanism, ObjectiveMechanism) and mechanism._role is LEARNING)):
            continue

        # Execute Mechanism
        # Note: DON'T include input arg, as that will be resolved by mechanism from its sender projections
        mechanism.parameters.context.get(execution_id).execution_phase = ContextFlags.PROCESSING
        context = ContextFlags.PROCESS
        mechanism.execute(execution_id=execution_id, context=context)
        mechanism.parameters.context.get(execution_id).execution_phase = ContextFlags.IDLE

        if report_output:
            # FIX: USE clamp_input OPTION HERE, AND ADD HARD_CLAMP AND SOFT_CLAMP
            self._report_mechanism_execution(mechanism)

        if mechanism is self.first_mechanism and not self.clamp_input:
            # Zero self.input to first mechanism after first run
            #     in case it is repeated in the pathway or receives a recurrent Projection
            variable = variable * 0

    # Execute LearningMechanisms
    if self._learning_enabled:
        self._execute_learning(execution_id=execution_id, target=target, context=context)

    if report_output:
        self._report_process_completion(separator=True, execution_context=execution_id)

    # FIX: WHICH SHOULD THIS BE?
    return self.output_state.parameters.value.get(execution_id)
    # return self.output
def _execute_learning(self, execution_id=None, target=None, context=None):
    """Update each LearningProjection for mechanisms in _mechs of process

    Begins with Projection(s) to last Mechanism in _mechs, and works backwards.

    Arguments
    ---------
    execution_id : default None
        execution context in which parameter values are set and read.
    target : value or callable : default None
        target value(s) for this learning pass; if None, self.target
        (assigned at construction or by a System/Run) is used.
    """
    # FIRST, assign targets

    # If target was provided to execute, use that; otherwise, will use value provided on instantiation
    if target is not None:
        target = np.atleast_2d(target)
    else:
        target = self.target

    # If targets were specified as a function in call to Run() or in System (and assigned to self.targets),
    #  call the function now (i.e., after execution of the pathways, but before learning)
    #  and assign value to self.target (that will be used below to assign values to target_input_states)
    # Note: this accommodates functions that predicate the target on the outcome of processing
    #       (e.g., for rewards in reinforcement learning)
    if isinstance(target, function_type):
        target = target()
    # If target itself is callable, call that now
    if callable(target):
        target = target()

    # Assign items of self.target to target_input_states
    #   (ProcessInputStates that project to corresponding target_nodes for the Process)
    for i, target_input_state in enumerate(self.target_input_states):
        target_input_state.parameters.value.set(target[i], execution_id, override=True)

    # Zero any input from projections to target(s) from any other processes
    for target_mech in self.target_mechanisms:
        for process in target_mech.processes:
            if process is self:
                continue
            for target_input_state in process.target_input_states:
                target_input_state.value *= 0

    # THEN, execute ComparatorMechanism and LearningMechanism
    for mechanism in self._learning_mechs:
        mechanism._assign_context_values(execution_id, execution_phase=ContextFlags.LEARNING)
        mechanism.execute(execution_id=execution_id, context=context)
        mechanism._assign_context_values(execution_id, execution_phase=ContextFlags.IDLE)

    # FINALLY, execute LearningProjections to MappingProjections in the process' pathway
    for mech in self._mechs:
        mech._assign_context_values(execution_id, execution_phase=ContextFlags.LEARNING)
        mech._assign_context_values(
            execution_id,
            string=self.parameters.context.get(execution_id).string.replace(EXECUTING, LEARNING + ' ')
        )

        # IMPLEMENTATION NOTE:
        #    This implementation restricts learning to ParameterStates of projections to input_states
        #    That means that other parameters (e.g. object or function parameters) are not currently learnable

        # For each inputState of the mechanism
        for input_state in mech.input_states:
            # For each Projection in the list
            for projection in input_state.path_afferents:

                # Skip learning if Projection is an input from the Process or a system
                #    or comes from a mechanism that belongs to another process
                #    (this is to prevent "double-training" of projections from mechanisms belonging
                #    to different processes when call to _execute_learning() comes from a System)
                sender = projection.sender.owner
                if isinstance(sender, Process) or self not in sender.processes:
                    continue

                # Call parameter_state.update with LEARNING in context to update LearningSignals
                # Note: context is set on the projection,
                #       as the ParameterStates are assigned their owner's context in their update methods
                # Note: do this rather than just calling LearningSignals directly
                #       since parameter_state.update() handles parsing of LearningProjection-specific params
                projection._assign_context_values(
                    execution_id,
                    string=self.parameters.context.get(execution_id).string.replace(EXECUTING, LEARNING + ' '),
                    execution_phase=ContextFlags.LEARNING
                )

                # For each parameter_state of the Projection
                try:
                    for parameter_state in projection._parameter_states:
                        # Skip learning if the LearningMechanism to which the LearningProjection belongs is disabled
                        if all(projection.sender.owner.learning_enabled is False
                               for projection in parameter_state.mod_afferents):
                            continue

                        # NOTE: This will need to be updated when runtime params are re-enabled
                        # parameter_state.update(params=params, context=context)
                        parameter_state.update(execution_id=execution_id, context=context)

                # Not all Projection subclasses instantiate ParameterStates
                except AttributeError as e:
                    # BUG FIX: the original compared `e.args[0] is '_parameter_states'`,
                    # an identity test against a string literal that also never matches
                    # Python's actual AttributeError message
                    # ("'X' object has no attribute '_parameter_states'"), so the
                    # intended pass-through never fired.  Use substring membership,
                    # matching the sibling check in _instantiate__deferred_init_projections.
                    if '_parameter_states' in e.args[0]:
                        pass
                    else:
                        raise ProcessError("PROGRAM ERROR: unrecognized attribute (\'{}\') encountered "
                                           "while attempting to update {} {} of {}".
                                           format(e.args[0], parameter_state.name, ParameterState.__name__,
                                                  projection.name))

                projection._assign_context_values(execution_id, execution_phase=ContextFlags.IDLE)

        mech._assign_context_values(
            execution_id,
            execution_phase=ContextFlags.IDLE,
            string=self.parameters.context.get(execution_id).string.replace(LEARNING, EXECUTING)
        )
def run(self,
        inputs,
        num_trials=None,
        initialize=False,
        initial_values=None,
        targets=None,
        learning=None,
        call_before_trial=None,
        call_after_trial=None,
        call_before_time_step=None,
        call_after_time_step=None
        ):
    """Run a sequence of executions

    COMMENT:
        Call execute method for each execution in a sequence specified by the `inputs` argument (required).
        See `Run` for details of formatting input specifications.
    COMMENT

    Arguments
    ---------

    inputs : List[input] or ndarray(input) : default default_variable for a single execution
        specifies the input used to `execute <Process_Execution>` the Process for each `TRIAL` in a sequence of
        executions  (see `Run` for formatting requirements and options). Each item of the outermost level (if a
        nested list) or axis 0 (if an ndarray) is assigned as the `input <Process.input>` to the Process for the
        corresponding `TRIAL`, and therefore must be compatible (in number and type of items) with the `variable
        <Mechanism_Base.variable>` of the `origin_mechanism <Process.origin_mechanism>` for the Process. If the
        number of items is less than **num_trials**, the **inputs** are cycled until the number of `TRIALS`\\s
        specified in **num_trials** has been executed.

    num_trials : int : default None
        number of `TRIAL`\\s to execute.  If the number exceeds the number of **inputs** specified, they are cycled
        until the number of `TRIALS`\\s specified in **num_trials** has been executed.

    initialize : bool default False
        specifies whether to call the Process` `initialize <Process.initialize>` method before executing
        each `TRIAL`;  if it is `False`, then `initialize <Process.initialize>` is called only *once*,
        before the first `TRIAL` executed.

    initial_values : ProcessingMechanism, List[input] or np.ndarray(input)] : default None
        specifies the values used to initialize `ProcessingMechanisms <ProcessingMechanism>` designated as
        `INITIALIZE_CYCLE` whenever the Process' `initialize <Process.initialize>` method is called. The key
        for each entry must be a ProcessingMechanism `designated <Process_Mechanism_Initialize_Cycle>`
        `INITIALIZE_CYCLE`, and the value must be a number, list or np.array that is compatible with the format
        of the ProcessingMechanism's `value <Mechanism_Base.value>` attribute. ProcessingMechanisms designated as
        `INITIALIZE_CYCLE` but not specified in **initial_values** are initialized with the value of their
        `variable <Mechanism_Base.variable>` attribute (the default input for that Mechanism).

    targets : List[input] or np.ndarray(input) : default None
        specifies the target value assigned to each of the `target_nodes <Process.target_nodes>` in
        each `TRIAL` of execution.  Each item of the outermost level (if a nested list) or axis 0 (if an ndarray)
        corresponds to a single `TRIAL`;  the number of items must equal the number of items in the **inputs**
        argument.  Each item is assigned to the *TARGET* `InputState <ComparatorMechanism_Structure>` of the
        corresponding `ComparatorMechanism` in `target_nodes <Process.target_nodes>`; the number of
        items must equal the number of items in `target_nodes <Process.target_nodes>`, and each item
        of **target** be compatible with the `variable <InputState.variable>` attribute of the *TARGET* `InputState
        <ComparatorMechanism_Structure>` for the corresponding `ComparatorMechanism` in `target_nodes
        <Process.target_nodes>`.

    learning : bool :  default None
        enables or disables `learning <Process_Execution_Learning>` during execution.
        If it is not specified, its current value (from possible prior assignment) is left intact.
        If `True`, learning is forced on; if `False`, learning is forced off.

    call_before_trial : Function : default None
        called before each `TRIAL` in the sequence is executed.

    call_after_trial : Function : default None
        called after each `TRIAL` in the sequence is executed.

    call_before_time_step : Function : default None
        called before each `TIME_STEP` of each trial is executed.

    call_after_time_step : Function : default None
        called after each `TIME_STEP` of each trial is executed.

    Returns
    -------

    <Process>.results : List[OutputState.value]
        list of the `value <OutputState.value>`\\s of the `primary OutputState <OutputState_Primary>` for the
        `terminal_mechanism <Process.terminal_mechanism>` of the Process returned for each execution.
    """
    # Fall back to the Process' own initial_values when none are passed explicitly
    if initial_values is None and self.initial_values:
        initial_values = self.initial_values

    # Delegate trial/iteration management to the shared environment runner
    # (the local name `run` deliberately shadows this method within its scope)
    from psyneulink.core.globals.environment import run
    return run(self,
               inputs=inputs,
               num_trials=num_trials,
               initialize=initialize,
               initial_values=initial_values,
               targets=targets,
               learning=learning,
               call_before_trial=call_before_trial,
               call_after_trial=call_after_trial,
               call_before_time_step=call_before_time_step,
               call_after_time_step=call_after_time_step)
def _report_process_initiation(self, input=None, separator=False):
    """Print a report of the Process' initiation to standard output.

    Parameters
    ----------
    input : ndarray
        input to ORIGIN Mechanism for current execution.  By default, it is the value specified by the
        `ProcessInputState <ProcessInputState>` that projects to the ORIGIN Mechanism.  Used by system to specify
        the input from the `SystemInputState <SystemInputState>` when the ORIGIN Mechanism is executed as part of
        that System.

    separator : boolean
        determines whether separator is printed above output
    """
    if separator:
        print("\n\n****************************************\n")

    pathway_str = re.sub(r'[\[,\],\n]', '', str(self.mechanism_names))
    print("\n\'{}\' executing with:\n- pathway: [{}]".format(append_type_to_name(self), pathway_str))

    if input is None:
        input = self.input
    print("- input: {}".format(input))
def _report_mechanism_execution(self, mechanism):
# DEPRECATED: Reporting of mechanism execution relegated to individual mechanism prefs
pass
# print("\n{0} executed {1}:\n- output: {2}\n\n--------------------------------------".
# format(self.name,
# mechanism.name,
# re.sub('[\[,\],\n]','',
# str(mechanism.outputState.value))))
def _report_process_completion(self, execution_context=None, separator=False):
    # Print the Process' output (each element rounded to 3 significant digits),
    # and, if learning is enabled, the MSE of each target mechanism.
    print("\n\'{}' completed:\n- output: {}".
          format(append_type_to_name(self),
                 re.sub(r'[\[,\],\n]','',str([float("{:0.3}".format(float(i))) for i in self.output_state.parameters.value.get(execution_context)]))))

    if self.learning:
        from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import MSE
        for mech in self.target_mechanisms:
            if not MSE in mech.output_states:
                continue
            print("\n- MSE: {:0.3}".
                  format(float(mech.output_states[MSE].parameters.value.get(execution_context))))

    # NOTE(review): the separator is only printed when learning is NOT enabled
    # (elif) — confirm that suppressing it during learning is intentional
    elif separator:
        print("\n\n****************************************\n")
def show(self, options=None):
    """Print list of all Mechanisms in the process, followed by its `ORIGIN` and `TERMINAL` Mechanisms.

    Arguments
    ---------

    COMMENT:
        options : InspectionOptions
            [TBI]
    COMMENT
    """
    # # IMPLEMENTATION NOTE: Stub for implementing options:
    # if options and self.InspectOptions.ALL_OUTPUT_LABELS in options:
    #     pass

    print("\n---------------------------------------------------------")
    print("\n{}\n".format(self.name))

    print("\tLearning enabled: {}".format(self._learning_enabled))

    print("\n\tMechanisms:")
    for object_item in self._mechs:
        print("\t\t{}".format(object_item.name))

    # BUG FIX: the two header lines below previously called .format(self.name) on
    # format strings containing no placeholder, silently discarding the argument;
    # the no-op calls are removed (the printed text is unchanged).
    print("\n\tOrigin Mechanism: ")
    print("\t\t{}".format(self.origin_mechanism.name))

    print("\n\tTerminal Mechanism: ")
    print("\t\t{}".format(self.terminal_mechanism.name))
    for output_state in self.terminal_mechanism.output_states:
        print("\t\t\t{0}".format(output_state.name))

    print("\n---------------------------------------------------------")
def _add_projection(self, projection):
self.projections.append(projection)
@property
def function(self):
    """Callable interface to the Process: an alias for its `execute` method."""
    return self.execute

@property
def mechanisms(self):
    # All Mechanisms in the Process (delegates to the _all_mechanisms list)
    return self._all_mechanisms.mechanisms

@property
def mechanism_names(self):
    # Names of all Mechanisms in the Process
    return self._all_mechanisms.names

@property
def output_state(self):
    # Primary OutputState of the last (TERMINAL) Mechanism in the pathway
    return self.last_mechanism.output_state

@property
def output(self):
    # FIX: THESE NEED TO BE PROPERLY MAPPED
    # return np.array(list(item.value for item in self.last_mechanism.output_states.values()))
    return self.last_mechanism.output_values

@property
def origin_mechanism(self):
    # Alias: first Mechanism in the pathway (ORIGIN)
    return self.first_mechanism

@property
def terminal_mechanism(self):
    # Alias: last Mechanism in the pathway (TERMINAL)
    return self.last_mechanism

@property
def numPhases(self):
    # Number of execution phases (maximum phase spec + 1)
    return self._phaseSpecMax + 1

@property
def _dependent_components(self):
    # Components owned by this Process, in addition to those of the superclass
    return list(itertools.chain(
        super()._dependent_components,
        self._mechs,
        self.projections,
    ))
class ProcessInputState(OutputState):
    """Represents inputs and targets specified in a call to the Process' `execute <Process.execute>` and `run
    <Process.run>` methods.

    COMMENT:
        Each instance encodes one of the following:
        - an input to the Process and provides it to a `MappingProjection` that projects to one or more
            `input_states <Mechanism_Base.input_states>` of the `ORIGIN` Mechanism in the process.
        - a target to the Process (also a 1d array) and provides it to a `MappingProjection` that
             projects to the `TARGET` Mechanism of the process.
    COMMENT

    .. _ProcessInputState:

    A ProcessInputState is created for each `InputState` of the `origin_mechanism`, and for the *TARGET* `InputState
    <ComparatorMechanism_Structure>` of each `ComparatorMechanism <ComparatorMechanism>` listed in `target_nodes
    <Process.target_nodes>`.  A `MappingProjection` is created that projects to each of these InputStates
    from the corresponding ProcessingInputState.  When the Process' `execute <Process.execute>` or
    `run <Process.run>` method is called, each item of its **inputs** and **targets** arguments is assigned as
    the `value <ProcessInputState.value>` of a ProcessInputState, which is then conveyed to the
    corresponding InputState of the `origin_mechanism <Process.origin_mechanism>` and `terminal_mechanisms
    <Process.terminal_mechanisms>`.  See `Process_Input_And_Output` for additional details.

    COMMENT:
        .. Declared as a sublcass of OutputState so that it is recognized as a legitimate sender to a Projection
           in Projection_Base._instantiate_sender()

           value is used to represent the corresponding item of the input arg to process.execute or process.run
    COMMENT
    """

    class Parameters(OutputState.Parameters):
        """
            Attributes
            ----------

                variable
                    see `variable <ProcessInputState.variable>`

                    :default value: numpy.array([0])
                    :type: numpy.ndarray
                    :read only: True

                value
                    see `value <ProcessInputState.value>`

                    :default value: numpy.array([0])
                    :type: numpy.ndarray
                    :read only: True

        """
        # just grabs input from the process
        variable = Parameter(np.array([0]), read_only=True)
        value = Parameter(np.array([0]), read_only=True)

    def __init__(self, owner=None, variable=None, name=None, prefs=None):
        """Pass variable to MappingProjection from Process to first Mechanism in Pathway

        :param variable:
        """
        if not name:
            self.name = owner.name + "_" + kwProcessInputState
        else:
            self.name = owner.name + "_" + name
        self.prefs = prefs
        self.efferents = []
        self.owner = owner
        # NOTE(review): deliberately does NOT call super().__init__();
        # only the minimal attributes needed to act as a Projection sender are
        # set up here — confirm this shortcut remains valid if OutputState changes
        self.parameters = self.Parameters(owner=self, parent=self.class_parameters)
        self.defaults = Defaults(owner=self, variable=variable, value=variable)

        self.parameters.value.set(variable, override=True)
        # self.path_afferents = []
        # self.index = PRIMARY
        # self.assign = None

    @property
    def _dependent_components(self):
        # Only the outgoing MappingProjections depend on this state
        return list(itertools.chain(
            self.efferents,
        ))
# (process, process_input) pairing used by ProcessList
ProcessTuple = namedtuple('ProcessTuple', ['process', 'input'])
class ProcessList(UserList):
    """Provides access to items in (process, process_input) tuples in a list of ProcessTuples
    """

    def __init__(self, owner, tuples_list:list):
        """Validate that every item of tuples_list is a ProcessTuple and store the list."""
        super().__init__()
        self.owner = owner
        for item in tuples_list:
            if not isinstance(item, ProcessTuple):
                raise ProcessError("{} in the tuples_list arg of ProcessList() is not a ProcessTuple".format(item))
        self.process_tuples = tuples_list

    def __getitem__(self, item):
        """Return the process of the indexed ProcessTuple."""
        return self.process_tuples[item].process

    def __setitem__(self, key, value):
        # BUG FIX: the original executed `raise ("MyList is read only ")`, raising a
        # bare string, which in Python 3 surfaces as an unrelated
        # TypeError("exceptions must derive from BaseException"); raise a proper
        # TypeError with a meaningful message instead (callers catching TypeError
        # are unaffected).
        raise TypeError("ProcessList is read only")

    def __len__(self):
        return len(self.process_tuples)

    def _get_tuple_for_process(self, process):
        """Return first ProcessTuple containing the specified process, or None if not found.

        Uses identity (`is`) comparison, since distinct processes may compare equal.
        """
        # (loop variable renamed: the original shadowed the ProcessTuple class name)
        return next((pt for pt in self.process_tuples if pt.process is process), None)

    @property
    def process_tuples_sorted(self):
        """Return list of ProcessTuples sorted by process name"""
        return sorted(self.process_tuples, key=lambda process_tuple: process_tuple.process.name)

    @property
    def processes(self):
        """Return list of all processes in ProcessList"""
        return list(item.process for item in self.process_tuples)

    @property
    def processNames(self):
        """Return names of all processes in ProcessList"""
        return list(item.process.name for item in self.process_tuples)

    @property
    def _mechs(self):
        # Backing slot is a plain dunder-style name (leading AND trailing
        # underscores, so it is NOT name-mangled)
        return self.__mechs__

    @_mechs.setter
    def _mechs(self, value):
        self.__mechs__ = value
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import random
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, u'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils
import salt.utils.args
import salt.utils.context
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.schedule
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
    '''
    Resolves the master_ip and master_uri options

    :param opts: minion opts dict; reads master, master_port, ipv6,
        retry_dns, file_client, use_master_when_local, __role
    :param fallback: when True, unresolvable masters fall back to
        127.0.0.1 instead of re-raising SaltClientError
    :return: dict with 'master_ip' and 'master_uri' keys
    :raises SaltSystemExit: (code 42) when the master setting is empty
        or invalid
    '''
    ret = {}
    check_dns = True
    # Masterless minions (local file_client without use_master_when_local)
    # never need DNS resolution; they get the loopback address below.
    if (opts.get(u'file_client', u'remote') == u'local' and
            not opts.get(u'use_master_when_local', False)):
        check_dns = False

    if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            if opts[u'master'] == u'':
                raise SaltSystemExit
            ret[u'master_ip'] = \
                salt.utils.dns_check(opts[u'master'], int(opts[u'master_port']), True, opts[u'ipv6'])
        except SaltClientError:
            # DNS lookup failed.  With retry_dns set, loop forever until the
            # master resolves (sleeping retry_dns seconds between attempts);
            # otherwise either fall back to loopback or re-raise.
            if opts[u'retry_dns']:
                while True:
                    import salt.log
                    msg = (u'Master hostname: \'{0}\' not found or not responsive. '
                           u'Retrying in {1} seconds').format(opts[u'master'], opts[u'retry_dns'])
                    if salt.log.setup.is_console_configured():
                        log.error(msg)
                    else:
                        # Logging not yet set up; make sure the operator
                        # still sees the warning.
                        print(u'WARNING: {0}'.format(msg))
                    time.sleep(opts[u'retry_dns'])
                    try:
                        ret[u'master_ip'] = salt.utils.dns_check(
                            opts[u'master'], int(opts[u'master_port']), True, opts[u'ipv6']
                        )
                        break
                    except SaltClientError:
                        pass
            else:
                if fallback:
                    ret[u'master_ip'] = u'127.0.0.1'
                else:
                    raise
        except SaltSystemExit:
            # Empty/invalid master setting: build a role-appropriate error
            # message and abort with exit code 42.
            unknown_str = u'unknown address'
            master = opts.get(u'master', unknown_str)
            if master == u'':
                master = unknown_str
            if opts.get(u'__role') == u'syndic':
                err = u'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      u'Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = u'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      u'Set \'master\' value in minion config.'.format(master)
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret[u'master_ip'] = u'127.0.0.1'

    # Warn when the resolved IP differs from what the opts already carry.
    if u'master_ip' in ret and u'master_ip' in opts:
        if ret[u'master_ip'] != opts[u'master_ip']:
            log.warning(
                u'Master ip address changed from %s to %s',
                opts[u'master_ip'], ret[u'master_ip']
            )
    ret[u'master_uri'] = u'tcp://{ip}:{port}'.format(
        ip=ret[u'master_ip'], port=opts[u'master_port'])
    return ret
def prep_ip_port(opts):
    '''
    Split ``opts['master']`` into host and (optional) port.

    Returns a dict with a ``master`` key and, when the master value
    carried an explicit port, an integer ``master_port`` key.  The
    master value is used verbatim when ``master_uri_format`` is
    ``ip_only`` or when it is a bare ipv6 address; ``is_ipv6`` returns
    False for bracketed definitions such as ``master: '[::1]:1234'``,
    so those are still split (and the brackets stripped).
    '''
    ret = {}
    if opts[u'master_uri_format'] == u'ip_only' or salt.utils.network.is_ipv6(opts[u'master']):
        ret[u'master'] = opts[u'master']
        return ret
    host, sep, port = opts[u'master'].rpartition(u':')
    if not sep:
        # e.g. master: mysaltmaster -- no port given, pass through as-is
        ret[u'master'] = port
    else:
        # e.g. master: localhost:1234 / 127.0.0.1:1234 / [::1]:1234
        # Strip brackets for ipv6 support; cast the port back to an int,
        # otherwise a TypeError is thrown in socket calls elsewhere in
        # the minion and utils code.
        ret[u'master'] = host.strip(u'[]')
        ret[u'master_port'] = int(port)
    return ret
def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    The following optional Keyword Arguments are handled:

    mode: anything os.makedirs would accept as mode; also enforced on an
        already-existing directory when its permission bits differ.
    uid: the uid to set.  Ignored if not set, None or -1, or if the
        directory is already owned by this uid.  Must be int.
        Works only on unix/unix like systems.
    gid: the gid to set, same rules as uid.
    '''
    proc_dir = os.path.join(cachedir, u'proc')
    requested_mode = kwargs.pop(u'mode', None)
    mode_kwargs = {} if requested_mode is None else {u'mode': requested_mode}
    if not os.path.isdir(proc_dir):
        # proc dir is not present, create it with the mode settings
        os.makedirs(proc_dir, **mode_kwargs)

    dir_stat = os.stat(proc_dir)
    if mode_kwargs:
        # An explicit dir mode was requested; fix up the permission bits
        # if what is on disk does not match.
        current_bits = S_IMODE(dir_stat.st_mode)
        if current_bits != mode_kwargs[u'mode']:
            os.chmod(proc_dir, (dir_stat.st_mode ^ current_bits) | mode_kwargs[u'mode'])

    if hasattr(os, u'chown'):
        # only on unix/unix like systems: apply ownership changes when a
        # real uid/gid was requested and differs from the current owner.
        uid = kwargs.pop(u'uid', -1)
        gid = kwargs.pop(u'gid', -1)
        # if uid and gid are both -1 then go ahead with no changes at all
        if (dir_stat.st_uid != uid or dir_stat.st_gid != gid) and \
                [i for i in (uid, gid) if i != -1]:
            os.chown(proc_dir, uid, gid)

    return proc_dir
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []

    def _accepts(name):
        # A keyword is acceptable when the function takes **kwargs or
        # declares `name` as a positional/keyword parameter.
        return bool(argspec.keywords) or name in argspec.args

    for arg in args:
        if isinstance(arg, dict) and arg.pop(u'__kwarg__', False) is True:
            # Explicitly-marked kwarg dict: distribute each entry into
            # either the kwargs or the invalid list.
            for key, val in six.iteritems(arg):
                if _accepts(key):
                    _kwargs[key] = val
                else:
                    invalid_kwargs.append(u'{0}={1}'.format(key, val))
            continue
        # Not a kwarg dict; see whether the CLI parser recognizes the
        # argument as a `key=value` string.
        string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1]  # pylint: disable=W0632
        if not string_kwarg:
            # Plain positional argument.
            _args.append(arg)
        elif _accepts(next(six.iterkeys(string_kwarg))):
            _kwargs.update(string_kwarg)
        else:
            # **kwargs not in argspec and parsed argument name not in the
            # list of positional arguments: invalid keyword argument.
            for key, val in six.iteritems(string_kwarg):
                invalid_kwargs.append(u'{0}={1}'.format(key, val))

    if invalid_kwargs and not ignore_invalid:
        salt.utils.args.invalid_kwargs(invalid_kwargs)

    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs[u'__pub_{0}'.format(key)] = val

    return _args, _kwargs
def eval_master_func(opts):
    '''
    Evaluate master function if master type is 'func'
    and save its result in opts['master'].

    The opts['master'] value is expected to be a 'module.function' path;
    the module is loaded raw and the function's return value (a string
    or a list) becomes the new master address.  Exits the process on a
    missing module or an invalid return type.  No-op when the function
    was already evaluated (guarded by '__master_func_evaluated').
    '''
    if u'__master_func_evaluated' not in opts:
        # split module and function and try loading the module
        mod_fun = opts[u'master']
        # NOTE(review): a value with no '.' raises an uncaught ValueError
        # here -- presumably validated upstream; confirm before changing.
        mod, fun = mod_fun.split(u'.')
        try:
            master_mod = salt.loader.raw_mod(opts, mod, fun)
            if not master_mod:
                raise KeyError
            # we take whatever the module returns as master address
            opts[u'master'] = master_mod[mod_fun]()
            # Check for valid types
            if not isinstance(opts[u'master'], (six.string_types, list)):
                raise TypeError
            opts[u'__master_func_evaluated'] = True
        except KeyError:
            log.error(u'Failed to load module %s', mod_fun)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        except TypeError:
            # BUG FIX: the message previously claimed only strings are
            # valid, but the isinstance check above also accepts lists.
            log.error(u'%s returned from %s is not a string or list', opts[u'master'], mod_fun)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        log.info(u'Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
    '''
    Centralized master event function: map a symbolic event name to its
    event tag.

    :param type: one of 'connected', 'disconnected', 'failback', 'alive'
    :param master: for 'alive' events, a master id appended to the tag
    :return: the tag string, or None for unknown event names
    '''
    tag_for = {
        u'connected': u'__master_connected',
        u'disconnected': u'__master_disconnected',
        u'failback': u'__master_failback',
        u'alive': u'__master_alive',
    }
    base = tag_for.get(type, None)
    if master is not None and type == u'alive':
        # Per-master alive events carry the master id in the tag.
        return u'{0}_{1}'.format(base, master)
    return base
class MinionBase(object):
    '''
    Shared base for the minion flavors: holds the opts dict and the logic
    for evaluating and connecting to a master, including the
    failover/distributed/func master_type handling.
    '''

    def __init__(self, opts):
        self.opts = opts

    @staticmethod
    def process_schedule(minion, loop_interval):
        '''
        Run the minion's scheduler once and return the (possibly lowered)
        loop interval; returns None when the scheduler is missing.
        '''
        try:
            if hasattr(minion, u'schedule'):
                minion.schedule.eval()
            else:
                log.error(u'Minion scheduler not initialized. Scheduled jobs will not be run.')
                return
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    u'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            log.error(u'Exception %s occurred in scheduled job', exc)
        return loop_interval

    def process_beacons(self, functions):
        '''
        Evaluate all of the configured beacons, grab the config again in case
        the pillar or grains changed.  Returns the beacon results or [].
        '''
        if u'config.merge' in functions:
            b_conf = functions[u'config.merge'](u'beacons', self.opts[u'beacons'], omit_opts=True)
            if b_conf:
                # NOTE(review): self.beacons is assumed to be set up by the
                # subclass before this is called.
                return self.beacons.process(b_conf, self.opts[u'grains'])  # pylint: disable=no-member
        return []

    @tornado.gen.coroutine
    def eval_master(self,
                    opts,
                    timeout=60,
                    safe=True,
                    failed=False,
                    failback=False):
        '''
        Evaluates and returns a tuple of the current master address and the pub_channel.

        In standard mode, just creates a pub_channel with the given master address.

        With master_type=func evaluates the current master address from the given
        module and then creates a pub_channel.

        With master_type=failover takes the list of masters and loops through them.
        The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minions initialization
        phase (for example from the minions main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.
        '''
        # return early if we are not connecting to a master
        if opts[u'master_type'] == u'disable':
            log.warning(u'Master is set to disable, skipping connection')
            self.connected = False
            raise tornado.gen.Return((None, None))
        # check if master_type was altered from its default
        elif opts[u'master_type'] != u'str' and opts[u'__role'] != u'syndic':
            # check for a valid keyword
            if opts[u'master_type'] == u'func':
                eval_master_func(opts)
            # if failover or distributed is set, master has to be of type list
            elif opts[u'master_type'] in (u'failover', u'distributed'):
                if isinstance(opts[u'master'], list):
                    log.info(
                        u'Got list of available master addresses: %s',
                        opts[u'master']
                    )

                    if opts[u'master_type'] == u'distributed':
                        master_len = len(opts[u'master'])
                        if master_len > 1:
                            # Deterministically pick a preferred master for
                            # this minion id, keeping the rest as alternates.
                            # (A dead `secondary_masters` assignment that was
                            # never read has been removed here.)
                            master_idx = crc32(opts[u'id']) % master_len
                            try:
                                preferred_masters = opts[u'master']
                                preferred_masters[0] = opts[u'master'][master_idx]
                                preferred_masters[1:] = [m for m in opts[u'master'] if m != preferred_masters[0]]
                                opts[u'master'] = preferred_masters
                                log.info(u'Distributed to the master at \'{0}\'.'.format(opts[u'master'][0]))
                            except (KeyError, AttributeError, TypeError):
                                log.warning(u'Failed to distribute to a specific master.')
                        else:
                            log.warning(u'master_type = distributed needs more than 1 master.')

                    if opts[u'master_shuffle']:
                        if opts[u'master_failback']:
                            # Keep the primary in place; shuffle only the
                            # alternates so failback still targets master[0].
                            secondary_masters = opts[u'master'][1:]
                            shuffle(secondary_masters)
                            opts[u'master'][1:] = secondary_masters
                        else:
                            shuffle(opts[u'master'])
                    opts[u'auth_tries'] = 0
                    if opts[u'master_failback'] and opts[u'master_failback_interval'] == 0:
                        opts[u'master_failback_interval'] = opts[u'master_alive_interval']
                # if opts['master'] is a str and we have never created opts['master_list']
                elif isinstance(opts[u'master'], six.string_types) and (u'master_list' not in opts):
                    # We have a string, but a list was what was intended. Convert.
                    # See issue 23611 for details
                    opts[u'master'] = [opts[u'master']]
                elif opts[u'__role'] == u'syndic':
                    log.info(u'Syndic setting master_syndic to \'%s\'', opts[u'master'])

                # if failed=True, the minion was previously connected
                # we're probably called from the minions main-event-loop
                # because a master connection loss was detected. remove
                # the possibly failed master from the list of masters.
                elif failed:
                    if failback:
                        # failback list of masters to original config
                        opts[u'master'] = opts[u'master_list']
                    else:
                        log.info(
                            u'Moving possibly failed master %s to the end of '
                            u'the list of masters', opts[u'master']
                        )
                        if opts[u'master'] in opts[u'local_masters']:
                            # create new list of master with the possibly failed
                            # one moved to the end
                            failed_master = opts[u'master']
                            opts[u'master'] = [x for x in opts[u'local_masters'] if opts[u'master'] != x]
                            opts[u'master'].append(failed_master)
                        else:
                            opts[u'master'] = opts[u'master_list']
                else:
                    msg = (u'master_type set to \'failover\' but \'master\' '
                           u'is not of type list but of type '
                           u'{0}'.format(type(opts[u'master'])))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)

                # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
                # See issue 21082 for details
                if opts[u'retry_dns'] and opts[u'master_type'] == u'failover':
                    msg = (u'\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
                           u'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
                    log.critical(msg)
                    opts[u'retry_dns'] = 0
            else:
                msg = (u'Invalid keyword \'{0}\' for variable '
                       u'\'master_type\''.format(opts[u'master_type']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)

        # FIXME: if SMinion don't define io_loop, it can't switch master see #29088
        # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
        # (The channel factories will set a default if the kwarg isn't passed)
        factory_kwargs = {u'timeout': timeout, u'safe': safe}
        if getattr(self, u'io_loop', None):
            factory_kwargs[u'io_loop'] = self.io_loop  # pylint: disable=no-member

        tries = opts.get(u'master_tries', 1)
        attempts = 0

        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        if isinstance(opts[u'master'], list):
            conn = False
            # shuffle the masters and then loop through them
            opts[u'local_masters'] = copy.copy(opts[u'master'])
            if opts[u'random_master']:
                shuffle(opts[u'local_masters'])
            last_exc = None
            opts[u'master_uri_list'] = list()

            # This sits outside of the connection loop below because it needs to set
            # up a list of master URIs regardless of which masters are available
            # to connect _to_. This is primarily used for masterless mode, when
            # we need a list of master URIs to fire calls back to.
            for master in opts[u'local_masters']:
                opts[u'master'] = master
                opts.update(prep_ip_port(opts))
                opts[u'master_uri_list'].append(resolve_dns(opts)[u'master_uri'])

            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts[u'acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        u'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        u'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                for master in opts[u'local_masters']:
                    opts[u'master'] = master
                    opts.update(prep_ip_port(opts))
                    opts.update(resolve_dns(opts))

                    # on first run, update self.opts with the whole master list
                    # to enable a minion to re-use old masters if they get fixed
                    if u'master_list' not in opts:
                        opts[u'master_list'] = copy.copy(opts[u'local_masters'])

                    self.opts = opts

                    try:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
                        yield pub_channel.connect()
                        conn = True
                        break
                    except SaltClientError as exc:
                        last_exc = exc
                        # BUG FIX: message previously read "trying next next master"
                        log.info(
                            u'Master %s could not be reached, trying '
                            u'next master (if any)', opts[u'master']
                        )
                        continue

                if not conn:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        self.opts[u'master'] = copy.copy(self.opts[u'local_masters'])
                        log.error(
                            u'No master could be reached or all masters '
                            u'denied the minion\'s connection attempt.'
                        )
                        # If the code reaches this point, 'last_exc'
                        # should already be set.
                        raise last_exc  # pylint: disable=E0702
                else:
                    self.tok = pub_channel.auth.gen_token(u'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts[u'master'], pub_channel))

        # single master sign in
        else:
            if opts[u'random_master']:
                log.warning(u'random_master is True but there is only one master specified. Ignoring.')
            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts[u'acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        u'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        u'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                opts.update(prep_ip_port(opts))
                opts.update(resolve_dns(opts))
                try:
                    if self.opts[u'transport'] == u'detect':
                        # Transport auto-detection: try each available
                        # transport until one authenticates.
                        self.opts[u'detect_mode'] = True
                        for trans in (u'zeromq', u'tcp'):
                            if trans == u'zeromq' and not HAS_ZMQ:
                                continue
                            self.opts[u'transport'] = trans
                            pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                            yield pub_channel.connect()
                            if not pub_channel.auth.authenticated:
                                continue
                            del self.opts[u'detect_mode']
                            break
                    else:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                        yield pub_channel.connect()
                    self.tok = pub_channel.auth.gen_token(u'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts[u'master'], pub_channel))
                except SaltClientError as exc:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        raise exc
class SMinion(MinionBase):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc.  The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        opts[u'grains'] = salt.loader.grains(opts)
        super(SMinion, self).__init__(opts)

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get(u'file_client', u'remote') == u'remote'
                or self.opts.get(u'use_master_when_local', False)):
            # Run eval_master synchronously on a private io_loop.
            # NOTE(review): failed=True is passed here even though no
            # connection failed yet -- presumably to force a fresh master
            # pick; confirm before relying on it.
            if self.opts[u'transport'] == u'zeromq' and HAS_ZMQ:
                io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
            else:
                io_loop = LOOP_CLASS.current()
            io_loop.run_sync(
                lambda: self.eval_master(self.opts, failed=True)
            )
        self.gen_modules(initial_load=True)

        # If configured, cache pillar data on the minion
        if self.opts[u'file_client'] == u'remote' and self.opts.get(u'minion_pillar_cache', False):
            import yaml
            from salt.utils.yamldumper import SafeOrderedDumper
            pdir = os.path.join(self.opts[u'cachedir'], u'pillar')
            if not os.path.isdir(pdir):
                # 0o700: pillar may contain secrets, restrict to owner
                os.makedirs(pdir, 0o700)
            ptop = os.path.join(pdir, u'top.sls')
            if self.opts[u'environment'] is not None:
                penv = self.opts[u'environment']
            else:
                penv = u'base'
            # Minimal top file that routes this minion's id to the cached
            # pillar sls written below.
            cache_top = {penv: {self.opts[u'id']: [u'cache']}}
            with salt.utils.files.fopen(ptop, u'wb') as fp_:
                fp_.write(
                    yaml.dump(
                        cache_top,
                        Dumper=SafeOrderedDumper
                    )
                )
            os.chmod(ptop, 0o600)
            cache_sls = os.path.join(pdir, u'cache.sls')
            with salt.utils.files.fopen(cache_sls, u'wb') as fp_:
                fp_.write(
                    yaml.dump(
                        self.opts[u'pillar'],
                        Dumper=SafeOrderedDumper
                    )
                )
            os.chmod(cache_sls, 0o600)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        # Pillar must be compiled before the loaders run so that modules
        # can see __pillar__.
        self.opts[u'pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts[u'grains'],
            self.opts[u'id'],
            self.opts[u'environment'],
            pillarenv=self.opts.get(u'pillarenv'),
        ).compile_pillar()
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
        self.serializers = salt.loader.serializers(self.opts)
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
        # TODO: remove
        self.function_errors = {}  # Keep the funcs clean
        self.states = salt.loader.states(self.opts,
                                         self.functions,
                                         self.utils,
                                         self.serializers)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        self.functions[u'sys.reload_modules'] = self.gen_modules
        self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None,
            ignore_config_errors=True):
        # Re-read the minion config from disk, then overlay the caller's
        # opts on top of it.
        self.opts = salt.config.minion_config(opts[u'conf_file'], ignore_config_errors=ignore_config_errors)
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts[u'grains'] = salt.loader.grains(opts)
        # Pillar is deliberately left empty -- see the class docstring.
        self.opts[u'pillar'] = {}
        # Flags controlling which loader subsystems gen_modules builds.
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(
            self.opts,
            utils=self.utils,
            whitelist=self.whitelist,
            initial_load=initial_load)
        self.serializers = salt.loader.serializers(self.opts)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(self.opts,
                                             self.functions,
                                             self.utils,
                                             self.serializers)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matcher = Matcher(self.opts, self.functions)
        self.functions[u'sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts[u'acceptance_wait_time']
self.max_auth_wait = self.opts[u'acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
self.process_manager = ProcessManager(name=u'MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event(u'minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe(u'')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts[u'master']
if (self.opts[u'master_type'] in (u'failover', u'distributed')) or not isinstance(self.opts[u'master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts[u'master'] = master
s_opts[u'multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts[u'auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name=u'salt.loader.{0}'.format(s_opts[u'master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts[u'acceptance_wait_time']
failed = False
while True:
try:
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
except SaltClientError as exc:
failed = True
log.error(
u'Error while bringing up minion for multi-master. Is '
u'master at %s responding?', minion.opts[u'master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
failed = True
log.critical(
u'Unexpected error while connecting to %s',
minion.opts[u'master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
    '''
    Pass in the options dict
    '''
    # this means that the parent class doesn't know *which* master we connect to
    super(Minion, self).__init__(opts)
    self.timeout = timeout
    self.safe = safe
    self._running = None
    self.win_proc = []
    self.loaded_base_name = loaded_base_name
    self.connected = False
    self.restart = False
    # Flag meaning minion has finished initialization including first connect to the master.
    # True means the Minion is fully functional and ready to handle events.
    self.ready = False
    self.jid_queue = jid_queue or []

    # Use the caller's io_loop when given; otherwise pick up the current
    # loop (installing the ZMQ ioloop first if ZMQ is available).
    if io_loop is None:
        if HAS_ZMQ:
            zmq.eventloop.ioloop.install()
        self.io_loop = LOOP_CLASS.current()
    else:
        self.io_loop = io_loop

    # Warn if ZMQ < 3.2
    if HAS_ZMQ:
        try:
            zmq_version_info = zmq.zmq_version_info()
        except AttributeError:
            # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
            # using zmq.zmq_version() and build a version info tuple.
            zmq_version_info = tuple(
                [int(x) for x in zmq.zmq_version().split(u'.')]  # pylint: disable=no-member
            )
        if zmq_version_info < (3, 2):
            log.warning(
                u'You have a version of ZMQ less than ZMQ 3.2! There are '
                u'known connection keep-alive issues with ZMQ < 3.2 which '
                u'may result in loss of contact with minions. Please '
                u'upgrade your ZMQ!'
            )
    # Late setup the of the opts grains, so we can log from the grains
    # module.  If this is a proxy, however, we need to init the proxymodule
    # before we can get the grains.  We do this for proxies in the
    # post_master_init
    if not salt.utils.platform.is_proxy():
        self.opts[u'grains'] = salt.loader.grains(opts)

    log.info(u'Creating minion process manager')

    # Optional randomized startup delay to spread thundering herds.
    if self.opts[u'random_startup_delay']:
        sleep_time = random.randint(0, self.opts[u'random_startup_delay'])
        log.info(
            u'Minion sleeping for %s seconds due to configured '
            u'startup_delay between 0 and %s seconds',
            sleep_time, self.opts[u'random_startup_delay']
        )
        time.sleep(sleep_time)

    self.process_manager = ProcessManager(name=u'MinionProcessManager')
    # NOTE(review): `async` is a reserved word from Python 3.7 on; the
    # kwarg name is dictated by ProcessManager.run's signature.
    self.io_loop.spawn_callback(self.process_manager.run, async=True)
    # We don't have the proxy setup yet, so we can't start engines
    # Engines need to be able to access __proxy__
    if not salt.utils.platform.is_proxy():
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                    self.process_manager)

    # Install the SIGINT/SIGTERM handlers if not done so far
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, self._handle_signals)

    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
    '''
    SIGINT/SIGTERM handler: stop the run loop, tear down all child
    processes, then exit cleanly.
    '''
    self._running = False
    # escalate the signals to the process manager
    self.process_manager.stop_restarting()
    self.process_manager.send_signal_to_processes(signum)
    # kill any remaining processes
    self.process_manager.kill_children()
    # Give children a moment to die before exiting ourselves.
    time.sleep(1)
    sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
    '''
    Block until we are connected to a master

    Runs the io_loop until connect_master's future resolves (its done
    callback stops the loop), or until `timeout` seconds elapse.

    :raises SaltDaemonNotRunning: when `timeout` is given and the
        connection did not complete in time
    '''
    self._sync_connect_master_success = False
    log.debug(u"sync_connect_master")

    def on_connect_master_future_done(future):
        # Mark success and break out of io_loop.start() below.
        self._sync_connect_master_success = True
        self.io_loop.stop()

    self._connect_master_future = self.connect_master(failed=failed)
    # finish connecting to master
    self._connect_master_future.add_done_callback(on_connect_master_future_done)
    if timeout:
        self.io_loop.call_later(timeout, self.io_loop.stop)
    try:
        self.io_loop.start()
    except KeyboardInterrupt:
        self.destroy()
    # I made the following 3 line oddity to preserve traceback.
    # Please read PR #23978 before changing, hopefully avoiding regressions.
    # Good luck, we're all counting on you.  Thanks.
    # NOTE(review): six.reraise() raises by itself, so the outer `raise`
    # never sees a value -- left as-is per the PR note above.
    future_exception = self._connect_master_future.exc_info()
    if future_exception:
        # This needs to be re-raised to preserve restart_on_error behavior.
        raise six.reraise(*future_exception)
    if timeout and self._sync_connect_master_success is False:
        raise SaltDaemonNotRunning(u'Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
    '''
    Return a future which will complete when you are connected to a master

    :param failed: forwarded to eval_master (marks the previous master
        attempt as failed so a different one may be chosen)
    '''
    # eval_master resolves which master to use and opens the pub channel
    master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
    # finish minion setup (modules, pillar, scheduler) for that master
    yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
    '''
    Function to finish init after connecting to a master

    This is primarily loading modules, pillars, etc. (since they need
    to know which master they connected to). Also installs the default
    scheduled jobs (mine update, master_alive ping, master failback).

    If this function is changed, please check ProxyMinion._post_master_init
    to see if those changes need to be propagated.

    Minions and ProxyMinions need significantly different post master setups,
    which is why the differences are not factored out into separate helper
    functions.

    :param master: the master this minion just connected to
    '''
    if self.connected:
        self.opts[u'master'] = master
        # Initialize pillar before loader to make pillar accessible in modules
        self.opts[u'pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts[u'grains'],
            self.opts[u'id'],
            self.opts[u'environment'],
            pillarenv=self.opts.get(u'pillarenv')
        ).compile_pillar()
    # load execution modules, returners and executors for this minion
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matcher = Matcher(self.opts, self.functions)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.get_uid(user=self.opts.get(u'user', None))
    self.proc_dir = get_proc_dir(self.opts[u'cachedir'], uid=uid)
    self.schedule = salt.utils.schedule.Schedule(
        self.opts,
        self.functions,
        self.returners,
        cleanup=[master_event(type=u'alive')])
    # add default scheduling jobs to the minions scheduler
    if self.opts[u'mine_enabled'] and u'mine.update' in self.functions:
        self.schedule.add_job({
            u'__mine_interval':
            {
                u'function': u'mine.update',
                u'minutes': self.opts[u'mine_interval'],
                u'jid_include': True,
                u'maxrunning': 2,
                u'return_job': self.opts.get(u'mine_return_job', False)
            }
        }, persist=True)
        log.info(u'Added mine.update to scheduler')
    else:
        self.schedule.delete_job(u'__mine_interval', persist=True)
    # add master_alive job if enabled (not meaningful for the tcp transport)
    if (self.opts[u'transport'] != u'tcp' and
            self.opts[u'master_alive_interval'] > 0 and
            self.connected):
        self.schedule.add_job({
            master_event(type=u'alive', master=self.opts[u'master']):
            {
                u'function': u'status.master',
                u'seconds': self.opts[u'master_alive_interval'],
                u'jid_include': True,
                u'maxrunning': 1,
                u'return_job': False,
                u'kwargs': {u'master': self.opts[u'master'],
                            u'connected': True}
            }
        }, persist=True)
        # when failed over to a lower-priority master, periodically ping
        # the top master so we can fail back to it
        if self.opts[u'master_failback'] and \
                u'master_list' in self.opts and \
                self.opts[u'master'] != self.opts[u'master_list'][0]:
            self.schedule.add_job({
                master_event(type=u'failback'):
                {
                    u'function': u'status.ping_master',
                    u'seconds': self.opts[u'master_failback_interval'],
                    u'jid_include': True,
                    u'maxrunning': 1,
                    u'return_job': False,
                    u'kwargs': {u'master': self.opts[u'master_list'][0]}
                }
            }, persist=True)
        else:
            self.schedule.delete_job(master_event(type=u'failback'), persist=True)
    else:
        self.schedule.delete_job(master_event(type=u'alive', master=self.opts[u'master']), persist=True)
        self.schedule.delete_job(master_event(type=u'failback'), persist=True)
    # remember grains so grains_refresh can detect changes later
    self.grains_cache = self.opts[u'grains']
    self.ready = True
def _return_retry_timer(self):
    '''
    Based on the minion configuration, either return a randomized timer or
    just return the value of the return_retry_timer.

    When ``return_retry_timer_max`` is configured, a value is drawn
    uniformly between ``return_retry_timer`` and that maximum; invalid
    (negative) bounds fall back to the compiled-in default.
    '''
    msg = u'Minion return retry timer set to {0} seconds'
    # future lint: disable=str-format-in-logging
    if not self.opts.get(u'return_retry_timer_max'):
        # no randomization requested: use the configured value as-is
        log.debug(msg.format(self.opts.get(u'return_retry_timer')))
        return self.opts.get(u'return_retry_timer')
    try:
        random_retry = randint(self.opts[u'return_retry_timer'], self.opts[u'return_retry_timer_max'])
    except ValueError:
        # Catch wiseguys using negative integers here
        log.error(
            u'Invalid value (return_retry_timer: %s or '
            u'return_retry_timer_max: %s). Both must be positive '
            u'integers.',
            self.opts[u'return_retry_timer'],
            self.opts[u'return_retry_timer_max'],
        )
        log.debug(msg.format(DEFAULT_MINION_OPTS[u'return_retry_timer']))
        return DEFAULT_MINION_OPTS[u'return_retry_timer']
    log.debug(msg.format(random_retry) + u' (randomized)')
    return random_retry
    # future lint: enable=str-format-in-logging
def _prep_mod_opts(self):
    '''
    Returns a copy of the opts with key bits stripped out

    The ``logger`` entry is dropped because it cannot be serialized or
    safely shared with loaded modules.
    '''
    return dict(
        (key, val) for key, val in six.iteritems(self.opts)
        if key != u'logger'
    )
def _load_modules(self, force_refresh=False, notify=False, grains=None):
    '''
    Return the functions and the returners loaded up from the loader
    module

    :param force_refresh: force a re-read of the grains
    :param notify: forwarded to the loader to fire a modules-refreshed event
    :param grains: when not None, skip regenerating grains
    :return: tuple of (functions, returners, errors, executors)
    '''
    # if this is a *nix system AND modules_max_memory is set, lets enforce
    # a memory limit on module imports
    # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
    modules_max_memory = False
    if self.opts.get(u'modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
        log.debug(
            u'modules_max_memory set, enforcing a maximum of %s',
            self.opts[u'modules_max_memory']
        )
        modules_max_memory = True
        old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
        # NOTE: access .rss/.vms by attribute instead of tuple-unpacking;
        # psutil >= 4.0 returns a namedtuple with additional fields on most
        # platforms, which made ``rss, vms = ...memory_info()`` raise
        # ValueError (too many values to unpack).
        mem_info = psutil.Process(os.getpid()).memory_info()
        mem_limit = mem_info.rss + mem_info.vms + self.opts[u'modules_max_memory']
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    elif self.opts.get(u'modules_max_memory', -1) > 0:
        if not HAS_PSUTIL:
            log.error(u'Unable to enforce modules_max_memory because psutil is missing')
        if not HAS_RESOURCE:
            log.error(u'Unable to enforce modules_max_memory because resource is missing')
    # This might be a proxy minion
    if hasattr(self, u'proxy'):
        proxy = self.proxy
    else:
        proxy = None
    if grains is None:
        self.opts[u'grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
    self.utils = salt.loader.utils(self.opts, proxy=proxy)
    if self.opts.get(u'multimaster', False):
        # deep-copy opts so per-master loaders do not share mutable state
        s_opts = copy.deepcopy(self.opts)
        functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                            loaded_base_name=self.loaded_base_name, notify=notify)
    else:
        functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
    returners = salt.loader.returners(self.opts, functions, proxy=proxy)
    errors = {}
    if u'_errors' in functions:
        errors = functions[u'_errors']
        functions.pop(u'_errors')
    # we're done, reset the limits!
    if modules_max_memory is True:
        resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
    executors = salt.loader.executors(self.opts, functions, proxy=proxy)
    return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
    '''
    Synchronously send a request payload to the master over the
    transport channel, optionally signing it first.

    :param load: dict payload to send
    :param timeout: seconds to wait for the reply
    :return: the channel's reply
    '''
    if self.opts[u'minion_sign_messages']:
        log.trace(u'Signing event to be published onto the bus.')
        minion_privkey_path = os.path.join(self.opts[u'pki_dir'], u'minion.pem')
        sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
        load[u'sig'] = sig
    channel = salt.transport.Channel.factory(self.opts)
    return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
    '''
    Asynchronous counterpart of _send_req_sync: send a (possibly signed)
    request payload to the master and yield the reply.

    :param load: dict payload to send
    :param timeout: seconds to wait for the reply
    '''
    if self.opts[u'minion_sign_messages']:
        log.trace(u'Signing event to be published onto the bus.')
        minion_privkey_path = os.path.join(self.opts[u'pki_dir'], u'minion.pem')
        sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
        load[u'sig'] = sig
    channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
    ret = yield channel.send(load, timeout=timeout)
    raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
    '''
    Fire an event on the master, or drop message if unable to send.

    :param data: event payload (used together with ``tag``)
    :param tag: event tag (required when sending ``data``)
    :param events: list of pre-built events (takes precedence over data/tag)
    :param pretag: namespace prefix added on the master side
    :param timeout: seconds to wait for the send to complete
    :param sync: send synchronously when True, otherwise fire-and-forget
    :param timeout_handler: optional callback used on async timeouts
    :return: True on (apparent) success, False on failure, None when
        there was nothing to send
    '''
    load = {u'id': self.opts[u'id'],
            u'cmd': u'_minion_event',
            u'pretag': pretag,
            u'tok': self.tok}
    if events:
        load[u'events'] = events
    elif data and tag:
        load[u'data'] = data
        load[u'tag'] = tag
    elif not data and tag:
        load[u'data'] = {}
        load[u'tag'] = tag
    else:
        # nothing to send (no events and no tag)
        return
    if sync:
        try:
            self._send_req_sync(load, timeout)
        except salt.exceptions.SaltReqTimeoutError:
            log.info(u'fire_master failed: master could not be contacted. Request timed out.')
            return False
        except Exception:
            log.info(u'fire_master failed: %s', traceback.format_exc())
            return False
    else:
        if timeout_handler is None:
            def handle_timeout(*_):
                log.info(u'fire_master failed: master could not be contacted. Request timed out.')
                return True
            timeout_handler = handle_timeout
        # ExceptionStackContext routes async timeouts to timeout_handler
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
    return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Deduplicates jobs by jid, optionally throttles on
    ``process_count_max``, then dispatches the job to a worker process
    (or thread, when multiprocessing is disabled).

    :param data: decoded job payload (must contain ``fun`` and ``jid``)
    '''
    if u'user' in data:
        log.info(
            u'User %s Executing command %s with jid %s',
            data[u'user'], data[u'fun'], data[u'jid']
        )
    else:
        log.info(
            u'Executing command %s with jid %s',
            data[u'fun'], data[u'jid']
        )
    log.debug(u'Command details %s', data)
    # Don't duplicate jobs
    log.trace(u'Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data[u'jid'] in self.jid_queue:
            return
        else:
            self.jid_queue.append(data[u'jid'])
            if len(self.jid_queue) > self.opts[u'minion_jid_queue_hwm']:
                self.jid_queue.pop(0)
    if isinstance(data[u'fun'], six.string_types):
        if data[u'fun'] == u'sys.reload_modules':
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    # default to -1 so a missing option disables throttling instead of
    # raising TypeError on ``None > 0`` (Python 3)
    process_count_max = self.opts.get('process_count_max', -1)
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            # log.warn is a deprecated alias; use log.warning with lazy args
            log.warning(
                u'Maximum number of processes reached while executing jid %s, waiting...',
                data[u'jid']
            )
            yield tornado.gen.sleep(10)
            process_count = len(salt.utils.minion.running(self.opts))
    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get(u'multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith(u'win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingMultiprocessingProcess(
                target=self._target, args=(instance, self.opts, data, self.connected)
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data[u'jid']
        )
    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()
    # TODO: remove the windows specific check?
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # we only want to join() immediately if we are daemonizing a process
        process.join()
    else:
        self.win_proc.append(process)
def ctx(self):
    '''
    Return a single context manager for the minion's data

    Combines the cloned context dicts of functions, returners and
    executors into one manager (contextlib.nested on Python 2, an
    ExitStack on Python 3).
    '''
    clones = (
        self.functions.context_dict.clone(),
        self.returners.context_dict.clone(),
        self.executors.context_dict.clone(),
    )
    if six.PY2:
        return contextlib.nested(*clones)
    exitstack = contextlib.ExitStack()
    for clone in clones:
        exitstack.enter_context(clone)
    return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
    '''
    Entry point executed inside the worker process/thread for a job.

    Rebuilds a minion instance when none was passed (Windows
    multiprocessing cannot pickle the instance), makes sure modules,
    serializer and proc dir exist, then runs the job via _thread_return
    or _thread_multi_return.

    :param minion_instance: existing minion instance or None
    :param opts: minion opts dict
    :param data: decoded job payload
    :param connected: whether the minion is connected to a master
    '''
    if not minion_instance:
        minion_instance = cls(opts)
        minion_instance.connected = connected
        if not hasattr(minion_instance, u'functions'):
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts[u'grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors
        if not hasattr(minion_instance, u'serial'):
            minion_instance.serial = salt.payload.Serial(opts)
        if not hasattr(minion_instance, u'proc_dir'):
            uid = salt.utils.get_uid(user=opts.get(u'user', None))
            minion_instance.proc_dir = (
                get_proc_dir(opts[u'cachedir'], uid=uid)
            )
    with tornado.stack_context.StackContext(minion_instance.ctx):
        # a list/tuple of functions means a multi-function job
        if isinstance(data[u'fun'], tuple) or isinstance(data[u'fun'], list):
            Minion._thread_multi_return(minion_instance, opts, data)
        else:
            Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs a single function job: writes the proc file, enforces blackout,
    executes the function through the configured executors, then returns
    the result to the master and any configured returners.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data[u'jid'])
    if opts[u'multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()
        salt.utils.daemonize_if(opts)
        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()
    salt.utils.appendproctitle(u'{0}._thread_return {1}'.format(cls.__name__, data[u'jid']))
    sdata = {u'pid': os.getpid()}
    sdata.update(data)
    log.info(u'Starting a new job with PID %s', sdata[u'pid'])
    # persist the running-job metadata so saltutil.running can see it
    with salt.utils.files.fopen(fn_, u'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {u'success': False}
    function_name = data[u'fun']
    if function_name in minion_instance.functions:
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):
                whitelist = minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != u'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            elif minion_instance.opts[u'grains'].get(u'minion_blackout', False):
                whitelist = minion_instance.opts[u'grains'].get(u'minion_blackout_whitelist', [])
                if function_name != u'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' '
                                          u'to False in pillar or grains to resume operations. Only '
                                          u'saltutil.refresh_pillar allowed in blackout mode.')
            func = minion_instance.functions[function_name]
            args, kwargs = load_args_and_kwargs(
                func,
                data[u'arg'],
                data)
            minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
            # executors run the function; 'direct_call' is the plain call
            executors = data.get(u'module_executors') or opts.get(u'module_executors', [u'direct_call'])
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError(u"Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get(u'sudo_user', u'') and executors[-1] != u'sudo':
                executors[-1] = u'sudo'  # replace the last one with sudo
            log.trace(u'Executors list %s', executors)  # pylint: disable=no-member
            # the first executor that yields a non-None result wins
            for name in executors:
                fname = u'{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError(u"Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break
            if isinstance(return_data, types.GeneratorType):
                # generator results stream progress events back to the master
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data[u'jid'], u'prog', opts[u'id'], str(ind)], u'job')
                    event_data = {u'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret[u'return'] = iret
            else:
                ret[u'return'] = return_data
            ret[u'retcode'] = minion_instance.functions.pack[u'__context__'].get(
                u'retcode',
                0
            )
            ret[u'success'] = True
        except CommandNotFoundError as exc:
            msg = u'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret[u'return'] = u'{0}: {1}'.format(msg, exc)
            ret[u'out'] = u'nested'
        except CommandExecutionError as exc:
            log.error(
                u'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret[u'return'] = u'ERROR: {0}'.format(exc)
            ret[u'out'] = u'nested'
        except SaltInvocationError as exc:
            log.error(
                u'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret[u'return'] = u'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret[u'out'] = u'nested'
        except TypeError as exc:
            msg = u'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__)
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret[u'return'] = msg
            ret[u'out'] = u'nested'
        except Exception:
            msg = u'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret[u'return'] = u'{0}: {1}'.format(msg, traceback.format_exc())
            ret[u'out'] = u'nested'
    else:
        # unknown function: return docs for near-matches plus an error hint
        docs = minion_instance.functions[u'sys.doc'](u'{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret[u'return'] = docs
        else:
            ret[u'return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret[u'return'] += u' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret[u'success'] = False
        ret[u'retcode'] = 254
        ret[u'out'] = u'nested'
    ret[u'jid'] = data[u'jid']
    ret[u'fun'] = data[u'fun']
    ret[u'fun_args'] = data[u'arg']
    if u'master_id' in data:
        ret[u'master_id'] = data[u'master_id']
    if u'metadata' in data:
        if isinstance(data[u'metadata'], dict):
            ret[u'metadata'] = data[u'metadata']
        else:
            log.warning(u'The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
    # Add default returners from minion config
    # Should have been coverted to comma-delimited string already
    if isinstance(opts.get(u'return'), six.string_types):
        if data[u'ret']:
            data[u'ret'] = u','.join((data[u'ret'], opts[u'return']))
        else:
            data[u'ret'] = opts[u'return']
    log.debug(u'minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data[u'ret'] and isinstance(data[u'ret'], six.string_types):
        if u'ret_config' in data:
            ret[u'ret_config'] = data[u'ret_config']
        if u'ret_kwargs' in data:
            ret[u'ret_kwargs'] = data[u'ret_kwargs']
        ret[u'id'] = opts[u'id']
        for returner in set(data[u'ret'].split(u',')):
            try:
                returner_str = u'{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        u'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:
                log.exception(
                    u'The return failed for job %s: %s', data[u'jid'], exc
                )
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Variant of _thread_return for compound jobs where ``data['fun']`` is
    a list of functions, each with its own args; results are collected
    per function (ordered lists when ``multifunc_ordered`` is set).
    '''
    salt.utils.appendproctitle(u'{0}._thread_multi_return {1}'.format(cls.__name__, data[u'jid']))
    multifunc_ordered = opts.get(u'multifunc_ordered', False)
    num_funcs = len(data[u'fun'])
    if multifunc_ordered:
        ret = {
            u'return': [None] * num_funcs,
            u'retcode': [None] * num_funcs,
            u'success': [False] * num_funcs
        }
    else:
        ret = {
            u'return': {},
            u'retcode': {},
            u'success': {}
        }
    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret[u'success'][data[u'fun'][ind]] = False
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):
                whitelist = minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if data[u'fun'][ind] != u'saltutil.refresh_pillar' and data[u'fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            elif minion_instance.opts[u'grains'].get(u'minion_blackout', False):
                whitelist = minion_instance.opts[u'grains'].get(u'minion_blackout_whitelist', [])
                if data[u'fun'][ind] != u'saltutil.refresh_pillar' and data[u'fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' '
                                          u'to False in pillar or grains to resume operations. Only '
                                          u'saltutil.refresh_pillar allowed in blackout mode.')
            func = minion_instance.functions[data[u'fun'][ind]]
            args, kwargs = load_args_and_kwargs(
                func,
                data[u'arg'][ind],
                data)
            minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
            if multifunc_ordered:
                ret[u'return'][ind] = func(*args, **kwargs)
                ret[u'retcode'][ind] = minion_instance.functions.pack[u'__context__'].get(
                    u'retcode',
                    0
                )
                ret[u'success'][ind] = True
            else:
                ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
                ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
                    u'retcode',
                    0
                )
                ret[u'success'][data[u'fun'][ind]] = True
        except Exception as exc:
            # one failing function must not abort the remaining ones;
            # store the traceback as that function's return value
            trb = traceback.format_exc()
            log.warning(u'The minion function caused an exception: %s', exc)
            if multifunc_ordered:
                ret[u'return'][ind] = trb
            else:
                ret[u'return'][data[u'fun'][ind]] = trb
        ret[u'jid'] = data[u'jid']
        ret[u'fun'] = data[u'fun']
        ret[u'fun_args'] = data[u'arg']
    if u'metadata' in data:
        ret[u'metadata'] = data[u'metadata']
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
    if data[u'ret']:
        if u'ret_config' in data:
            ret[u'ret_config'] = data[u'ret_config']
        if u'ret_kwargs' in data:
            ret[u'ret_kwargs'] = data[u'ret_kwargs']
        for returner in set(data[u'ret'].split(u',')):
            ret[u'id'] = opts[u'id']
            try:
                minion_instance.returners[u'{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                log.error(
                    u'The return failed for job %s: %s',
                    data[u'jid'], exc
                )
def _return_pub(self, ret, ret_cmd=u'_return', timeout=60, sync=True):
    '''
    Return the data from the executed command to the master server

    :param ret: result dict from the executed job
    :param ret_cmd: master-side command to use (``_return`` or
        ``_syndic_return``)
    :param timeout: seconds to wait for the master's acknowledgement
    :param sync: send synchronously when True
    :return: the channel's reply, or u'' when nothing was published
    '''
    jid = ret.get(u'jid', ret.get(u'__jid__'))
    fun = ret.get(u'fun', ret.get(u'__fun__'))
    if self.opts[u'multiprocessing']:
        # clean up the proc file written when the job started
        fn_ = os.path.join(self.proc_dir, jid)
        if os.path.isfile(fn_):
            try:
                os.remove(fn_)
            except (OSError, IOError):
                # The file is gone already
                pass
    log.info(u'Returning information for job: %s', jid)
    if ret_cmd == u'_syndic_return':
        load = {u'cmd': ret_cmd,
                u'id': self.opts[u'uid'],
                u'jid': jid,
                u'fun': fun,
                u'arg': ret.get(u'arg'),
                u'tgt': ret.get(u'tgt'),
                u'tgt_type': ret.get(u'tgt_type'),
                u'load': ret.get(u'__load__')}
        if u'__master_id__' in ret:
            load[u'master_id'] = ret[u'__master_id__']
        load[u'return'] = {}
        # keys starting with '__' are syndic-internal, not job results
        for key, value in six.iteritems(ret):
            if key.startswith(u'__'):
                continue
            load[u'return'][key] = value
    else:
        load = {u'cmd': ret_cmd,
                u'id': self.opts[u'id']}
        for key, value in six.iteritems(ret):
            load[key] = value
    if u'out' in ret:
        if isinstance(ret[u'out'], six.string_types):
            load[u'out'] = ret[u'out']
        else:
            log.error(
                u'Invalid outputter %s. This is likely a bug.',
                ret[u'out']
            )
    else:
        # fall back to the function's declared outputter, if any
        try:
            oput = self.functions[fun].__outputter__
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            if isinstance(oput, six.string_types):
                load[u'out'] = oput
    if self.opts[u'cache_jobs']:
        # Local job cache has been enabled
        salt.utils.minion.cache_jobs(self.opts, load[u'jid'], ret)
    if not self.opts[u'pub_ret']:
        return u''

    def timeout_handler(*_):
        log.warning(
            u'The minion failed to return the job information for job %s. '
            u'This is often due to the master being shut down or '
            u'overloaded. If the master is running, consider increasing '
            u'the worker_threads value.', jid
        )
        return True

    if sync:
        try:
            ret_val = self._send_req_sync(load, timeout=timeout)
        except SaltReqTimeoutError:
            timeout_handler()
            return u''
    else:
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
    log.trace(u'ret_val = %s', ret_val)  # pylint: disable=no-member
    return ret_val
def _state_run(self):
    '''
    Execute a state run based on information set in the minion config file

    Honors the ``startup_states`` option (``sls``, ``top``, or anything
    else meaning a full highstate) and dispatches the job through
    _handle_decoded_payload.
    '''
    if self.opts[u'startup_states']:
        if self.opts.get(u'master_type', u'str') == u'disable' and \
                self.opts.get(u'file_client', u'remote') == u'remote':
            # masterless-with-remote-files cannot fetch states; skip
            log.warning(
                u'Cannot run startup_states when \'master_type\' is set '
                u'to \'disable\' and \'file_client\' is set to '
                u'\'remote\'. Skipping.'
            )
        else:
            data = {u'jid': u'req', u'ret': self.opts.get(u'ext_job_cache', u'')}
            if self.opts[u'startup_states'] == u'sls':
                data[u'fun'] = u'state.sls'
                data[u'arg'] = [self.opts[u'sls_list']]
            elif self.opts[u'startup_states'] == u'top':
                data[u'fun'] = u'state.top'
                data[u'arg'] = [self.opts[u'top_file']]
            else:
                data[u'fun'] = u'state.highstate'
                data[u'arg'] = []
            # NOTE(review): _handle_decoded_payload is a coroutine and is
            # called here without yield — presumably scheduled via the
            # io_loop when the returned future is resolved; confirm.
            self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if u'__update_grains' not in self.opts.get(u'schedule', {}):
if u'schedule' not in self.opts:
self.opts[u'schedule'] = {}
self.opts[u'schedule'].update({
u'__update_grains':
{
u'function': u'event.fire',
u'args': [{}, u'grains_refresh'],
u'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
    '''
    Tell the master this minion is alive: fire the legacy
    ``minion_start`` event and its namespaced ``minion/<id>/start``
    duplicate.
    '''
    start_tags = (
        u'minion_start',
        tagify([self.opts[u'id'], u'start'], u'minion'),
    )
    for event_tag in start_tags:
        # asctime is evaluated per event, matching the historical payloads
        self._fire_master(
            u'Minion {0} started at {1}'.format(
                self.opts[u'id'],
                time.asctime()
            ),
            event_tag,
        )
def module_refresh(self, force_refresh=False, notify=False):
    '''
    Refresh the functions and returners.

    :param force_refresh: force regeneration of grains during the reload
    :param notify: fire a modules-refreshed event after loading
    '''
    log.debug(u'Refreshing modules. Notify=%s', notify)
    # function_errors (third element) is intentionally discarded here
    self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
    # keep the scheduler's references in sync with the reloaded modules
    self.schedule.functions = self.functions
    self.schedule.returners = self.returners
def beacons_refresh(self):
    '''
    Rebuild the beacon manager from the current opts and functions.
    '''
    log.debug(u'Refreshing beacons.')
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
    '''
    Refresh the pillar

    Recompiles pillar data from the master (when connected) and then
    reloads modules so they see the fresh pillar.

    :param force_refresh: forwarded to module_refresh
    '''
    if self.connected:
        log.debug(u'Refreshing pillar')
        try:
            self.opts[u'pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts[u'grains'],
                self.opts[u'id'],
                self.opts[u'environment'],
                pillarenv=self.opts.get(u'pillarenv'),
            ).compile_pillar()
        except SaltClientError:
            # Do not exit if a pillar refresh fails.
            log.error(u'Pillar data could not be refreshed. '
                      u'One or more masters may be down!')
    self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get(u'func', None)
name = data.get(u'name', None)
schedule = data.get(u'schedule', None)
where = data.get(u'where', None)
persist = data.get(u'persist', None)
if func == u'delete':
self.schedule.delete_job(name, persist)
elif func == u'add':
self.schedule.add_job(schedule, persist)
elif func == u'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == u'enable':
self.schedule.enable_schedule()
elif func == u'disable':
self.schedule.disable_schedule()
elif func == u'enable_job':
self.schedule.enable_job(name, persist)
elif func == u'run_job':
self.schedule.run_job(name)
elif func == u'disable_job':
self.schedule.disable_job(name, persist)
elif func == u'reload':
self.schedule.reload(schedule)
elif func == u'list':
self.schedule.list(where)
elif func == u'save_schedule':
self.schedule.save_schedule()
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get(u'func', None)
name = data.get(u'name', None)
beacon_data = data.get(u'beacon_data', None)
if func == u'add':
self.beacons.add_beacon(name, beacon_data)
elif func == u'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == u'delete':
self.beacons.delete_beacon(name)
elif func == u'enable':
self.beacons.enable_beacons()
elif func == u'disable':
self.beacons.disable_beacons()
elif func == u'enable_beacon':
self.beacons.enable_beacon(name)
elif func == u'disable_beacon':
self.beacons.disable_beacon(name)
elif func == u'list':
self.beacons.list_beacons()
elif func == u'list_available':
self.beacons.list_available_beacons()
elif func == u'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
    '''
    Set the salt-minion main process environment according to
    the data contained in the minion event data

    :param tag: the event tag (unused)
    :param data: dict with ``environ`` (mapping of vars to set) and the
        optional flags ``false_unsets`` and ``clear_all``
    :return: the result of environ.setenv, or False when no environ given
    '''
    environ = data.get(u'environ', None)
    if environ is None:
        return False
    false_unsets = data.get(u'false_unsets', False)
    clear_all = data.get(u'clear_all', False)
    # imported lazily to avoid a circular import at module load time
    import salt.modules.environ as mod_environ
    return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running

    ``self._running`` acts as a tri-state: None (never started), False
    (scheduled to stop), True (already running).
    '''
    if self._running is None:
        self._running = True
    elif self._running is False:
        log.error(
            u'This %s was scheduled to stop. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return
    elif self._running is True:
        log.error(
            u'This %s is already running. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return

    try:
        log.info(
            u'%s is starting as user \'%s\'',
            self.__class__.__name__, salt.utils.get_user()
        )
    except Exception as err:
        # Only windows is allowed to fail here. See #3189. Log as debug in
        # that case. Else, error.
        log.log(
            salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
            u'Failed to get the user who is starting %s',
            self.__class__.__name__,
            exc_info=err
        )
def _mine_send(self, tag, data):
    '''
    Send mine data to the master

    :param tag: the event tag (unused)
    :param data: mine payload; the auth token is injected before sending
    :return: the master's reply, or None when the request timed out
    '''
    channel = salt.transport.Channel.factory(self.opts)
    data[u'tok'] = self.tok
    try:
        ret = channel.send(data)
        return ret
    except SaltReqTimeoutError:
        # best-effort: mine updates are periodic, so a miss is tolerable
        log.warning(u'Unable to send mine data to master.')
        return None
@tornado.gen.coroutine
def handle_event(self, package):
    '''
    Handle an event from the epull_sock (all local minion events)

    Dispatches on the event tag prefix: refresh requests (modules,
    pillar, beacons, grains), schedule/beacon management, mine sends,
    forwarding events to the master, master connection state changes
    (disconnected/failback/connected), schedule returns, salt errors
    and auth credential updates.
    '''
    # Drop events that arrive before the minion finished connecting
    if not self.ready:
        raise tornado.gen.Return()
    tag, data = salt.utils.event.SaltEvent.unpack(package)
    log.debug(
        u'Minion of \'%s\' is handling event tag \'%s\'',
        self.opts[u'master'], tag
    )
    if tag.startswith(u'module_refresh'):
        self.module_refresh(
            force_refresh=data.get(u'force_refresh', False),
            notify=data.get(u'notify', False)
        )
    elif tag.startswith(u'pillar_refresh'):
        yield self.pillar_refresh(
            force_refresh=data.get(u'force_refresh', False)
        )
    elif tag.startswith(u'beacons_refresh'):
        self.beacons_refresh()
    elif tag.startswith(u'manage_schedule'):
        self.manage_schedule(tag, data)
    elif tag.startswith(u'manage_beacons'):
        self.manage_beacons(tag, data)
    elif tag.startswith(u'grains_refresh'):
        # Only refresh when forced or when the cached grains actually
        # differ from the current ones
        if (data.get(u'force_refresh', False) or
                self.grains_cache != self.opts[u'grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts[u'grains']
    elif tag.startswith(u'environ_setenv'):
        self.environ_setenv(tag, data)
    elif tag.startswith(u'_minion_mine'):
        self._mine_send(tag, data)
    elif tag.startswith(u'fire_master'):
        if self.connected:
            log.debug(u'Forwarding master event tag=%s', data[u'tag'])
            self._fire_master(data[u'data'], data[u'tag'], data[u'events'], data[u'pretag'])
    elif tag.startswith(master_event(type=u'disconnected')) or tag.startswith(master_event(type=u'failback')):
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type=u'disconnected')) and data[u'master'] != self.opts[u'master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type=u'failback')):
            # if the master failback event is not for the top master, raise an exception
            if data[u'master'] != self.opts[u'master_list'][0]:
                raise SaltException(u'Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data[u'master'], self.opts[u'master']))
            # if the master failback event is for the current master, raise an exception
            # NOTE(review): self.opts[u'master'] appears to be a string here,
            # so [0] compares against its first character only; this looks
            # like it was meant to be the full master name -- confirm
            elif data[u'master'] == self.opts[u'master'][0]:
                raise SaltException(u'Already connected to \'{0}\''.format(data[u'master']))
        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info(u'Connection to master %s lost', self.opts[u'master'])
            if self.opts[u'master_type'] != u'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts[u'transport'] != u'tcp':
                    schedule = {
                        u'function': u'status.master',
                        u'seconds': self.opts[u'master_alive_interval'],
                        u'jid_include': True,
                        u'maxrunning': 1,
                        u'return_job': False,
                        u'kwargs': {u'master': self.opts[u'master'],
                                    u'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts[u'transport'] != u'tcp':
                    self.schedule.delete_job(name=master_event(type=u'alive'))

                log.info(u'Trying to tune in to next master from master-list')

                if hasattr(self, u'pub_channel'):
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, u'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, u'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type=u'failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts[u'master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        u'Re-initialising subsystems for new master %s',
                        self.opts[u'master']
                    )
                    # put the current schedule into the new loaders
                    self.opts[u'schedule'] = self.schedule.option(u'schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info(u'Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts[u'transport'] != u'tcp':
                        schedule = {
                            u'function': u'status.master',
                            u'seconds': self.opts[u'master_alive_interval'],
                            u'jid_include': True,
                            u'maxrunning': 1,
                            u'return_job': False,
                            u'kwargs': {u'master': self.opts[u'master'],
                                        u'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']),
                                                 schedule=schedule)

                        # keep (or drop) the periodic failback ping to the
                        # top master depending on which master we ended up on
                        if self.opts[u'master_failback'] and u'master_list' in self.opts:
                            if self.opts[u'master'] != self.opts[u'master_list'][0]:
                                schedule = {
                                    u'function': u'status.ping_master',
                                    u'seconds': self.opts[u'master_failback_interval'],
                                    u'jid_include': True,
                                    u'maxrunning': 1,
                                    u'return_job': False,
                                    u'kwargs': {u'master': self.opts[u'master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type=u'failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type=u'failback'), persist=True)
                else:
                    # no master could be reached: restart the minion
                    self.restart = True
                    self.io_loop.stop()
    elif tag.startswith(master_event(type=u'connected')):
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts[u'master_type'] != u'failover':
            log.info(u'Connection to master %s re-established', self.opts[u'master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts[u'transport'] != u'tcp':
                schedule = {
                    u'function': u'status.master',
                    u'seconds': self.opts[u'master_alive_interval'],
                    u'jid_include': True,
                    u'maxrunning': 1,
                    u'return_job': False,
                    u'kwargs': {u'master': self.opts[u'master'],
                                u'connected': True}
                }
                self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']),
                                         schedule=schedule)
    elif tag.startswith(u'__schedule_return'):
        # reporting current connection with master
        if data[u'schedule'].startswith(master_event(type=u'alive', master=u'')):
            if data[u'return']:
                log.debug(
                    u'Connected to master %s',
                    data[u'schedule'].split(master_event(type=u'alive', master=u''))[1]
                )
        self._return_pub(data, ret_cmd=u'_return', sync=False)
    elif tag.startswith(u'_salt_error'):
        if self.connected:
            log.debug(u'Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)
    elif tag.startswith(u'salt/auth/creds'):
        key = tuple(data[u'key'])
        log.debug(
            u'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data[u'creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data[u'key'])] = data[u'creds']
def _fallback_cleanups(self):
    '''
    Best-effort cleanup of leaked children.

    Reaps any forked processes that slipped past normal handling and,
    on Windows only, joins and removes finished worker threads from
    ``self.win_proc``.
    '''
    # Add an extra fallback in case a forked process leaks through;
    # active_children() joins finished children as a side effect
    multiprocessing.active_children()

    # The thread bookkeeping below only applies to Windows
    if not salt.utils.platform.is_windows():
        return
    for win_thread in self.win_proc:
        if win_thread.is_alive():
            continue
        win_thread.join()
        try:
            self.win_proc.remove(win_thread)
            del win_thread
        except (ValueError, NameError):
            pass
# Main Minion Tune In
def tune_in(self, start=True):
    '''
    Lock onto the publisher. This is the main event loop for the minion
    :rtype : None

    Wires up the periodic callbacks (master ping, process cleanup,
    beacons, scheduler), attaches the payload handler to the pub
    channel and, when ``start`` is True, connects to the master and
    runs the IO loop until interrupted.
    '''
    self._pre_tune()

    log.debug(u'Minion \'%s\' trying to tune in', self.opts[u'id'])

    if start:
        self.sync_connect_master()
    if self.connected:
        self._fire_master_minion_start()
        log.info(u'Minion is ready to receive requests!')

    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()

    # Make sure to gracefully handle CTRL_LOGOFF_EVENT
    salt.utils.enable_ctrl_logoff_handler()

    # On first startup execute a state run if configured to do so
    self._state_run()

    loop_interval = self.opts[u'loop_interval']

    try:
        if self.opts[u'grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
            log.debug(
                u'Enabling the grains refresher. Will run every %s '
                u'minute%s.',
                self.opts[u'grains_refresh_every'],
                u's' if self.opts[u'grains_refresh_every'] > 1 else u''
            )
            self._refresh_grains_watcher(
                abs(self.opts[u'grains_refresh_every'])
            )
    except Exception as exc:
        log.error(
            u'Exception occurred in attempt to initialize grain refresh '
            u'routine during minion tune-in: %s', exc
        )

    # Registry of tornado PeriodicCallback objects keyed by purpose
    self.periodic_callbacks = {}
    # schedule the stuff that runs every interval
    ping_interval = self.opts.get(u'ping_interval', 0) * 60
    if ping_interval > 0 and self.connected:
        def ping_master():
            # Periodically ping the master; on repeated failure (outside
            # auth_safemode) restart the whole minion process
            try:
                def ping_timeout_handler(*_):
                    if not self.opts.get(u'auth_safemode', True):
                        log.error(u'** Master Ping failed. Attempting to restart minion**')
                        delay = self.opts.get(u'random_reauth_delay', 5)
                        log.info(u'delaying random_reauth_delay %ss', delay)
                        # regular sys.exit raises an exception -- which isn't sufficient in a thread
                        os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)

                self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
            except Exception:
                log.warning(u'Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
        self.periodic_callbacks[u'ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)

    self.periodic_callbacks[u'cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

    def handle_beacons():
        # Process Beacons
        beacons = None
        try:
            beacons = self.process_beacons(self.functions)
        except Exception:
            log.critical(u'The beacon errored: ', exc_info=True)
        if beacons and self.connected:
            self._fire_master(events=beacons, sync=False)

    self.periodic_callbacks[u'beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)

    # TODO: actually listen to the return and change period
    def handle_schedule():
        self.process_schedule(self, loop_interval)
    if hasattr(self, u'schedule'):
        self.periodic_callbacks[u'schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)

    # start all the other callbacks
    for periodic_cb in six.itervalues(self.periodic_callbacks):
        periodic_cb.start()

    # add handler to subscriber
    if hasattr(self, u'pub_channel') and self.pub_channel is not None:
        self.pub_channel.on_recv(self._handle_payload)
    elif self.opts.get(u'master_type') != u'disable':
        log.error(u'No connection to master found. Scheduled jobs will not run.')

    if start:
        try:
            self.io_loop.start()
            if self.restart:
                self.destroy()
        except (KeyboardInterrupt, RuntimeError):  # A RuntimeError can be re-raised by Tornado on shutdown
            self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload[u'enc'] == u'aes':
if self._target_load(payload[u'load']):
self._handle_decoded_payload(payload[u'load'])
elif self.opts[u'zmq_filtering']:
# In the filtering enabled case, we'd like to know when minion sees something it shouldnt
log.trace(
u'Broadcast message received not for this minion, Load: %s',
payload[u'load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if u'tgt' not in load or u'jid' not in load or u'fun' not in load \
or u'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if u'tgt_type' in load:
match_func = getattr(self.matcher,
u'{0}_match'.format(load[u'tgt_type']), None)
if match_func is None:
return False
if load[u'tgt_type'] in (u'grain', u'grain_pcre', u'pillar'):
delimiter = load.get(u'delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load[u'tgt'], delimiter=delimiter):
return False
elif not match_func(load[u'tgt']):
return False
else:
if not self.matcher.glob_match(load[u'tgt']):
return False
return True
def destroy(self):
    '''
    Tear down the minion

    Clears the running flag, drops the scheduler, detaches and closes
    the pub channel, and stops every registered periodic callback.
    '''
    self._running = False
    if hasattr(self, u'schedule'):
        del self.schedule
    if hasattr(self, u'pub_channel') and self.pub_channel is not None:
        # Stop receiving before closing the channel
        self.pub_channel.on_recv(None)
        if hasattr(self.pub_channel, u'close'):
            self.pub_channel.close()
        del self.pub_channel
    if hasattr(self, u'periodic_callbacks'):
        for periodic_cb in six.itervalues(self.periodic_callbacks):
            periodic_cb.stop()
def __del__(self):
    # Ensure channels and periodic callbacks are released when the
    # minion object is garbage collected.
    self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.

    The syndic subscribes to a higher-level master's publish channel
    and replays the jobs it receives to the minions attached to its
    own local master.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get(u'interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts[u'auth_safemode'] = True
        opts[u'loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)
        # jids whose job loads have already been forwarded upstream
        self.jid_forward_cache = set()
        self.jids = {}
        self.raw_events = []
        self.pub_future = None

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
        data[u'to'] = int(data.get(u'to', self.opts[u'timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        # (the differing defaults, 0 vs 1, make this True when neither
        # side set a master_id)
        if data.get(u'master_id', 0) != self.opts.get(u'master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if u'tgt_type' not in data:
            data[u'tgt_type'] = u'glob'
        kwargs = {}

        # optionally add a few fields to the publish data
        for field in (u'master_id',  # which master the job came from
                      u'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]

        def timeout_handler(*args):
            # NOTE(review): assumes the handler is always invoked with at
            # least two positional arguments -- confirm against the
            # stack_context callers
            log.warning(u'Unable to forward pub data: %s', args[1])
            return True

        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self.local.pub_async(data[u'tgt'],
                                 data[u'fun'],
                                 data[u'arg'],
                                 data[u'tgt_type'],
                                 data[u'ret'],
                                 data[u'jid'],
                                 data[u'to'],
                                 io_loop=self.io_loop,
                                 callback=lambda _: None,
                                 **kwargs)

    def fire_master_syndic_start(self):
        '''
        Notify the higher-level master that this syndic is alive, via both
        the legacy ``syndic_start`` tag and the namespaced start tag.
        '''
        # Send an event to the master that the minion is live
        self._fire_master(
            u'Syndic {0} started at {1}'.format(
                self.opts[u'id'],
                time.asctime()
            ),
            u'syndic_start',
            sync=False,
        )
        self._fire_master(
            u'Syndic {0} started at {1}'.format(
                self.opts[u'id'],
                time.asctime()
            ),
            tagify([self.opts[u'id'], u'start'], u'syndic'),
            sync=False,
        )

    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts[u'_minion_conf_file'], io_loop=self.io_loop)

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

    def _process_cmd_socket(self, payload):
        '''
        Forward a decrypted publication from the upstream master to the
        local minions via ``_handle_decoded_payload``.
        '''
        if payload is not None and payload[u'enc'] == u'aes':
            log.trace(u'Handling payload')
            self._handle_decoded_payload(payload[u'load'])
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the syndic currently has no need.

    @tornado.gen.coroutine
    def _return_pub_multi(self, values):
        '''
        Publish a batch of job returns upstream, one value at a time.
        '''
        for value in values:
            yield self._return_pub(value,
                                   u'_syndic_return',
                                   timeout=self._return_retry_timer(),
                                   sync=False)

    @tornado.gen.coroutine
    def reconnect(self):
        '''
        Tear down the current pub channel and re-authenticate with a
        master; returns this syndic once the attempt completes.
        '''
        if hasattr(self, u'pub_channel'):
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, u'close'):
                self.pub_channel.close()
            del self.pub_channel

        # if eval_master finds a new master for us, self.connected
        # will be True again on successful master authentication
        master, self.pub_channel = yield self.eval_master(opts=self.opts)

        if self.connected:
            self.opts[u'master'] = master
            self.pub_channel.on_recv(self._process_cmd_socket)
            log.info(u'Minion is ready to receive requests!')

        raise tornado.gen.Return(self)

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        super(Syndic, self).destroy()
        if hasattr(self, u'local'):
            del self.local

        if hasattr(self, u'forward_events'):
            self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
    '''
    Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
    all minions connected to it to the list of masters it is connected to.

    Modes (controlled by ``syndic_mode``):
        sync: This mode will synchronize all events and publishes from higher level masters
        cluster: This mode will only sync job publishes and returns

    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, that the return will end up in a zmq buffer
    in this Syndic headed to that original master.

    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way) this daemon does not handle failure well,
    it will (under most circumstances) stall the daemon for ~15s trying to forward events
    to the down master
    '''
    # time to connect to upstream master
    SYNDIC_CONNECT_TIMEOUT = 5
    SYNDIC_EVENT_TIMEOUT = 5

    def __init__(self, opts, io_loop=None):
        opts[u'loop_interval'] = 1
        super(SyndicManager, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # sync (old behavior), cluster (only returns and publishes)
        self.syndic_mode = self.opts.get(u'syndic_mode', u'sync')
        self.syndic_failover = self.opts.get(u'syndic_failover', u'random')

        self.auth_wait = self.opts[u'acceptance_wait_time']
        self.max_auth_wait = self.opts[u'acceptance_wait_time_max']

        self._has_master = threading.Event()
        self.jid_forward_cache = set()

        if io_loop is None:
            if HAS_ZMQ:
                zmq.eventloop.ioloop.install()
            self.io_loop = LOOP_CLASS.current()
        else:
            self.io_loop = io_loop

        # List of events
        self.raw_events = []
        # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
        self.job_rets = {}
        # List of delayed job_rets which was unable to send for some reason and will be resend to
        # any available master
        self.delayed = []
        # Active pub futures: {master_id: (future, [job_ret, ...]), ...}
        self.pub_futures = {}

    def _spawn_syndics(self):
        '''
        Spawn all the coroutines which will sign in the syndics
        '''
        self._syndics = OrderedDict()  # mapping of opts['master'] -> syndic
        masters = self.opts[u'master']
        if not isinstance(masters, list):
            masters = [masters]
        for master in masters:
            s_opts = copy.copy(self.opts)
            s_opts[u'master'] = master
            self._syndics[master] = self._connect_syndic(s_opts)

    @tornado.gen.coroutine
    def _connect_syndic(self, opts):
        '''
        Create a syndic, and asynchronously connect it to a master
        '''
        # NOTE(review): `last` is assigned below but never read afterwards;
        # looks vestigial -- confirm before removing
        last = 0  # never have we signed in
        auth_wait = opts[u'acceptance_wait_time']
        failed = False
        while True:
            log.debug(
                u'Syndic attempting to connect to %s',
                opts[u'master']
            )
            try:
                syndic = Syndic(opts,
                                timeout=self.SYNDIC_CONNECT_TIMEOUT,
                                safe=False,
                                io_loop=self.io_loop,
                                )
                yield syndic.connect_master(failed=failed)
                # set up the syndic to handle publishes (specifically not event forwarding)
                syndic.tune_in_no_block()

                # Send an event to the master that the minion is live
                syndic.fire_master_syndic_start()

                log.info(
                    u'Syndic successfully connected to %s',
                    opts[u'master']
                )
                break
            except SaltClientError as exc:
                failed = True
                log.error(
                    u'Error while bringing up syndic for multi-syndic. Is the '
                    u'master at %s responding?', opts[u'master']
                )
                last = time.time()
                # back off, capped at acceptance_wait_time_max
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            except KeyboardInterrupt:
                raise
            except:  # pylint: disable=W0702
                failed = True
                log.critical(
                    u'Unexpected error while connecting to %s',
                    opts[u'master'], exc_info=True
                )

        raise tornado.gen.Return(syndic)

    def _mark_master_dead(self, master):
        '''
        Mark a master as dead. This will start the sign-in routine
        '''
        # if its connected, mark it dead
        if self._syndics[master].done():
            syndic = self._syndics[master].result()  # pylint: disable=no-member
            self._syndics[master] = syndic.reconnect()
        else:
            # TODO: debug?
            log.info(
                u'Attempting to mark %s as dead, although it is already '
                u'marked dead', master
            )

    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one you asked for
        '''
        if kwargs is None:
            kwargs = {}
        successful = False
        # Call for each master
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    u'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            try:
                getattr(syndic_future.result(), func)(*args, **kwargs)
                successful = True
            except SaltClientError:
                log.error(
                    u'Unable to call %s on %s, trying another...',
                    func, master
                )
                self._mark_master_dead(master)
        if not successful:
            log.critical(u'Unable to call %s on any masters!', func)

    def _return_pub_syndic(self, values, master_id=None):
        '''
        Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
        '''
        func = u'_return_pub_multi'
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    u'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            future, data = self.pub_futures.get(master, (None, None))
            if future is not None:
                if not future.done():
                    if master == master_id:
                        # Targeted master previous send not done yet, call again later
                        return False
                    else:
                        # Fallback master is busy, try the next one
                        continue
                elif future.exception():
                    # Previous execution on this master returned an error
                    log.error(
                        u'Unable to call %s on %s, trying another...',
                        func, master
                    )
                    self._mark_master_dead(master)
                    del self.pub_futures[master]
                    # Add not sent data to the delayed list and try the next master
                    self.delayed.extend(data)
                    continue
            future = getattr(syndic_future.result(), func)(values)
            self.pub_futures[master] = (future, values)
            return True
        # Loop done and didn't exit: wasn't sent, try again later
        return False

    def iter_master_options(self, master_id=None):
        '''
        Iterate (in order) over your options for master

        Yields ``(master_id, syndic_future)`` pairs, starting with the
        requested master when it is known, then the remaining masters.
        '''
        masters = list(self._syndics.keys())
        # NOTE(review): __init__ caches this as self.syndic_failover but the
        # opts value is read again here -- consistent today, but confirm
        if self.opts[u'syndic_failover'] == u'random':
            shuffle(masters)
        if master_id not in self._syndics:
            master_id = masters.pop(0)
        else:
            masters.remove(master_id)

        while True:
            yield master_id, self._syndics[master_id]
            if len(masters) == 0:
                break
            master_id = masters.pop(0)

    def _reset_event_aggregation(self):
        '''
        Drop all aggregated job returns and raw events.
        '''
        self.job_rets = {}
        self.raw_events = []

    def reconnect_event_bus(self, something):
        '''
        Re-register the event handler when the event-bus future resolves.
        '''
        future = self.local.event.set_event_handler(self._process_event)
        self.io_loop.add_future(future, self.reconnect_event_bus)

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        self._spawn_syndics()
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts[u'_minion_conf_file'], io_loop=self.io_loop)
        self.local.event.subscribe(u'')

        log.debug(u'SyndicManager \'%s\' trying to tune in', self.opts[u'id'])

        # register the event sub to the poller
        # (redundant with _reset_event_aggregation below, kept as-is)
        self.job_rets = {}
        self.raw_events = []
        self._reset_event_aggregation()
        future = self.local.event.set_event_handler(self._process_event)
        self.io_loop.add_future(future, self.reconnect_event_bus)

        # forward events every syndic_event_forward_timeout
        self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                              self.opts[u'syndic_event_forward_timeout'] * 1000,
                                                              io_loop=self.io_loop)
        self.forward_events.start()

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        self.io_loop.start()

    def _process_event(self, raw):
        '''
        Aggregate one local event: job returns are grouped per master and
        tag in ``self.job_rets``; in sync mode other events are queued in
        ``self.raw_events`` for bulk forwarding.
        '''
        # TODO: cleanup: Move down into event class
        mtag, data = self.local.event.unpack(raw, self.local.event.serial)
        log.trace(u'Got event %s', mtag)  # pylint: disable=no-member

        tag_parts = mtag.split(u'/')
        if len(tag_parts) >= 4 and tag_parts[1] == u'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == u'ret' and \
                u'return' in data:
            if u'jid' not in data:
                # Not a job return
                return
            if self.syndic_mode == u'cluster' and data.get(u'master_id', 0) == self.opts.get(u'master_id', 1):
                log.debug(u'Return received with matching master_id, not forwarding')
                return

            master = data.get(u'master_id')
            jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
            if not jdict:
                jdict[u'__fun__'] = data.get(u'fun')
                jdict[u'__jid__'] = data[u'jid']
                jdict[u'__load__'] = {}
                fstr = u'{0}.get_load'.format(self.opts[u'master_job_cache'])
                # Only need to forward each load once. Don't hit the disk
                # for every minion return!
                if data[u'jid'] not in self.jid_forward_cache:
                    jdict[u'__load__'].update(
                        self.mminion.returners[fstr](data[u'jid'])
                    )
                    self.jid_forward_cache.add(data[u'jid'])
                    if len(self.jid_forward_cache) > self.opts[u'syndic_jid_forward_cache_hwm']:
                        # Pop the oldest jid from the cache
                        tmp = sorted(list(self.jid_forward_cache))
                        tmp.pop(0)
                        self.jid_forward_cache = set(tmp)
            if master is not None:
                # __'s to make sure it doesn't print out on the master cli
                jdict[u'__master_id__'] = master
            ret = {}
            for key in u'return', u'retcode', u'success':
                if key in data:
                    ret[key] = data[key]
            jdict[data[u'id']] = ret
        else:
            # TODO: config to forward these? If so we'll have to keep track of who
            # has seen them
            # if we are the top level masters-- don't forward all the minion events
            if self.syndic_mode == u'sync':
                # Add generic event aggregation here
                if u'retcode' not in data:
                    self.raw_events.append({u'data': data, u'tag': mtag})

    def _forward_events(self):
        '''
        Periodic callback: flush queued raw events, delayed returns and
        per-master job returns to the connected masters.
        '''
        log.trace(u'Forwarding events')  # pylint: disable=no-member
        if self.raw_events:
            events = self.raw_events
            self.raw_events = []
            self._call_syndic(u'_fire_master',
                              kwargs={u'events': events,
                                      u'pretag': tagify(self.opts[u'id'], base=u'syndic'),
                                      u'timeout': self.SYNDIC_EVENT_TIMEOUT,
                                      u'sync': False,
                                      },
                              )
        if self.delayed:
            res = self._return_pub_syndic(self.delayed)
            if res:
                self.delayed = []
        for master in list(six.iterkeys(self.job_rets)):
            values = self.job_rets[master].values()
            res = self._return_pub_syndic(values, master_id=master)
            if res:
                del self.job_rets[master]
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = u'compound'
if not data:
log.error(u'Received bad data when setting the match from the top '
u'file')
return False
for item in data:
if isinstance(item, dict):
if u'match' in item:
matcher = item[u'match']
if hasattr(self, matcher + u'_match'):
funcname = u'{0}_match'.format(matcher)
if matcher == u'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error(u'Attempting to match with unknown matcher: %s', matcher)
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts[u'id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts[u'id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(u',')
return bool(self.opts[u'id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug(u'grains target: %s', tgt)
if delimiter not in tgt:
log.error(u'Got insufficient arguments for grains match '
u'statement from master')
return False
return salt.utils.subdict_match(
self.opts[u'grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug(u'grains pcre target: %s', tgt)
if delimiter not in tgt:
log.error(u'Got insufficient arguments for grains pcre match '
u'statement from master')
return False
return salt.utils.subdict_match(self.opts[u'grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(u':')
if len(comps) < 2:
return False
val = self.functions[u'data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug(u'pillar target: %s', tgt)
if delimiter not in tgt:
log.error(u'Got insufficient arguments for pillar match '
u'statement from master')
return False
return salt.utils.subdict_match(
self.opts[u'pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug(u'pillar PCRE target: %s', tgt)
if delimiter not in tgt:
log.error(u'Got insufficient arguments for pillar PCRE match '
u'statement from master')
return False
return salt.utils.subdict_match(
self.opts[u'pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=u':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug(u'pillar target: %s', tgt)
if delimiter not in tgt:
log.error(u'Got insufficient arguments for pillar match '
u'statement from master')
return False
return salt.utils.subdict_match(self.opts[u'pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error(u'Invalid IP/CIDR target: %s', tgt)
return []
proto = u'ipv{0}'.format(tgt.version)
grains = self.opts[u'grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts[u'range_server'])
try:
return self.opts[u'grains'][u'fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug(u'Range exception in compound match: %s', exc)
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error(u'Compound target received that is neither string, list nor tuple')
return False
log.debug(u'compound_match: %s ? %s', self.opts[u'id'], tgt)
ref = {u'G': u'grain',
u'P': u'grain_pcre',
u'I': u'pillar',
u'J': u'pillar_pcre',
u'L': u'list',
u'N': None, # Nodegroups should already be expanded
u'S': u'ipcidr',
u'E': u'pcre'}
if HAS_RANGE:
ref[u'R'] = u'range'
results = []
opers = [u'and', u'or', u'not', u'(', u')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == u'(' and word in (u'and', u'or'):
log.error(u'Invalid beginning operator after "(": %s', word)
return False
if word == u'not':
if not results[-1] in (u'and', u'or', u'('):
results.append(u'and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in [u'(', u'not']:
log.error(u'Invalid beginning operator: %s', word)
return False
results.append(word)
elif target_info and target_info[u'engine']:
if u'N' == target_info[u'engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error(
u'Detected nodegroup expansion failure of "%s"', word)
return False
engine = ref.get(target_info[u'engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error(
u'Unrecognized target engine "%s" for target '
u'expression "%s"', target_info[u'engine'], word
)
return False
engine_args = [target_info[u'pattern']]
engine_kwargs = {}
if target_info[u'delimiter']:
engine_kwargs[u'delimiter'] = target_info[u'delimiter']
results.append(
str(getattr(self, u'{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = u' '.join(results)
log.debug(u'compound_match %s ? "%s" => "%s"', self.opts[u'id'], tgt, results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error(
u'Invalid compound target: %s for results: %s', tgt, results)
return False
return False
def nodegroup_match(self, tgt, nodegroups):
    '''
    Compatibility matcher for nodegroup targeting.

    NOT called when using nodegroups for remote execution; only invoked
    when the nodegroups matcher is used in states.
    '''
    if tgt not in nodegroups:
        return False
    # Expand the nodegroup into a compound target expression and defer
    # to the compound matcher for the actual evaluation.
    expanded = salt.utils.minions.nodegroup_comp(tgt, nodegroups)
    return self.compound_match(expanded)
class ProxyMinionManager(MinionManager):
    '''
    Multi-minion interface, specialised to manage proxy minions.
    '''

    def _create_minion_object(self, opts, timeout, safe,
                              io_loop=None, loaded_base_name=None,
                              jid_queue=None):
        '''
        Return the minion object of the correct type: a ProxyMinion
        rather than a regular Minion.
        '''
        return ProxyMinion(
            opts, timeout, safe,
            io_loop=io_loop,
            loaded_base_name=loaded_base_name,
            jid_queue=jid_queue)
class ProxyMinion(Minion):
    '''
    This class instantiates a u'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check Minion._post_master_init
        to see if those changes need to be propagated.

        ProxyMinions need a significantly different post master setup,
        which is why the differences are not factored out into separate helper
        functions.

        :param master: identifier of the master this minion connected to;
            stored into ``self.opts[u'master']`` when connected.
        '''
        log.debug(u"subclassed _post_master_init")
        # Only (re)compile pillar from the master we actually connected to.
        if self.connected:
            self.opts[u'master'] = master
            self.opts[u'pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts[u'grains'],
                self.opts[u'id'],
                saltenv=self.opts[u'environment'],
                pillarenv=self.opts.get(u'pillarenv'),
            ).compile_pillar()
        # A proxy minion cannot run without its connection details; they must
        # come from either pillar or opts.
        if u'proxy' not in self.opts[u'pillar'] and u'proxy' not in self.opts:
            errmsg = u'No proxy key found in pillar or opts for id ' + self.opts[u'id'] + u'. ' + \
                u'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=-1, msg=errmsg)
        if u'proxy' not in self.opts:
            self.opts[u'proxy'] = self.opts[u'pillar'][u'proxy']
        if self.opts.get(u'proxy_merge_pillar_in_opts'):
            # Override proxy opts with pillar data when the user required.
            self.opts = salt.utils.dictupdate.merge(self.opts,
                                                    self.opts[u'pillar'],
                                                    strategy=self.opts.get(u'proxy_merge_pillar_in_opts_strategy'),
                                                    merge_lists=self.opts.get(u'proxy_deep_merge_pillar_in_opts', False))
        elif self.opts.get(u'proxy_mines_pillar'):
            # Even when not required, some details such as mine configuration
            # should be merged anyway whenever possible.
            if u'mine_interval' in self.opts[u'pillar']:
                self.opts[u'mine_interval'] = self.opts[u'pillar'][u'mine_interval']
            if u'mine_functions' in self.opts[u'pillar']:
                general_proxy_mines = self.opts.get(u'mine_functions', [])
                specific_proxy_mines = self.opts[u'pillar'][u'mine_functions']
                try:
                    # Concatenation assumes both are lists; a dict-style
                    # mine_functions config raises TypeError and is logged.
                    self.opts[u'mine_functions'] = general_proxy_mines + specific_proxy_mines
                except TypeError as terr:
                    log.error(u'Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
                        self.opts[u'id']))
        fq_proxyname = self.opts[u'proxy'][u'proxytype']
        # Need to load the modules so they get all the dunder variables
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        self.functions[u'saltutil.sync_all'](saltenv=self.opts[u'environment'])
        # Pull in the utils
        self.utils = salt.loader.utils(self.opts)
        # Then load the proxy module
        self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
        # And re-load the modules so the __proxy__ variable gets injected
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        self.functions.pack[u'__proxy__'] = self.proxy
        self.proxy.pack[u'__salt__'] = self.functions
        self.proxy.pack[u'__ret__'] = self.returners
        self.proxy.pack[u'__pillar__'] = self.opts[u'pillar']
        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
        self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.proxy.pack[u'__utils__'] = self.utils
        # Reload all modules so all dunder variables are injected
        self.proxy.reload_modules()
        # Start engines here instead of in the Minion superclass __init__
        # This is because we need to inject the __proxy__ variable but
        # it is not setup until now.
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                    self.process_manager, proxy=self.proxy)
        # Every proxymodule must expose at least init() and shutdown().
        if (u'{0}.init'.format(fq_proxyname) not in self.proxy
                or u'{0}.shutdown'.format(fq_proxyname) not in self.proxy):
            errmsg = u'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
                u'Check your proxymodule. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=-1, msg=errmsg)
        proxy_init_fn = self.proxy[fq_proxyname + u'.init']
        proxy_init_fn(self.opts)
        # Grains are re-collected now that __proxy__ is available to them.
        self.opts[u'grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
        uid = salt.utils.get_uid(user=self.opts.get(u'user', None))
        self.proc_dir = get_proc_dir(self.opts[u'cachedir'], uid=uid)
        if self.connected and self.opts[u'pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, u'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners
        if not hasattr(self, u'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type=u'alive')],
                proxy=self.proxy)
        # add default scheduling jobs to the minions scheduler
        if self.opts[u'mine_enabled'] and u'mine.update' in self.functions:
            self.schedule.add_job({
                u'__mine_interval':
                {
                    u'function': u'mine.update',
                    u'minutes': self.opts[u'mine_interval'],
                    u'jid_include': True,
                    u'maxrunning': 2,
                    u'return_job': self.opts.get(u'mine_return_job', False)
                }
            }, persist=True)
            log.info(u'Added mine.update to scheduler')
        else:
            self.schedule.delete_job(u'__mine_interval', persist=True)
        # add master_alive job if enabled
        if (self.opts[u'transport'] != u'tcp' and
                self.opts[u'master_alive_interval'] > 0):
            self.schedule.add_job({
                master_event(type=u'alive', master=self.opts[u'master']):
                {
                    u'function': u'status.master',
                    u'seconds': self.opts[u'master_alive_interval'],
                    u'jid_include': True,
                    u'maxrunning': 1,
                    u'return_job': False,
                    u'kwargs': {u'master': self.opts[u'master'],
                                u'connected': True}
                }
            }, persist=True)
            # Only schedule failback pings when not already on the primary.
            if self.opts[u'master_failback'] and \
                    u'master_list' in self.opts and \
                    self.opts[u'master'] != self.opts[u'master_list'][0]:
                self.schedule.add_job({
                    master_event(type=u'failback'):
                    {
                        u'function': u'status.ping_master',
                        u'seconds': self.opts[u'master_failback_interval'],
                        u'jid_include': True,
                        u'maxrunning': 1,
                        u'return_job': False,
                        u'kwargs': {u'master': self.opts[u'master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type=u'failback'), persist=True)
        else:
            self.schedule.delete_job(master_event(type=u'alive', master=self.opts[u'master']), persist=True)
            self.schedule.delete_job(master_event(type=u'failback'), persist=True)
        # proxy keepalive
        proxy_alive_fn = fq_proxyname+u'.alive'
        if (proxy_alive_fn in self.proxy
                and u'status.proxy_reconnect' in self.functions
                and self.opts.get(u'proxy_keep_alive', True)):
            # `proxy_keep_alive` defaults to True; setting it to False
            # disables the periodic reconnect job below.
            self.schedule.add_job({
                u'__proxy_keepalive':
                {
                    u'function': u'status.proxy_reconnect',
                    u'minutes': self.opts.get(u'proxy_keep_alive_interval', 1),  # by default, check once per minute
                    u'jid_include': True,
                    u'maxrunning': 1,
                    u'return_job': False,
                    u'kwargs': {
                        u'proxy_name': fq_proxyname
                    }
                }
            }, persist=True)
            self.schedule.enable_schedule()
        else:
            self.schedule.delete_job(u'__proxy_keepalive', persist=True)
        # Sync the grains here so the proxy can communicate them to the master
        self.functions[u'saltutil.sync_grains'](saltenv=u'base')
        self.grains_cache = self.opts[u'grains']
        self.ready = True

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        '''
        Entry point for executing a job payload, typically in a child
        process/thread. Builds a fully initialised ProxyMinion (loader,
        proxymodule, serializer, proc dir) when ``minion_instance`` is not
        supplied, then dispatches to Minion._thread_return /
        Minion._thread_multi_return depending on the shape of data[u'fun'].
        '''
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, u'functions'):
                # Need to load the modules so they get all the dunder variables
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts[u'grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
                # Pull in the utils
                minion_instance.utils = salt.loader.utils(minion_instance.opts)
                # Then load the proxy module
                minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
                # And re-load the modules so the __proxy__ variable gets injected
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts[u'grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
                minion_instance.functions.pack[u'__proxy__'] = minion_instance.proxy
                minion_instance.proxy.pack[u'__salt__'] = minion_instance.functions
                minion_instance.proxy.pack[u'__ret__'] = minion_instance.returners
                minion_instance.proxy.pack[u'__pillar__'] = minion_instance.opts[u'pillar']
                # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
                minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
                minion_instance.proxy.pack[u'__utils__'] = minion_instance.utils
                # Reload all modules so all dunder variables are injected
                minion_instance.proxy.reload_modules()
                fq_proxyname = opts[u'proxy'][u'proxytype']
                proxy_init_fn = minion_instance.proxy[fq_proxyname + u'.init']
                proxy_init_fn(opts)
            if not hasattr(minion_instance, u'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, u'proc_dir'):
                uid = salt.utils.get_uid(user=opts.get(u'user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts[u'cachedir'], uid=uid)
                )
        with tornado.stack_context.StackContext(minion_instance.ctx):
            # A tuple/list of functions means a multi-function job.
            if isinstance(data[u'fun'], tuple) or isinstance(data[u'fun'], list):
                Minion._thread_multi_return(minion_instance, opts, data)
            else:
                Minion._thread_return(minion_instance, opts, data)
|
fetcher.py | from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from bs4 import BeautifulSoup
from urllib2 import urlopen
import threading
import Queue
import re
class Fetcher(object):
    """Fetching capability using Selenium (browser) or plain urllib."""

    # Shared driver instance, created lazily by get_selenium_driver().
    search_driver = None

    @staticmethod
    def get_selenium_driver(timeout=10):
        """Return the shared Firefox driver, creating it on first use.

        ``timeout`` is currently unused; kept for interface compatibility.
        """
        if Fetcher.search_driver is not None:
            return Fetcher.search_driver
        capabilities = DesiredCapabilities.FIREFOX
        capabilities['takesScreenShot'] = False
        binary = FirefoxBinary('/data/projects/G-817549/standalone/tools/firefox/firefox')
        driver = webdriver.Firefox(firefox_binary=binary,
                                   capabilities=capabilities,
                                   log_path='/data/projects/G-817549/standalone/logs/firefox/selenium.log')
        Fetcher.search_driver = driver
        return Fetcher.search_driver

    @staticmethod
    def new_selenium_driver(timeout=10):
        """Create a fresh remote Chrome driver (Selenium grid on localhost).

        ``timeout`` is currently unused; kept for interface compatibility.
        """
        driver = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
                                  desired_capabilities=DesiredCapabilities.CHROME)
        return driver

    @staticmethod
    def close_selenium_driver(driver):
        """Close and quit *driver*, logging (not raising) any failure."""
        if not driver:
            return
        try:
            driver.close()
            driver.quit()
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            print('An error occurred while closing the driver')

    @staticmethod
    def selenium(url):
        """Fetch *url* with a fresh Selenium driver.

        Returns a one-element list holding the first 20 bytes of the
        UTF-8 encoded page source (empty on failure).
        """
        bad_request = False
        driver = Fetcher.new_selenium_driver()
        html = ''
        try:
            driver.get(url)
        except Exception:
            print('An error occurred while fetching URL: ' + url)
            bad_request = True
        finally:
            try:
                if not bad_request:
                    html = driver.page_source
            except Exception:
                print('An error occurred while fetching URL: ' + url + ' from Selenium')
            finally:
                if driver:
                    Fetcher.close_selenium_driver(driver)
        return [html.encode('utf8')[:20]]

    @staticmethod
    def plain(url):
        """Fetch *url* with urllib and return [html, title, text].

        Raises on network/parse errors; fetch() wraps this with a
        best-effort fallback.
        """
        res = urlopen(url)
        html = res.read()
        charset = res.headers.getparam('charset')
        # getparam() returns None when no charset is declared; the old
        # code crashed on None.lower() in that case.
        if charset and charset.lower() != 'utf-8':
            html = html.encode('utf-8')
        soup = BeautifulSoup(html, 'html.parser')
        # Guard against documents without a <title> element.
        title = soup.title.string if soup.title is not None and soup.title.string is not None else u''
        return [html, title.encode('utf-8'), soup.text.encode('utf-8')]

    @staticmethod
    def read_url(url, queue):
        """Worker: fetch *url*, parse it, and enqueue [url, data, title, text].

        Any error is logged and the URL is skipped (nothing is enqueued).
        """
        try:
            res = urlopen(url)
            data = res.read()
            print('Fetched %s from %s' % (len(data), url))
            charset = res.headers.getparam('charset')
            if charset and charset.lower() != 'utf-8':
                data = data.encode('utf-8')
            soup = BeautifulSoup(data, 'html.parser')
            print('Parsed %s from %s' % (len(data), url))
            title = soup.title.string if soup.title is not None and soup.title.string is not None else u''
            queue.put([url, data, title.encode('utf-8'), soup.text.encode('utf-8')])
        except Exception:
            print('An error occurred while fetching URL: ' + url + ' using urllib. Skipping it!')

    @staticmethod
    def is_alive(threads):
        """Prune finished threads from *threads* (in place); report any alive.

        Fixes two defects of the original: it removed items from the list
        while iterating it (which skips elements), and it used
        Thread.isAlive(), which was removed in Python 3.9.
        """
        finished = [t for t in threads if not t.is_alive()]
        for t in finished:
            threads.remove(t)
        return len(threads) > 0

    @staticmethod
    def parallel(urls, top_n):
        """Fetch *urls* concurrently; return up to ``top_n + 1`` results."""
        result = Queue.Queue()
        threads = [threading.Thread(target=Fetcher.read_url, args=(url, result))
                   for url in urls]
        for t in threads:
            t.daemon = True
            t.start()
        data = []
        # NOTE(review): result.get() blocks indefinitely if every remaining
        # worker dies without enqueueing anything between the is_alive()
        # check and the get(); consider get(timeout=...) if that bites.
        while len(data) <= top_n and Fetcher.is_alive(threads):
            data.append(result.get())
        return data

    @staticmethod
    def fetch_multiple(urls, top_n):
        """Fetch *urls* in parallel; thin wrapper over parallel()."""
        result = Fetcher.parallel(urls, top_n)
        return result

    @staticmethod
    def fetch(url):
        """Fetch *url* with plain(); on any error return ['', '', '']."""
        result = ['', '', '']
        try:
            result = Fetcher.plain(url)
        except Exception:
            print('An error occurred while fetching URL: ' + url + ' using urllib. Skipping it!')
        return result
|
regrtest.py | #! /usr/bin/env python3
"""
Script to run Python regression tests.
Run this script with -h or --help for documentation.
"""
USAGE = """\
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
"""
DESCRIPTION = """\
Run Python regression tests.
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
"""
EPILOG = """\
Additional option details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order a standard run (ie, this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines howmuch memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import argparse
import builtins
import faulthandler
import io
import json
import locale
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import _multiprocessing, multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
    if hasattr(module, '__path__'):
        module.__path__ = [os.path.abspath(path) for path in module.__path__]
    if hasattr(module, '__file__'):
        module.__file__ = os.path.abspath(module.__file__)

# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
    try:
        import resource
    except ImportError:
        pass
    else:
        soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
        # Raise the soft limit toward 2048 KiB, but never above the hard
        # limit and never below the current soft limit.
        newsoft = min(hard, max(soft, 1024*2048))
        resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1      # the test modified the execution environment
SKIPPED = -2
RESOURCE_DENIED = -3  # the test needs a resource not enabled via -u
INTERRUPTED = -4
CHILD_ERROR = -5   # error in a child process

from test import support

# Resource names accepted by the -u option; see resources_list().
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
                  'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')

# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
    TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
else:
    TEMPDIR = tempfile.gettempdir()
TEMPDIR = os.path.abspath(TEMPDIR)
class _ArgParser(argparse.ArgumentParser):
    """ArgumentParser whose error messages point the user at --help."""

    def error(self, message):
        # Append a hint, then delegate; the base implementation prints the
        # message to stderr and exits.
        hint = "\nPass -h or --help for complete help."
        super().error(message + hint)
def _create_parser():
    """Build and return the argparse parser for regrtest's command line.

    Options are organised into argument groups purely for ``--help``
    readability; defaults and cross-option validation live in
    _parse_args().
    """
    # Set prog to prevent the uninformative "__main__.py" from displaying in
    # error messages when using "python -m test ...".
    parser = _ArgParser(prog='regrtest.py',
                        usage=USAGE,
                        description=DESCRIPTION,
                        epilog=EPILOG,
                        add_help=False,
                        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Arguments with this clause added to its help are described further in
    # the epilog's "Additional option details" section.
    more_details = ' See the section at bottom for more details.'

    group = parser.add_argument_group('General options')
    # We add help explicitly to control what argument group it renders under.
    group.add_argument('-h', '--help', action='help',
                       help='show this help message and exit')
    group.add_argument('--timeout', metavar='TIMEOUT', type=float,
                       help='dump the traceback and exit if a test takes '
                            'more than TIMEOUT seconds; disabled if TIMEOUT '
                            'is negative or equals to zero')
    group.add_argument('--wait', action='store_true',
                       help='wait for user input, e.g., allow a debugger '
                            'to be attached')
    # --slaveargs is an internal option used by run_test_in_subprocess().
    group.add_argument('--slaveargs', metavar='ARGS')
    group.add_argument('-S', '--start', metavar='START',
                       help='the name of the test at which to start.' +
                            more_details)

    group = parser.add_argument_group('Verbosity')
    group.add_argument('-v', '--verbose', action='count',
                       help='run tests in verbose mode with output to stdout')
    group.add_argument('-w', '--verbose2', action='store_true',
                       help='re-run failed tests in verbose mode')
    group.add_argument('-W', '--verbose3', action='store_true',
                       help='display test output on failure')
    group.add_argument('-q', '--quiet', action='store_true',
                       help='no output unless one or more tests fail')
    group.add_argument('-o', '--slow', action='store_true', dest='print_slow',
                       help='print the slowest 10 tests')
    group.add_argument('--header', action='store_true',
                       help='print header with interpreter info')

    group = parser.add_argument_group('Selecting tests')
    group.add_argument('-r', '--randomize', action='store_true',
                       help='randomize test execution order.' + more_details)
    group.add_argument('--randseed', metavar='SEED',
                       dest='random_seed', type=int,
                       help='pass a random seed to reproduce a previous '
                            'random run')
    group.add_argument('-f', '--fromfile', metavar='FILE',
                       help='read names of tests to run from a file.' +
                            more_details)
    group.add_argument('-x', '--exclude', action='store_true',
                       help='arguments are tests to *exclude*')
    group.add_argument('-s', '--single', action='store_true',
                       help='single step through a set of tests.' +
                            more_details)
    group.add_argument('-m', '--match', metavar='PAT',
                       dest='match_tests',
                       help='match test cases and methods with glob pattern PAT')
    group.add_argument('-G', '--failfast', action='store_true',
                       help='fail as soon as a test fails (only with -v or -W)')
    group.add_argument('-u', '--use', metavar='RES1,RES2,...',
                       action='append', type=resources_list,
                       help='specify which special resource intensive tests '
                            'to run.' + more_details)
    group.add_argument('-M', '--memlimit', metavar='LIMIT',
                       help='run very large memory-consuming tests.' +
                            more_details)
    group.add_argument('--testdir', metavar='DIR',
                       type=relative_filename,
                       help='execute test files in the specified directory '
                            '(instead of the Python stdlib test suite)')

    group = parser.add_argument_group('Special runs')
    group.add_argument('-l', '--findleaks', action='store_true',
                       help='if GC is available detect tests that leak memory')
    group.add_argument('-L', '--runleaks', action='store_true',
                       help='run the leaks(1) command just before exit.' +
                            more_details)
    group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
                       type=huntrleaks,
                       help='search for reference leaks (needs debug build, '
                            'very slow).' + more_details)
    group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
                       dest='use_mp', type=int,
                       help='run PROCESSES processes at once')
    group.add_argument('-T', '--coverage', action='store_true',
                       dest='trace',
                       help='turn on code coverage tracing using the trace '
                            'module')
    group.add_argument('-D', '--coverdir', metavar='DIR',
                       type=relative_filename,
                       help='directory where coverage files are put')
    group.add_argument('-N', '--nocoverdir',
                       action='store_const', const=None, dest='coverdir',
                       help='put coverage files alongside modules')
    group.add_argument('-t', '--threshold', metavar='THRESHOLD',
                       type=int,
                       help='call gc.set_threshold(THRESHOLD)')
    group.add_argument('-n', '--nowindows', action='store_true',
                       help='suppress error message boxes on Windows')
    group.add_argument('-F', '--forever', action='store_true',
                       help='run the specified tests in a loop, until an '
                            'error happens')
    group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                       help='enable Profile Guided Optimization training')

    return parser
def relative_filename(string):
    """Resolve *string* against the working directory saved at startup.

    The CWD is replaced with a temporary directory before main() runs,
    so user-supplied paths are joined onto the saved CWD to end up where
    the user expects.
    """
    saved_cwd = support.SAVEDCWD
    return os.path.join(saved_cwd, string)
def huntrleaks(string):
    """Parse the -R option value ``stab:run[:fname]`` into a 3-tuple.

    Empty fields fall back to the defaults (5, 4, 'reflog.txt').
    Raises argparse.ArgumentTypeError for a malformed value.
    """
    parts = string.split(':')
    if len(parts) not in (2, 3):
        raise argparse.ArgumentTypeError(
            'needs 2 or 3 colon-separated arguments')
    warmups = int(parts[0]) if parts[0] else 5
    runs = int(parts[1]) if parts[1] else 4
    if len(parts) > 2 and parts[2]:
        fname = parts[2]
    else:
        fname = 'reflog.txt'
    return warmups, runs, fname
def resources_list(string):
    """Parse a -u option value: a comma-separated list of resource names.

    Each entry may be 'all', 'none', a name from RESOURCE_NAMES, or such
    a name prefixed with '-' (meaning "remove"). Returns the lower-cased
    entries unchanged; raises argparse.ArgumentTypeError for anything
    unrecognized (including empty entries from stray commas).
    """
    u = [x.lower() for x in string.split(',')]
    for r in u:
        if r == 'all' or r == 'none':
            continue
        # startswith() instead of r[0]: indexing raised IndexError on empty
        # entries (e.g. a trailing comma in '-ugui,'); now they fall through
        # to the clean ArgumentTypeError below.
        if r.startswith('-'):
            r = r[1:]
        if r not in RESOURCE_NAMES:
            raise argparse.ArgumentTypeError('invalid resource: ' + r)
    return u
def _parse_args(args, **kwargs):
    """Parse *args* (list of command-line strings) into a Namespace.

    Keyword arguments override the built-in defaults; a TypeError is
    raised for keywords that do not correspond to a known option.
    Cross-option consistency (e.g. -s vs -f, -j vs -T) is enforced via
    parser.error(), which exits the process.
    """
    # Defaults
    ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
        exclude=False, single=False, randomize=False, fromfile=None,
        findleaks=False, use_resources=None, trace=False, coverdir='coverage',
        runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
        random_seed=None, use_mp=None, verbose3=False, forever=False,
        header=False, failfast=False, match_tests=None, pgo=False)
    for k, v in kwargs.items():
        if not hasattr(ns, k):
            raise TypeError('%r is an invalid keyword argument '
                            'for this function' % k)
        setattr(ns, k, v)
    if ns.use_resources is None:
        ns.use_resources = []

    parser = _create_parser()
    # Issue #14191: argparse doesn't support "intermixed" positional and
    # optional arguments. Use parse_known_args() as workaround.
    ns.args = parser.parse_known_args(args=args, namespace=ns)[1]
    for arg in ns.args:
        if arg.startswith('-'):
            parser.error("unrecognized arguments: %s" % arg)
            # NOTE(review): parser.error() raises SystemExit, so this
            # sys.exit(1) appears to be unreachable.
            sys.exit(1)

    # Option-combination sanity checks; each error() call exits.
    if ns.single and ns.fromfile:
        parser.error("-s and -f don't go together!")
    if ns.use_mp is not None and ns.trace:
        parser.error("-T and -j don't go together!")
    if ns.use_mp is not None and ns.findleaks:
        parser.error("-l and -j don't go together!")
    if ns.use_mp is not None and ns.memlimit:
        parser.error("-M and -j don't go together!")
    if ns.failfast and not (ns.verbose or ns.verbose3):
        parser.error("-G/--failfast needs either -v or -W")

    if ns.quiet:
        ns.verbose = 0
    if ns.timeout is not None:
        if hasattr(faulthandler, 'dump_traceback_later'):
            if ns.timeout <= 0:
                # Non-positive timeout disables the watchdog entirely.
                ns.timeout = None
        else:
            print("Warning: The timeout option requires "
                  "faulthandler.dump_traceback_later")
            ns.timeout = None
    if ns.use_mp is not None:
        if ns.use_mp <= 0:
            # Use all cores + extras for tests that like to sleep
            ns.use_mp = 2 + (os.cpu_count() or 1)
        if ns.use_mp == 1:
            # A single worker is equivalent to no multiprocessing at all.
            ns.use_mp = None
    if ns.use:
        # Fold each -u occurrence into the cumulative use_resources list;
        # 'all'/'none' reset it, '-name' removes a single resource.
        for a in ns.use:
            for r in a:
                if r == 'all':
                    ns.use_resources[:] = RESOURCE_NAMES
                    continue
                if r == 'none':
                    del ns.use_resources[:]
                    continue
                remove = False
                if r[0] == '-':
                    remove = True
                    r = r[1:]
                if remove:
                    if r in ns.use_resources:
                        ns.use_resources.remove(r)
                elif r not in ns.use_resources:
                    ns.use_resources.append(r)
    if ns.random_seed is not None:
        # An explicit seed implies randomized ordering.
        ns.randomize = True

    return ns
def run_test_in_subprocess(testname, ns):
    """Run the given test in a subprocess with --slaveargs.

    ns is the option Namespace parsed from command-line arguments. regrtest
    is invoked in a subprocess with the --slaveargs argument; when the
    subprocess exits, its return code, stdout and stderr are returned as a
    3-tuple.
    """
    from subprocess import Popen, PIPE
    cmd = [sys.executable]
    cmd += support.args_from_interpreter_flags()
    cmd += ['-X', 'faulthandler', '-m', 'test.regrtest']
    # required to spawn a new process with PGO flag on/off
    if ns.pgo:
        cmd += ['--pgo']
    slaveargs = (
        (testname, ns.verbose, ns.quiet),
        dict(huntrleaks=ns.huntrleaks,
             use_resources=ns.use_resources,
             output_on_failure=ns.verbose3,
             timeout=ns.timeout, failfast=ns.failfast,
             match_tests=ns.match_tests, pgo=ns.pgo))
    # Running the child from the same working directory as regrtest's original
    # invocation ensures that TEMPDIR for the child is the same when
    # sysconfig.is_python_build() is true. See issue 15300.
    child = Popen(cmd + ['--slaveargs', json.dumps(slaveargs)],
                  stdout=PIPE, stderr=PIPE,
                  universal_newlines=True,
                  close_fds=(os.name != 'nt'),
                  cwd=support.SAVEDCWD)
    stdout, stderr = child.communicate()
    retcode = child.wait()
    return retcode, stdout, stderr
def main(tests=None, **kwargs):
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used. If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, exclude,
    single, randomize, findleaks, use_resources, trace, coverdir,
    print_slow, and random_seed) allow programmers calling main()
    directly to set the values that would normally be set by flags
    on the command line.
    """
    # Display the Python traceback on fatal errors (e.g. segfault)
    faulthandler.enable(all_threads=True)
    # Display the Python traceback on SIGALRM or SIGUSR1 signal
    signals = []
    if hasattr(signal, 'SIGALRM'):
        signals.append(signal.SIGALRM)
    if hasattr(signal, 'SIGUSR1'):
        signals.append(signal.SIGUSR1)
    for signum in signals:
        faulthandler.register(signum, chain=True)
    replace_stdout()
    support.record_original_stdout(sys.stdout)

    ns = _parse_args(sys.argv[1:], **kwargs)

    # --- apply option side effects before any test runs ---
    if ns.huntrleaks:
        # Avoid false positives due to various caches
        # filling slowly with random data:
        warm_caches()
    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)
    if ns.threshold is not None:
        import gc
        gc.set_threshold(ns.threshold)
    if ns.nowindows:
        print('The --nowindows (-n) option is deprecated. '
              'Use -vv to display assertions in stderr.')
        try:
            import msvcrt
        except ImportError:
            pass
        else:
            msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
                                msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
                                msvcrt.SEM_NOGPFAULTERRORBOX|
                                msvcrt.SEM_NOOPENFILEERRORBOX)
            try:
                msvcrt.CrtSetReportMode
            except AttributeError:
                # release build
                pass
            else:
                for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                    if ns.verbose and ns.verbose >= 2:
                        msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                        msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                    else:
                        msvcrt.CrtSetReportMode(m, 0)
    if ns.wait:
        input("Press any key to continue...")

    # Worker-process mode: run exactly one test (args passed by the parent
    # via --slaveargs), emit the JSON-encoded result on stdout, and exit.
    if ns.slaveargs is not None:
        args, kwargs = json.loads(ns.slaveargs)
        if kwargs.get('huntrleaks'):
            unittest.BaseTestSuite._cleanup = False
        try:
            result = runtest(*args, **kwargs)
        except KeyboardInterrupt:
            result = INTERRUPTED, ''
        except BaseException as e:
            traceback.print_exc()
            result = CHILD_ERROR, str(e)
        sys.stdout.flush()
        print()  # Force a newline (just in case)
        print(json.dumps(result))
        sys.exit(0)

    # Accumulators for the whole run.
    good = []
    bad = []
    skipped = []
    resource_denieds = []
    environment_changed = []
    interrupted = False

    if ns.findleaks:
        try:
            import gc
        except ImportError:
            print('No GC available, disabling findleaks.')
            ns.findleaks = False
        else:
            # Uncomment the line below to report garbage that is not
            # freeable by reference counting alone. By default only
            # garbage that is not collectable by the GC is reported.
            #gc.set_debug(gc.DEBUG_SAVEALL)
            found_garbage = []

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

    if ns.single:
        # Resume from where the previous --single run left off.
        filename = os.path.join(TEMPDIR, 'pynexttest')
        try:
            with open(filename, 'r') as fp:
                next_test = fp.read().strip()
                tests = [next_test]
        except OSError:
            pass

    if ns.fromfile:
        tests = []
        with open(os.path.join(support.SAVEDCWD, ns.fromfile)) as fp:
            count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
            for line in fp:
                line = count_pat.sub('', line)
                guts = line.split() # assuming no test has whitespace in its name
                if guts and not guts[0].startswith('#'):
                    tests.extend(guts)

    # Strip .py extensions.
    removepy(ns.args)
    removepy(tests)

    stdtests = STDTESTS[:]
    nottests = NOTTESTS.copy()
    if ns.exclude:
        for arg in ns.args:
            if arg in stdtests:
                stdtests.remove(arg)
            nottests.add(arg)
        ns.args = []

    # For a partial run, we do not need to clutter the output.
    if (ns.verbose or ns.header or
            not (ns.pgo or ns.quiet or ns.single or tests or ns.args)):
        # Print basic platform information
        print("==", platform.python_implementation(), *sys.version.split())
        print("== ", platform.platform(aliased=True),
              "%s-endian" % sys.byteorder)
        print("== ", "hash algorithm:", sys.hash_info.algorithm,
              "64bit" if sys.maxsize > 2**32 else "32bit")
        print("== ", os.getcwd())
        print("Testing with flags:", sys.flags)

    # if testdir is set, then we are not running the python tests suite, so
    # don't add default tests to be executed or skipped (pass empty values)
    if ns.testdir:
        alltests = findtests(ns.testdir, list(), set())
    else:
        alltests = findtests(ns.testdir, stdtests, nottests)

    selected = tests or ns.args or alltests
    if ns.single:
        selected = selected[:1]
        try:
            next_single_test = alltests[alltests.index(selected[0])+1]
        except IndexError:
            next_single_test = None
    # Remove all the selected tests that precede start if it's set.
    if ns.start:
        try:
            del selected[:selected.index(ns.start)]
        except ValueError:
            print("Couldn't find starting test (%s), using all tests" % ns.start)
    if ns.randomize:
        if ns.random_seed is None:
            ns.random_seed = random.randrange(10000000)
        random.seed(ns.random_seed)
        print("Using random seed", ns.random_seed)
        random.shuffle(selected)
    if ns.trace:
        import trace, tempfile
        tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
                                         tempfile.gettempdir()],
                             trace=False, count=True)

    test_times = []
    support.verbose = ns.verbose  # Tell tests to be moderately quiet
    support.use_resources = ns.use_resources
    # BUGFIX: snapshot the module names *now*.  The previous code kept the
    # live dict view returned by sys.modules.keys(); because that view
    # reflects later imports, "module not in save_modules" was never true
    # for freshly imported test modules and the per-test unload below was
    # a no-op.
    save_modules = set(sys.modules)

    def accumulate_result(test, result):
        # Classify one (status, elapsed-time) result into the run-wide lists.
        ok, test_time = result
        if ok not in (CHILD_ERROR, INTERRUPTED):
            test_times.append((test_time, test))
        if ok == PASSED:
            good.append(test)
        elif ok == FAILED:
            bad.append(test)
        elif ok == ENV_CHANGED:
            environment_changed.append(test)
        elif ok == SKIPPED:
            skipped.append(test)
        elif ok == RESOURCE_DENIED:
            skipped.append(test)
            resource_denieds.append(test)

    if ns.forever:
        def test_forever(tests=list(selected)):
            # Cycle through the selection until a test fails.
            while True:
                for test in tests:
                    yield test
                    if bad:
                        return
        tests = test_forever()
        test_count = ''
        test_count_width = 3
    else:
        tests = iter(selected)
        test_count = '/{}'.format(len(selected))
        test_count_width = len(test_count) - 1

    if ns.use_mp:
        try:
            from threading import Thread
        except ImportError:
            print("Multiprocess option requires thread support")
            sys.exit(2)
        from queue import Queue
        debug_output_pat = re.compile(r"\[\d+ refs, \d+ blocks\]$")
        output = Queue()
        pending = MultiprocessTests(tests)
        def work():
            # A worker thread.
            try:
                while True:
                    try:
                        test = next(pending)
                    except StopIteration:
                        output.put((None, None, None, None))
                        return
                    retcode, stdout, stderr = run_test_in_subprocess(test, ns)
                    # Strip last refcount output line if it exists, since it
                    # comes from the shutdown of the interpreter in the subcommand.
                    stderr = debug_output_pat.sub("", stderr)
                    stdout, _, result = stdout.strip().rpartition("\n")
                    if retcode != 0:
                        result = (CHILD_ERROR, "Exit code %s" % retcode)
                        output.put((test, stdout.rstrip(), stderr.rstrip(), result))
                        return
                    if not result:
                        output.put((None, None, None, None))
                        return
                    result = json.loads(result)
                    output.put((test, stdout.rstrip(), stderr.rstrip(), result))
            except BaseException:
                output.put((None, None, None, None))
                raise
        workers = [Thread(target=work) for i in range(ns.use_mp)]
        for worker in workers:
            worker.start()
        finished = 0
        test_index = 1
        try:
            while finished < ns.use_mp:
                test, stdout, stderr, result = output.get()
                if test is None:
                    finished += 1
                    continue
                accumulate_result(test, result)
                if not ns.quiet:
                    if bad and not ns.pgo:
                        fmt = "[{1:{0}}{2}/{3}] {4}"
                    else:
                        fmt = "[{1:{0}}{2}] {4}"
                    print(fmt.format(
                        test_count_width, test_index, test_count,
                        len(bad), test))
                if stdout:
                    print(stdout)
                if stderr and not ns.pgo:
                    print(stderr, file=sys.stderr)
                sys.stdout.flush()
                sys.stderr.flush()
                if result[0] == INTERRUPTED:
                    raise KeyboardInterrupt
                if result[0] == CHILD_ERROR:
                    raise Exception("Child error on {}: {}".format(test, result[1]))
                test_index += 1
        except KeyboardInterrupt:
            interrupted = True
            pending.interrupted = True
        for worker in workers:
            worker.join()
    else:
        for test_index, test in enumerate(tests, 1):
            if not ns.quiet:
                if bad and not ns.pgo:
                    fmt = "[{1:{0}}{2}/{3}] {4}"
                else:
                    fmt = "[{1:{0}}{2}] {4}"
                print(fmt.format(
                    test_count_width, test_index, test_count, len(bad), test))
                sys.stdout.flush()
            if ns.trace:
                # If we're tracing code coverage, then we don't exit with status
                # if on a false return value from main.
                tracer.runctx('runtest(test, ns.verbose, ns.quiet, timeout=ns.timeout)',
                              globals=globals(), locals=vars())
            else:
                try:
                    result = runtest(test, ns.verbose, ns.quiet,
                                     ns.huntrleaks,
                                     output_on_failure=ns.verbose3,
                                     timeout=ns.timeout, failfast=ns.failfast,
                                     match_tests=ns.match_tests, pgo=ns.pgo)
                    accumulate_result(test, result)
                except KeyboardInterrupt:
                    interrupted = True
                    break
            if ns.findleaks:
                gc.collect()
                if gc.garbage:
                    print("Warning: test created", len(gc.garbage), end=' ')
                    print("uncollectable object(s).")
                    # move the uncollectable objects somewhere so we don't see
                    # them again
                    found_garbage.extend(gc.garbage)
                    del gc.garbage[:]
            # Unload the newly imported modules (best effort finalization)
            # BUGFIX: iterate over a frozen list of names -- support.unload()
            # removes entries from sys.modules, and deleting from a dict
            # while iterating its live keys() view raises RuntimeError.
            for module in list(sys.modules):
                if module not in save_modules and module.startswith("test."):
                    support.unload(module)

    # --- summary reporting ---
    if interrupted and not ns.pgo:
        # print a newline after ^C
        print()
        print("Test suite interrupted by signal SIGINT.")
        omitted = set(selected) - set(good) - set(bad) - set(skipped)
        print(count(len(omitted), "test"), "omitted:")
        printlist(omitted)
    if good and not ns.quiet and not ns.pgo:
        if not bad and not skipped and not interrupted and len(good) > 1:
            print("All", end=' ')
        print(count(len(good), "test"), "OK.")
    if ns.print_slow:
        test_times.sort(reverse=True)
        print("10 slowest tests:")
        for time, test in test_times[:10]:
            print("%s: %.1fs" % (test, time))
    if bad and not ns.pgo:
        print(count(len(bad), "test"), "failed:")
        printlist(bad)
    if environment_changed and not ns.pgo:
        print("{} altered the execution environment:".format(
            count(len(environment_changed), "test")))
        printlist(environment_changed)
    if skipped and not ns.quiet and not ns.pgo:
        print(count(len(skipped), "test"), "skipped:")
        printlist(skipped)

    if ns.verbose2 and bad:
        print("Re-running failed tests in verbose mode")
        for test in bad[:]:
            if not ns.pgo:
                print("Re-running test %r in verbose mode" % test)
            sys.stdout.flush()
            try:
                ns.verbose = True
                ok = runtest(test, True, ns.quiet, ns.huntrleaks,
                             timeout=ns.timeout, pgo=ns.pgo)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print()
                break
            else:
                if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
                    bad.remove(test)
        else:
            if bad:
                print(count(len(bad), 'test'), "failed again:")
                printlist(bad)

    if ns.single:
        # Record where the next --single invocation should resume.
        if next_single_test:
            with open(filename, 'w') as fp:
                fp.write(next_single_test + '\n')
        else:
            os.unlink(filename)

    if ns.trace:
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=ns.coverdir)

    if ns.runleaks:
        os.system("leaks %d" % os.getpid())

    # Exit status 1 on any failure or interruption, 0 otherwise.
    sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
# findtests() prepends these, in this order, to the discovered tests.
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
    'test_support'
]

# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    testdir = findtestdir(testdir)
    # Names to exclude from discovery: the standard bootstrap tests (they
    # are prepended separately) and the explicit skip set.
    excluded = set(stdtests) | nottests
    discovered = []
    for entry in os.listdir(testdir):
        modname, ext = os.path.splitext(entry)
        # Accept test_* modules (".py" files or extension-less entries,
        # e.g. package directories) that are not excluded.
        if (modname.startswith("test_") and ext in (".py", "")
                and modname not in excluded):
            discovered.append(modname)
    return stdtests + sorted(discovered)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
    """A thread-safe iterator over tests for multiprocess mode."""

    def __init__(self, tests):
        # Set to True when the main thread catches KeyboardInterrupt;
        # worker threads then stop pulling new tests.
        self.interrupted = False
        self.lock = threading.Lock()
        self.tests = tests

    def __iter__(self):
        return self

    def __next__(self):
        # Serialize access so concurrent workers each receive a distinct
        # test and observe the interrupted flag atomically.
        self.lock.acquire()
        try:
            if self.interrupted:
                raise StopIteration('tests interrupted')
            return next(self.tests)
        finally:
            self.lock.release()
def replace_stdout():
    """Set stdout encoder error handler to backslashreplace (as stderr error
    handler) to avoid UnicodeEncodeError when printing a traceback"""
    import atexit

    original = sys.stdout
    # Re-wrap the same file descriptor with a forgiving error handler;
    # closefd=False keeps the underlying fd owned by the original object.
    sys.stdout = open(original.fileno(), 'w',
                      encoding=original.encoding,
                      errors="backslashreplace",
                      closefd=False,
                      newline='\n')

    def restore_stdout():
        # Close our wrapper (not the fd) and put the original object back.
        sys.stdout.close()
        sys.stdout = original
    atexit.register(restore_stdout)
def runtest(test, verbose, quiet,
            huntrleaks=False, use_resources=None,
            output_on_failure=False, failfast=False, match_tests=None,
            timeout=None, *, pgo=False):
    """Run a single test.

    test -- the name of the test
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    huntrleaks -- run multiple times to test for leaks; requires a debug
                  build; a triple corresponding to -R's three arguments
    use_resources -- list of extra resources to use
    output_on_failure -- if true, display test output on failure
    timeout -- dump the traceback and exit if a test takes more than
               timeout seconds
    failfast, match_tests -- See regrtest command-line flags for these.
    pgo -- if true, do not print unnecessary info when running the test
           for Profile Guided Optimization build

    Returns the tuple result, test_time, where result is one of the constants:
        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """
    if use_resources is not None:
        support.use_resources = use_resources
    use_timeout = (timeout is not None)
    if use_timeout:
        # Watchdog: dump all thread tracebacks and kill the process if the
        # test overruns its time budget; cancelled in the finally clause.
        faulthandler.dump_traceback_later(timeout, exit=True)
    try:
        support.match_tests = match_tests
        if failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True
            # Reuse the same instance to all calls to runtest(). Some
            # tests keep a reference to sys.stdout or sys.stderr
            # (eg. test_argparse).
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                # Reset the shared buffer before capturing this test's output.
                stream.seek(0)
                stream.truncate()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                # Capture everything the test prints; replay it on the real
                # stderr only if the test did not pass.
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(test, verbose, quiet, huntrleaks,
                                       display_failure=False, pgo=pgo)
                if result[0] != PASSED and not pgo:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = verbose  # Tell tests to be moderately quiet
            result = runtest_inner(test, verbose, quiet, huntrleaks,
                                   display_failure=not verbose, pgo=pgo)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, verbose)
# Shared capture buffer, lazily created on first output_on_failure run.
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.

class saved_test_environment:
    """Save bits of the test environment and restore them at block exit.

        with saved_test_environment(testname, verbose, quiet):
            #stuff

    Unless quiet is True, a warning is printed to stderr if any of
    the saved items was changed by the test. The attribute 'changed'
    is initially False, but is set to True if a change is detected.

    If verbose is more than 1, the before and after state of changed
    items is also printed.
    """

    # Class-level default; flipped to True on the instance in __exit__
    # when any resource differs from its saved value.
    changed = False

    def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
        self.testname = testname
        self.verbose = verbose
        self.quiet = quiet
        self.pgo = pgo

    # To add things to save and restore, add a name XXX to the resources list
    # and add corresponding get_XXX/restore_XXX functions. get_XXX should
    # return the value to be saved and compared against a second call to the
    # get function when test execution completes. restore_XXX should accept
    # the saved value and restore the resource using it. It will be called if
    # and only if a change in the value is detected.
    #
    # Note: XXX will have any '.' replaced with '_' characters when determining
    # the corresponding method names.

    resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
                 'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
                 'asyncore.socket_map',
                 'logging._handlers', 'logging._handlerList', 'sys.gettrace',
                 'sys.warnoptions',
                 # multiprocessing.process._cleanup() may release ref
                 # to a thread, so check processes first.
                 'multiprocessing.process._dangling', 'threading._dangling',
                 'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
                 'files', 'locale', 'warnings.showwarning',
                 'shutil_archive_formats', 'shutil_unpack_formats',
                )

    # Mutable containers are saved as (id, object, copy) triples so both
    # rebinding (new object) and in-place mutation are detected; restore
    # rebinds the original object and overwrites its contents.
    def get_sys_argv(self):
        return id(sys.argv), sys.argv, sys.argv[:]
    def restore_sys_argv(self, saved_argv):
        sys.argv = saved_argv[1]
        sys.argv[:] = saved_argv[2]

    def get_cwd(self):
        return os.getcwd()
    def restore_cwd(self, saved_cwd):
        os.chdir(saved_cwd)

    def get_sys_stdout(self):
        return sys.stdout
    def restore_sys_stdout(self, saved_stdout):
        sys.stdout = saved_stdout

    def get_sys_stderr(self):
        return sys.stderr
    def restore_sys_stderr(self, saved_stderr):
        sys.stderr = saved_stderr

    def get_sys_stdin(self):
        return sys.stdin
    def restore_sys_stdin(self, saved_stdin):
        sys.stdin = saved_stdin

    def get_os_environ(self):
        return id(os.environ), os.environ, dict(os.environ)
    def restore_os_environ(self, saved_environ):
        os.environ = saved_environ[1]
        os.environ.clear()
        os.environ.update(saved_environ[2])

    def get_sys_path(self):
        return id(sys.path), sys.path, sys.path[:]
    def restore_sys_path(self, saved_path):
        sys.path = saved_path[1]
        sys.path[:] = saved_path[2]

    def get_sys_path_hooks(self):
        return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
    def restore_sys_path_hooks(self, saved_hooks):
        sys.path_hooks = saved_hooks[1]
        sys.path_hooks[:] = saved_hooks[2]

    def get_sys_gettrace(self):
        return sys.gettrace()
    def restore_sys_gettrace(self, trace_fxn):
        sys.settrace(trace_fxn)

    def get___import__(self):
        return builtins.__import__
    def restore___import__(self, import_):
        builtins.__import__ = import_

    def get_asyncore_socket_map(self):
        asyncore = sys.modules.get('asyncore')
        # XXX Making a copy keeps objects alive until __exit__ gets called.
        return asyncore and asyncore.socket_map.copy() or {}
    def restore_asyncore_socket_map(self, saved_map):
        asyncore = sys.modules.get('asyncore')
        if asyncore is not None:
            asyncore.close_all(ignore_all=True)
            asyncore.socket_map.update(saved_map)

    def get_shutil_archive_formats(self):
        # we could call get_archives_formats() but that only returns the
        # registry keys; we want to check the values too (the functions that
        # are registered)
        return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
    def restore_shutil_archive_formats(self, saved):
        shutil._ARCHIVE_FORMATS = saved[0]
        shutil._ARCHIVE_FORMATS.clear()
        shutil._ARCHIVE_FORMATS.update(saved[1])

    def get_shutil_unpack_formats(self):
        return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
    def restore_shutil_unpack_formats(self, saved):
        shutil._UNPACK_FORMATS = saved[0]
        shutil._UNPACK_FORMATS.clear()
        shutil._UNPACK_FORMATS.update(saved[1])

    def get_logging__handlers(self):
        # _handlers is a WeakValueDictionary
        return id(logging._handlers), logging._handlers, logging._handlers.copy()
    def restore_logging__handlers(self, saved_handlers):
        # Can't easily revert the logging state
        pass

    def get_logging__handlerList(self):
        # _handlerList is a list of weakrefs to handlers
        return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
    def restore_logging__handlerList(self, saved_handlerList):
        # Can't easily revert the logging state
        pass

    def get_sys_warnoptions(self):
        return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
    def restore_sys_warnoptions(self, saved_options):
        sys.warnoptions = saved_options[1]
        sys.warnoptions[:] = saved_options[2]

    # Controlling dangling references to Thread objects can make it easier
    # to track reference leaks.
    def get_threading__dangling(self):
        if not threading:
            return None
        # This copies the weakrefs without making any strong reference
        return threading._dangling.copy()
    def restore_threading__dangling(self, saved):
        if not threading:
            return
        threading._dangling.clear()
        threading._dangling.update(saved)

    # Same for Process objects
    def get_multiprocessing_process__dangling(self):
        if not multiprocessing:
            return None
        # Unjoined process objects can survive after process exits
        multiprocessing.process._cleanup()
        # This copies the weakrefs without making any strong reference
        return multiprocessing.process._dangling.copy()
    def restore_multiprocessing_process__dangling(self, saved):
        if not multiprocessing:
            return
        multiprocessing.process._dangling.clear()
        multiprocessing.process._dangling.update(saved)

    def get_sysconfig__CONFIG_VARS(self):
        # make sure the dict is initialized
        sysconfig.get_config_var('prefix')
        return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
                dict(sysconfig._CONFIG_VARS))
    def restore_sysconfig__CONFIG_VARS(self, saved):
        sysconfig._CONFIG_VARS = saved[1]
        sysconfig._CONFIG_VARS.clear()
        sysconfig._CONFIG_VARS.update(saved[2])

    def get_sysconfig__INSTALL_SCHEMES(self):
        return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
                sysconfig._INSTALL_SCHEMES.copy())
    def restore_sysconfig__INSTALL_SCHEMES(self, saved):
        sysconfig._INSTALL_SCHEMES = saved[1]
        sysconfig._INSTALL_SCHEMES.clear()
        sysconfig._INSTALL_SCHEMES.update(saved[2])

    def get_files(self):
        # Snapshot of the current directory listing; directories get a
        # trailing '/' so a file->dir swap counts as a change.
        return sorted(fn + ('/' if os.path.isdir(fn) else '')
                      for fn in os.listdir())
    def restore_files(self, saved_value):
        fn = support.TESTFN
        if fn not in saved_value and (fn + '/') not in saved_value:
            if os.path.isfile(fn):
                support.unlink(fn)
            elif os.path.isdir(fn):
                support.rmtree(fn)

    _lc = [getattr(locale, lc) for lc in dir(locale)
           if lc.startswith('LC_')]
    def get_locale(self):
        pairings = []
        for lc in self._lc:
            try:
                pairings.append((lc, locale.setlocale(lc, None)))
            except (TypeError, ValueError):
                continue
        return pairings
    def restore_locale(self, saved):
        for lc, setting in saved:
            locale.setlocale(lc, setting)

    def get_warnings_showwarning(self):
        return warnings.showwarning
    def restore_warnings_showwarning(self, fxn):
        warnings.showwarning = fxn

    def resource_info(self):
        # Yield (name, getter, restorer) for every entry in `resources`,
        # mapping 'a.b' -> get_a_b / restore_a_b.
        for name in self.resources:
            method_suffix = name.replace('.', '_')
            get_name = 'get_' + method_suffix
            restore_name = 'restore_' + method_suffix
            yield name, getattr(self, get_name), getattr(self, restore_name)

    def __enter__(self):
        self.saved_values = dict((name, get()) for name, get, restore
                                 in self.resource_info())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        saved_values = self.saved_values
        del self.saved_values
        support.gc_collect()  # Some resources use weak references
        for name, get, restore in self.resource_info():
            current = get()
            original = saved_values.pop(name)
            # Check for changes to the resource's value
            if current != original:
                self.changed = True
                restore(original)
                if not self.quiet and not self.pgo:
                    print("Warning -- {} was modified by {}".format(
                        name, self.testname),
                        file=sys.stderr)
                    if self.verbose > 1 and not self.pgo:
                        print(" Before: {}\n After: {} ".format(
                            original, current),
                            file=sys.stderr)
        # Never suppress exceptions raised inside the with-block.
        return False
def runtest_inner(test, verbose, quiet,
                  huntrleaks=False, display_failure=True, pgo=False):
    """Import and run one test module inside a saved_test_environment,
    translating the outcome (or exception) into a (status, test_time) pair
    for runtest()."""
    support.unload(test)

    test_time = 0.0
    refleak = False  # True if the test leaked references.
    try:
        if test.startswith('test.'):
            abstest = test
        else:
            # Always import it from the test package
            abstest = 'test.' + test
        clear_caches()
        with saved_test_environment(test, verbose, quiet, pgo=pgo) as environment:
            start_time = time.time()
            the_module = importlib.import_module(abstest)
            # If the test has a test_main, that will run the appropriate
            # tests. If not, use normal unittest test loading.
            test_runner = getattr(the_module, "test_main", None)
            if test_runner is None:
                def test_runner():
                    loader = unittest.TestLoader()
                    tests = loader.loadTestsFromModule(the_module)
                    for error in loader.errors:
                        print(error, file=sys.stderr)
                    if loader.errors:
                        raise Exception("errors while loading tests")
                    support.run_unittest(tests)
            test_runner()
            if huntrleaks:
                # Re-run the test repeatedly looking for reference leaks
                # (requires a debug build; see dash_R).
                refleak = dash_R(the_module, test, test_runner, huntrleaks)
            test_time = time.time() - start_time
    # Ordering matters: ResourceDenied is a subclass of unittest.SkipTest,
    # so it must be caught first to report RESOURCE_DENIED.
    except support.ResourceDenied as msg:
        if not quiet and not pgo:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return RESOURCE_DENIED, test_time
    except unittest.SkipTest as msg:
        if not quiet and not pgo:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return SKIPPED, test_time
    except KeyboardInterrupt:
        # Propagate ^C so the caller can record an interrupted run.
        raise
    except support.TestFailed as msg:
        if not pgo:
            if display_failure:
                print("test", test, "failed --", msg, file=sys.stderr)
            else:
                print("test", test, "failed", file=sys.stderr)
        sys.stderr.flush()
        return FAILED, test_time
    except:
        # Any other exception counts as a crash; show the full traceback.
        msg = traceback.format_exc()
        if not pgo:
            print("test", test, "crashed --", msg, file=sys.stderr)
        sys.stderr.flush()
        return FAILED, test_time
    else:
        if refleak:
            return FAILED, test_time
        if environment.changed:
            return ENV_CHANGED, test_time
        return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
    """Remove files and directories a test commonly leaves behind,
    naming the offending test when something must be cleaned up."""
    import shutil
    import stat
    import gc

    # Kill any dangling references to open files etc. first. This can also
    # issue some ResourceWarnings which would otherwise get triggered during
    # the following test run, and possibly produce failures.
    gc.collect()

    # While tests shouldn't leave any files or directories behind, when a
    # test fails that can be tedious for it to arrange. The consequences
    # can be especially nasty on Windows, since if a test leaves a file
    # open, it cannot be deleted by name (while there's nothing we can do
    # about that here either, we can display the name of the offending
    # test, which is a real help).
    for leftover in (support.TESTFN, "db_home"):
        if not os.path.exists(leftover):
            continue

        if os.path.isdir(leftover):
            kind = "directory"
            remover = shutil.rmtree
        elif os.path.isfile(leftover):
            kind = "file"
            remover = os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % leftover)

        if verbose:
            print("%r left behind %s %r" % (testname, kind, leftover))
        try:
            # if we have chmod, fix possible permissions problems
            # that might prevent cleanup
            if hasattr(os, 'chmod'):
                os.chmod(leftover, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            remover(leftover)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                   "removed: %s" % (testname, kind, leftover, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
    """Run a test multiple times, looking for reference leaks.

    Returns:
        False if the test didn't leak references; True if we detected refleaks.
    """
    # This code is hackish and inelegant, but it seems to do the job.
    import copyreg
    import collections.abc

    # sys.gettotalrefcount only exists in debug (--with-pydebug) builds.
    if not hasattr(sys, 'gettotalrefcount'):
        raise Exception("Tracking reference leaks requires a debug build "
                        "of Python")

    # Save current values for dash_R_cleanup() to restore.
    fs = warnings.filters[:]
    ps = copyreg.dispatch_table.copy()
    pic = sys.path_importer_cache.copy()
    try:
        import zipimport
    except ImportError:
        zdc = None  # Run unmodified on platforms without zipimport support
    else:
        zdc = zipimport._zip_directory_cache.copy()
    # Snapshot every ABC's registry so registrations made by the test can
    # be rolled back between repetitions.
    abcs = {}
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            abcs[obj] = obj._abc_registry.copy()

    # huntrleaks is -R's triple: warmup runs, tracked runs, report file.
    nwarmup, ntracked, fname = huntrleaks
    fname = os.path.join(support.SAVEDCWD, fname)
    repcount = nwarmup + ntracked
    rc_deltas = [0] * repcount
    alloc_deltas = [0] * repcount

    print("beginning", repcount, "repetitions", file=sys.stderr)
    print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
    sys.stderr.flush()
    for i in range(repcount):
        indirect_test()
        alloc_after, rc_after = dash_R_cleanup(fs, ps, pic, zdc, abcs)
        sys.stderr.write('.')
        sys.stderr.flush()
        # Deltas for warmup runs are ignored; caches filling during warmup
        # would otherwise look like leaks.
        if i >= nwarmup:
            rc_deltas[i] = rc_after - rc_before
            alloc_deltas[i] = alloc_after - alloc_before
        alloc_before, rc_before = alloc_after, rc_after
    print(file=sys.stderr)
    # These checkers return False on success, True on failure
    def check_rc_deltas(deltas):
        return any(deltas)
    def check_alloc_deltas(deltas):
        # At least 1/3rd of 0s
        if 3 * deltas.count(0) < len(deltas):
            return True
        # Nothing else than 1s, 0s and -1s
        if not set(deltas) <= {1,0,-1}:
            return True
        return False
    failed = False
    for deltas, item_name, checker in [
        (rc_deltas, 'references', check_rc_deltas),
        (alloc_deltas, 'memory blocks', check_alloc_deltas)]:
        if checker(deltas):
            msg = '%s leaked %s %s, sum=%s' % (
                test, deltas[nwarmup:], item_name, sum(deltas))
            print(msg, file=sys.stderr)
            sys.stderr.flush()
            with open(fname, "a") as refrep:
                print(msg, file=refrep)
                refrep.flush()
            failed = True
    return failed
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    """Restore interpreter state saved by dash_R() and flush caches, then
    return (allocated blocks, total refcount) for leak accounting."""
    import gc, copyreg
    import collections.abc
    from weakref import WeakSet

    # Restore some original values.
    warnings.filters[:] = fs
    copyreg.dispatch_table.clear()
    copyreg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass  # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            obj._abc_registry = abcs.get(obj, WeakSet()).copy()
            obj._abc_cache.clear()
            obj._abc_negative_cache.clear()

    clear_caches()

    # Collect cyclic trash and read memory statistics immediately after.
    # Bind the functions to locals first so the lookups themselves don't
    # perturb the numbers read right after gc.collect().
    func1 = sys.getallocatedblocks
    func2 = sys.gettotalrefcount
    gc.collect()
    return func1(), func2()
def clear_caches():
    """Reset assorted interpreter and stdlib caches so state does not
    leak from one test run to the next."""
    import gc

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Flush standard output, so that buffered data is sent to the OS and
    # associated Python objects are reclaimed.
    for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
        if stream is not None:
            stream.flush()

    # Clear assorted module caches.
    # Don't worry about resetting the cache if the module is not loaded:
    # _reset applies the given action only when the module is present in
    # sys.modules.
    def _reset(module_name, action):
        module = sys.modules.get(module_name)
        if module is not None:
            action(module)

    _reset('distutils.dir_util', lambda mod: mod._path_created.clear())
    re.purge()
    _reset('_strptime', lambda mod: mod._regex_cache.clear())
    _reset('urllib.parse', lambda mod: mod.clear_cache())
    _reset('urllib.request', lambda mod: mod.urlcleanup())
    _reset('linecache', lambda mod: mod.clearcache())
    _reset('mimetypes', lambda mod: mod._default_mime_types())
    _reset('filecmp', lambda mod: mod._cache.clear())
    _reset('struct', lambda mod: mod._clearcache())
    _reset('doctest', lambda mod: setattr(mod, 'master', None))
    _reset('ctypes', lambda mod: mod._reset_cache())

    def _run_typing_cleanups(mod):
        for cleanup in mod._cleanups:
            cleanup()
    _reset('typing', _run_typing_cleanups)

    gc.collect()
def warm_caches():
    """Pre-populate interpreter-internal object caches so later cache
    fills are not mistaken for reference leaks."""
    # char cache: slicing out every single byte touches each cached
    # one-byte bytes object.
    all_bytes = bytes(range(256))
    for index in range(256):
        all_bytes[index:index + 1]
    # unicode cache: create every latin-1 single-character string.
    [chr(code) for code in range(256)]
    # int cache: instantiate the whole small-int range.
    list(range(-5, 257))
def findtestdir(path=None):
    """Return *path* if given; otherwise the directory containing this
    module, falling back to the current directory."""
    if path:
        return path
    return os.path.dirname(__file__) or os.curdir
def removepy(names):
    """Strip a trailing '.py' extension from each entry of *names* in place.

    A None/empty argument is a no-op.
    """
    if not names:
        return
    for position in range(len(names)):
        root, extension = os.path.splitext(names[position])
        if extension == '.py':
            names[position] = root
def count(n, word):
    """Return '<n> <word>', pluralizing *word* with 's' unless n == 1."""
    suffix = '' if n == 1 else 's'
    return '%d %s%s' % (n, word, suffix)
def printlist(x, width=70, indent=4):
    """Print the elements of iterable x to stdout.

    Optional arg width (default 70) is the maximum line length.
    Optional arg indent (default 4) is the number of blanks with which to
    begin each line.
    """
    from textwrap import fill
    prefix = ' ' * indent
    # 'x' may be a '--random' list or an unordered set: sort for stable output.
    joined = ' '.join(str(element) for element in sorted(x))
    print(fill(joined, width,
               initial_indent=prefix, subsequent_indent=prefix))
def main_in_temp_cwd():
    """Run main() in a temporary working directory."""
    # TEMPDIR, sysconfig, support and main() are module-level names defined
    # elsewhere in this file.
    if sysconfig.is_python_build():
        try:
            os.mkdir(TEMPDIR)
        except FileExistsError:
            # Another parallel run (or a previous one) already created it.
            pass

    # Define a writable temp dir that will be used as cwd while running
    # the tests. The name of the dir includes the pid to allow parallel
    # testing (see the -j option).
    test_cwd = 'test_python_{}'.format(os.getpid())
    test_cwd = os.path.join(TEMPDIR, test_cwd)

    # Run the tests in a context manager that temporarily changes the CWD to a
    # temporary and writable directory. If it's not possible to create or
    # change the CWD, the original CWD will be used. The original CWD is
    # available from support.SAVEDCWD.
    with support.temp_cwd(test_cwd, quiet=True):
        main()
if __name__ == '__main__':
    # Remove regrtest.py's own directory from the module search path. Despite
    # the elimination of implicit relative imports, this is still needed to
    # ensure that submodules of the test package do not inappropriately appear
    # as top-level modules even when people (or buildbots!) invoke regrtest.py
    # directly instead of using the -m switch
    mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
    # Scan sys.path from the end so that deleting an entry never skips the
    # next candidate (deleting while walking forward would).
    i = len(sys.path)
    while i >= 0:
        i -= 1
        if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
            del sys.path[i]

    # findtestdir() gets the dirname out of __file__, so we have to make it
    # absolute before changing the working directory.
    # For example __file__ may be relative when running trace or profile.
    # See issue #9323.
    __file__ = os.path.abspath(__file__)

    # sanity check
    assert __file__ == os.path.abspath(sys.argv[0])

    main_in_temp_cwd()
|
composer.py | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import json
import logging
import time
import threading
import traceback
from datetime import datetime
from typing import List, Optional
from sqlalchemy import func
from sqlalchemy.engine import Engine
from fedlearner_webconsole.db import get_session
from fedlearner_webconsole.composer.runner import global_runner_fn
from fedlearner_webconsole.composer.runner_cache import RunnerCache
from fedlearner_webconsole.composer.interface import IItem
from fedlearner_webconsole.composer.models import Context, decode_context, \
ContextEncoder, SchedulerItem, ItemStatus, SchedulerRunner, RunnerStatus
from fedlearner_webconsole.composer.op_locker import OpLocker
from fedlearner_webconsole.composer.thread_reaper import ThreadReaper
class ComposerConfig(object):
    def __init__(
        self,
        runner_fn: dict,
        name='default_name',
        worker_num=10,
    ):
        """Configuration bundle consumed by the composer.

        Args:
            runner_fn: mapping of runner functions
            name: composer name
            worker_num: number of workers doing the heavy jobs
        """
        self.name = name
        self.worker_num = worker_num
        self.runner_fn = runner_fn
class Pipeline(object):
    def __init__(self, name: str, deps: List[str], meta: dict):
        """Ordered execution plan for one scheduler item.

        Fields:
            name: pipeline name
            deps: items to be processed in order
            meta: additional info shared across the items
        """
        self.meta = meta
        self.deps = deps
        self.name = name
class PipelineEncoder(json.JSONEncoder):
    """JSON encoder that serializes plain objects via their attribute dict."""

    def default(self, obj):
        # Fall back to __dict__ for types json doesn't handle natively.
        return obj.__dict__
class Composer(object):
    """Database-driven background scheduler.

    A daemon thread polls SchedulerItem rows, creates SchedulerRunner rows
    for items that are due, and drives each runner's pipeline of jobs to
    completion via a ThreadReaper worker pool. Cross-process races are
    guarded with OpLocker optimistic locks.
    """
    # attributes that you can patch
    MUTABLE_ITEM_KEY = ['interval_time', 'retry_cnt']

    def __init__(self, config: ComposerConfig):
        """Composer

        Args:
            config: config
        """
        self.config = config
        self.name = config.name
        self.runner_fn = config.runner_fn
        # Set later by run(); no DB access is possible until then.
        self.db_engine = None
        self.thread_reaper = ThreadReaper(worker_num=config.worker_num)
        self.runner_cache = RunnerCache(runner_fn=config.runner_fn)
        # Guards the _stop flag shared with the polling thread.
        self.lock = threading.Lock()
        self._stop = False

    def run(self, db_engine: Engine):
        # Start the polling loop in a daemon thread so it dies with the app.
        self.db_engine = db_engine
        logging.info(f'[composer] starting {self.name}...')
        loop = threading.Thread(target=self._loop, args=[], daemon=True)
        loop.start()

    def _loop(self):
        # Poll the scheduler tables every 5 seconds until stop() is called.
        while True:
            with self.lock:
                if self._stop:
                    logging.info('[composer] stopping...')
                    self.thread_reaper.stop(True)
                    return
            try:
                logging.debug(f'[composer] checking at {datetime.now()}')
                self._check_items()
                self._check_init_runners()
                self._check_running_runners()
            except Exception as e:  # pylint: disable=broad-except
                # Keep the loop alive: a failed round is retried next tick.
                logging.error(f'[composer] something wrong, exception: {e}, '
                              f'trace: {traceback.format_exc()}')
            time.sleep(5)

    def stop(self):
        logging.info(f'[composer] stopping {self.name}...')
        with self.lock:
            self._stop = True

    def collect(self,
                name: str,
                items: List[IItem],
                metadata: dict,
                interval: int = -1):
        """Collect scheduler item

        Args:
            name: item name, should be unique
            items: specify dependencies
            metadata: pass metadata to share with item dependencies each other
            interval: if value is -1, it's run-once job, or run
                every interval time in seconds
        """
        if len(name) == 0:
            return
        valid_interval = interval == -1 or interval >= 10
        if not valid_interval:  # seems non-sense if interval is less than 10
            raise ValueError('interval should not less than 10 if not -1')
        with get_session(self.db_engine) as session:
            # check name if exists
            existed = session.query(SchedulerItem).filter_by(name=name).first()
            if existed:
                # Idempotent: collecting the same name twice is a no-op.
                return
            item = SchedulerItem(
                name=name,
                pipeline=PipelineEncoder().encode(
                    self._build_pipeline(name, items, metadata)),
                interval_time=interval,
            )
            session.add(item)
            try:
                session.commit()
            except Exception as e:  # pylint: disable=broad-except
                logging.error(f'[composer] failed to create scheduler_item, '
                              f'name: {name}, exception: {e}')
                session.rollback()

    def finish(self, name: str):
        """Finish item

        Args:
            name: item name
        """
        with get_session(self.db_engine) as session:
            existed = session.query(SchedulerItem).filter_by(
                name=name, status=ItemStatus.ON.value).first()
            if not existed:
                return
            existed.status = ItemStatus.OFF.value
            try:
                session.commit()
            except Exception as e:  # pylint: disable=broad-except
                logging.error(f'[composer] failed to finish scheduler_item, '
                              f'name: {name}, exception: {e}')
                session.rollback()

    def get_item_status(self, name: str) -> Optional[ItemStatus]:
        """Get item status

        Args:
            name: item name

        Returns:
            The item's ItemStatus, or None when no such item exists.
        """
        with get_session(self.db_engine) as session:
            existed = session.query(SchedulerItem).filter(
                SchedulerItem.name == name).first()
            if not existed:
                return None
            return ItemStatus(existed.status)

    def patch_item_attr(self, name: str, key: str, value: str):
        """ patch item args

        Args:
            name (str): name of this item
            key (str): key you want to update
            value (str): value you want to set

        Returns:
            Raise if some check violates
        """
        if key not in self.__class__.MUTABLE_ITEM_KEY:
            raise ValueError(f'fail to change attribute {key}')
        with get_session(self.db_engine) as session:
            item: SchedulerItem = session.query(SchedulerItem).filter(
                SchedulerItem.name == name).first()
            if not item:
                raise ValueError(f'cannot find item {name}')
            setattr(item, key, value)
            session.add(item)
            try:
                session.commit()
            except Exception as e:  # pylint: disable=broad-except
                logging.error(f'[composer] failed to patch item attr, '
                              f'name: {name}, exception: {e}')
                session.rollback()

    def get_recent_runners(self,
                           name: str,
                           count: int = 10) -> List[SchedulerRunner]:
        """Get recent runners order by created_at in desc

        Args:
            name: item name
            count: the number of runners
        """
        with get_session(self.db_engine) as session:
            runners = session.query(SchedulerRunner).join(
                SchedulerItem,
                SchedulerItem.id == SchedulerRunner.item_id).filter(
                    SchedulerItem.name == name).order_by(
                        SchedulerRunner.created_at.desc()).limit(count)
            # NOTE(review): `runners` is a Query object here, which is always
            # truthy, so this empty-check likely never triggers — confirm.
            if not runners:
                return []
            return runners

    def _check_items(self):
        # Create a SchedulerRunner for every ON item that is due to run.
        with get_session(self.db_engine) as session:
            items = session.query(SchedulerItem).filter_by(
                status=ItemStatus.ON.value).all()
            for item in items:
                if not item.need_run():
                    continue
                # NOTE: use `func.now()` to let sqlalchemy handles
                # the timezone.
                item.last_run_at = func.now()
                if item.interval_time < 0:
                    # finish run-once item automatically
                    item.status = ItemStatus.OFF.value
                pp = Pipeline(**(json.loads(item.pipeline)))
                context = Context(data=pp.meta,
                                  internal={},
                                  db_engine=self.db_engine)
                runner = SchedulerRunner(
                    item_id=item.id,
                    pipeline=item.pipeline,
                    context=ContextEncoder().encode(context),
                )
                session.add(runner)
                try:
                    logging.info(
                        f'[composer] insert runner, item_id: {item.id}')
                    session.commit()
                except Exception as e:  # pylint: disable=broad-except
                    logging.error(
                        f'[composer] failed to create scheduler_runner, '
                        f'item_id: {item.id}, exception: {e}')
                    session.rollback()

    def _check_init_runners(self):
        # Move INIT runners to RUNNING and enqueue their first pipeline job.
        with get_session(self.db_engine) as session:
            init_runners = session.query(SchedulerRunner).filter_by(
                status=RunnerStatus.INIT.value).all()
            # TODO: support priority
            for runner in init_runners:
                # if thread_reaper is full, skip this round and
                # wait next checking
                if self.thread_reaper.is_full():
                    return
                lock_name = f'check_init_runner_{runner.id}_lock'
                check_lock = OpLocker(lock_name, self.db_engine).try_lock()
                if not check_lock:
                    logging.error(f'[composer] failed to lock, '
                                  f'ignore current init_runner_{runner.id}')
                    continue
                pipeline = Pipeline(**(json.loads(runner.pipeline)))
                context = decode_context(val=runner.context,
                                         db_engine=self.db_engine)
                # find the first job in pipeline
                first = pipeline.deps[0]
                # update status
                runner.start_at = func.now()
                runner.status = RunnerStatus.RUNNING.value
                output = json.loads(runner.output)
                output[first] = {'status': RunnerStatus.RUNNING.value}
                runner.output = json.dumps(output)
                # record current running job
                context.set_internal('current', first)
                runner.context = ContextEncoder().encode(context)
                # start runner
                runner_fn = self.runner_cache.find_runner(runner.id, first)
                self.thread_reaper.enqueue(name=lock_name,
                                           fn=runner_fn,
                                           context=context)
                try:
                    logging.info(
                        f'[composer] update runner, status: {runner.status}, '
                        f'pipeline: {runner.pipeline}, '
                        f'output: {output}, context: {runner.context}')
                    # Optimistic lock: only persist if no one else touched it.
                    if check_lock.is_latest_version() and \
                            check_lock.update_version():
                        session.commit()
                    else:
                        logging.error(f'[composer] {lock_name} is outdated, '
                                      f'ignore updates to database')
                except Exception as e:  # pylint: disable=broad-except
                    logging.error(f'[composer] failed to update init runner'
                                  f'status, exception: {e}')
                    session.rollback()

    def _check_running_runners(self):
        # Poll RUNNING runners; advance the pipeline, finish or fail them.
        with get_session(self.db_engine) as session:
            running_runners = session.query(SchedulerRunner).filter_by(
                status=RunnerStatus.RUNNING.value).all()
            for runner in running_runners:
                if self.thread_reaper.is_full():
                    return
                lock_name = f'check_running_runner_{runner.id}_lock'
                check_lock = OpLocker(lock_name, self.db_engine).try_lock()
                if not check_lock:
                    logging.error(f'[composer] failed to lock, '
                                  f'ignore current running_runner_{runner.id}')
                    continue
                # TODO: restart runner if exit unexpectedly
                pipeline = Pipeline(**(json.loads(runner.pipeline)))
                output = json.loads(runner.output)
                context = decode_context(val=runner.context,
                                         db_engine=self.db_engine)
                current = context.internal['current']
                runner_fn = self.runner_cache.find_runner(runner.id, current)
                # check status of current one
                status, current_output = runner_fn.result(context)
                if status == RunnerStatus.RUNNING:
                    continue  # ignore
                if status == RunnerStatus.DONE:
                    output[current] = {'status': RunnerStatus.DONE.value}
                    context.set_internal(f'output_{current}', current_output)
                    current_idx = pipeline.deps.index(current)
                    if current_idx == len(pipeline.deps) - 1:  # all done
                        runner.status = RunnerStatus.DONE.value
                        runner.end_at = func.now()
                    else:  # run next one
                        next_one = pipeline.deps[current_idx + 1]
                        output[next_one] = {
                            'status': RunnerStatus.RUNNING.value
                        }
                        context.set_internal('current', next_one)
                        next_runner_fn = self.runner_cache.find_runner(
                            runner.id, next_one)
                        self.thread_reaper.enqueue(name=lock_name,
                                                   fn=next_runner_fn,
                                                   context=context)
                elif status == RunnerStatus.FAILED:
                    # TODO: abort now, need retry
                    output[current] = {'status': RunnerStatus.FAILED.value}
                    context.set_internal(f'output_{current}', current_output)
                    runner.status = RunnerStatus.FAILED.value
                    runner.end_at = func.now()
                runner.pipeline = PipelineEncoder().encode(pipeline)
                runner.output = json.dumps(output)
                runner.context = ContextEncoder().encode(context)
                updated_db = False
                try:
                    logging.info(
                        f'[composer] update runner, status: {runner.status}, '
                        f'pipeline: {runner.pipeline}, '
                        f'output: {output}, context: {runner.context}')
                    if check_lock.is_latest_version():
                        if check_lock.update_version():
                            session.commit()
                            updated_db = True
                    else:
                        logging.error(f'[composer] {lock_name} is outdated, '
                                      f'ignore updates to database')
                except Exception as e:  # pylint: disable=broad-except
                    logging.error(f'[composer] failed to update running '
                                  f'runner status, exception: {e}')
                    session.rollback()
                # delete useless runner obj in runner cache
                if status in (RunnerStatus.DONE,
                              RunnerStatus.FAILED) and updated_db:
                    self.runner_cache.del_runner(runner.id, current)

    @staticmethod
    def _build_pipeline(name: str, items: List[IItem],
                        metadata: dict) -> Pipeline:
        # Dependency names are '<item-type>_<item-id>' strings, in order.
        deps = []
        for item in items:
            deps.append(f'{item.type().value}_{item.get_id()}')
        return Pipeline(name=name, deps=deps, meta=metadata)
# Module-level singleton used by the webconsole to schedule background jobs.
composer = Composer(config=ComposerConfig(
    runner_fn=global_runner_fn, name='scheduler for fedlearner webconsole'))
|
zhidao_v5.py | import requests
from bs4 import BeautifulSoup
import threading
import json
import time
import os
import re
def BetterTime(timestr):
    """Normalize assorted time strings into 'YYYY-MM-DD' form.

    Strings containing '今天' (today) or '前' (ago) are mapped to the current
    local date. Otherwise the first 'YYYY-M-D' pattern found in the string is
    returned, or the input unchanged when nothing matches.
    """
    if '今天' in timestr or '前' in timestr:
        localtime = time.localtime()
        year = str(localtime.tm_year)
        month = str(localtime.tm_mon)
        day = str(localtime.tm_mday)
        if len(month) == 1:
            month = '0' + month
        if len(day) == 1:
            # Bug fix: this previously padded with the month ('0' + month),
            # producing wrong dates for days 1-9.
            day = '0' + day
        return year + '-' + month + '-' + day
    # Raw string so '\d' is a regex digit class, not a Python escape.
    thetime = re.findall(r'\d{4}-\d{1,2}-\d{1,2}', timestr)
    if thetime:
        return thetime[0]
    return timestr
def AllZhidaoUrls(moviename):
    """Yield {'last_pn', 'url'} dicts for every zhidao.baidu.com question
    found when searching for *moviename*.

    NOTE(review): assumes the first results page contains a '#wgt-list'
    element and a '.pager' element — `arr[0]` / `.select('.pager')[0]` raise
    IndexError otherwise; confirm against the live markup.
    """
    url = 'http://zhidao.baidu.com/search'
    data = {
        'lm': '0',
        'rn': '10',
        'pn': '0',
        'fr': 'search',
        'ie': 'gbk',
        'word': moviename
    }
    # 1. Parse the first results page to learn the maximum page offset,
    # yielding that page's zhidao URLs along the way.
    res = requests.get(url, params=data)
    res.encoding = 'gbk'
    soup = BeautifulSoup(res.text, 'html.parser')
    # Yield the first page's zhidao links.
    arr = soup.select('#wgt-list')
    # print('url:%s' % (url))
    # print('data:', data)
    # print('arr:', arr)
    for eachtip in arr[0].select('dl'):
        titleurl = eachtip.select('.ti')[0].attrs.get('href')
        yield {'last_pn': 0, 'url': titleurl}
    # Extract the maximum page offset (pn) from the pager's last link.
    endurl = soup.select('.pager')[0].select('a')[-1].attrs.get('href')
    maxpn = int(endurl.split('=')[-1])
    print(maxpn)
    # maxpn = 10
    # Fetch every remaining results page, 10 entries at a time.
    for pn in range(10, maxpn + 1, 10):
        data['pn'] = pn
        res = requests.get(url, params=data)
        res.encoding = 'gbk'
        soup = BeautifulSoup(res.text, 'html.parser')
        # Yield each page's zhidao links; later pages may be empty.
        arr = soup.select('#wgt-list')
        if len(arr) > 0:
            for eachtip in arr[0].select('dl'):
                titleurl = eachtip.select('.ti')[0].attrs.get('href')
                yield {'last_pn': maxpn - pn, 'url': titleurl}
def ParserSingleZhidao(zhidaourl):
    """Parse one zhidao.baidu.com question page, yielding one dict per answer
    with keys: title, agree, disagree, content, zhidaourl and time.

    Follows the pager's "next" link recursively so that every answer page of
    the question is covered.
    """
    res = requests.get(zhidaourl)
    res.encoding = 'gbk'
    soup = BeautifulSoup(res.text, 'html.parser')
    try:
        title = soup.select('.ask-title ')[0].text
    except IndexError:
        # No question title found: not a parsable answer page, yield nothing.
        return
    # 1. Parse the accepted ("best") answer, if any.
    # (Removed a leftover debug `print(bestans)` that dumped raw HTML to
    # stdout on every page.)
    bestans = soup.select('.wgt-best')
    if bestans:
        bestans = bestans[0]
        # Up-vote / down-vote counts.
        nums = bestans.select('.evaluate')
        agree = nums[0].attrs.get('data-evaluate')
        disagree = nums[1].attrs.get('data-evaluate')
        # Answer body text.
        mainContent = bestans.select('[accuse="aContent"]')[0]
        content = mainContent.text.strip()
        # Posting time, normalized to YYYY-MM-DD when possible.
        timestr = bestans.select('.pos-time')[0].text.strip()
        thetime = BetterTime(timestr)
        yield {
            'title': title,
            'agree': agree,
            'disagree': disagree,
            'content': content,
            'zhidaourl': zhidaourl,
            'time': thetime
        }
    # 2. Parse the ordinary (non-best) answers.
    nomal_anses = soup.select('.wgt-answers')
    for eachans in nomal_anses:
        # Up-vote / down-vote counts.
        nums = eachans.select('.evaluate')
        agree = nums[0].attrs.get('data-evaluate')
        disagree = nums[1].attrs.get('data-evaluate')
        # Answer body text.
        mainContent = eachans.select('[accuse="aContent"]')[0]
        content = mainContent.text.strip()
        # Posting time, normalized.
        timestr = eachans.select('.pos-time')[0].text.strip()
        thetime = BetterTime(timestr)
        yield {
            'title': title,
            'agree': agree,
            'disagree': disagree,
            'content': content,
            'zhidaourl': zhidaourl,
            'time': thetime
        }
    # If there is a "next page" link, recurse into it.
    pager_next = soup.select('.pager-next')
    if pager_next:
        nexturl = 'http://zhidao.baidu.com' + pager_next[0].attrs.get('href')
        for ans in ParserSingleZhidao(nexturl):
            yield ans
# Shared accumulator: every crawler thread appends its parsed answers here.
bigarr = list()
def aThread(last_pn, zhidaourl):
    """Worker thread body: parse one zhidao URL, log progress and collect
    every answer into the module-level `bigarr`."""
    for answer in ParserSingleZhidao(zhidaourl):
        print(last_pn, time.ctime(), answer['time'])
        bigarr.append(answer)
def Main(moviename):
    """Crawl every zhidao answer about *moviename*, de-duplicate, sort and
    persist the result as JSON; return the output file name."""
    # 0. Build the output name from today's date plus the movie name.
    localtime = time.localtime()
    year = str(localtime.tm_year)
    month = str(localtime.tm_mon)
    day = str(localtime.tm_mday)
    dirname = year + '_' + month + '_' + day + '_' + moviename
    # If this movie was already crawled today, skip re-crawling.
    if os.path.exists(dirname):
        return dirname
    threads = list()
    # 1. Fetch zhidao URLs and parse each in its own thread; results
    # accumulate in the module-level `bigarr`.
    for ans1 in AllZhidaoUrls(moviename):
        each_thread = threading.Thread(
            target=aThread, args=[ans1.get('last_pn'),
                                  ans1.get('url')])
        each_thread.start()
        threads.append(each_thread)
    for each_thread in threads:
        each_thread.join()
    # 2. De-duplicate bigarr into `arr`.
    arr = list()
    # Already-seen answers keyed by (time, agree, disagree, title, content).
    # The URL is deliberately excluded: the best answer repeats on every
    # page of a multi-page question.
    oldarr = list()
    for i in bigarr:
        temparr = [
            i.get('time'),
            i.get('agree'),
            i.get('disagree'),
            i.get('title'),
            i.get('content')
        ]
        if temparr not in oldarr:
            arr.append(i)
            oldarr.append(temparr)
    # Sort newest-first by time.
    # NOTE(review): an earlier comment said "sort by agree-disagree", but
    # the code sorts by 'time' — confirm which ordering is intended.
    newarr = sorted(arr, key=lambda a: a.get('time'), reverse=True)
    # 3. Persist the result as JSON.
    file = open(dirname, 'w')
    file.write(json.dumps(newarr))
    file.close()
    return dirname
if __name__ == '__main__':
Main('明星') |
test_repeatable_read_update_blocks.py | ##############
# Setup Django
import django
django.setup()
#############
# Test proper
import threading
import time
import pytest
from django.db import connection, transaction
from django.db.models import F, Subquery
from app.models import Sock
@pytest.mark.django_db
def test_repeatable_read_update_blocks():
    """Show that an UPDATE in one REPEATABLE READ transaction blocks a
    concurrent UPDATE of the same rows until the first transaction ends
    (here: rolls back), after which the blocked UPDATE still succeeds."""
    def create():
        # Seed the table from a separate thread (its own DB connection).
        Sock.objects.all().delete()
        Sock.objects.create(id_a=1, id_b=1, colour='black')
    create_thread = threading.Thread(target=create)
    create_thread.start()
    create_thread.join()

    # Synchronizes the two transactions so both have started (and the first
    # holds its row locks) before the second attempts its UPDATE.
    barrier = threading.Barrier(2)

    def update_with_sleep():
        with transaction.atomic():
            cursor = connection.cursor()
            cursor.execute('SET TRANSACTION ISOLATION LEVEL REPEATABLE READ')
            Sock.objects.all().update(colour='black')
            barrier.wait()
            # Hold the row locks long enough to be measurable (> 10s).
            time.sleep(11)
            # So we rollback and don't get a serializable error in the other
            # transaction. Our aim is to just show updates can block, but
            # then still succeed
            raise Exception()

    time_to_update_read_committed = None

    def update_read_committed():
        # NOTE(review): despite the name, this transaction also sets
        # REPEATABLE READ, not READ COMMITTED — confirm the intent.
        nonlocal time_to_update_read_committed
        with transaction.atomic():
            cursor = connection.cursor()
            cursor.execute('SET TRANSACTION ISOLATION LEVEL REPEATABLE READ')
            barrier.wait()
            start = time.time()
            Sock.objects.all().update(colour='black')
            end = time.time()
            time_to_update_read_committed = end - start

    update_with_sleep_thread = threading.Thread(target=update_with_sleep)
    update_with_sleep_thread.start()
    update_read_committed_thread = threading.Thread(target=update_read_committed)
    update_read_committed_thread.start()
    update_with_sleep_thread.join()
    update_read_committed_thread.join()

    # The second UPDATE must have waited out the ~11s sleeping transaction.
    assert time_to_update_read_committed >= 10.0
|
dataset_reader.py | import re
import gc
import time
from timeit import default_timer as timer
import random
import logging
from iterable_queue import IterableQueue
from multiprocessing import Process
from concurrent.futures import ProcessPoolExecutor, as_completed
from collections import Counter
from .counter_sampler import CounterSampler
from .token_map import UNK
from .unigram_dictionary import UnigramDictionary
from .embedding_utils import SequenceParser
import numpy as np
import os
import sys
import gzip
from . import embedding_utils
class TokenChooser(object):
    '''
    This chooses which context token should be taken given a window
    of +/- K tokens around a query token.
    '''

    def __init__(self, K, kernel):
        # `kernel` supplies one relative sampling weight per window slot:
        # K entries for the left context followed by K for the right.
        if not len(kernel) == 2*K:
            raise ValueError(
                '`kernel` must have 2*K entries, one for '
                'each of the elements within the windows of +/- K tokens.'
            )
        self.K = K
        self.kernel = kernel
        # Cache of CounterSamplers keyed by the (start, stop) kernel
        # truncation computed in choose_token().
        self.samplers = {}
        # Relative offsets matching kernel positions: -K..-1, then 1..K.
        self.indices = list(range(-K, 0)) + list(range(1, K+1))

    def choose_token(self, idx, length):
        '''
        Randomly choose a token according to the kernel supplied
        in the constructor. Note that when sampling the context near
        the beginning of a sentence, the left part of the context window
        will be truncated. Similarly, sampling context near the end of
        a sentence leads to truncation of the right part of the context
        window. Short sentences lead to truncation on both sides.

        To ensure that samples are returned within the possibly truncated
        window, two values define the actual extent of the context to be
        sampled:

        `idx`: index of the query word within the context. E.g. if the
            valid context is constrained to a sentence, and the query word
            is the 3rd token in the sentence, idx should be 2 (because
            of 0-based indexing)

        `length`: length of the the context, E.g. If context is
            constrained to a sentence, and sentence is 7 tokens long,
            length should be 7.
        '''
        # If the token is near the edges of the context, then the
        # sampling kernel will be truncated (we can't sample before the
        # first word in the sentence, or after the last word).
        # Determine the slice indices that define the truncated kernel.
        negative_idx = length - idx
        start = max(0, self.K - idx)
        stop = min(2*self.K, self.K + negative_idx - 1)

        # We make a separate multinomial sampler for each different
        # truncation of the kernel, because they each define a different
        # set of sampling probabilities. If we don't have a sampler for
        # this particular kernel shape, make one.
        if not (start, stop) in self.samplers:
            trunc_probabilities = self.kernel[start:stop]
            self.samplers[start,stop] = (
                CounterSampler(trunc_probabilities)
            )

        # Sample from the multinomial sampler for the context of this shape
        outcome_idx = self.samplers[start,stop].sample()

        # Map this into the +/- indexing relative to the query word
        relative_idx = self.indices[outcome_idx + start]

        # And then map this into absolute indexing
        result_idx = relative_idx + idx

        return result_idx
# Largest value accepted by numpy's legacy seeding API (2**32 - 1).
MAX_NUMPY_SEED = 4294967295


def reseed():
    '''
    Makes a hop in the random chain.

    If called before spawning a child processes, it will ensure each child
    generates random numbers independently. Unlike seeding child randomness
    from an os source of randomness, this is reproducible by starting the
    parent with the same random seed.
    '''
    np.random.seed(np.random.randint(MAX_NUMPY_SEED))
class DataSetReaderIllegalStateException(Exception):
    """Raised when DatasetReader methods are called out of order, e.g.
    generate_dataset() before prepare() on a DatasetReader that was not
    initialized with a UnigramDictionary."""
class DatasetReader(object):
def __init__(
self,
files=[],
directories=[],
skip=[],
noise_ratio=15,
t=1e-5,
num_processes=3,
unigram_dictionary=None,
min_frequency=0,
kernel=[1,2,3,4,5,5,4,3,2,1],
load_dictionary_dir=None,
max_queue_size=1000,
macrobatch_size=16000,
parser=SequenceParser(),
verbose=True,
k=None,
stride=None,
seqmap=True
):
# Register parameters to instance namespace
self.files = files
self.directories = directories
self.skip = [re.compile(s) for s in skip]
self.t = t
self.noise_ratio = noise_ratio
self.num_processes = num_processes
self.kernel = kernel
self.max_queue_size = max_queue_size
self.macrobatch_size = macrobatch_size
self._parse = parser.parse
self.verbose = verbose
self.min_frequency = min_frequency
self.k = k
self.stride = stride
self.seqmap = seqmap
# If unigram dictionary not supplied, make one
self.prepared = False
self.unigram_dictionary = UnigramDictionary(seqmap=self.seqmap)
# But load dictionary from file if load_dictionary_dir specified.
if load_dictionary_dir is not None:
if verbose:
print('Loading dictionary from %s...' % load_dictionary_dir)
self.load_dictionary(load_dictionary_dir)
# Or, if an existing dictionary was passed in, use it
if unigram_dictionary is not None:
if verbose:
print('A dictionary was supplied')
self.unigram_dictionary = unigram_dictionary
self.prune()
self.prepared = True
def is_prepared(self):
'''
Checks to see whether the dataset reader is ready to generate data.
Given the simplicity, and that the logic of deciding when
self.prepared is True is found elsewhere, this method may seem
unnecessary. However, it provides a hook for more complex checking
in subclasses.
'''
if self.prepared:
return True
return False
def parse(self, filename, **kwargs):
'''
Delegate to the parse function given to the constructor.
'''
if kwargs.get('K') is None or kwargs.get('stride') is None:
kwargs['K'] = self.k
kwargs['stride'] = self.stride
return self._parse(filename, **kwargs)
def check_access(self, save_dir):
'''
Test out writing in save_dir. The processes that generate the data
to be saved can be really long-running, so we want to find out if
there is a simple IOError early!
'''
save_dir = os.path.abspath(save_dir)
path, dirname = os.path.split(save_dir)
# Make sure that the directory we want exists (make it if not)
if not os.path.isdir(path):
raise IOError('%s is not a directory or does not exist' % path)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
elif os.path.isfile(save_dir):
raise IOError('%s is a file. %' % save_dir)
# Make sure we can write to the file
f = open(os.path.join(
save_dir, '.__test-minibatch-generator-access'
), 'w')
f.write('test')
f.close
os.remove(os.path.join(
save_dir, '.__test-minibatch-generator-access' ))
def generate_filenames(self):
'''
Generator that yields all filenames (absolute paths) making up the
dataset. (Files are specified to the Minibatcher constructor
files and / or directories. All listed files and all files directly
contained in listed directories will be processed, unless they
match regex patterns in the optional `skip` list.
(no INPUTS)
YIELDS
* [str]: absolute path to a dataset file
'''
# Process the files listed in `files`, unles matches entry in skip
if self.files is not None:
# Randomize the ordering of the files!
random.shuffle(self.files)
for filename in self.files:
filename = os.path.abspath(filename)
# Skip files if they match a regex in skip
if any([s.search(filename) for s in self.skip]):
continue
if self.verbose:
print('\tprocessing', filename)
yield filename
# Process all the files listed in each directory, unless they
# match an entry in skip
if self.directories is not None:
for dirname in self.directories:
dirname = os.path.abspath(dirname)
# Skip directories if they match a regex in skip
if any([s.search(dirname) for s in self.skip]):
continue
# Randomize the ordering of the files!
myfiles = os.listdir(dirname)
random.shuffle(myfiles)
for filename in myfiles:
filename = os.path.join(dirname, filename)
# Only process the *files* under the given directories
if not os.path.isfile(filename):
continue
# Skip files if they match a regex in skip
if any([s.search(filename) for s in self.skip]):
continue
if self.verbose:
print('\tprocessing', filename)
yield filename
def numpyify(self, examples):
'''
Make an int32-type numpy array, ensuring that, even if the list of
examples is empty, the array is two-dimensional, with the second
dimension (i.e. number of columns) being 3.
'''
if len(examples) > 0:
examples = np.array(examples, dtype='int32')
else:
examples = np.empty(shape=(0,3), dtype='int32')
return examples
    def produce_macrobatches(self, filename_iterator):
        '''
        Assembles bunches of examples from the parsed data coming from
        files that were read. Normally, this function might yield
        individual examples, however, in this case, we need to maintain
        a distinction between the noise- and signal-examples, and to
        keep them in consistent proportions. So, here, we yield small
        bunches that consist of 1 signal example, and X noise examples,
        where X depends on `self.noise_ratio`.
        '''
        mcbatch_size = self.macrobatch_size
        noise_ratio = self.noise_ratio
        signal_examples = []
        noise_examples = []

        t0 = timer()
        # self.generate_examples() is defined elsewhere in this class (not
        # shown here); it yields (signal_chunk, noise_chunk) pairs.
        examples = self.generate_examples(filename_iterator)
        t1 = timer()
        print("Time to generate this set of examples took ", (t1 - t0) * 1000, " microseconds")
        for signal_chunk, noise_chunk in examples:
            signal_examples.extend(signal_chunk)
            noise_examples.extend(noise_chunk)

            # Whenever we have enough examples, yield a macrobatch
            while len(signal_examples) > mcbatch_size:
                if self.verbose:
                    print('numpyifying')
                signal_macrobatch = self.numpyify(
                    signal_examples[:mcbatch_size])
                # Noise batches are `noise_ratio` times larger, keeping the
                # signal-to-noise proportion fixed.
                noise_macrobatch = self.numpyify(
                    noise_examples[:mcbatch_size * noise_ratio])

                if self.verbose:
                    print('no-padding:', len(signal_macrobatch))
                yield signal_macrobatch, noise_macrobatch

                # Keep whatever did not fit for the next macrobatch.
                signal_examples = signal_examples[mcbatch_size:]
                noise_examples = noise_examples[mcbatch_size*noise_ratio:]

        # After all files were processed, pad any remaining examples
        # to make up a final macrobatch
        if len(signal_examples) > 0:
            signal_remaining = mcbatch_size - len(signal_examples)
            noise_remaining = (
                mcbatch_size * noise_ratio - len(noise_examples))

            if self.verbose:
                print('padding and numpyifying')
            padding_row = self.get_padding_row()
            signal_macrobatch = self.numpyify(
                signal_examples + [padding_row] * signal_remaining)
            noise_macrobatch = self.numpyify(
                noise_examples + [padding_row] * noise_remaining)

            if self.verbose:
                print('padded to length:', len(signal_macrobatch))
            yield signal_macrobatch, noise_macrobatch
def get_padding_row(self):
return [UNK,UNK]
def generate_dataset_serial(self):
'''
Generate the dataset from files handed to the constructor.
A single process is used to read the files.
'''
# This cannot be called before calling prepare(), unless a prepared
# UnigramDictionary was passed to the self's constructor
if not self.is_prepared():
raise DataSetReaderIllegalStateException(
"DatasetReader: generate_examples() cannot be called "
"before prepare() is called unless a prepared "
"UnigramDictionary has was passed into the DatasetReader's "
"constructor."
)
# Generate the data for each file
file_iterator = self.generate_filenames()
macrobatches = self.produce_macrobatches(file_iterator)
for signal_examples, noise_examples in macrobatches:
yield signal_examples, noise_examples
def generate_dataset_worker(self, file_iterator, macrobatch_queue):
macrobatches = self.produce_macrobatches(file_iterator)
for signal_examples, noise_examples in macrobatches:
if self.verbose:
print('sending macrobatch to parent process')
macrobatch_queue.put((signal_examples, noise_examples))
#time.sleep(10.0) ### trying to fix BrokenPipe error from not being able to put before the process dies and macrobatch_queue is wrapped up ###
macrobatch_queue.close()
def generate_dataset_parallel(self, save_dir=None):
    '''
    Parallel version of generate_dataset_serial. Each worker is
    responsible for saving its own part of the dataset to disk, called
    a macrobatch. the files are saved at
    'save_dir/examples/<batch-num>.npz'.
    '''
    # This cannot be called before calling prepare(), unless a prepared
    # UnigramDictionary was passed to the self's constructor
    if not self.is_prepared():
        raise DataSetReaderIllegalStateException(
            "DatasetReader: generate_examples() cannot be called "
            "before prepare() is called unless a prepared "
            "UnigramDictionary has was passed into the "
            "DatasetReader's constructor."
        )
    # We save dataset in the "examples" subdir of the model_dir
    if save_dir is not None:
        examples_dir = os.path.join(save_dir, 'examples')
        # We are willing to create both the save_dir, and the
        # 'examples' subdir, but not their parents
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        if not os.path.exists(examples_dir):
            os.mkdir(examples_dir)
    else:
        examples_dir = None
    # NOTE(review): examples_dir is created but never passed to the
    # workers below — confirm whether saving happens elsewhere.
    file_queue = IterableQueue()
    macrobatch_queue = IterableQueue(self.max_queue_size)
    # Put all the filenames on a producer queue
    file_producer = file_queue.get_producer()
    for filename in self.generate_filenames():
        file_producer.put(filename)
    file_producer.close()
    # Start a bunch of worker processes
    for process_num in range(self.num_processes):
        # Hop to a new location in the random-number-generator's state
        # chain
        reseed()
        # Start child process that generates a portion of the dataset
        args = (
            file_queue.get_consumer(),
            macrobatch_queue.get_producer()
        )
        Process(target=self.generate_dataset_worker, args=args).start()
    # This will receive the macrobatches from all workers
    macrobatch_consumer = macrobatch_queue.get_consumer()
    # Close the iterable queues (endpoints were handed out above)
    file_queue.close()
    macrobatch_queue.close()
    # Yield macrobatches to the caller as the workers produce them
    for signal_macrobatch, noise_macrobatch in macrobatch_consumer:
        if self.verbose:
            print('receiving macrobatch from child process')
        yield signal_macrobatch, noise_macrobatch
    # Explicitly close up macrobatch_consumer, which hopefully fixes the EOFError
    macrobatch_consumer.close()
def get_vocab_size(self):
    '''
    Return the number of entries in the vocabulary (including `UNK`).

    Only meaningful after Minibatcher.prepare() has been called, or when
    an existing (pre-filled) UnigramDictionary was loaded — otherwise
    the dictionary is empty and this returns 0.
    '''
    # The UnigramDictionary's length is exactly the vocabulary size.
    return len(self.unigram_dictionary)
def load_dictionary(self, load_dir):
    '''
    Loads the unigram_dictionary from files stored in the supplied
    directory.

    INPUTS
    * load_dir [str]: Path to a directory in which unigram_dictionary
        files are stored. Unigram dictionary will look for default
        filenames within the 'dictionary' subdirectory of that path.

    OUTPUTS
    * [None]
    '''
    # Delegate to the underlying UnigramDictionary
    self.unigram_dictionary.load(os.path.join(
        load_dir, 'dictionary'
    ))
    # Drop tokens below the frequency threshold, same as after prepare()
    self.prune()
    # It is now possible to call the data generators
    # `generate_dataset_serial()` and `generate_dataset_parallel()`
    self.prepared = True
def save_dictionary(self, save_dir):
    '''
    Save the unigram_dictionary in the subfolder 'dictionary' beneath
    save_dir, in two files called 'counter-sampler.gz' and
    'token-map.gz'. `save_dir` will be created if it doesn't exist.
    '''
    # Create the target directory if needed (its parents must exist).
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # The UnigramDictionary knows how to serialize itself.
    dictionary_path = os.path.join(save_dir, 'dictionary')
    self.unigram_dictionary.save(dictionary_path)
def generate_token_worker(self, file_iterator, **kwargs):
    '''
    Tally every token produced by parsing `file_iterator`, returning
    the counts as a collections.Counter.
    '''
    tally = Counter()
    for token_list in self.parse(file_iterator, **kwargs):
        tally.update(token_list)
    return tally
def preparation_parallel(self, **kwargs):
    '''
    Read through the corpus, building the UnigramDictionary in parallel,
    in the same manner as generate_dataset_parallel.

    kwargs are forwarded to self.parse() via generate_token_worker.
    '''
    # get all the files
    all_files = [filename for filename in self.generate_filenames()]
    # submit one token-counting job per file to the worker processes
    with ProcessPoolExecutor(max_workers=self.num_processes) as executor:
        futures = [executor.submit(self.generate_token_worker, filename, **kwargs) for filename in all_files]
        # merge each worker's Counter into the dictionary as it finishes
        for future in as_completed(futures):
            countr = future.result()
            self.unigram_dictionary.update_counts(countr.items())
    # The data generators may now be called
    self.prepared = True
def preparation(self, **kwargs):
    """Serially read the whole corpus, feeding every parsed token list
    into the UnigramDictionary, then mark the reader as prepared.

    kwargs are forwarded to self.parse().
    """
    for corpus_file in self.generate_filenames():
        for token_list in self.parse(corpus_file, **kwargs):
            self.unigram_dictionary.update(token_list)
    self.prepared = True
def prepare(self, *args, **kwargs):
    '''
    Used to perform any preparation steps that are needed before
    minibatching can be done. E.g. assembling a dictionary that
    maps tokens to integers, and determining the total vocabulary size
    of the corpus. It is assumed that files will need
    to be saved as part of this process, and that they should be
    saved under `save_dir`, with `self.save()` managing the details
    of writing files under `save_dir`.

    INPUTS

    * Note About Inputs *
        the call signature of this method is variable and is
        determined by the call signature of the core
        `self.preparation()` method. Refer to that method's call
        signature. Minimally, this method accepts `save_dir`

    * save_dir [str]: path to directory in which preparation files
        should be saved.
    * read_async [bool]: when True, build the dictionary with
        `preparation_parallel()` instead of serially.

    RETURNS
    * [None]
    '''
    save_dir = kwargs.get('save_dir', None)
    read_async = kwargs.get('read_async', False)

    # Before doing anything, if we were requested to save the
    # dictionary, make sure we'll be able to do that (fail fast)
    if save_dir is not None:
        self.check_access(save_dir)

    if not read_async:
        t0 = timer()
        self.preparation(**kwargs)
        t1 = timer()
        # BUGFIX: elapsed time was multiplied by 1000 (milliseconds)
        # while being labelled "seconds"; report actual seconds.
        print("Serial unigram preparation took: ", t1 - t0, " seconds")
    else:
        t0 = timer()
        self.preparation_parallel(**kwargs)
        t1 = timer()
        print("Parallel unigram preparation took: ", t1 - t0, " seconds")

    # Save the dictionary, if requested to do so.
    if save_dir is not None:
        self.save_dictionary(save_dir)

    # Prune the dictionary
    self.prune()
def prune(self):
    '''
    Exposes the prune function for the underlying UnigramDictionary:
    drops tokens seen fewer than `self.min_frequency` times.
    '''
    if self.verbose:
        print(
            # BUGFIX: fixed typo "occuring" -> "occurring" in message
            'pruning dictionary to eliminate tokens occurring less than '
            '%d times.' % self.min_frequency
        )
    self.unigram_dictionary.prune(self.min_frequency, count=True)
def generate_examples(self, filename_iterator):
    '''
    Using the data of a parsed file, generates examples. Two kinds of
    examples are generated --- signal and noise. They are yielded in a
    tuple, along with a flag indicating whether the particular example
    is a signal, i.e.: (is_signal, example)

    NOTE(review): the yielded value is actually a
    (signal_examples, noise_examples) pair of id lists — the
    "(is_signal, example)" description above appears stale; confirm.
    '''
    num_examples = 0
    # Chooser samples a context position near the query position,
    # weighted by self.kernel (K positions on each side).
    chooser = TokenChooser(K=len(self.kernel) // 2, kernel=self.kernel)
    # include parsing kwargs that were part of the declaration of this reader
    for filename in filename_iterator:
        # Parse the file, then generate a bunch of examples from it
        parsed = self.parse(filename)
        for tokens in parsed:
            # Isolated tokens (e.g. one-word sentences) have no context
            # and can't be used for training.
            if len(tokens) < 2:
                continue
            for query_token_pos, query_token in enumerate(tokens):
                # Possibly discard the token (frequent-word subsampling)
                if self.do_discard(query_token):
                    continue
                # Sample a token from the context
                context_token_pos = chooser.choose_token(
                    query_token_pos, len(tokens)
                )
                context_token_id = self.unigram_dictionary.get_id(tokens[context_token_pos])
                # One signal example: [query_id, context_id]
                signal_examples = [[self.unigram_dictionary.get_id(query_token), context_token_id]]
                num_examples += 1
                # noise_ratio noise examples per signal example
                noise_examples = self.generate_noise_examples(
                    signal_examples)
                num_examples += len(noise_examples)
                yield (signal_examples, noise_examples)
def generate_noise_examples(self, signal_examples):
    """For each [query_id, context_id] signal pair, emit
    `self.noise_ratio` noise pairs that keep the query token but replace
    the context token with a sample from the unigram distribution."""
    return [
        [query_token_id, self.unigram_dictionary.sample()]
        for query_token_id, _context_token_id in signal_examples
        for _ in range(self.noise_ratio)
    ]
def make_null_example(self):
    """Return a placeholder example made of two `UNK` token ids."""
    return [UNK] * 2
def do_discard(self, token):
    '''
    This function helps with downsampling of very common words.
    Returns true when the token should be discarded as a query word.
    '''
    # Frequent tokens (high unigram probability) get a high discard
    # probability, following the word2vec subsampling formula
    # 1 - sqrt(t / p(token)).
    token_probability = self.unigram_dictionary.get_probability(token)
    keep_score = np.sqrt(self.t / token_probability)
    return np.random.uniform() < 1 - keep_score
class AtacDatasetReader(DatasetReader):
    '''
    Sub-class of DatasetReader for processing ATAC-seq fasta files.
    Meaningful differences are:
    - two chromosomes are withheld for validation, testing respectively
    - noise examples can be drawn from Flanks
    '''

    def __init__(self, files=None, directories=None, skip=None, noise_ratio=15,
            t=1e-5, num_processes=3,
            unigram_dictionary=None, min_frequency=0,
            kernel=None,
            load_dictionary_dir=None,
            max_queue_size=1000, macrobatch_size=16000,
            parser=SequenceParser(), verbose=True,
            k=None, stride=None, seqmap=True, flank_files=None):
        # Replace mutable default arguments and copy `files` so the
        # caller's list is not reordered by the shuffle below.
        files = list(files) if files else []
        directories = [] if directories is None else directories
        skip = [] if skip is None else skip
        kernel = [1, 2, 3, 4, 5, 5, 4, 3, 2, 1] if kernel is None else kernel
        flank_files = [] if flank_files is None else flank_files
        # Reserve two randomly-chosen chromosome files for testing and
        # validating the embeddings.
        from random import shuffle
        shuffle(files)
        self.testing_chromosome_peaks = files[0]
        self.validation_chromosome_peaks = files[1]
        if flank_files:
            # Chromosome name is the filename part before the first '_'.
            # BUGFIX: these previously read self.testing_chromosome /
            # self.validation_chromosome, attributes that were never set
            # (AttributeError at runtime).
            testing_chrom = self.testing_chromosome_peaks.split("_")[0]
            validation_chrom = self.validation_chromosome_peaks.split("_")[0]
            self.testing_chromosome_flanks = [
                f for f in flank_files if f.startswith(testing_chrom)][0]
            self.validation_chromosome_flanks = [
                f for f in flank_files if f.startswith(validation_chrom)][0]
            # BUGFIX: keep only flank files belonging to NEITHER held-out
            # chromosome; the original `or` condition was a tautology and
            # filtered nothing.
            self.flank_files = [
                f for f in flank_files
                if not f.startswith(testing_chrom)
                and not f.startswith(validation_chrom)]
        # Call the superclass constructor with the remaining files (the
        # first two were withheld above).
        # BUGFIX: `noise_ration` was a typo (NameError) for noise_ratio.
        DatasetReader.__init__(
            self, files[2:], directories, skip, noise_ratio,
            t, num_processes, unigram_dictionary, min_frequency,
            kernel,
            load_dictionary_dir, max_queue_size, macrobatch_size,
            parser, verbose, k, stride, seqmap)
        # The superclass must have initialized the dictionary.
        assert self.unigram_dictionary is not None

    def generate_examples_from_peaks(self, filename_iterator):
        '''
        Using the data of a parsed file, generate examples of both
        signal and noise examples. The noise examples are generated
        from replacing kmers from within peaks with kmers drawn from
        the dictionary.

        Instead of generating one record at a time, generate a signal DF
        of 100 examples and a noise DF of
        self.noise_ratio * signal DF.shape[0] examples.
        '''
        num_examples = 0
        # Chooser samples a context position near the query position,
        # weighted by self.kernel.
        chooser = TokenChooser(K=len(self.kernel) // 2, kernel=self.kernel)
        for filename in filename_iterator:
            parsed = self.parse(filename)
            for tokens in parsed:
                for query_token_pos, query_token in enumerate(tokens):
                    # Frequent-kmer subsampling
                    if self.do_discard(query_token):
                        continue
                    context_token_pos = chooser.choose_token(query_token_pos,
                        len(tokens))
                    context_token_id = self.unigram_dictionary.get_id(tokens[context_token_pos])
                    signal_examples = [[self.unigram_dictionary.get_id(query_token), context_token_id]]
                    num_examples += 1
                    noise_examples = self.generate_noise_examples(
                        signal_examples)
                    num_examples += len(noise_examples)
                    yield (signal_examples, noise_examples)

    def generate_examples_with_flanks(self, peak_filename_iterator, flank_filename_iterator):
        '''
        Using the data from both peaks and flanks, without drawing from
        the unigram dictionary.
        '''
        # TODO: fix, currently broken.
        num_examples = 0
        chooser = TokenChooser(K=len(self.kernel) // 2, kernel=self.kernel)
        for peak_filename, flank_filename in zip(peak_filename_iterator, flank_filename_iterator):
            parsed_peaks = self.parse(peak_filename)
            parsed_flanks = self.parse(flank_filename)
            # Need to associate parsed flanks with the appropriate parsed
            # peaks; duplication of flanks to peaks is not a problem.
            # How to quickly split the parsed peaks, flanks? Might need a
            # custom parser which yields flank sequences given a peak
            # position.
            for tokens in parsed_peaks:
                for query_token_pos, query_token in enumerate(tokens):
                    if self.do_discard(query_token):
                        continue
                    context_token_pos = chooser.choose_token(query_token_pos,
                        len(tokens))
                    context_token_id = self.unigram_dictionary.get_id(tokens[context_token_pos])
                    signal_examples = [[self.unigram_dictionary.get_id(query_token), context_token_id]]
                    num_examples += 1
                    noise_examples = self.generate_noise_examples(
                        signal_examples)
                    num_examples += len(noise_examples)
                    yield (signal_examples, noise_examples)

    def produce_macrobatches(self, filename_iterator):
        '''
        Assemble the bunches of examples from the parsed data coming
        from files that were read. Proportion of signal to noise
        examples is determined by self.noise_ratio.

        NOTE(review): unimplemented — intended to pack the data from
        self.generate_examples into pandas DataFrames.
        '''
        pass
|
import errno
import glob
import json
import os
from queue import Queue
import shlex
import shutil
import stat
import subprocess
import threading
import time
from typing import Callable
import webbrowser
import bpy
import arm.assets as assets
from arm.exporter import ArmoryExporter
import arm.lib.make_datas
import arm.lib.server
import arm.live_patch as live_patch
import arm.log as log
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_state as state
import arm.make_world as make_world
import arm.utils
import arm.write_data as write_data
# Support Blender's script reloading: when this module is re-imported by
# arm's reload machinery, re-bind every imported arm submodule so the
# freshly loaded code is picked up; otherwise register for reloading.
if arm.is_reload(__name__):
    assets = arm.reload_module(assets)
    arm.exporter = arm.reload_module(arm.exporter)
    # Re-import the name bound at the top of the file from the fresh module
    from arm.exporter import ArmoryExporter
    arm.lib.make_datas = arm.reload_module(arm.lib.make_datas)
    arm.lib.server = arm.reload_module(arm.lib.server)
    live_patch = arm.reload_module(live_patch)
    log = arm.reload_module(log)
    make_logic = arm.reload_module(make_logic)
    make_renderpath = arm.reload_module(make_renderpath)
    state = arm.reload_module(state)
    make_world = arm.reload_module(make_world)
    arm.utils = arm.reload_module(arm.utils)
    write_data = arm.reload_module(write_data)
else:
    arm.enable_reload(__name__)
scripts_mtime = 0  # Monitor source changes (newest .hx mtime seen so far)
profile_time = 0  # Wall-clock start of the current build, set in build()
# Queue of threads and their done callbacks. Item format: [thread, done]
thread_callback_queue = Queue(maxsize=0)
def run_proc(cmd, done: Callable) -> subprocess.Popen:
    """Creates a subprocess with the given command and returns it.

    If Blender is not running in background mode, a thread is spawned
    that waits until the subprocess has finished executing to not freeze
    the UI, otherwise (in background mode) execution is blocked until
    the subprocess has finished.

    If `done` is not `None`, it is called afterwards in the main thread.
    """
    blocking = bpy.app.background

    def wait_for_proc(proc: subprocess.Popen):
        proc.wait()
        if blocking:
            done()
        else:
            # Hand the callback to the main thread via the polling queue
            thread_callback_queue.put([threading.current_thread(), done], block=True)

    child = subprocess.Popen(cmd)
    if blocking:
        wait_for_proc(child)
    else:
        threading.Thread(target=wait_for_proc, args=(child,)).start()
    return child
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
    """Compile one shader pass: run make_datas on the pass's .json
    description and copy the referenced shader stage sources into
    the build's compiled/Shaders directory."""
    os.chdir(raw_shaders_path + '/' + shader_name)
    # Load the pass description from <shader_name>.json
    with open(shader_name + '.json') as handle:
        json_data = json.loads(handle.read())

    fp = arm.utils.get_fp_build()
    arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)

    out_dir = fp + '/compiled/Shaders'
    context = json_data['contexts'][0]
    stage_keys = ('vertex_shader', 'fragment_shader', 'geometry_shader',
                  'tesscontrol_shader', 'tesseval_shader')
    for stage in stage_keys:
        if stage in context:
            source = context[stage]
            shutil.copy(source, out_dir + '/' + source.split('/')[-1])
def remove_readonly(func, path, excinfo):
    """`shutil.rmtree` onerror hook: clear the read-only flag on `path`,
    then retry the failed operation `func`."""
    os.chmod(path, stat.S_IWRITE)
    func(path)
def export_data(fp, sdk_path):
    """Export all scene/world data and generated sources into the build
    directory.

    fp: project root path. sdk_path: Armory SDK root. Writes compiled
    assets, shader data, khafile.js, Main.hx and (optionally) config.arm,
    and updates the state.last_* caches used for recompile detection.
    """
    wrd = bpy.data.worlds['Arm']

    print('Armory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
    if wrd.arm_verbose_output:
        print(f'Blender: {bpy.app.version_string}, Target: {state.target}, GAPI: {arm.utils.get_gapi()}')

    # Clean compiled variants if cache is disabled
    build_dir = arm.utils.get_fp_build()
    if not wrd.arm_cache_build:
        if os.path.isdir(build_dir + '/debug/html5-resources'):
            shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/krom-resources'):
            shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/debug/krom-resources'):
            shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/windows-resources'):
            shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/linux-resources'):
            shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/osx-resources'):
            shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/compiled/Shaders'):
            shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)

    raw_shaders_path = sdk_path + '/armory/Shaders/'
    assets_path = sdk_path + '/armory/Assets/'
    export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
    export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
    export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
    assets.reset()

    # Build node trees
    ArmoryExporter.import_traits = []
    make_logic.build()
    make_world.build()
    make_renderpath.build()

    # Export scene data
    assets.embedded_data = sorted(set(assets.embedded_data))
    physics_found = False
    navigation_found = False
    ui_found = False
    ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
    ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
    if not os.path.exists(build_dir + '/compiled/Assets'):
        os.makedirs(build_dir + '/compiled/Assets')

    # Gather mesh/empty objects from all other scenes into a temporary
    # "zoo" collection in the current scene so the depsgraph sees them
    export_coll = bpy.data.collections.new("export_coll")
    bpy.context.scene.collection.children.link(export_coll)
    for scene in bpy.data.scenes:
        if scene == bpy.context.scene:
            continue
        for o in scene.collection.all_objects:
            if o.type == "MESH" or o.type == "EMPTY":
                if o.name not in export_coll.all_objects.keys():
                    export_coll.objects.link(o)
    depsgraph = bpy.context.evaluated_depsgraph_get()
    bpy.data.collections.remove(export_coll)  # destroy "zoo" collection

    for scene in bpy.data.scenes:
        if scene.arm_export:
            ext = '.lz4' if ArmoryExporter.compress_enabled else '.arm'
            asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
            ArmoryExporter.export_scene(bpy.context, asset_path, scene=scene, depsgraph=depsgraph)
            if ArmoryExporter.export_physics:
                physics_found = True
            if ArmoryExporter.export_navigation:
                navigation_found = True
            if ArmoryExporter.export_ui:
                ui_found = True
            assets.add(asset_path)

    if not physics_found:  # Disable physics if no rigid body is exported
        export_physics = False
    if not navigation_found:
        export_navigation = False
    if not ui_found:
        export_ui = False
    if wrd.arm_ui == 'Enabled':  # Forced on regardless of scene content
        export_ui = True

    modules = []
    if wrd.arm_audio == 'Enabled':
        modules.append('audio')
    if export_physics:
        modules.append('physics')
    if export_navigation:
        modules.append('navigation')
    if export_ui:
        modules.append('ui')

    defs = arm.utils.def_strings_to_array(wrd.world_defs)
    cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)

    if wrd.arm_verbose_output:
        print('Exported modules:', ', '.join(modules))
        print('Shader flags:', ' '.join(defs))
        print('Compositor flags:', ' '.join(cdefs))
        print('Khafile flags:', ' '.join(assets.khafile_defs))

    # Render path is configurable at runtime
    has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')

    # Write compiled.inc
    shaders_path = build_dir + '/compiled/Shaders'
    if not os.path.exists(shaders_path):
        os.makedirs(shaders_path)
    write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)

    # Write referenced shader passes (skipped when cached and defs unchanged)
    if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
        res = {'shader_datas': []}
        for ref in assets.shader_passes:
            # Ensure shader pass source exists
            if not os.path.exists(raw_shaders_path + '/' + ref):
                continue
            assets.shader_passes_assets[ref] = []
            compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
        # Workaround to also export non-material world shaders
        res['shader_datas'] += make_world.shader_datas
        arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
        for ref in assets.shader_passes:
            for s in assets.shader_passes_assets[ref]:
                assets.add_shader(shaders_path + '/' + s + '.glsl')
        for shader_file in assets.shaders_external:
            name = shader_file.split('/')[-1].split('\\')[-1]
            target = build_dir + '/compiled/Shaders/' + name
            if not os.path.exists(target):
                shutil.copy(shader_file, target)
        state.last_world_defs = wrd.world_defs

    # Reset path (compile_shader_pass chdirs into shader dirs)
    os.chdir(fp)

    # Copy std shaders
    if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
        shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')

    # Write config.arm
    resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
    if wrd.arm_write_config:
        write_data.write_config(resx, resy)

    # Change project version (Build, Publish)
    if (not state.is_play) and (wrd.arm_project_version_autoinc):
        # BUGFIX: the module path was accidentally duplicated
        # (`arm.utils.arm.utils.change_version_project`).
        wrd.arm_project_version = arm.utils.change_version_project(wrd.arm_project_version)

    # Write khafile.js
    write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, ArmoryExporter.import_traits)

    # Write Main.hx - depends on write_khafilejs for writing number of assets
    scene_name = arm.utils.get_project_scene_name()
    write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
    if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
        wrd.arm_recompile = True
    state.last_resx = resx
    state.last_resy = resy
    state.last_scene = scene_name
def compile(assets_only=False):
    """Assemble the khamake command line for the current target and start
    the build subprocess (optionally via the haxe compilation server).

    assets_only: when True, khamake only converts assets (--nohaxe).
    Sets state.proc_build; build_done/assets_done run on completion.
    """
    wrd = bpy.data.worlds['Arm']
    fp = arm.utils.get_fp()
    os.chdir(fp)

    # Set build command
    target_name = state.target
    node_path = arm.utils.get_node_path()
    khamake_path = arm.utils.get_khamake_path()
    cmd = [node_path, khamake_path]

    kha_target_name = arm.utils.get_kha_target(target_name)
    if kha_target_name != '':
        cmd.append(kha_target_name)

    # Custom exporter: forward user-specified khamake flags
    if state.is_export:
        item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
        if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
            for s in item.arm_project_khamake.split(' '):
                cmd.append(s)

    ffmpeg_path = arm.utils.get_ffmpeg_path()
    if ffmpeg_path != None and ffmpeg_path != '':
        cmd.append('--ffmpeg')
        cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'

    # Graphics API for this build
    state.export_gapi = arm.utils.get_gapi()
    cmd.append('-g')
    cmd.append(state.export_gapi)

    # Windows - Set Visual Studio Version
    if state.target.startswith('windows'):
        cmd.append('-visualstudio')
        vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
        cmd.append(vs_id)

    # Pick the GLSL version per target platform
    if arm.utils.get_legacy_shaders() or 'ios' in state.target:
        if 'html5' in state.target or 'ios' in state.target:
            pass
        else:
            cmd.append('--shaderversion')
            cmd.append('110')
    elif 'android' in state.target or 'html5' in state.target:
        cmd.append('--shaderversion')
        cmd.append('300')
    else:
        cmd.append('--shaderversion')
        cmd.append('330')

    if '_VR' in wrd.world_defs:
        cmd.append('--vr')
        cmd.append('webvr')

    if arm.utils.get_pref_or_default('khamake_debug', False):
        cmd.append('--debug')

    # Raytracer render path: precompile the HLSL raytrace library
    if arm.utils.get_rp().rp_renderer == 'Raytracer':
        cmd.append('--raytrace')
        cmd.append('dxr')
        dxc_path = fp + '/HlslShaders/dxc.exe'
        subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()

    if arm.utils.get_khamake_threads() > 1:
        cmd.append('--parallelAssetConversion')
        cmd.append(str(arm.utils.get_khamake_threads()))

    compilation_server = False

    cmd.append('--to')
    # Debug (non-publish) krom/html5 builds go to the debug subdirectory
    if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
        cmd.append(arm.utils.build_dir() + '/debug')
        # Start compilation server
        if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
            compilation_server = True
            arm.lib.server.run_haxe(arm.utils.get_haxe_path())
    else:
        cmd.append(arm.utils.build_dir())

    if not wrd.arm_verbose_output:
        cmd.append("--quiet")
    else:
        print("Using project from " + arm.utils.get_fp())
        print("Running: ", *cmd)

    # Project needs to be compiled at least once
    # before compilation server can work
    if not os.path.exists(arm.utils.build_dir() + '/debug/krom/krom.js') and not state.is_publish:
        state.proc_build = run_proc(cmd, build_done)
    else:
        if assets_only or compilation_server:
            cmd.append('--nohaxe')
            cmd.append('--noproject')
        # With the compilation server active, haxe is invoked separately
        # in assets_done once asset conversion finishes
        state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
    """Prepare the project for compilation: record build state, save
    edited sources, and export all data for the given target.

    target: Kha target name; is_play/is_publish/is_export select the
    build mode and are mirrored into the global `state` module.
    """
    global profile_time
    profile_time = time.time()

    state.target = target
    state.is_play = is_play
    state.is_publish = is_publish
    state.is_export = is_export

    # Save blend
    if arm.utils.get_save_on_build():
        bpy.ops.wm.save_mainfile()

    log.clear(clear_warnings=True, clear_errors=True)

    # Set camera in active scene (pick the first camera object found)
    active_scene = arm.utils.get_active_scene()
    if active_scene.camera == None:
        for o in active_scene.objects:
            if o.type == 'CAMERA':
                active_scene.camera = o
                break

    # Get paths
    sdk_path = arm.utils.get_sdk_path()
    raw_shaders_path = sdk_path + '/armory/Shaders/'

    # Set dir
    fp = arm.utils.get_fp()
    os.chdir(fp)

    # Create directories
    wrd = bpy.data.worlds['Arm']
    sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
    if not os.path.exists(sources_path):
        os.makedirs(sources_path)

    # Save external scripts edited inside Blender
    write_texts = False
    for text in bpy.data.texts:
        if text.filepath != '' and text.is_dirty:
            write_texts = True
            break
    if write_texts:
        # Temporarily switch the current area to a text editor so that
        # bpy.ops.text.save() has the required context
        area = bpy.context.area
        old_type = area.type
        area.type = 'TEXT_EDITOR'
        for text in bpy.data.texts:
            if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
                area.spaces[0].text = text
                bpy.ops.text.save()
        area.type = old_type

    # Save internal Haxe scripts (text blocks with no backing file)
    for text in bpy.data.texts:
        if text.filepath == '' and text.name[-3:] == '.hx':
            with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
                f.write(text.as_string())

    # Export data
    export_data(fp, sdk_path)

    if state.target == 'html5':
        w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
        write_data.write_indexhtml(w, h, is_publish)
        # Bundle files from include dir
        if os.path.isdir('include'):
            dest = '/html5/' if is_publish else '/debug/html5/'
            for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
                shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
    """Called if the player was stopped/terminated.

    Clears the play process handle and log, requests a UI redraw, and
    shuts down live patching.
    """
    state.proc_play = None
    state.redraw_ui = True
    log.clear()
    live_patch.stop()
def assets_done():
    """Called after khamake finished asset conversion when the haxe
    compilation server is in use; hands compilation over to the server.
    """
    # Idiom fix: compare to None with `is`, not `==`
    if state.proc_build is None:
        return
    result = state.proc_build.poll()
    if result == 0:
        # Connect to the compilation server
        os.chdir(arm.utils.build_dir() + '/debug/')
        cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
        state.proc_build = run_proc(cmd, compilation_server_done)
    else:
        state.proc_build = None
        state.redraw_ui = True
        log.error('Build failed, check console')
def compilation_server_done():
    """Called when the haxe compilation server finished compiling; swaps
    in the freshly produced krom.js and continues with build_done()."""
    # Idiom fix: compare to None with `is`, not `==`
    if state.proc_build is None:
        return
    result = state.proc_build.poll()
    if result == 0:
        # The server writes to a temp file; replace the old binary output
        if os.path.exists('krom/krom.js.temp'):
            os.chmod('krom/krom.js', stat.S_IWRITE)
            os.remove('krom/krom.js')
            os.rename('krom/krom.js.temp', 'krom/krom.js')
        build_done()
    else:
        state.proc_build = None
        state.redraw_ui = True
        log.error('Build failed, check console')
def build_done():
    """Final build callback: report timing and warnings, then either run
    build_success() or log the failure."""
    print('Finished in {:0.3f}s'.format(time.time() - profile_time))
    if log.num_warnings > 0:
        plural = "s" if log.num_warnings > 1 else ""
        log.print_warn(f'{log.num_warnings} warning{plural} occurred during compilation')
    if state.proc_build is None:
        return
    result = state.proc_build.poll()
    state.proc_build = None
    state.redraw_ui = True
    if result != 0:
        log.error('Build failed, check console')
        return
    bpy.data.worlds['Arm'].arm_recompile = False
    build_success()
def runtime_to_target():
    """Map the chosen Armory runtime to its Kha target name."""
    wrd = bpy.data.worlds['Arm']
    return 'krom' if wrd.arm_runtime == 'Krom' else 'html5'
def get_khajs_path(target):
    """Return the debug-build JS artifact path for the given target
    ('krom' gets krom.js, anything else the browser kha.js)."""
    suffix = '/debug/krom/krom.js' if target == 'krom' else '/debug/html5/kha.js'
    return arm.utils.build_dir() + suffix
def play():
    """Build and run the project with the configured runtime, forcing a
    recompile when the cache is stale or trait sources changed."""
    global scripts_mtime
    wrd = bpy.data.worlds['Arm']

    build(target=runtime_to_target(), is_play=True)

    # Recompile when caching is off, the JS artifact is missing, khafile
    # defs changed, or the target switched since the last run
    khajs_path = get_khajs_path(state.target)
    if not wrd.arm_cache_build or \
        not os.path.isfile(khajs_path) or \
        assets.khafile_defs_last != assets.khafile_defs or \
        state.last_target != state.target:
        wrd.arm_recompile = True

    state.last_target = state.target

    # Trait sources modified
    state.mod_scripts = []
    script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
    if os.path.isdir(script_path):
        new_mtime = scripts_mtime
        for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
            mtime = os.path.getmtime(fn)
            # Newer than anything seen on the previous play?
            if scripts_mtime < mtime:
                arm.utils.fetch_script_props(fn) # Trait props
                # Convert the file path into a dotted module name
                fn = fn.split('Sources/')[1]
                fn = fn[:-3] #.hx
                fn = fn.replace('/', '.')
                state.mod_scripts.append(fn)
                wrd.arm_recompile = True
                if new_mtime < mtime:
                    new_mtime = mtime
        scripts_mtime = new_mtime
        if len(state.mod_scripts) > 0: # Trait props
            arm.utils.fetch_trait_props()

    compile(assets_only=(not wrd.arm_recompile))
def build_success():
    """Continues after a successful build.

    In play mode this launches the built project (local HTML5 server +
    browser, or the Krom runtime). In publish mode it minifies JS, copies
    Krom binaries, reports the export location and, per target, kicks off
    the Android APK build or Visual Studio compile.
    """
    log.clear()
    wrd = bpy.data.worlds['Arm']
    if state.is_play:
        if wrd.arm_runtime == 'Browser':
            # Start server
            os.chdir(arm.utils.get_fp())
            prefs = arm.utils.get_arm_preferences()
            t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp, args=(prefs.html5_server_port, prefs.html5_server_log), daemon=True)
            t.start()
            html5_app_path = 'http://localhost:{}/{}/debug/html5'.format(prefs.html5_server_port, arm.utils.build_dir())
            webbrowser.open(html5_app_path)
        elif wrd.arm_runtime == 'Krom':
            if wrd.arm_live_patch:
                live_patch.start()
                # Truncate any stale patch file before the runtime starts.
                open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
            krom_location, krom_path = arm.utils.krom_paths()
            os.chdir(krom_location)
            cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
            if arm.utils.get_os() == 'win':
                cmd.append('--consolepid')
                cmd.append(str(os.getpid()))
            if wrd.arm_audio == 'Disabled':
                cmd.append('--nosound')
            if wrd.arm_verbose_output:
                print("Running: ", *cmd)
            state.proc_play = run_proc(cmd, play_done)
    elif state.is_publish:
        sdk_path = arm.utils.get_sdk_path()
        target_name = arm.utils.get_kha_target(state.target)
        files_path = os.path.join(arm.utils.get_fp_build(), target_name)
        if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
            # Minify JS
            minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
            if target_name == 'html5':
                jsfile = files_path + '/kha.js'
            else:
                jsfile = files_path + '/krom.js'
            args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
            proc = subprocess.Popen(args)
            proc.wait()
        if target_name == 'krom':
            # Copy Krom binaries
            if state.target == 'krom-windows':
                gapi = state.export_gapi
                # Binary name encodes the graphics API, except the default D3D11.
                ext = '' if gapi == 'direct3d11' else '_' + gapi
                krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
                shutil.copy(krom_location, files_path + '/Krom.exe')
                krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
                os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
            elif state.target == 'krom-linux':
                krom_location = sdk_path + '/Krom/Krom'
                shutil.copy(krom_location, files_path)
                krom_exe = arm.utils.safestr(wrd.arm_project_name)
                os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
                krom_exe = './' + krom_exe
            else:
                # macOS: copy the whole app bundle and move loose game files
                # into Contents/MacOS.
                krom_location = sdk_path + '/Krom/Krom.app'
                shutil.copytree(krom_location, files_path + '/Krom.app')
                game_files = os.listdir(files_path)
                for f in game_files:
                    f = files_path + '/' + f
                    if os.path.isfile(f):
                        shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
                krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
                os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
            # Rename
            ext = state.target.split('-')[-1] # krom-windows
            new_files_path = files_path + '-' + ext
            os.rename(files_path, new_files_path)
            files_path = new_files_path
        if target_name == 'html5':
            project_path = files_path
            print('Exported HTML5 package to ' + project_path)
        elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
            project_path = files_path + '-build'
            print('Exported XCode project to ' + project_path)
        elif target_name.startswith('windows'):
            project_path = files_path + '-build'
            vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
            print('Exported '+ vs_name +' project to ' + project_path)
        elif target_name.startswith('android'):
            project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
            project_path = os.path.join(files_path + '-build', project_name)
            print('Exported Android Studio project to ' + project_path)
        elif target_name.startswith('krom'):
            project_path = files_path
            print('Exported Krom package to ' + project_path)
        else:
            project_path = files_path + '-build'
            print('Exported makefiles to ' + project_path)
        if not bpy.app.background and arm.utils.get_arm_preferences().open_build_directory:
            arm.utils.open_folder(project_path)
        # Android build APK
        if target_name.startswith('android'):
            if (arm.utils.get_project_android_build_apk()) and (len(arm.utils.get_android_sdk_root_path()) > 0):
                print("\nBuilding APK")
                # Check settings
                path_sdk = arm.utils.get_android_sdk_root_path()
                if len(path_sdk) > 0:
                    # Check Environment Variables - ANDROID_SDK_ROOT
                    if os.getenv('ANDROID_SDK_ROOT') == None:
                        # Set value from settings
                        os.environ['ANDROID_SDK_ROOT'] = path_sdk
                else:
                    # No SDK path: clearing project_path skips the build below.
                    project_path = ''
                # Build start
                if len(project_path) > 0:
                    os.chdir(project_path) # set work folder
                    if arm.utils.get_os_is_windows():
                        state.proc_publish_build = run_proc(os.path.join(project_path, "gradlew.bat assembleDebug"), done_gradlew_build)
                    else:
                        cmd = shlex.split(os.path.join(project_path, "gradlew assembleDebug"))
                        state.proc_publish_build = run_proc(cmd, done_gradlew_build)
                else:
                    print('\nBuilding APK Warning: ANDROID_SDK_ROOT is not specified in environment variables and "Android SDK Path" setting is not specified in preferences: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path" in the preferences, then repeat operation "Publish"')
        # HTML5 After Publish
        if target_name.startswith('html5'):
            if len(arm.utils.get_html5_copy_path()) > 0 and (wrd.arm_project_html5_copy):
                project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
                dst = os.path.join(arm.utils.get_html5_copy_path(), project_name)
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                try:
                    shutil.copytree(project_path, dst)
                    print("Copied files to " + dst)
                except OSError as exc:
                    # ENOTDIR: source is a single file, fall back to copy().
                    if exc.errno == errno.ENOTDIR:
                        shutil.copy(project_path, dst)
                    else: raise
                if len(arm.utils.get_link_web_server()) and (wrd.arm_project_html5_start_browser):
                    link_html5_app = arm.utils.get_link_web_server() +'/'+ project_name
                    print("Running a browser with a link " + link_html5_app)
                    webbrowser.open(link_html5_app)
        # Windows After Publish
        if target_name.startswith('windows'):
            list_vs = []
            err = ''
            # Print message
            project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
            if int(wrd.arm_project_win_build) == 1:
                print('\nOpen in Visual Studio: ' + os.path.join(project_path, project_name + '.sln"'))
            if int(wrd.arm_project_win_build) == 2:
                print('\nCompile project ' + os.path.join(project_path, project_name + '.vcxproj'))
            if int(wrd.arm_project_win_build) == 3:
                print('\nCompile and run project ' + os.path.join(project_path, project_name + '.vcxproj'))
            if int(wrd.arm_project_win_build) > 0:
                # Check Visual Studio
                list_vs, err = arm.utils.get_list_installed_vs(True, True, True)
                if len(err) > 0:
                    print(err)
                    return
                if len(list_vs) == 0:
                    print('No Visual Studio found')
                    return
                is_check = False
                for vs in list_vs:
                    if vs[0] == wrd.arm_project_win_list_vs:
                        is_check = True
                        break
                if not is_check:
                    vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
                    print(vs_name + ' not found.')
                    print('The following are installed on the PC:')
                    for vs in list_vs:
                        print('- ' + vs[1] + ' (version ' + vs[3] +')')
                    return
                # Current VS
                vs_path = ''
                for vs in list_vs:
                    if vs[0] == wrd.arm_project_win_list_vs:
                        vs_path = vs[2]
                        break
                # Open in Visual Studio
                if int(wrd.arm_project_win_build) == 1:
                    cmd = os.path.join('start "' + vs_path, 'Common7', 'IDE', 'devenv.exe" "' + os.path.join(project_path, project_name + '.sln"'))
                    subprocess.Popen(cmd, shell=True)
                # Compile
                if int(wrd.arm_project_win_build) > 1:
                    bits = '64' if wrd.arm_project_win_build_arch == 'x64' else '32'
                    # vcvars
                    cmd = os.path.join(vs_path, 'VC', 'Auxiliary', 'Build', 'vcvars' + bits + '.bat')
                    if not os.path.isfile(cmd):
                        # NOTE(review): vs_name appears to be bound here only when
                        # the target_name 'windows' export branch ran earlier —
                        # confirm this print cannot hit an unbound vs_name.
                        print('File "'+ cmd +'" not found. Verify ' + vs_name + ' was installed correctly')
                        log.error('Compile failed, check console')
                        return
                    state.proc_publish_build = run_proc(cmd, done_vs_vars)
def done_gradlew_build():
    """Callback for the Gradle APK build started from build_success().

    On success optionally renames/copies the APK, opens folders and starts
    an emulator per project settings; on failure clears the temporary
    ANDROID_SDK_ROOT override and reports the error.
    """
    if state.proc_publish_build == None:
        return
    result = state.proc_publish_build.poll()
    if result == 0:
        state.proc_publish_build = None
        wrd = bpy.data.worlds['Arm']
        path_apk = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
        project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
        # Gradle places the debug APK under the standard outputs directory.
        path_apk = os.path.join(path_apk + '-build', project_name, 'app', 'build', 'outputs', 'apk', 'debug')
        print("\nBuild APK to " + path_apk)
        # Rename APK
        apk_name = 'app-debug.apk'
        file_name = os.path.join(path_apk, apk_name)
        if wrd.arm_project_android_rename_apk:
            apk_name = project_name + '.apk'
            os.rename(file_name, os.path.join(path_apk, apk_name))
            file_name = os.path.join(path_apk, apk_name)
            print("\nRename APK to " + apk_name)
        # Copy APK
        if wrd.arm_project_android_copy_apk:
            shutil.copyfile(file_name, os.path.join(arm.utils.get_android_apk_copy_path(), apk_name))
            print("Copy APK to " + arm.utils.get_android_apk_copy_path())
        # Open directory with APK
        if arm.utils.get_android_open_build_apk_directory():
            arm.utils.open_folder(path_apk)
        # Open directory after copy APK
        if arm.utils.get_android_apk_copy_open_directory():
            arm.utils.open_folder(arm.utils.get_android_apk_copy_path())
        # Running emulator
        if wrd.arm_project_android_run_avd:
            run_android_emulators(arm.utils.get_android_emulator_name())
        state.redraw_ui = True
    else:
        state.proc_publish_build = None
        state.redraw_ui = True
        # Reset the override so a stale SDK path is not reused on retry.
        os.environ['ANDROID_SDK_ROOT'] = ''
        log.error('Building the APK failed, check console')
def run_android_emulators(avd_name):
    """Launches the Android emulator for the given AVD name, if one is set."""
    if not avd_name.strip():
        return
    print('\nRunning Emulator "'+ avd_name +'"')
    path_file = arm.utils.get_android_emulator_file()
    if len(path_file) == 0:
        print('Update List Emulators Warning: File "'+ path_file +'" not found. Check that the variable ANDROID_SDK_ROOT is correct in environment variables or in "Android SDK Path" setting: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path", then repeat operation "Publish"')
        return
    command = path_file + " -avd "+ avd_name
    if arm.utils.get_os_is_windows():
        run_proc(command, None)
    else:
        run_proc(shlex.split(command), None)
def done_vs_vars():
    """Callback after the vcvars batch file has run.

    Locates MSBuild for the selected Visual Studio installation, assembles
    the MSBuild command line and starts the project compile.
    """
    if state.proc_publish_build == None:
        return
    result = state.proc_publish_build.poll()
    if result == 0:
        state.proc_publish_build = None
        # MSBuild
        wrd = bpy.data.worlds['Arm']
        list_vs, err = arm.utils.get_list_installed_vs(True, True, True)
        # Current VS
        vs_path = ''
        vs_name = ''
        for vs in list_vs:
            if vs[0] == wrd.arm_project_win_list_vs:
                vs_name = vs[1]
                vs_path = vs[2]
                break
        msbuild = os.path.join(vs_path, 'MSBuild', 'Current', 'Bin', 'MSBuild.exe')
        if not os.path.isfile(msbuild):
            print('File "'+ msbuild +'" not found. Verify ' + vs_name + ' was installed correctly')
            log.error('Compile failed, check console')
            state.redraw_ui = True
            return
        project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
        project_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target)) + '-build'
        cmd = '"' + msbuild + '" "' + os.path.join(project_path, project_name + '.vcxproj"')
        # Arguments
        platform = 'x64' if wrd.arm_project_win_build_arch == 'x64' else 'win32'
        log_param = wrd.arm_project_win_build_log
        if log_param == 'WarningsAndErrorsOnly':
            # MSBuild has no combined switch; pass both logger flags.
            log_param = 'WarningsOnly;ErrorsOnly'
        cmd = cmd + ' -m:' + str(wrd.arm_project_win_build_cpu) + ' -clp:'+ log_param +' /p:Configuration='+ wrd.arm_project_win_build_mode +' /p:Platform=' + platform
        print('\nCompiling the project ' + os.path.join(project_path, project_name + '.vcxproj"'))
        state.proc_publish_build = run_proc(cmd, done_vs_build)
        state.redraw_ui = True
    else:
        state.proc_publish_build = None
        state.redraw_ui = True
        log.error('\nCompile failed, check console')
def done_vs_build():
    """Callback after MSBuild finishes.

    Reports the output directory and, when "compile and run" (mode 3) was
    requested, copies the executable next to the exported package and
    launches it.
    """
    if state.proc_publish_build == None:
        return
    result = state.proc_publish_build.poll()
    if result == 0:
        state.proc_publish_build = None
        wrd = bpy.data.worlds['Arm']
        project_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target)) + '-build'
        # MSBuild nests x64 output under an extra architecture directory.
        if wrd.arm_project_win_build_arch == 'x64':
            path = os.path.join(project_path, 'x64', wrd.arm_project_win_build_mode)
        else:
            path = os.path.join(project_path, wrd.arm_project_win_build_mode)
        print('\nCompilation completed in ' + path)
        # Run
        if int(wrd.arm_project_win_build) == 3:
            # Copying the executable file
            res_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
            file_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version) + '.exe'
            print('\nCopy the executable file from ' + path + ' to ' + res_path)
            shutil.copyfile(os.path.join(path, file_name), os.path.join(res_path, file_name))
            path = res_path
            # Run project
            cmd = os.path.join('"' + res_path, file_name + '"')
            print('Run the executable file to ' + cmd)
            os.chdir(res_path) # set work folder
            subprocess.Popen(cmd, shell=True)
        # Open Build Directory
        if wrd.arm_project_win_build_open:
            arm.utils.open_folder(path)
        state.redraw_ui = True
    else:
        state.proc_publish_build = None
        state.redraw_ui = True
        log.error('Compile failed, check console')
def clean():
    """Deletes build output, generated sources and cached material data.

    Also restarts the Haxe compilation server (when enabled) so it does not
    keep stale state, and clears per-material signatures so batched
    materials get recached on the next build.
    """
    os.chdir(arm.utils.get_fp())
    wrd = bpy.data.worlds['Arm']

    # Remove build and compiled data
    try:
        if os.path.isdir(arm.utils.build_dir()):
            shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
        if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
            shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
    except OSError:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Filesystem failures (e.g. files locked on Windows)
        # surface as OSError subclasses.
        print('Armory Warning: Some files in the build folder are locked')

    # Remove compiled nodes
    pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
    nodes_path = 'Sources/' + pkg_dir + '/node/'
    if os.path.isdir(nodes_path):
        shutil.rmtree(nodes_path, onerror=remove_readonly)

    # Remove khafile/Main.hx
    if os.path.isfile('khafile.js'):
        os.remove('khafile.js')
    if os.path.isfile('Sources/Main.hx'):
        os.remove('Sources/Main.hx')

    # Remove Sources/ dir if empty
    if os.path.exists('Sources/' + pkg_dir) and not os.listdir('Sources/' + pkg_dir):
        shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
    if os.path.exists('Sources') and not os.listdir('Sources'):
        shutil.rmtree('Sources/', onerror=remove_readonly)

    # Remove Shape key Textures
    if os.path.exists('MorphTargets/'):
        shutil.rmtree('MorphTargets/', onerror=remove_readonly)

    # To recache signatures for batched materials
    for mat in bpy.data.materials:
        mat.signature = ''
        mat.arm_cached = False

    # Restart compilation server
    if arm.utils.get_compilation_server():
        arm.lib.server.kill_haxe()

    print('Project cleaned')
|
autopollingcachepolicy.py | import logging
import sys
import datetime
import time
from threading import Thread, Event
from requests import HTTPError
from .readwritelock import ReadWriteLock
from .interfaces import CachePolicy
log = logging.getLogger(sys.modules[__name__].__name__)
class AutoPollingCachePolicy(CachePolicy):
    """Cache policy that keeps the configuration fresh by polling from a
    daemon thread at a fixed interval.
    """

    def __init__(self, config_fetcher, config_cache, cache_key,
                 poll_interval_seconds=60, max_init_wait_time_seconds=5,
                 on_configuration_changed_callback=None):
        """Starts the polling thread and blocks until it has begun running.

        Args:
            config_fetcher: object exposing get_configuration_json(force_fetch).
            config_cache: cache implementing get(key) / set(key, value).
            cache_key: key under which the configuration is stored.
            poll_interval_seconds: seconds between refreshes (clamped to >= 1).
            max_init_wait_time_seconds: upper bound on how long get() waits
                for the first successful refresh (clamped to >= 0).
            on_configuration_changed_callback: optional callable invoked after
                a changed configuration has been stored.
        """
        # Clamp the intervals to their documented minimums.
        if poll_interval_seconds < 1:
            poll_interval_seconds = 1
        if max_init_wait_time_seconds < 0:
            max_init_wait_time_seconds = 0

        self._config_fetcher = config_fetcher
        self._config_cache = config_cache
        self._cache_key = cache_key
        self._poll_interval_seconds = poll_interval_seconds
        self._max_init_wait_time_seconds = datetime.timedelta(seconds=max_init_wait_time_seconds)
        self._on_configuration_changed_callback = on_configuration_changed_callback
        self._initialized = False
        self._is_running = False
        self._start_time = datetime.datetime.utcnow()
        self._lock = ReadWriteLock()
        # Set by stop(); also interrupts the poll sleep so the worker thread
        # exits promptly instead of finishing a full poll interval.
        self._stop_event = Event()

        self.thread = Thread(target=self._run, args=[])
        self.thread.daemon = True
        self._is_started = Event()
        self.thread.start()
        self._is_started.wait()

    def _run(self):
        # Poll loop of the background thread.
        self._is_running = True
        self._is_started.set()
        while self._is_running:
            self.force_refresh()
            # Fix: the previous sleep-then-check loop meant stop() could take
            # up to poll_interval_seconds to take effect; Event.wait() returns
            # immediately once stop() sets the event.
            self._stop_event.wait(self._poll_interval_seconds)

    def get(self):
        """Returns the cached configuration.

        Shortly after construction this waits (bounded by
        max_init_wait_time_seconds) for the first successful refresh.
        """
        while not self._initialized \
                and datetime.datetime.utcnow() < self._start_time + self._max_init_wait_time_seconds:
            time.sleep(.500)

        try:
            self._lock.acquire_read()
            return self._config_cache.get(self._cache_key)
        finally:
            self._lock.release_read()

    def force_refresh(self):
        """Fetches the configuration and stores it in the cache if it changed.

        Never raises; fetch and callback errors are logged instead.
        """
        try:
            old_configuration = None
            force_fetch = False
            try:
                self._lock.acquire_read()
                old_configuration = self._config_cache.get(self._cache_key)
                # Force a full fetch when nothing is cached yet.
                force_fetch = not bool(old_configuration)
            finally:
                self._lock.release_read()

            configuration_response = self._config_fetcher.get_configuration_json(force_fetch)
            if configuration_response.is_fetched():
                configuration = configuration_response.json()
                if configuration != old_configuration:
                    try:
                        self._lock.acquire_write()
                        self._config_cache.set(self._cache_key, configuration)
                        self._initialized = True
                    finally:
                        self._lock.release_write()

                    # The callback runs outside the write lock; its failures
                    # must not break the refresh cycle.
                    try:
                        if self._on_configuration_changed_callback is not None:
                            self._on_configuration_changed_callback()
                    except Exception:
                        log.exception(sys.exc_info()[0])

            # A not-modified response still proves the cached value is valid.
            if not self._initialized and old_configuration is not None:
                self._initialized = True
        except HTTPError as e:
            log.error('Double-check your SDK Key at https://app.configcat.com/sdkkey.'
                      ' Received unexpected response: %s' % str(e.response))
        except Exception:
            log.exception(sys.exc_info()[0])

    def stop(self):
        """Signals the poll thread to stop; interrupts the current sleep."""
        self._is_running = False
        self._stop_event.set()
|
bazel_build.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bridge between Xcode and Bazel for the "build" action."""
import atexit
import errno
import fcntl
import hashlib
import inspect
import io
import json
import os
import pipes
import re
import shutil
import signal
import subprocess
import sys
import textwrap
import threading
import time
import zipfile
from apfs_clone_copy import CopyOnWrite
import bazel_build_events
import bazel_build_settings
import bazel_options
from bootstrap_lldbinit import BootstrapLLDBInit
from bootstrap_lldbinit import TULSI_LLDBINIT_FILE
import tulsi_logging
from update_symbol_cache import UpdateSymbolCache
# List of frameworks that Xcode injects into test host targets that should be
# re-signed when running the tests on devices.
XCODE_INJECTED_FRAMEWORKS = [
    'libXCTestBundleInject.dylib',
    'libXCTestSwiftSupport.dylib',
    'IDEBundleInjection.framework',
    'XCTAutomationSupport.framework',
    'XCTest.framework',
]

# Module-wide logger handle; expected to be assigned before any Timer is
# constructed (Timer.__init__ raises RuntimeError while this is still None).
_logger = None
def _PrintUnbuffered(msg):
  """Writes msg plus a newline to stdout and flushes immediately."""
  print(msg, flush=True)
def _PrintXcodeWarning(msg):
  """Emits a warning line in the format Xcode surfaces in its build log."""
  print(':: warning: %s' % msg, flush=True)
def _PrintXcodeError(msg):
  """Emits an error line (on stderr) in the format Xcode surfaces in its build log."""
  print(':: error: %s' % msg, file=sys.stderr, flush=True)
def _Fatal(msg, fatal_frame=None):
  """Print a fatal error pointing to the failure line inside the script."""
  # Default to the caller's frame so the message points at the call site.
  frame = fatal_frame if fatal_frame else inspect.currentframe().f_back
  filename, line_number = inspect.getframeinfo(frame)[:2]
  _PrintUnbuffered('%s:%d: error: %s' % (os.path.abspath(filename),
                                         line_number, msg))
# When True, _BEPFileExitCleanup removes the build events file on exit.
CLEANUP_BEP_FILE_AT_EXIT = False


# Function to be called atexit to clean up the BEP file if one is present.
# This is especially useful in cases of abnormal termination (such as what
# happens when Xcode is killed).
def _BEPFileExitCleanup(bep_file_path):
  """Best-effort removal of the Build Events JSON file at bep_file_path.

  Does nothing unless CLEANUP_BEP_FILE_AT_EXIT is set; a failed removal is
  reported as an Xcode warning rather than raised.
  """
  if not CLEANUP_BEP_FILE_AT_EXIT:
    return
  try:
    os.remove(bep_file_path)
  except OSError as e:
    _PrintXcodeWarning('Failed to remove BEP file from %s. Error: %s' %
                       (bep_file_path, e.strerror))
def _InterruptHandler(signum, frame):
  """Gracefully exit on SIGINT."""
  # The signature is dictated by the signal module; the arguments are unused.
  del signum, frame
  _PrintUnbuffered('Caught interrupt signal. Exiting...')
  sys.exit(0)
class Timer(object):
  """Simple profiler that reports action timings through the module logger."""

  def __init__(self, action_name, action_id):
    """Creates a new Timer object.

    Args:
      action_name: A human-readable action name, shown in the build log.
      action_id: A machine-readable action identifier, can be used for metrics.

    Raises:
      RuntimeError: if Timer is created without initializing _logger.
    """
    if _logger is None:
      raise RuntimeError('Attempted to create Timer without a logger.')
    self.action_name = action_name
    self.action_id = action_id
    self._start = None

  def Start(self):
    """Records the start time; returns self so calls can be chained."""
    self._start = time.time()
    return self

  def End(self, log_absolute_times=False):
    """Stops the timer and logs elapsed (and optionally absolute) times."""
    end = time.time()
    elapsed = end - self._start
    if log_absolute_times:
      _logger.log_action(self.action_name, self.action_id, elapsed,
                         self._start, end)
    else:
      _logger.log_action(self.action_name, self.action_id, elapsed)
def _LockFileCreate():
  """Returns a lock file path unique to the current Bazel workspace."""
  # This relies on this script running at the root of the bazel workspace.
  cwd_hash = hashlib.sha256(os.environ['PWD'].encode()).hexdigest()
  return '/tmp/tulsi_bazel_build_%s.lock' % cwd_hash
# Function to be called atexit to release the file lock on script termination.
def _LockFileExitCleanup(lock_file_handle):
  """Closes the lock file handle."""
  lock_file_handle.close()
def _LockFileAcquire(lock_path):
  """Force script to wait on file lock to serialize build target actions.

  Args:
    lock_path: Path to the lock file.
  """
  _PrintUnbuffered('Queuing Tulsi build...')
  lockfile = open(lock_path, 'w')
  # Register the cleanup handler as early as possible, before acquiring.
  atexit.register(_LockFileExitCleanup, lockfile)
  acquired = False
  while not acquired:
    try:
      fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
      acquired = True
    except IOError as err:
      # EAGAIN means another build holds the lock; keep polling.
      if err.errno != errno.EAGAIN:
        raise
      time.sleep(0.1)
class CodesignBundleAttributes(object):
  """Wrapper class for codesigning attributes of a signed bundle."""

  # List of codesigning attributes that this script requires.
  _ATTRIBUTES = ['Authority', 'Identifier', 'TeamIdentifier']

  def __init__(self, codesign_output):
    """Parses the required attributes out of `codesign -dvv` style output."""
    self.attributes = {}
    remaining = list(self._ATTRIBUTES)
    for line in codesign_output.split('\n'):
      if not remaining:
        break
      for attr in remaining:
        if line.startswith(attr):
          # Skip past the attribute name and the '=' separator.
          self.attributes[attr] = line[len(attr) + 1:]
          remaining.remove(attr)
          break
    for attr in self._ATTRIBUTES:
      if attr not in self.attributes:
        _PrintXcodeError(
            'Failed to extract %s from %s.\n' % (attr, codesign_output))

  def Get(self, attribute):
    """Returns the value for the given attribute, or None if it wasn't found."""
    if attribute not in self._ATTRIBUTES:
      _PrintXcodeError(
          'Attribute %s not declared to be parsed. ' % attribute +
          'Available attributes are %s.\n' % self._ATTRIBUTES)
    return self.attributes.get(attribute)
class _OptionsParser(object):
  """Handles parsing script options."""

  # List of all supported Xcode configurations.
  KNOWN_CONFIGS = ['Debug', 'Release']

  def __init__(self, build_settings, sdk_version, platform_name, arch):
    """Initializes defaults and derives the Bazel platform config.

    Args:
      build_settings: Tulsi-generated build settings object; must expose
          platformConfigFlags, flags_for_target and features_for_target.
      sdk_version: Target SDK version string.
      platform_name: Xcode platform name (e.g. "iphoneos", "macosx").
      arch: Target architecture (e.g. "arm64").
    """
    self.targets = []
    self.build_settings = build_settings
    self.common_build_options = [
        '--verbose_failures',
        '--bes_outerr_buffer_size=0',  # Don't buffer Bazel output.
    ]

    self.sdk_version = sdk_version
    self.platform_name = platform_name

    # Map the Xcode platform name onto the Bazel config platform prefix.
    if self.platform_name.startswith('watch'):
      config_platform = 'watchos'
    elif self.platform_name.startswith('iphone'):
      config_platform = 'ios'
    elif self.platform_name.startswith('macos'):
      config_platform = 'macos'
    elif self.platform_name.startswith('appletv'):
      config_platform = 'tvos'
    else:
      self._WarnUnknownPlatform()
      config_platform = 'ios'
    self.bazel_build_config = '{}_{}'.format(config_platform, arch)
    if self.bazel_build_config not in build_settings.platformConfigFlags:
      _PrintXcodeError('Unknown active compilation target of "{}". '
                       'Please report a Tulsi bug.'
                       .format(self.bazel_build_config))
      sys.exit(1)

    self.verbose = 0
    self.bazel_bin_path = 'bazel-bin'
    self.bazel_executable = None

  @staticmethod
  def _UsageMessage():
    """Returns a usage message string."""
    usage = textwrap.dedent("""\
      Usage: %s <target> [<target2> ...] --bazel <bazel_binary_path> [options]

      Where options are:
        --verbose [-v]
            Increments the verbosity of the script by one level. This argument
            may be provided multiple times to enable additional output levels.

        --bazel_bin_path <path>
            Path at which Bazel-generated artifacts may be retrieved.
      """ % sys.argv[0])

    return usage

  def ParseOptions(self, args):
    """Parses arguments, returning (message, exit_code)."""
    # Fix: a missing --bazel flag previously escaped as an uncaught
    # ValueError from list.index; report usage instead.
    try:
      bazel_executable_index = args.index('--bazel')
    except ValueError:
      return (self._UsageMessage(), 10)

    self.targets = args[:bazel_executable_index]
    if not self.targets or len(args) < bazel_executable_index + 2:
      return (self._UsageMessage(), 10)

    self.bazel_executable = args[bazel_executable_index + 1]
    return self._ParseVariableOptions(args[bazel_executable_index + 2:])

  def GetBaseFlagsForTargets(self, config):
    """Returns the Tulsi-generated flags for the primary target/config."""
    is_debug = config == 'Debug'
    return self.build_settings.flags_for_target(
        self.targets[0],
        is_debug,
        self.bazel_build_config)

  def GetEnabledFeatures(self):
    """Returns a list of enabled Bazel features for the active target."""
    return self.build_settings.features_for_target(self.targets[0])

  def GetBazelOptions(self, config):
    """Returns the full set of build options for the given config."""
    bazel, start_up, build = self.GetBaseFlagsForTargets(config)
    all_build = []
    all_build.extend(self.common_build_options)
    all_build.extend(build)
    xcode_version_flag = self._ComputeXcodeVersionFlag()
    if xcode_version_flag:
      all_build.append('--xcode_version=%s' % xcode_version_flag)
    return bazel, start_up, all_build

  def _WarnUnknownPlatform(self):
    """Warns that an unrecognized platform will be treated as iOS."""
    _PrintUnbuffered('Warning: unknown platform "%s" will be treated as '
                     'iOS' % self.platform_name)

  def _ParseVariableOptions(self, args):
    """Parses flag-based args, returning (message, exit_code)."""
    verbose_re = re.compile('-(v+)$')

    while args:
      arg = args[0]
      args = args[1:]

      if arg == '--bazel_bin_path':
        if not args:
          return ('Missing required parameter for %s' % arg, 2)
        self.bazel_bin_path = args[0]
        args = args[1:]

      elif arg == '--verbose':
        self.verbose += 1

      else:
        # Accept stacked short verbosity flags, e.g. -vv.
        match = verbose_re.match(arg)
        if match:
          self.verbose += len(match.group(1))
        else:
          return ('Unknown option "%s"\n%s' % (arg, self._UsageMessage()), 1)

    return (None, 0)

  @staticmethod
  def _GetXcodeBuildVersionString():
    """Returns Xcode build version from the environment as a string."""
    return os.environ['XCODE_PRODUCT_BUILD_VERSION']

  @staticmethod
  def _GetXcodeVersionString():
    """Returns Xcode version info from the environment as a string."""
    reported_version = os.environ['XCODE_VERSION_ACTUAL']
    # XCODE_VERSION_ACTUAL packs the version into 4 digits, e.g. 1030.
    match = re.match(r'(\d{2})(\d)(\d)$', reported_version)
    if not match:
      _PrintUnbuffered('Warning: Failed to extract Xcode version from %s' % (
          reported_version))
      return None
    major_version = int(match.group(1))
    minor_version = int(match.group(2))
    fix_version = int(match.group(3))
    return '%d.%d.%d' % (major_version, minor_version, fix_version)

  @staticmethod
  def _ComputeXcodeVersionFlag():
    """Returns a string for the --xcode_version build flag, if any.

    The flag should be used if the active Xcode version was not the same one
    used during project generation.

    Note this a best-attempt only; this may not be accurate as Bazel itself
    caches the active DEVELOPER_DIR path and the user may have changed their
    installed Xcode version.
    """
    xcode_version = _OptionsParser._GetXcodeVersionString()
    build_version = _OptionsParser._GetXcodeBuildVersionString()

    if not xcode_version or not build_version:
      return None

    # Of the form Major.Minor.Fix.Build (new Bazel form) or Major.Min.Fix (old).
    full_bazel_version = os.environ.get('TULSI_XCODE_VERSION')
    if not full_bazel_version:  # Unexpected: Tulsi gen didn't set the flag.
      return xcode_version

    # Newer Bazel versions specify the version as Major.Minor.Fix.Build.
    if full_bazel_version.count('.') == 3:
      components = full_bazel_version.rsplit('.', 1)
      bazel_xcode_version = components[0]
      bazel_build_version = components[1]

      if (xcode_version != bazel_xcode_version
          or build_version != bazel_build_version):
        return '{}.{}'.format(xcode_version, build_version)
      else:
        return None
    else:  # Old version of Bazel. We need to use form Major.Minor.Fix.
      return xcode_version if xcode_version != full_bazel_version else None
class BazelBuildBridge(object):
"""Handles invoking Bazel and unpacking generated binaries."""
BUILD_EVENTS_FILE = 'build_events.json'
  def __init__(self, build_settings):
    """Captures the Xcode-provided build environment into attributes.

    Args:
      build_settings: Tulsi-generated bazel_build_settings object.
    """
    self.build_settings = build_settings
    self.verbose = 0
    self.build_path = None
    self.bazel_bin_path = None
    self.codesign_attributes = {}

    self.codesigning_folder_path = os.environ['CODESIGNING_FOLDER_PATH']

    self.xcode_action = os.environ['ACTION']  # The Xcode build action.
    # When invoked as an external build system script, Xcode will set ACTION to
    # an empty string.
    if not self.xcode_action:
      self.xcode_action = 'build'

    if int(os.environ['XCODE_VERSION_MAJOR']) < 900:
      xcode_build_version = os.environ['XCODE_PRODUCT_BUILD_VERSION']
      _PrintXcodeWarning('Tulsi officially supports Xcode 9+. You are using an '
                         'earlier Xcode, build %s.' % xcode_build_version)

    self.tulsi_version = os.environ.get('TULSI_VERSION', 'UNKNOWN')

    # TODO(b/69857078): Remove this when wrapped_clang is updated.
    self.direct_debug_prefix_map = False
    self.normalized_prefix_map = False

    self.update_symbol_cache = UpdateSymbolCache()

    # Target architecture. Must be defined for correct setting of
    # the --cpu flag. Note that Xcode will set multiple values in
    # ARCHS when building for a Generic Device.
    archs = os.environ.get('ARCHS')
    if not archs:
      _PrintXcodeError('Tulsi requires env variable ARCHS to be '
                       'set. Please file a bug against Tulsi.')
      sys.exit(1)
    # Use the last listed arch (the one Xcode is actually building for).
    self.arch = archs.split()[-1]

    # Path into which generated artifacts should be copied.
    self.built_products_dir = os.environ['BUILT_PRODUCTS_DIR']
    # Path where Xcode expects generated sources to be placed.
    self.derived_sources_folder_path = os.environ.get('DERIVED_SOURCES_DIR')
    # Full name of the target artifact (e.g., "MyApp.app" or "Test.xctest").
    self.full_product_name = os.environ['FULL_PRODUCT_NAME']
    # Whether to generate runfiles for this target.
    self.gen_runfiles = os.environ.get('GENERATE_RUNFILES')
    # Target SDK version.
    self.sdk_version = os.environ.get('SDK_VERSION')
    # TEST_HOST for unit tests.
    self.test_host_binary = os.environ.get('TEST_HOST')
    # Whether this target is a test or not.
    self.is_test = os.environ.get('WRAPPER_EXTENSION') == 'xctest'
    # Target platform.
    self.platform_name = os.environ['PLATFORM_NAME']
    # Type of the target artifact.
    self.product_type = os.environ['PRODUCT_TYPE']
    # Path to the parent of the xcodeproj bundle.
    self.project_dir = os.environ['PROJECT_DIR']
    # Path to the xcodeproj bundle.
    self.project_file_path = os.environ['PROJECT_FILE_PATH']
    # Path to the directory containing the WORKSPACE file.
    self.workspace_root = os.path.abspath(os.environ['TULSI_WR'])
    # Set to the name of the generated bundle for bundle-type targets, None for
    # single file targets (like static libraries).
    self.wrapper_name = os.environ.get('WRAPPER_NAME')
    self.wrapper_suffix = os.environ.get('WRAPPER_SUFFIX', '')

    # Path where Xcode expects the artifacts to be written to. This is not the
    # codesigning_path as device vs simulator builds have different signing
    # requirements, so Xcode expects different paths to be signed. This is
    # mostly apparent on XCUITests where simulator builds set the codesigning
    # path to be the .xctest bundle, but for device builds it is actually the
    # UI runner app (since it needs to be codesigned to run on the device.) The
    # FULL_PRODUCT_NAME variable is a stable path on where to put the expected
    # artifacts. For static libraries (objc_library, swift_library),
    # FULL_PRODUCT_NAME corresponds to the .a file name, which coincides with
    # the expected location for a single artifact output.
    # TODO(b/35811023): Check these paths are still valid.
    self.artifact_output_path = os.path.join(
        os.environ['TARGET_BUILD_DIR'],
        os.environ['FULL_PRODUCT_NAME'])

    # Path to where Xcode expects the binary to be placed.
    self.binary_path = os.path.join(
        os.environ['TARGET_BUILD_DIR'], os.environ['EXECUTABLE_PATH'])

    self.is_simulator = self.platform_name.endswith('simulator')
    # Check to see if code signing actions should be skipped or not.
    if self.is_simulator:
      self.codesigning_allowed = False
    else:
      self.codesigning_allowed = os.environ.get('CODE_SIGNING_ALLOWED') == 'YES'

    if self.codesigning_allowed:
      platform_prefix = 'iOS'
      if self.platform_name.startswith('macos'):
        platform_prefix = 'macOS'
      entitlements_filename = '%sXCTRunner.entitlements' % platform_prefix
      self.runner_entitlements_template = os.path.join(self.project_file_path,
                                                       '.tulsi',
                                                       'Resources',
                                                       entitlements_filename)

    self.bazel_executable = None
def Run(self, args):
  """Executes a Bazel build based on the environment and given arguments.

  Args:
    args: command-line arguments; args[0] is ignored and the remainder is
      forwarded to the options parser.

  Returns:
    0 on success, a non-zero error code on failure.
  """
  # Tulsi only participates in the "build" Xcode action; clean/analyze/etc.
  # are no-ops.
  if self.xcode_action != 'build':
    sys.stderr.write('Xcode action is %s, ignoring.' % self.xcode_action)
    return 0

  parser = _OptionsParser(self.build_settings,
                          self.sdk_version,
                          self.platform_name,
                          self.arch)
  timer = Timer('Parsing options', 'parsing_options').Start()
  message, exit_code = parser.ParseOptions(args[1:])
  timer.End()
  if exit_code:
    _PrintXcodeError('Option parsing failed: %s' % message)
    return exit_code

  self.verbose = parser.verbose
  self.bazel_bin_path = os.path.abspath(parser.bazel_bin_path)
  self.bazel_executable = parser.bazel_executable
  self.bazel_exec_root = self.build_settings.bazelExecRoot

  # Update feature flags.
  features = parser.GetEnabledFeatures()
  self.direct_debug_prefix_map = 'DirectDebugPrefixMap' in features
  self.normalized_prefix_map = 'DebugPathNormalization' in features

  self.build_path = os.path.join(self.bazel_bin_path,
                                 os.environ.get('TULSI_BUILD_PATH', ''))

  # Path to the Build Events JSON file uses pid and is removed if the
  # build is successful.
  filename = '%d_%s' % (os.getpid(), BazelBuildBridge.BUILD_EVENTS_FILE)
  self.build_events_file_path = os.path.join(
      self.project_file_path,
      '.tulsi',
      filename)

  (command, retval) = self._BuildBazelCommand(parser)
  if retval:
    return retval

  timer = Timer('Running Bazel', 'running_bazel').Start()
  exit_code, outputs = self._RunBazelAndPatchOutput(command)
  timer.End()
  if exit_code:
    _Fatal('Bazel build failed with exit code %d. Please check the build '
           'log in Report Navigator (⌘9) for more information.'
           % exit_code)
    return exit_code

  post_bazel_timer = Timer('Total Tulsi Post-Bazel time', 'total_post_bazel')
  post_bazel_timer.Start()

  # Debugging requires the Bazel execution root to exist; bail out loudly if
  # it does not.
  if not os.path.exists(self.bazel_exec_root):
    _Fatal('No Bazel execution root was found at %r. Debugging experience '
           'will be compromised. Please report a Tulsi bug.'
           % self.bazel_exec_root)
    return 404

  # This needs to run after `bazel build`, since it depends on the Bazel
  # workspace directory
  exit_code = self._LinkTulsiWorkspace()
  if exit_code:
    return exit_code

  exit_code, outputs_data = self._ExtractAspectOutputsData(outputs)
  if exit_code:
    return exit_code

  # Generated headers are installed on a thread since we are launching
  # a separate process to do so. This gives us clean timings.
  install_thread = threading.Thread(
      target=self._InstallGeneratedHeaders, args=(outputs,))
  install_thread.start()
  timer = Timer('Installing artifacts', 'installing_artifacts').Start()
  exit_code = self._InstallArtifact(outputs_data)
  timer.End()
  install_thread.join()
  if exit_code:
    return exit_code

  exit_code, dsym_paths = self._InstallDSYMBundles(
      self.built_products_dir, outputs_data)
  if exit_code:
    return exit_code

  if not dsym_paths:
    # Clean any bundles from a previous build that can interfere with
    # debugging in LLDB.
    self._CleanExistingDSYMs()
  else:
    for path in dsym_paths:
      # Starting with Xcode 9.x, a plist based remapping exists for dSYM
      # bundles that works with Swift as well as (Obj-)C(++).
      #
      # This solution also works for Xcode 8.x for (Obj-)C(++) but not
      # for Swift.
      timer = Timer('Adding remappings as plists to dSYM',
                    'plist_dsym').Start()
      exit_code = self._PlistdSYMPaths(path)
      timer.End()
      if exit_code:
        _PrintXcodeError('Remapping dSYMs process returned %i, please '
                         'report a Tulsi bug and attach a full Xcode '
                         'build log.' % exit_code)
        return exit_code

  # Starting with Xcode 7.3, XCTests inject several supporting frameworks
  # into the test host that need to be signed with the same identity as
  # the host itself.
  if (self.is_test and not self.platform_name.startswith('macos') and
      self.codesigning_allowed):
    exit_code = self._ResignTestArtifacts()
    if exit_code:
      return exit_code

  # Starting with Xcode 8, .lldbinit files are honored during Xcode debugging
  # sessions. This allows use of the target.source-map field to remap the
  # debug symbol paths encoded in the binary to the paths expected by Xcode.
  #
  # This will not work with dSYM bundles, or a direct -fdebug-prefix-map from
  # the Bazel-built locations to Xcode-visible sources.
  timer = Timer('Updating .lldbinit', 'updating_lldbinit').Start()
  clear_source_map = dsym_paths or self.direct_debug_prefix_map
  exit_code = self._UpdateLLDBInit(clear_source_map)
  timer.End()
  if exit_code:
    # Non-fatal: debugging will be degraded but the build itself succeeded.
    _PrintXcodeWarning('Updating .lldbinit action failed with code %d' %
                       exit_code)

  post_bazel_timer.End(log_absolute_times=True)

  return 0
def _BuildBazelCommand(self, options):
  """Builds up a commandline string suitable for running Bazel.

  Args:
    options: the parsed _OptionsParser holding Bazel options and targets.

  Returns:
    (command, exit_code): the Bazel command line as a list of strings (None
    on failure) and 0 on success, non-zero otherwise.
  """
  configuration = os.environ['CONFIGURATION']
  # Treat the special testrunner build config as a Debug compile.
  test_runner_config_prefix = '__TulsiTestRunner_'
  if configuration.startswith(test_runner_config_prefix):
    configuration = configuration[len(test_runner_config_prefix):]
  elif os.environ.get('TULSI_TEST_RUNNER_ONLY') == 'YES':
    # Test targets may only be built through the test-runner configs.
    _PrintXcodeError('Building test targets with configuration "%s" is not '
                     'allowed. Please use the "Test" action or "Build for" > '
                     '"Testing" instead.' % configuration)
    return (None, 1)

  if configuration not in _OptionsParser.KNOWN_CONFIGS:
    _PrintXcodeError('Unknown build configuration "%s"' % configuration)
    return (None, 1)

  bazel, start_up, build = options.GetBazelOptions(configuration)
  bazel_command = [bazel]
  bazel_command.extend(start_up)
  bazel_command.append('build')
  bazel_command.extend(build)

  bazel_command.extend([
      # The following flags are used by Tulsi to identify itself and read
      # build information from Bazel. They should not affect Bazel analysis
      # caching.
      '--tool_tag=tulsi:bazel_build',
      '--build_event_json_file=%s' % self.build_events_file_path,
      '--noexperimental_build_event_json_file_path_conversion',
      '--aspects', '@tulsi//:tulsi/tulsi_aspects.bzl%tulsi_outputs_aspect'])

  if self.is_test and self.gen_runfiles:
    bazel_command.append('--output_groups=+tulsi_outputs')
  else:
    bazel_command.append('--output_groups=tulsi_outputs,default')

  bazel_command.extend(options.targets)

  # Merge in feature flags derived from the calling environment.
  extra_options = bazel_options.BazelOptions(os.environ)
  bazel_command.extend(extra_options.bazel_feature_flags())

  return (bazel_command, 0)
def _RunBazelAndPatchOutput(self, command):
  """Runs subprocess command, patching output as it's received.

  Args:
    command: the fully formed Bazel command line (list of strings).

  Returns:
    (exit_code, output_locations): Bazel's exit code and the list of
    .tulsiouts file paths reported via the Build Events Protocol (BEP).
  """
  self._PrintVerbose('Running "%s", patching output for workspace root at '
                     '"%s" with project path at "%s".' %
                     (' '.join([pipes.quote(x) for x in command]),
                      self.workspace_root,
                      self.project_dir))

  # Xcode translates anything that looks like '"<path>:<line>:"' that is not
  # followed by the word "warning" into an error. Bazel warnings and debug
  # messages do not fit this scheme and must be patched here.
  bazel_warning_line_regex = re.compile(
      r'(?:DEBUG|WARNING): ([^:]+:\d+:(?:\d+:)?)\s+(.+)')

  def PatchBazelWarningStatements(output_line):
    match = bazel_warning_line_regex.match(output_line)
    if match:
      output_line = '%s warning: %s' % (match.group(1), match.group(2))
    return output_line

  patch_xcode_parsable_line = PatchBazelWarningStatements
  if self.workspace_root != self.project_dir:
    # Match (likely) filename:line_number: lines and make them absolute so
    # Xcode can resolve them from the project directory.
    xcode_parsable_line_regex = re.compile(r'([^/][^:]+):\d+:')

    def PatchOutputLine(output_line):
      output_line = PatchBazelWarningStatements(output_line)
      if xcode_parsable_line_regex.match(output_line):
        output_line = '%s/%s' % (self.workspace_root, output_line)
      return output_line
    patch_xcode_parsable_line = PatchOutputLine

  def HandleOutput(output):
    for line in output.splitlines():
      _logger.log_bazel_message(patch_xcode_parsable_line(line))

  def WatcherUpdate(watcher):
    """Processes any new events in the given watcher.

    Args:
      watcher: a BazelBuildEventsWatcher object.

    Returns:
      A list of new tulsiout file names seen.
    """
    new_events = watcher.check_for_new_events()
    new_outputs = []
    for build_event in new_events:
      if build_event.stderr:
        HandleOutput(build_event.stderr)
      if build_event.stdout:
        HandleOutput(build_event.stdout)
      if build_event.files:
        outputs = [x for x in build_event.files if x.endswith('.tulsiouts')]
        new_outputs.extend(outputs)
    return new_outputs

  def ReaderThread(file_handle, out_buffer):
    out_buffer.append(file_handle.read())
    file_handle.close()

  # Make sure the BEP JSON file exists and is empty. We do this to prevent
  # any sort of race between the watcher, bazel, and the old file contents.
  open(self.build_events_file_path, 'w').close()

  # Capture the stderr and stdout from Bazel. We only display it if we're
  # unable to read any BEP events.
  process = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             bufsize=1)

  # Register atexit function to clean up BEP file.
  atexit.register(_BEPFileExitCleanup, self.build_events_file_path)
  global CLEANUP_BEP_FILE_AT_EXIT
  CLEANUP_BEP_FILE_AT_EXIT = True

  # Start capturing output from Bazel.
  reader_buffer = []
  reader_thread = threading.Thread(target=ReaderThread,
                                   args=(process.stdout, reader_buffer))
  reader_thread.daemon = True
  reader_thread.start()

  with io.open(self.build_events_file_path, 'r', -1, 'utf-8', 'ignore'
              ) as bep_file:
    watcher = bazel_build_events.BazelBuildEventsWatcher(bep_file,
                                                         _PrintXcodeWarning)
    output_locations = []
    while process.returncode is None:
      output_locations.extend(WatcherUpdate(watcher))
      time.sleep(0.1)
      process.poll()

    # Drain any events that arrived after the final poll.
    output_locations.extend(WatcherUpdate(watcher))

    # If BEP JSON parsing failed, we should display the raw stdout and
    # stderr from Bazel.
    reader_thread.join()
    if not watcher.has_read_events():
      HandleOutput(reader_buffer[0])

    if process.returncode == 0 and not output_locations:
      CLEANUP_BEP_FILE_AT_EXIT = False
      # BUGFIX: the adjacent string literals previously lacked separating
      # spaces, rendering "file.Please ... theontents".
      _PrintXcodeError('Unable to find location of the .tulsiouts file. '
                       'Please report this as a Tulsi bug, including the '
                       'contents of %s.' % self.build_events_file_path)
      return 1, output_locations
    return process.returncode, output_locations
def _ExtractAspectOutputsData(self, output_files):
  """Converts aspect output from paths to json to a list of dictionaries.

  Args:
    output_files: A list of strings to files representing Bazel aspect output
      in UTF-8 JSON format.

  Returns:
    return_code, [dict]: A tuple with a return code as its first argument and
                         for its second argument, a list of dictionaries for
                         each output_file that could be interpreted as valid
                         JSON, representing the returned Bazel aspect
                         information.
    return_code, None: If an error occurred while converting the list of
                       files into JSON.
  """
  outputs_data = []
  for output_file in output_files:
    try:
      # Context manager closes the handle promptly instead of leaking it
      # until garbage collection.
      with open(output_file) as f:
        output_data = json.load(f)
    except (ValueError, IOError) as e:
      # NOTE: fixed a doubled quote in this message format.
      _PrintXcodeError('Failed to load output map "%s". '
                       '%s' % (output_file, e))
      return 600, None
    outputs_data.append(output_data)
  return 0, outputs_data
def _InstallArtifact(self, outputs_data):
  """Installs Bazel-generated artifacts into the Xcode output directory.

  Args:
    outputs_data: list of aspect output dictionaries; the first entry
      describes the primary artifact of the target being built.

  Returns:
    0 on success, a non-zero error code otherwise.
  """
  xcode_artifact_path = self.artifact_output_path

  if not outputs_data:
    _PrintXcodeError('Failed to load top level output file.')
    return 600

  primary_output_data = outputs_data[0]

  if 'artifact' not in primary_output_data:
    _PrintXcodeError(
        'Failed to find an output artifact for target %s in output map %r' %
        (xcode_artifact_path, primary_output_data))
    return 601

  primary_artifact = primary_output_data['artifact']
  artifact_archive_root = primary_output_data.get('archive_root')
  bundle_name = primary_output_data.get('bundle_name')

  # The PRODUCT_NAME used by the Xcode project is not trustable as it may be
  # modified by the user and, more importantly, may have been modified by
  # Tulsi to disambiguate multiple targets with the same name.
  self.bazel_product_name = bundle_name

  # We need to handle IPAs (from {ios, tvos}_application) differently from
  # ZIPs (from the other bundled rules) because they output slightly
  # different directory structures.
  is_ipa = primary_artifact.endswith('.ipa')
  is_zip = primary_artifact.endswith('.zip')

  if is_ipa or is_zip:
    expected_bundle_name = bundle_name + self.wrapper_suffix

    # The directory structure within the IPA is then determined based on
    # Bazel's package and/or product type.
    if is_ipa:
      bundle_subpath = os.path.join('Payload', expected_bundle_name)
    else:
      # If the artifact is a ZIP, assume that the bundle is the top-level
      # directory (this is the way in which Skylark rules package artifacts
      # that are not standalone IPAs).
      bundle_subpath = expected_bundle_name

    # Prefer to copy over files from the archive root instead of unzipping
    # the ipa/zip in order to help preserve timestamps. Note that the
    # archive root is only present for local builds; for remote builds we
    # must extract from the zip file.
    if self._IsValidArtifactArchiveRoot(artifact_archive_root, bundle_name):
      source_location = os.path.join(artifact_archive_root, bundle_subpath)
      exit_code = self._RsyncBundle(os.path.basename(primary_artifact),
                                    source_location,
                                    xcode_artifact_path)
    else:
      exit_code = self._UnpackTarget(primary_artifact,
                                     xcode_artifact_path,
                                     bundle_subpath)
    if exit_code:
      return exit_code

  elif os.path.isfile(primary_artifact):
    # Remove the old artifact before copying.
    if os.path.isfile(xcode_artifact_path):
      try:
        os.remove(xcode_artifact_path)
      except OSError as e:
        # NOTE: fixed a doubled quote in this message format.
        _PrintXcodeError('Failed to remove stale output file "%s". '
                         '%s' % (xcode_artifact_path, e))
        return 600
    exit_code = self._CopyFile(os.path.basename(primary_artifact),
                               primary_artifact,
                               xcode_artifact_path)
    if exit_code:
      return exit_code

  else:
    self._RsyncBundle(os.path.basename(primary_artifact),
                      primary_artifact,
                      xcode_artifact_path)

    # When the rules output a tree artifact, Tulsi will copy the bundle as
    # is into the expected Xcode output location. But because they're copied
    # as is from the bazel output, they come with bazel's permissions, which
    # are read only. Here we set them to write as well, so Xcode can modify
    # the bundle too (for example, for codesigning).
    chmod_timer = Timer('Modifying permissions of output bundle',
                        'bundle_chmod').Start()

    self._PrintVerbose('Spawning subprocess to add write permissions to '
                       'copied bundle...')
    process = subprocess.Popen(['chmod', '-R', 'uga+w', xcode_artifact_path])
    process.wait()
    chmod_timer.End()

  # No return code check as this is not an essential operation.
  self._InstallEmbeddedBundlesIfNecessary(primary_output_data)

  return 0
def _IsValidArtifactArchiveRoot(self, archive_root, bundle_name):
  """Returns true if the archive root is valid for use."""
  # An absent or non-directory archive root is never usable.
  if not (archive_root and os.path.isdir(archive_root)):
    return False

  # The archive root is only refreshed by local builds: the rules_apple
  # bundler transforms <name>.unprocessed.zip into <name>.archive-root.
  # Remote builds copy the zip but not the archive root, so an archive root
  # that is not strictly newer than its sibling zip must be treated as
  # stale. This is an implementation-detail workaround that improves build
  # times until the rules handle it directly.
  zip_sibling = os.path.join(os.path.dirname(archive_root),
                             '%s.unprocessed.zip' % bundle_name)
  if not os.path.isfile(zip_sibling):
    return False
  return os.path.getmtime(archive_root) > os.path.getmtime(zip_sibling)
def _InstallEmbeddedBundlesIfNecessary(self, output_data):
  """Install embedded bundles next to the current target's output.

  Args:
    output_data: aspect output dictionary, which may contain an
      'embedded_bundles' list describing dependency bundles.
  """
  # In order to find and load symbols for the binary installed on device,
  # Instruments needs to "see" it in Spotlight index somewhere on the local
  # filesystem. This is only needed for on-device instrumentation.
  #
  # Unfortunately, it does not seem to be possible to detect when a build is
  # being made for profiling, thus we can't exclude this step for on-device
  # non-profiling builds.
  if self.is_simulator or ('embedded_bundles' not in output_data):
    return

  timer = Timer('Installing embedded bundles',
                'installing_embedded_bundles').Start()
  for bundle_info in output_data['embedded_bundles']:
    bundle_name = bundle_info['bundle_name']
    bundle_extension = bundle_info['bundle_extension']
    full_name = bundle_name + bundle_extension
    output_path = os.path.join(self.built_products_dir, full_name)
    # TODO(b/68936732): See if copying just the binary (not the whole
    # bundle) is enough to make Instruments work.
    if self._IsValidArtifactArchiveRoot(bundle_info['archive_root'],
                                        bundle_name):
      # Local build: copy straight from the archive root.
      source_path = os.path.join(bundle_info['archive_root'], full_name)
      self._RsyncBundle(full_name, source_path, output_path)
    else:
      # Try to find the embedded bundle within the installed main bundle.
      bundle_path = self._FindEmbeddedBundleInMain(bundle_name,
                                                   bundle_extension)
      if bundle_path:
        self._RsyncBundle(full_name, bundle_path, output_path)
      else:
        _PrintXcodeWarning('Could not find bundle %s in main bundle. ' %
                           (bundle_name + bundle_extension) +
                           'Device-level Instruments debugging will be '
                           'disabled for this bundle. Please report a '
                           'Tulsi bug and attach a full Xcode build log.')
  timer.End()
# Maps embedded-bundle extensions to the subdirectory of the enclosing
# bundle in which bundles of that type are expected to be found.
_EMBEDDED_BUNDLE_PATHS = {
    '.appex': 'PlugIns',
    '.framework': 'Frameworks'
}
def _FindEmbeddedBundleInMain(self, bundle_name, bundle_extension):
  """Retrieves the first embedded bundle found within our main bundle."""
  folder = os.environ.get('EXECUTABLE_FOLDER_PATH')
  if folder:
    # Search starting at the main bundle inside the built-products dir.
    search_root = os.path.join(self.built_products_dir, folder)
    return self._FindEmbeddedBundle(bundle_name,
                                    bundle_extension,
                                    search_root)
  # Without EXECUTABLE_FOLDER_PATH there is no main bundle to search.
  return None
def _FindEmbeddedBundle(self, bundle_name, bundle_extension, bundle_path):
  """Retrieves the first embedded bundle found within this bundle path."""
  subfolder = self._EMBEDDED_BUNDLE_PATHS.get(bundle_extension)
  if not subfolder:
    # Unknown extension: nothing to look for.
    return None

  candidate = os.path.join(bundle_path,
                           subfolder,
                           bundle_name + bundle_extension)
  if os.path.isdir(candidate):
    return candidate

  # For frameworks not in the main app bundle, and possibly other executable
  # bundle content in the future, we recurse through every .appex in PlugIns
  # to find those frameworks.
  #
  # This won't support frameworks that could potentially have the same name
  # but are different between the app and extensions, but we intentionally
  # choose not to handle that case. Xcode build system only supports
  # uniquely named frameworks, and we shouldn't confuse the dynamic loader
  # with frameworks that have the same image names but different content.
  plugins_dir = os.path.join(bundle_path, 'PlugIns')
  if not os.path.isdir(plugins_dir):
    return None

  # Recurse into each entry under PlugIns; first hit wins.
  for entry in os.listdir(plugins_dir):
    found = self._FindEmbeddedBundle(bundle_name,
                                     bundle_extension,
                                     os.path.join(plugins_dir, entry))
    if found:
      return found
  return None
def _InstallGeneratedHeaders(self, outputs):
  """Invokes install_genfiles.py to install generated Bazel files."""
  timer = Timer('Installing generated headers',
                'installing_generated_headers').Start()
  # install_genfiles.py lives alongside this script.
  script = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        'install_genfiles.py')
  command = [script, self.bazel_exec_root]
  command.extend(outputs)

  self._PrintVerbose('Spawning subprocess install_genfiles.py to copy '
                     'generated files in the background...')
  subprocess.Popen(command).wait()
  timer.End()
def _InstallBundle(self, source_path, output_path):
  """Copies the bundle at source_path to output_path.

  Args:
    source_path: path to the source bundle directory.
    output_path: destination path for the bundle.

  Returns:
    (exit_code, installed_path): 0 and the output path on success;
    (0, None) when source_path is not a directory; a non-zero code and
    None when the stale output could not be removed.
  """
  if not os.path.isdir(source_path):
    return 0, None

  if os.path.isdir(output_path):
    try:
      shutil.rmtree(output_path)
    except OSError as e:
      # NOTE: fixed a doubled quote in this message format.
      _PrintXcodeError('Failed to remove stale bundle "%s". '
                       '%s' % (output_path, e))
      return 700, None

  exit_code = self._CopyBundle(os.path.basename(source_path),
                               source_path,
                               output_path)
  return exit_code, output_path
def _RsyncBundle(self, source_path, full_source_path, output_path):
  """Rsyncs the given bundle to the given expected output path."""
  self._PrintVerbose('Rsyncing %s to %s' % (source_path, output_path))

  # A trailing slash makes rsync copy the directory's contents rather than
  # the directory itself.
  src = full_source_path
  if not src.endswith('/'):
    src += '/'

  # -c compares by checksum, -v is verbose, --delete removes stale files.
  # The remaining flags mirror -a but deliberately skip timestamp
  # preservation so a file's mtime only changes when its content does.
  rsync_command = ['rsync',
                   '-vcrlpgoD',
                   '--delete',
                   src,
                   output_path]
  try:
    subprocess.check_output(rsync_command, stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as e:
    _PrintXcodeError('Rsync failed. %s' % e)
    return 650
  return 0
def _CopyBundle(self, source_path, full_source_path, output_path):
  """Copies the given bundle to the given expected output path.

  Args:
    source_path: short name used only for the verbose log message.
    full_source_path: absolute path of the bundle to copy.
    output_path: destination path.

  Returns:
    0 on success, 650 if the copy failed.
  """
  self._PrintVerbose('Copying %s to %s' % (source_path, output_path))
  try:
    # tree=True copies the whole directory tree.
    CopyOnWrite(full_source_path, output_path, tree=True)
  except OSError as e:
    _PrintXcodeError('Copy failed. %s' % e)
    return 650
  return 0
def _CopyFile(self, source_path, full_source_path, output_path):
  """Copies the given file to the given expected output path."""
  self._PrintVerbose('Copying %s to %s' % (source_path, output_path))

  # Create the destination's parent directory if it is missing.
  parent_dir = os.path.dirname(output_path)
  if not os.path.exists(parent_dir):
    try:
      os.makedirs(parent_dir)
    except OSError as e:
      _PrintXcodeError('Failed to create output directory "%s". '
                       '%s' % (parent_dir, e))
      return 650

  try:
    CopyOnWrite(full_source_path, output_path)
  except OSError as e:
    _PrintXcodeError('Copy failed. %s' % e)
    return 650
  return 0
def _UnpackTarget(self, bundle_path, output_path, bundle_subpath):
"""Unpacks generated bundle into the given expected output path."""
self._PrintVerbose('Unpacking %s to %s' % (bundle_path, output_path))
if not os.path.isfile(bundle_path):
_PrintXcodeError('Generated bundle not found at "%s"' % bundle_path)
return 670
if os.path.isdir(output_path):
try:
shutil.rmtree(output_path)
except OSError as e:
_PrintXcodeError('Failed to remove stale output directory ""%s". '
'%s' % (output_path, e))
return 600
# We need to handle IPAs (from {ios, tvos}_application) differently from
# ZIPs (from the other bundled rules) because they output slightly different
# directory structures.
is_ipa = bundle_path.endswith('.ipa')
with zipfile.ZipFile(bundle_path, 'r') as zf:
for item in zf.infolist():
filename = item.filename
# Support directories do not seem to be needed by the debugger and are
# skipped.
basedir = filename.split(os.sep)[0]
if basedir.endswith('Support') or basedir.endswith('Support2'):
continue
if len(filename) < len(bundle_subpath):
continue
attributes = (item.external_attr >> 16) & 0o777
self._PrintVerbose('Extracting %s (%o)' % (filename, attributes),
level=1)
if not filename.startswith(bundle_subpath):
_PrintXcodeWarning('Mismatched extraction path. Bundle content '
'at "%s" expected to have subpath of "%s"' %
(filename, bundle_subpath))
dir_components = self._SplitPathComponents(filename)
# Get the file's path, ignoring the payload components if the archive
# is an IPA.
if is_ipa:
subpath = os.path.join(*dir_components[2:])
else:
subpath = os.path.join(*dir_components[1:])
target_path = os.path.join(output_path, subpath)
# Ensure the target directory exists.
try:
target_dir = os.path.dirname(target_path)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
except OSError as e:
_PrintXcodeError(
'Failed to create target path "%s" during extraction. %s' % (
target_path, e))
return 671
# If the archive item looks like a file, extract it.
if not filename.endswith(os.sep):
with zf.open(item) as src, file(target_path, 'wb') as dst:
shutil.copyfileobj(src, dst)
# Patch up the extracted file's attributes to match the zip content.
if attributes:
os.chmod(target_path, attributes)
return 0
def _InstallDSYMBundles(self, output_dir, outputs_data):
  """Copies any generated dSYM bundles to the given directory.

  Args:
    output_dir: directory into which dSYM bundles are installed.
    outputs_data: list of aspect output dictionaries; the first entry
      describes the primary target.

  Returns:
    (exit_code, dsym_paths): 0 and the list of installed dSYM paths, or
    (0, None) when the build produced no dSYMs.
  """
  # Indicates that our aspect reports a dSYM was generated for this build.
  has_dsym = outputs_data[0]['has_dsym']
  if not has_dsym:
    return 0, None

  # Start the timer now that we know we have dSYM bundles to install.
  timer = Timer('Installing DSYM bundles', 'installing_dsym').Start()

  # (dsym_path, dsym_filename) pairs to install. BUGFIX: initialized up
  # front so the update below cannot hit a NameError when
  # DWARF_DSYM_FILE_NAME is unset or empty.
  dsym_to_process = set()

  # Declares the Xcode-generated name of our main target's dSYM.
  # This environment variable is always set, for any possible Xcode output
  # that could generate a dSYM bundle.
  target_dsym = os.environ.get('DWARF_DSYM_FILE_NAME')
  if target_dsym:
    dsym_to_process.add((self.build_path, target_dsym))

  # Collect additional dSYM bundles generated by the dependencies of this
  # build such as extensions or frameworks.
  child_dsyms = set()
  for data in outputs_data:
    for bundle_info in data.get('embedded_bundles', []):
      if not bundle_info['has_dsym']:
        continue
      # Uses the parent of archive_root to find dSYM bundles associated with
      # app/extension/df bundles. Currently hinges on implementation of the
      # build rules.
      dsym_path = os.path.dirname(bundle_info['archive_root'])
      bundle_full_name = (bundle_info['bundle_name'] +
                          bundle_info['bundle_extension'])
      dsym_filename = '%s.dSYM' % bundle_full_name
      child_dsyms.add((dsym_path, dsym_filename))
  dsym_to_process.update(child_dsyms)

  dsyms_found = []
  for dsym_path, dsym_filename in dsym_to_process:
    input_dsym_full_path = os.path.join(dsym_path, dsym_filename)
    output_full_path = os.path.join(output_dir, dsym_filename)
    exit_code, path = self._InstallBundle(input_dsym_full_path,
                                          output_full_path)
    if exit_code:
      _PrintXcodeWarning('Failed to install dSYM "%s" (%s)'
                         % (dsym_filename, exit_code))
    elif path is None:
      _PrintXcodeWarning('Could not find a dSYM bundle named "%s"'
                         % dsym_filename)
    else:
      dsyms_found.append(path)

  timer.End()
  return 0, dsyms_found
def _ResignBundle(self, bundle_path, signing_identity, entitlements=None):
  """Re-signs the bundle with the given signing identity and entitlements."""
  # A no-op when code signing is disabled (e.g. simulator builds).
  if not self.codesigning_allowed:
    return 0

  timer = Timer('\tSigning ' + bundle_path, 'signing_bundle').Start()
  command = [
      'xcrun',
      'codesign',
      '-f',
      '--timestamp=none',
      '-s',
      signing_identity,
  ]
  if entitlements:
    command += ['--entitlements', entitlements]
  else:
    # Without an explicit entitlements file, keep whatever entitlements the
    # bundle was originally signed with.
    command += ['--preserve-metadata=entitlements']
  command += [bundle_path]

  returncode, output = self._RunSubprocess(command)
  timer.End()
  if not returncode:
    return 0
  _PrintXcodeError('Re-sign command %r failed. %s' % (command, output))
  return 800 + returncode
def _ResignTestArtifacts(self):
  """Resign test related artifacts that Xcode injected into the outputs."""
  if not self.is_test:
    return 0

  # The bundle at the expected output path is the one bazel signed; reuse
  # its identity for everything re-signed here.
  signing_identity = self._ExtractSigningIdentity(self.artifact_output_path)
  if not signing_identity:
    return 800

  exit_code = 0
  timer = Timer('Re-signing injected test host artifacts',
                'resigning_test_host').Start()
  if self.test_host_binary:
    # Unit tests: re-sign the frameworks that Xcode injected into the test
    # host bundle.
    exit_code = self._ResignXcodeTestFrameworks(
        os.path.dirname(self.test_host_binary), signing_identity)
  else:
    # UI tests: re-sign the injected frameworks and the UI test runner app
    # itself, which additionally needs entitlements.
    exit_code = self._ResignXcodeTestFrameworks(
        self.codesigning_folder_path, signing_identity)
    if exit_code == 0:
      entitlements_path = self._InstantiateUIRunnerEntitlements()
      if entitlements_path:
        exit_code = self._ResignBundle(
            self.codesigning_folder_path,
            signing_identity,
            entitlements_path)
      else:
        _PrintXcodeError('Could not instantiate UI runner entitlements.')
        exit_code = 800
  timer.End()
  return exit_code
def _ResignXcodeTestFrameworks(self, bundle, signing_identity):
  """Re-signs the support frameworks injected by Xcode in the given bundle."""
  if not self.codesigning_allowed:
    return 0

  for framework in XCODE_INJECTED_FRAMEWORKS:
    injected_path = os.path.join(bundle, 'Frameworks', framework)
    # Skip frameworks Xcode did not actually inject into this bundle.
    if not (os.path.isdir(injected_path) or os.path.isfile(injected_path)):
      continue
    result = self._ResignBundle(injected_path, signing_identity)
    if result != 0:
      return result
  return 0
def _InstantiateUIRunnerEntitlements(self):
  """Substitute team and bundle identifiers into UI runner entitlements.

  This method throws an IOError exception if the template wasn't found in
  its expected location, or an OSError if the expected output folder could
  not be created.

  Returns:
    The path to where the entitlements file was generated, or None when
    code signing is disabled.
  """
  if not self.codesigning_allowed:
    return None

  if not os.path.exists(self.derived_sources_folder_path):
    os.makedirs(self.derived_sources_folder_path)

  output_file = os.path.join(
      self.derived_sources_folder_path,
      self.bazel_product_name + '_UIRunner.entitlements')
  if os.path.exists(output_file):
    os.remove(output_file)

  with open(self.runner_entitlements_template, 'r') as template:
    contents = template.read()

  # Fill the template's placeholders from the signed primary artifact.
  substitutions = {
      '$(TeamIdentifier)':
          self._ExtractSigningTeamIdentifier(self.artifact_output_path),
      '$(BundleIdentifier)':
          self._ExtractSigningBundleIdentifier(self.artifact_output_path),
  }
  for placeholder, value in substitutions.items():
    contents = contents.replace(placeholder, value)

  with open(output_file, 'w') as output:
    output.write(contents)
  return output_file
def _ExtractSigningIdentity(self, signed_bundle):
  """Returns the identity used to sign the given bundle path.

  Args:
    signed_bundle: path to a codesigned bundle.

  Returns:
    The 'Authority' attribute reported for the bundle's signature.
  """
  return self._ExtractSigningAttribute(signed_bundle, 'Authority')
def _ExtractSigningTeamIdentifier(self, signed_bundle):
  """Returns the team identifier used to sign the given bundle path.

  Args:
    signed_bundle: path to a codesigned bundle.

  Returns:
    The 'TeamIdentifier' attribute reported for the bundle's signature.
  """
  return self._ExtractSigningAttribute(signed_bundle, 'TeamIdentifier')
def _ExtractSigningBundleIdentifier(self, signed_bundle):
  """Returns the bundle identifier used to sign the given bundle path.

  Args:
    signed_bundle: path to a codesigned bundle.

  Returns:
    The 'Identifier' attribute reported for the bundle's signature.
  """
  return self._ExtractSigningAttribute(signed_bundle, 'Identifier')
def _ExtractSigningAttribute(self, signed_bundle, attribute):
  """Returns the attribute used to sign the given bundle path."""
  if not self.codesigning_allowed:
    return '<CODE_SIGNING_ALLOWED=NO>'

  # Serve from the per-bundle cache when possible; `codesign -dvv` is slow.
  cached = self.codesign_attributes.get(signed_bundle)
  if cached:
    return cached.Get(attribute)

  timer = Timer('\tExtracting signature for ' + signed_bundle,
                'extracting_signature').Start()
  codesign_output = subprocess.check_output(
      ['xcrun', 'codesign', '-dvv', signed_bundle],
      stderr=subprocess.STDOUT)
  timer.End()

  attributes = CodesignBundleAttributes(codesign_output)
  self.codesign_attributes[signed_bundle] = attributes
  return attributes.Get(attribute)
def _UpdateLLDBInit(self, clear_source_map=False):
  """Updates ~/.lldbinit-tulsiproj to enable debugging of Bazel binaries."""
  # Make sure a reference to ~/.lldbinit-tulsiproj exists in ~/.lldbinit or
  # ~/.lldbinit-Xcode; the latter takes priority when present.
  BootstrapLLDBInit()

  project_basename = os.path.basename(self.project_file_path)
  with open(TULSI_LLDBINIT_FILE, 'w') as out:
    out.write('# This file is autogenerated by Tulsi and should not be '
              'edited.\n')

    if clear_source_map:
      out.write('settings clear target.source-map\n')
      return 0

    if self.normalized_prefix_map:
      source_map = ('./', self._NormalizePath(self.workspace_root))
      out.write('# This maps the normalized root to that used by '
                '%r.\n' % project_basename)
    else:
      # NOTE: settings target.source-map is different from
      # DBGSourcePathRemapping; the former is an LLDB target-level
      # remapping API that rewrites breakpoints, the latter is an LLDB
      # module-level remapping API that changes DWARF debug info in memory.
      #
      # If we had multiple remappings, it would not make sense for the
      # two APIs to share the same mappings. They have very different
      # side-effects in how they individually handle debug information.
      source_map = self._ExtractTargetSourceMap()
      out.write('# This maps Bazel\'s execution root to that used by '
                '%r.\n' % project_basename)

    out.write('settings set target.source-map "%s" "%s"\n' % source_map)
  return 0
def _DWARFdSYMBinaries(self, dsym_bundle_path):
"""Returns an array of abs paths to DWARF binaries in the dSYM bundle.
Args:
dsym_bundle_path: absolute path to the dSYM bundle.
Returns:
str[]: a list of strings representing the absolute paths to each binary
found within the dSYM bundle.
"""
dwarf_dir = os.path.join(dsym_bundle_path,
'Contents',
'Resources',
'DWARF')
dsym_binaries = []
for f in os.listdir(dwarf_dir):
# Ignore hidden files, such as .DS_Store files.
if not f.startswith('.'):
# Append full path info.
dsym_binary = os.path.join(dwarf_dir, f)
dsym_binaries.append(dsym_binary)
return dsym_binaries
def _UUIDInfoForBinary(self, source_binary_path):
  """Returns exit code of dwarfdump along with every UUID + arch found.

  Args:
    source_binary_path: absolute path to the binary file.

  Returns:
    (Int, str[(str, str)]): a tuple containing the return code of dwarfdump
                            as its first element, and a list of strings
                            representing each UUID found for each given
                            binary slice found within the binary with its
                            given architecture, if no error has occurred.
  """
  returncode, output = self._RunSubprocess([
      'xcrun',
      'dwarfdump',
      '--uuid',
      source_binary_path
  ])
  if returncode:
    # Non-fatal here: warn and hand the exit code back to the caller, which
    # decides whether to abort.
    _PrintXcodeWarning('dwarfdump returned %d while finding the UUID for %s'
                       % (returncode, source_binary_path))
    return (returncode, [])

  # All UUIDs for binary slices will be returned as the second from left,
  # from output; "UUID: D4DE5AA2-79EE-36FE-980C-755AED318308 (x86_64)
  # /Applications/Calendar.app/Contents/MacOS/Calendar"
  # NOTE(review): this assumes _RunSubprocess returns text; if it yields
  # bytes under Python 3, the '\n' split below would need a decode first —
  # TODO confirm.
  uuids_found = []
  for dwarfdump_output in output.split('\n'):
    if not dwarfdump_output:
      continue
    # Group 1: UUID; group 2: architecture inside the parentheses.
    found_output = re.match(r'^(?:UUID: )([^ ]+) \(([^)]+)', dwarfdump_output)
    if not found_output:
      continue
    found_uuid = found_output.group(1)
    if not found_uuid:
      continue
    found_arch = found_output.group(2)
    if not found_arch:
      continue
    uuids_found.append((found_uuid, found_arch))

  return (0, uuids_found)
def _CreateUUIDPlist(self, dsym_bundle_path, uuid, arch, source_maps):
  """Creates a UUID.plist in a dSYM bundle to redirect sources.

  Args:
    dsym_bundle_path: absolute path to the dSYM bundle.
    uuid: string representing the UUID of the binary slice with paths to
        remap in the dSYM bundle.
    arch: the architecture of the binary slice.
    source_maps: list of tuples representing all absolute paths to source
        files compiled by Bazel as strings ($0) associated with the
        paths to Xcode-visible sources used for the purposes of
        Tulsi debugging as strings ($1).

  Returns:
    Bool: True if no error was found, or False, representing a failure to
        write when creating the plist.
  """
  # Create a UUID plist at (dsym_bundle_path)/Contents/Resources/.
  remap_plist = os.path.join(dsym_bundle_path,
                             'Contents',
                             'Resources',
                             '%s.plist' % uuid)

  # Via an XML plist, add the mappings from _ExtractTargetSourceMap().
  # The XML is written by hand (not plistlib) to keep the exact layout.
  try:
    with open(remap_plist, 'w') as out:
      out.write('<?xml version="1.0" encoding="UTF-8"?>\n'
                '<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
                '"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
                '<plist version="1.0">\n'
                '<dict>\n'
                '<key>DBGSourcePathRemapping</key>\n'
                '<dict>\n')
      for source_map in source_maps:
        # Add the mapping as a DBGSourcePathRemapping to the UUID plist here.
        out.write('<key>%s</key>\n<string>%s</string>\n' % source_map)

      # Make sure that we also set DBGVersion to 3.
      out.write('</dict>\n'
                '<key>DBGVersion</key>\n'
                '<string>3</string>\n'
                '</dict>\n'
                '</plist>\n')
  except OSError as e:
    _PrintXcodeError('Failed to write %s, received error %s' %
                     (remap_plist, e))
    return False

  # Update the dSYM symbol cache with a reference to this dSYM bundle.
  err_msg = self.update_symbol_cache.UpdateUUID(uuid,
                                                dsym_bundle_path,
                                                arch)
  if err_msg:
    # Cache failure is non-fatal: the plist was already written, so only
    # warn and still report success.
    _PrintXcodeWarning('Attempted to save (uuid, dsym_bundle_path, arch) '
                       'to DBGShellCommands\' dSYM cache, but got error '
                       '\"%s\".' % err_msg)

  return True
def _CleanExistingDSYMs(self):
"""Clean dSYM bundles that were left over from a previous build."""
output_dir = self.built_products_dir
output_dir_list = os.listdir(output_dir)
for item in output_dir_list:
if item.endswith('.dSYM'):
shutil.rmtree(os.path.join(output_dir, item))
def _PlistdSYMPaths(self, dsym_bundle_path):
  """Adds Plists to a given dSYM bundle to redirect DWARF data.

  Returns 0 on success, 404 if no binaries were found in the bundle, 405 if
  a UUID plist could not be written, or dwarfdump's nonzero exit code.
  """
  # Retrieve the paths that we are expected to remap.
  # Always include a direct path from the execroot to Xcode-visible sources.
  source_maps = [self._ExtractTargetSourceMap()]

  # Remap relative paths from the workspace root.
  if self.normalized_prefix_map:
    # Take the normalized path and map that to Xcode-visible sources.
    source_maps.append(('./', self._NormalizePath(self.workspace_root)))

  # Find the binaries within the dSYM bundle. UUIDs will match that of the
  # binary it was based on.
  dsym_binaries = self._DWARFdSYMBinaries(dsym_bundle_path)

  if not dsym_binaries:
    _PrintXcodeWarning('Could not find the binaries that the dSYM %s was '
                       'based on to determine DWARF binary slices to patch. '
                       'Debugging will probably fail.' % (dsym_bundle_path))
    return 404

  # Find the binary slice UUIDs with dwarfdump from each binary.
  for source_binary_path in dsym_binaries:
    returncode, uuid_info_found = self._UUIDInfoForBinary(source_binary_path)

    if returncode:
      return returncode

    # Create a plist per UUID, each indicating a binary slice to remap paths.
    for uuid, arch in uuid_info_found:
      plist_created = self._CreateUUIDPlist(dsym_bundle_path,
                                            uuid,
                                            arch,
                                            source_maps)
      if not plist_created:
        return 405

  return 0
def _NormalizePath(self, path):
"""Returns paths with a common form, normalized with a trailing slash.
Args:
path: a file system path given in the form of a string.
Returns:
str: a normalized string with a trailing slash, based on |path|.
"""
return os.path.normpath(path) + os.sep
def _ExtractTargetSourceMap(self, normalize=True):
"""Extracts the source path as a tuple associated with the WORKSPACE path.
Args:
normalize: Defines if all paths should be normalized. Preferred for APIs
like DBGSourcePathRemapping and target.source-map but won't
work for the purposes of -fdebug-prefix-map.
Returns:
None: if an error occurred.
(str, str): a single tuple representing all absolute paths to source
files compiled by Bazel as strings ($0) associated with
the paths to Xcode-visible sources used for the purposes
of Tulsi debugging as strings ($1).
"""
# All paths route to the "workspace root" for sources visible from Xcode.
sm_destpath = self.workspace_root
if normalize:
sm_destpath = self._NormalizePath(sm_destpath)
# Add a redirection for the Bazel execution root, the path where sources
# are referenced by Bazel.
sm_execroot = self.bazel_exec_root
if normalize:
sm_execroot = self._NormalizePath(sm_execroot)
return (sm_execroot, sm_destpath)
def _LinkTulsiWorkspace(self):
  """Links the Bazel Workspace to the Tulsi Workspace (`tulsi-workspace`).

  Returns -1 on failure; success falls through returning None implicitly.
  """
  tulsi_workspace = os.path.join(self.project_file_path,
                                 '.tulsi',
                                 'tulsi-workspace')
  # Recreate the symlink each run so it tracks the current exec root.
  if os.path.islink(tulsi_workspace):
    os.unlink(tulsi_workspace)

  os.symlink(self.bazel_exec_root, tulsi_workspace)
  # os.path.exists follows the link, so this also catches a symlink whose
  # target does not exist (i.e. a stale exec root).
  if not os.path.exists(tulsi_workspace):
    _PrintXcodeError(
        'Linking Tulsi Workspace to %s failed.' % tulsi_workspace)
    return -1
@staticmethod
def _SplitPathComponents(path):
"""Splits the given path into an array of all of its components."""
components = path.split(os.sep)
# Patch up the first component if path started with an os.sep
if not components[0]:
components[0] = os.sep
return components
def _RunSubprocess(self, cmd):
"""Runs the given command as a subprocess, returning (exit_code, output)."""
self._PrintVerbose('%r' % cmd, 1)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = process.communicate()
return (process.returncode, output)
def _PrintVerbose(self, msg, level=0):
  # Forward |msg| to _PrintUnbuffered only when the configured verbosity
  # strictly exceeds |level|.
  if self.verbose > level:
    _PrintUnbuffered(msg)
def main(argv):
  """Entry point: runs a bridged Bazel build, returning a process exit code."""
  # BUILD_SETTINGS is a module-level constant; None indicates the settings
  # module was not populated correctly.
  build_settings = bazel_build_settings.BUILD_SETTINGS
  if build_settings is None:
    _Fatal('Unable to resolve build settings. Please report a Tulsi bug.')
    return 1
  return BazelBuildBridge(build_settings).Run(argv)
if __name__ == '__main__':
  # Register the interrupt handler immediately in case we receive SIGINT while
  # trying to acquire the lock.
  signal.signal(signal.SIGINT, _InterruptHandler)
  # Serialize concurrent invocations behind a lock file.
  _LockFileAcquire(_LockFileCreate())
  _logger = tulsi_logging.Logger()
  # Surface (non-fatal) logging configuration problems as Xcode warnings.
  logger_warning = tulsi_logging.validity_check()
  if logger_warning:
    _PrintXcodeWarning(logger_warning)
  # Time the complete build end-to-end for the performance log.
  _timer = Timer('Everything', 'complete_build').Start()
  _exit_code = main(sys.argv)
  _timer.End()
  sys.exit(_exit_code)
|
test_server.py | import os
import threading
import json
import numpy as np
import pytest
from skimage import io
from skimage._shared._tempfile import temporary_file
from scipy import ndimage as ndi
from gala import features, serve, evaluate as ev
# Directory containing this test file, so the fixture data can be located
# regardless of the initial working directory.
D = os.path.dirname(os.path.abspath(__file__))
# Tests below load fixtures via relative paths: switch into the bundled
# SNEMI mini example dataset.
os.chdir(os.path.join(D, 'example-data/snemi-mini'))
import os
import zmq
def id_serve(port=5555, curr_id=1):
    """Serve blocks of fresh IDs over a ZMQ REP socket.

    Each request is a JSON message {'count': n}; the reply is the half-open
    range {'begin': ..., 'end': ...} of n newly reserved IDs.
    """
    context = zmq.Context()
    rep = context.socket(zmq.REP)
    rep.bind('tcp://*:%s' % port)
    while True:
        n_requested = rep.recv_json()['count']
        rep.send_json({'begin': curr_id, 'end': curr_id + n_requested})
        curr_id += n_requested
@pytest.fixture
def dummy_data():
    """Small synthetic fragmentation / ground-truth pair plus mock features."""
    base_frag = np.arange(1, 17, dtype=int).reshape((4, 4))
    base_gt = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3] * 4, [3] * 4],
                       dtype=int)
    # Upsample both label images 4x with order-0 (nearest) interpolation so
    # label values and region shapes are preserved.
    frag, gt = (ndi.zoom(im, 4, order=0, mode='reflect')
                for im in [base_frag, base_gt])
    fman = features.base.Mock(frag, gt)
    return frag, gt, fman
@pytest.fixture
def dummy_data2(dummy_data):
    """Variant of dummy_data with two extra fragments crossing gt borders."""
    frag, gt, _ = dummy_data
    # Carve out two additional small fragments (IDs 17 and 18).
    frag[7, 7:9] = 17
    frag[7:10, -1] = 18
    return frag, gt, features.base.Mock(frag, gt)
def test_server(dummy_data):
    """End-to-end: proofreading through the solver server improves VI.

    Starts a Solver listening on a ZMQ socket in a background thread, runs
    two automated proofreading operations against it, and checks the result.
    """
    frag, gt, fman = dummy_data
    host, port = 'tcp://localhost', 5588
    solver = serve.Solver(frag, feature_manager=fman,
                          address='tcp://*:' + str(port))
    thread = threading.Thread(target=solver.listen, name='solver')
    thread.start()
    _, dst = serve.proofread(frag, gt, host=host, port=port, num_operations=2,
                             stop_when_finished=True, random_state=0)
    # dst maps fragment IDs to segment IDs; index with frag to relabel it.
    result = np.array(dst)[frag]
    # test: resulting segmentation should be improvement over fragments alone
    assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
            ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
    thread.join()
def test_server_imperfect_fragments(dummy_data2):
    """Same as test_server but with fragments that violate gt boundaries."""
    frag, gt, fman = dummy_data2
    # Distinct port so this test does not collide with test_server's socket.
    host, port = 'tcp://localhost', 5589
    solver = serve.Solver(frag, feature_manager=fman,
                          address='tcp://*:' + str(port))
    thread = threading.Thread(target=solver.listen, name='solver')
    thread.start()
    _, dst = serve.proofread(frag, gt, host=host, port=port, num_operations=2,
                             stop_when_finished=True, random_state=0)
    # dst maps fragment IDs to segment IDs; index with frag to relabel it.
    result = np.array(dst)[frag]
    # test: resulting segmentation should be improvement over fragments alone
    assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
            ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
    thread.join()
def test_server_with_id_service(dummy_data):
    """Solver configured from a JSON file obtains fresh IDs from id_serve."""
    frag, gt, fman = dummy_data
    id_service_port = 5600
    config = {'client_url': 'tcp://*:5590',
              'id_service_url': 'tcp://localhost:%i' % id_service_port,
              'solver_url': 'tcp://localhost:5590'}
    with temporary_file('.json') as config_filename:
        with open(config_filename, 'w') as fout:
            json.dump(config, fout)
        solver = serve.Solver(frag, feature_manager=fman,
                              config_file=config_filename)
    # Arbitrary large starting ID; new segment IDs must start above it.
    starting_id = 23461
    # daemon=True: the id service loops forever, so let it die with the test.
    id_thread = threading.Thread(target=id_serve, name='id-service',
                                 daemon=True,
                                 kwargs=dict(port=id_service_port,
                                             curr_id=starting_id))
    id_thread.start()
    thread = threading.Thread(target=solver.listen, name='solver')
    thread.start()
    # maxsplit=1 from the right separates 'tcp://localhost' from the port.
    host, port = config['solver_url'].rsplit(':', maxsplit=1)
    _, dst = serve.proofread(frag, gt, host=host, port=int(port),
                             num_operations=2, stop_when_finished=True,
                             random_state=0)
    result = np.array(dst)[frag]
    # test: resulting segmentation should be improvement over fragments alone
    assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
            ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
    # test 2: make sure ID service worked: starting ID should be as above
    # should be equal but boundary ID messes things up
    assert np.min(result) > starting_id
    thread.join()
def test_server_with_periodic_send(dummy_data):
    """Like test_server_with_id_service, but the solver pushes segmentations
    periodically (send_every=10) instead of the client requesting them."""
    frag, gt, fman = dummy_data
    id_service_port = 5601
    config = {'client_url': 'tcp://*:5591',
              'id_service_url': 'tcp://localhost:%i' % id_service_port,
              'solver_url': 'tcp://localhost:5591'}
    with temporary_file('.json') as config_filename:
        with open(config_filename, 'w') as fout:
            json.dump(config, fout)
        solver = serve.Solver(frag, feature_manager=fman,
                              config_file=config_filename)
    starting_id = 23461
    id_thread = threading.Thread(target=id_serve, name='id-service',
                                 daemon=True,
                                 kwargs=dict(port=id_service_port,
                                             curr_id=starting_id))
    id_thread.start()
    # NOTE(review): the solver thread is daemon here and never joined —
    # presumably because send_every keeps it alive past the proofread; verify.
    thread = threading.Thread(target=solver.listen, name='solver', daemon=True,
                              kwargs=dict(send_every=10))
    thread.start()
    host, port = config['solver_url'].rsplit(':', maxsplit=1)
    _, dst = serve.proofread(frag, gt, host=host, port=int(port),
                             num_operations=2, stop_when_finished=True,
                             request_seg=False, random_state=0)
    result = np.array(dst)[frag]
    # test: resulting segmentation should be improvement over fragments alone
    assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
            ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
    # test 2: make sure ID service worked: starting ID should be as above
    # should be equal but boundary ID messes things up
    assert np.min(result) > starting_id
@pytest.fixture
def data():
    """Load fragmentation, ground truth and probability images from the cwd
    (which module import switched to the SNEMI mini dataset)."""
    images = [io.imread(filename) for filename in sorted(os.listdir('.'))]
    frag, gt, pr = images
    return frag, gt, pr
@pytest.mark.skipif('GALA_TEST_FULL' not in os.environ,
                    reason=("Test takes too long; "
                            "set GALA_TEST_FULL env variable to run this."))
def test_server_long(data):
    """Full proofread of the real dataset; opt-in via GALA_TEST_FULL."""
    frag, gt, pr = data
    host, port = 'tcp://localhost', 5590
    solver = serve.Solver(frag, pr, port=port, host='tcp://*')
    thread = threading.Thread(target=solver.listen, name='solver')
    thread.start()
    # No num_operations limit: proofread runs until finished.
    _, dst = serve.proofread(frag, gt, host=host, port=port,
                             stop_when_finished=True, random_state=0)
    result = np.array(dst)[frag]
    # test: resulting segmentation should be improvement over fragments alone
    assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
            ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
    thread.join()
|
test_dag_serialization.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
# Pod used as the bash task's `pod_override` executor_config; referenced in
# both make_simple_dag() and the serialized ground truth below.
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(
        containers=[
            k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
        ]
    ),
)

# Expected serialized form of the DAG built by make_simple_dag(); compared
# against the actual serialization in test_serialization.
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                "retry_delay": {"__type": "timedelta", "__var": 300.0},
                "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                "sla": {"__type": "timedelta", "__var": 100.0},
            },
        },
        "start_date": 1564617600.0,
        '_task_group': {
            '_group_id': None,
            'prefix_group_id': True,
            'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
            'tooltip': '',
            'ui_color': 'CornflowerBlue',
            'ui_fgcolor': '#000',
            'upstream_group_ids': [],
            'downstream_group_ids': [],
            'upstream_task_ids': [],
            'downstream_task_ids': [],
        },
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        "doc_md": "### DAG Tutorial Documentation",
        # fileloc is machine-dependent; validate_serialized_dag nulls it out
        # before comparison.
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                'label': 'bash_task',
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {
                    '__type': 'dict',
                    '__var': {
                        "pod_override": {
                            '__type': 'k8s.V1Pod',
                            '__var': PodGenerator.serialize_pod(executor_config_pod),
                        }
                    },
                },
                "doc_md": "### Task Tutorial Documentation",
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
                'label': 'custom_task',
            },
        ],
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                }
            },
        },
        "edge_info": {},
        "dag_dependencies": [],
    },
}

# Repository root (two directory levels above this file); used by
# collect_dags() to glob for example DAG folders.
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
    """Loads DAGs from a module for test."""
    return DagBag(module_path).dags
def make_simple_dag():
    """Make very simple DAG to verify serialization result.

    Every value here must stay in sync with
    serialized_simple_dag_ground_truth above.
    """
    with DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "max_retry_delay": timedelta(minutes=10),
            "depends_on_past": False,
            "sla": timedelta(seconds=100),
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
        doc_md="### DAG Tutorial Documentation",
    ) as dag:
        CustomOperator(task_id='custom_task')
        BashOperator(
            task_id='bash_task',
            bash_command='echo {{ task.task_id }}',
            owner='airflow',
            executor_config={"pod_override": executor_config_pod},
            doc_md="### Task Tutorial Documentation",
        )
        return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.

    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.

    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.
    """

    def compute_next_execution_date(dag, execution_date):
        return dag.following_schedule(execution_date)

    default_args = {'start_date': datetime(2019, 7, 10)}
    dag = DAG(
        'user_defined_macro_filter_dag',
        default_args=default_args,
        user_defined_macros={
            'next_execution_date': compute_next_execution_date,
        },
        user_defined_filters={'hello': lambda name: f'Hello {name}'},
        catchup=False,
    )
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=dag,
    )
    return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
    """Collects DAGs to test: the two hand-built DAGs plus example DAGs."""
    dags = {}
    dags.update(make_simple_dag())
    dags.update(make_user_defined_macro_filter_dag())

    if not dag_folder:
        # Default: all example DAG folders shipped with Airflow.
        patterns = [
            "airflow/example_dags",
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    elif isinstance(dag_folder, (list, tuple)):
        patterns = dag_folder
    else:
        patterns = [dag_folder]

    for pattern in patterns:
        for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
            dags.update(make_example_dags(directory))

    # Filter subdags as they are stored in same row in Serialized Dag table
    return {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
def serialize_subprocess(queue, dag_folder):
    """Validate pickle in a subprocess: serialize each DAG onto the queue,
    terminated by a None sentinel."""
    for dag in collect_dags(dag_folder).values():
        queue.put(SerializedDAG.to_json(dag))
    queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
    def validate_serialized_dag(self, json_dag, ground_truth_dag):
        """Verify serialized DAGs match the ground truth."""
        # fileloc is machine-dependent; verify the basename then null it so
        # it matches the ground truth's None.
        assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
        json_dag['dag']['fileloc'] = None

        def sorted_serialized_dag(dag_dict: dict):
            """
            Sorts the "tasks" list and "access_control" permissions in the
            serialised dag python dictionary. This is needed as the order of
            items should not matter but assertEqual would fail if the order of
            items changes in the dag dictionary
            """
            dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
            dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
                dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
            )
            return dag_dict

        assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
    def test_deserialization_across_process(self):
        """A serialized DAG can be deserialized in another process."""

        # Since we need to parse the dags twice here (once in the subprocess,
        # and once here to get a DAG to compare to) we don't want to load all
        # dags.
        queue = multiprocessing.Queue()
        proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
        proc.daemon = True
        proc.start()

        stringified_dags = {}
        # Drain the queue until the None sentinel posted by the subprocess.
        while True:
            v = queue.get()
            if v is None:
                break
            dag = SerializedDAG.from_json(v)
            assert isinstance(dag, DAG)
            stringified_dags[dag.dag_id] = dag

        # Parse the same folder locally and compare DAG ids and contents.
        dags = collect_dags("airflow/example_dags")
        assert set(stringified_dags.keys()) == set(dags.keys())

        # Verify deserialized DAGs.
        for dag_id in stringified_dags:
            self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
    def validate_deserialized_dag(self, serialized_dag, dag):
        """
        Verify that all example DAGs work with DAG Serialization by
        checking fields between Serialized Dags & non-Serialized Dags
        """
        fields_to_check = dag.get_serialized_fields() - {
            # Doesn't implement __eq__ properly. Check manually
            'timezone',
            # Need to check fields in it, to exclude functions
            'default_args',
            "_task_group",
        }
        for field in fields_to_check:
            assert getattr(serialized_dag, field) == getattr(
                dag, field
            ), f'{dag.dag_id}.{field} does not match'

        if dag.default_args:
            for k, v in dag.default_args.items():
                if callable(v):
                    # Check we stored _something_.
                    assert k in serialized_dag.default_args
                else:
                    assert (
                        v == serialized_dag.default_args[k]
                    ), f'{dag.dag_id}.default_args[{k}] does not match'

        # Compare timezones by name since tz objects lack a proper __eq__.
        assert serialized_dag.timezone.name == dag.timezone.name

        for task_id in dag.task_ids:
            self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))

        # Verify that the DAG object has 'full_filepath' attribute
        # and is equal to fileloc
        assert serialized_dag.full_filepath == dag.fileloc
    def validate_deserialized_task(
        self,
        serialized_task,
        task,
    ):
        """Verify non-airflow operators are casted to BaseOperator."""
        assert isinstance(serialized_task, SerializedBaseOperator)
        assert not isinstance(task, SerializedBaseOperator)
        assert isinstance(task, BaseOperator)

        fields_to_check = task.get_serialized_fields() - {
            # Checked separately
            '_task_type',
            'subdag',
            # Type is excluded, so don't check it
            '_log',
            # List vs tuple. Check separately
            'template_fields',
            # We store the string, real dag has the actual code
            'on_failure_callback',
            'on_success_callback',
            'on_retry_callback',
            # Checked separately
            'resources',
        }

        assert serialized_task.task_type == task.task_type
        # Compare as sets: serialization may change list/tuple ordering.
        assert set(serialized_task.template_fields) == set(task.template_fields)

        assert serialized_task.upstream_task_ids == task.upstream_task_ids
        assert serialized_task.downstream_task_ids == task.downstream_task_ids

        for field in fields_to_check:
            assert getattr(serialized_task, field) == getattr(
                task, field
            ), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'

        if serialized_task.resources is None:
            assert task.resources is None or task.resources == []
        else:
            assert serialized_task.resources == task.resources

        # Check that for Deserialised task, task.subdag is None for all other Operators
        # except for the SubDagOperator where task.subdag is an instance of DAG object
        if task.task_type == "SubDagOperator":
            assert serialized_task.subdag is not None
            assert isinstance(serialized_task.subdag, DAG)
        else:
            assert serialized_task.subdag is None
    # Cases: (dag_start_date, task_start_date, expected_task_start_date) —
    # the later of the two dates wins.
    @parameterized.expand(
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
        ]
    )
    def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
        dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
        BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)

        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_start_date or dag_start_date >= task_start_date:
            # If dag.start_date > task.start_date -> task.start_date=dag.start_date
            # because of the logic in dag.add_task()
            assert "start_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "start_date" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
    # Cases: (dag_end_date, task_end_date, expected_task_end_date) — the
    # earlier of the two dates wins.
    @parameterized.expand(
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
            ),
        ]
    )
    def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
        dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
        BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)

        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_end_date or dag_end_date <= task_end_date:
            # If dag.end_date < task.end_date -> task.end_date=dag.end_date
            # because of the logic in dag.add_task()
            assert "end_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "end_date" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.end_date == expected_task_end_date
    # Cases: (serialized value, expected schedule_interval, expected
    # normalized_schedule_interval after preset expansion).
    @parameterized.expand(
        [
            (None, None, None),
            ("@weekly", "@weekly", "0 0 * * 0"),
            ("@once", "@once", None),
            ({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
        ]
    )
    def test_deserialization_schedule_interval(
        self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
    ):
        # Hand-built minimal serialized payload: only the fields the schema
        # requires plus the schedule_interval under test.
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "schedule_interval": serialized_schedule_interval,
            },
        }

        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)

        assert dag.schedule_interval == expected_schedule_interval
        assert dag.normalized_schedule_interval == expected_n_schedule_interval
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]
else:
assert "params" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params
assert expected_val == deserialized_simple_task.params
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params
def test_extra_serialized_field_and_operator_links(self):
    """
    Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

    This tests also depends on GoogleLink() registered as a plugin
    in tests/plugins/test_plugin.py

    The function tests that if extra operator links are registered in plugin
    in ``operator_extra_links`` and the same is also defined in
    the Operator in ``BaseOperator.operator_extra_links``, it has the correct
    extra link.
    """
    test_date = datetime(2019, 8, 1)
    dag = DAG(dag_id='simple_dag', start_date=test_date)
    CustomOperator(task_id='simple_task', dag=dag, bash_command="true")

    serialized_dag = SerializedDAG.to_dict(dag)
    # presumably serialized because CustomOperator declares bash_command as an
    # extra field — see tests.test_utils.mock_operators (TODO confirm)
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert getattr(simple_task, "bash_command") == "true"

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
        {'tests.test_utils.mock_operators.CustomOpLink': {}}
    ]

    # Test all the extra_links are set
    assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}

    ti = TaskInstance(task=simple_task, execution_date=test_date)
    ti.xcom_push('search_query', "dummy_value_1")

    # Test Deserialized inbuilt link
    custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
    assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link

    # Test Deserialized link registered via Airflow Plugin
    google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
    assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
    """
    Assert OperatorLinks not registered via Plugins and if it is not an inbuilt Operator Link,
    it can still deserialize the DAG (does not error) but just logs an error
    """

    class TaskStateLink(BaseOperatorLink):
        """OperatorLink not registered via Plugins nor a built-in OperatorLink"""

        name = 'My Link'

        def get_link(self, operator, dttm):
            return 'https://www.google.com'

    class MyOperator(BaseOperator):
        """Just a DummyOperator using above defined Extra Operator Link"""

        operator_extra_links = [TaskStateLink()]

        def execute(self, context):
            pass

    with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
        MyOperator(task_id='blah')

    serialized_dag = SerializedDAG.to_dict(dag)

    # Deserialization must NOT raise for the unknown link; it only logs.
    with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
        SerializedDAG.from_dict(serialized_dag)
        received_logs = log_output.output[0]
        expected_err_msg = (
            "Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
            "not registered"
        )
        assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
    """
    Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

    This tests also depends on GoogleLink() registered as a plugin
    in tests/plugins/test_plugin.py

    The function tests that if extra operator links are registered in plugin
    in ``operator_extra_links`` and the same is also defined in
    the Operator in ``BaseOperator.operator_extra_links``, it has the correct
    extra link.
    """
    test_date = datetime(2019, 8, 1)
    dag = DAG(dag_id='simple_dag', start_date=test_date)
    # List-valued bash_command: one indexed link is expected per element
    CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])

    serialized_dag = SerializedDAG.to_dict(dag)
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert getattr(simple_task, "bash_command") == ["echo", "true"]

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
    ]

    # Test all the extra_links are set
    assert set(simple_task.extra_links) == {
        'BigQuery Console #1',
        'BigQuery Console #2',
        'airflow',
        'github',
        'google',
    }

    ti = TaskInstance(task=simple_task, execution_date=test_date)
    ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])

    # Test Deserialized inbuilt link #1
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link

    # Test Deserialized inbuilt link #2
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link

    # Test Deserialized link registered via Airflow Plugin
    google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
    assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
    """
    Test helper: builds an object with arbitrary attributes in one statement.
    """

    def __init__(self, **kwargs):
        # Attribute order follows the keyword-argument order.
        self.__dict__.update(kwargs)

    def __str__(self):
        return f"{self.__class__.__name__}({str(self.__dict__)})"

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
@parameterized.expand(
    [
        (None, None),
        ([], []),
        ({}, {}),
        ("{{ task.task_id }}", "{{ task.task_id }}"),
        # FIX: this entry was a bare two-element list, which `parameterized`
        # unpacked into (val, expected) — duplicating the scalar case above and
        # silently dropping coverage of list-valued templated fields. It is now
        # an explicit (val, expected) pair of lists.
        (["{{ task.task_id }}", "{{ task.task_id }}"], ["{{ task.task_id }}", "{{ task.task_id }}"]),
        ({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
        ({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
        (
            [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
            [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
        ),
        (
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
        ),
        (
            ClassWithCustomAttributes(
                att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
            ),
            "ClassWithCustomAttributes("
            "{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
        ),
        (
            ClassWithCustomAttributes(
                nested1=ClassWithCustomAttributes(
                    att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
                ),
                nested2=ClassWithCustomAttributes(
                    att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
                ),
                template_fields=["nested1"],
            ),
            "ClassWithCustomAttributes("
            "{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
            "'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
            "'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
            "'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
        ),
    ]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
    """
    Test that templated_fields exists for all Operators in Serialized DAG

    Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
    we want check that non-"basic" objects are turned in to strings after deserializing.
    """
    dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
    with dag:
        BashOperator(task_id="test", bash_command=templated_field)

    serialized_dag = SerializedDAG.to_dict(dag)
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    deserialized_test_task = deserialized_dag.task_dict["test"]
    assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
    """
    Additional Properties are disabled on DAGs. This test verifies that all the
    keys in DAG.get_serialized_fields are listed in Schema definition.
    """
    dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]

    # Keys the serializer injects itself, which therefore are not
    # part of DAG.get_serialized_fields and must be ignored here.
    ignored_keys: set = {
        "is_subdag",
        "tasks",
        "has_on_success_callback",
        "has_on_failure_callback",
        "dag_dependencies",
    }
    expected_fields = set(dag_schema) - ignored_keys
    assert set(DAG.get_serialized_fields()) == expected_fields
def test_operator_subclass_changing_base_defaults(self):
    """A subclass default that differs from BaseOperator's must survive serialization."""
    assert (
        BaseOperator(task_id='dummy').do_xcom_push is True
    ), "Precondition check! If this fails the test won't make sense"

    class MyOperator(BaseOperator):
        # Flips the BaseOperator default of do_xcom_push=True
        def __init__(self, do_xcom_push=False, **kwargs):
            super().__init__(**kwargs)
            self.do_xcom_push = do_xcom_push

    op = MyOperator(task_id='dummy')
    assert op.do_xcom_push is False

    # The overridden value must round-trip through serialize/deserialize.
    blob = SerializedBaseOperator.serialize_operator(op)
    serialized_op = SerializedBaseOperator.deserialize_operator(blob)

    assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
    """
    This test verifies that there are no new fields added to BaseOperator. And reminds that
    tests should be added for it.
    """
    base_operator = BaseOperator(task_id="10")
    # Snapshot of the instance __dict__; any new attribute added to
    # BaseOperator.__init__ will make the comparison below fail.
    fields = base_operator.__dict__
    assert {
        '_BaseOperator__instantiated': True,
        '_dag': None,
        '_downstream_task_ids': set(),
        '_inlets': [],
        '_log': base_operator.log,
        '_outlets': [],
        '_upstream_task_ids': set(),
        'depends_on_past': False,
        'do_xcom_push': True,
        'doc': None,
        'doc_json': None,
        'doc_md': None,
        'doc_rst': None,
        'doc_yaml': None,
        'email': None,
        'email_on_failure': True,
        'email_on_retry': True,
        'end_date': None,
        'execution_timeout': None,
        'executor_config': {},
        'inlets': [],
        'label': '10',
        'max_retry_delay': None,
        'on_execute_callback': None,
        'on_failure_callback': None,
        'on_retry_callback': None,
        'on_success_callback': None,
        'outlets': [],
        'owner': 'airflow',
        'params': {},
        'pool': 'default_pool',
        'pool_slots': 1,
        'priority_weight': 1,
        'queue': 'default',
        'resources': None,
        'retries': 0,
        'retry_delay': timedelta(0, 300),
        'retry_exponential_backoff': False,
        'run_as_user': None,
        'sla': None,
        'start_date': None,
        'subdag': None,
        'task_concurrency': None,
        'task_id': '10',
        'trigger_rule': 'all_success',
        'wait_for_downstream': False,
        'weight_rule': 'downstream',
    } == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                                ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY

Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.

Note that we do not support versioning yet so you should only add optional fields to BaseOperator.

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    """
def test_task_group_serialization(self):
    """
    Test TaskGroup serialization/deserialization.
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.task_group import TaskGroup

    execution_date = datetime(2020, 1, 1)
    # Build: task1 >> group234(task2, group34(task3, task4)) >> ... >> task5
    with DAG("test_task_group_serialization", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = DummyOperator(task_id="task2")

            with TaskGroup("group34") as group34:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")

        task5 = DummyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    assert serialized_dag.task_group.children
    assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()

    def check_task_group(node):
        # Groups expose .children; leaf operators don't (AttributeError path below)
        try:
            children = node.children.values()
        except AttributeError:
            # Round-trip serialization and check the result
            expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
            expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
            expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
            assert node
            assert SerializedBaseOperator.serialize_operator(node) == expected_dict
            return

        for child in children:
            check_task_group(child)

    check_task_group(serialized_dag.task_group)
def test_edge_info_serialization(self):
    """
    Tests edge_info serialization/deserialization.
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.edgemodifier import Label

    with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
        task1 = DummyOperator(task_id="task1")
        task2 = DummyOperator(task_id="task2")
        # Label annotates the task1 -> task2 edge rather than adding a task
        task1 >> Label("test label") >> task2

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    # The edge metadata must survive the round-trip intact
    assert serialized_dag.edge_info == dag.edge_info
@parameterized.expand(
    [
        ("poke", False),
        ("reschedule", True),
    ]
)
def test_serialize_sensor(self, mode, expect_custom_deps):
    """Sensor deps round-trip; only reschedule-mode sensors serialize custom deps."""
    from airflow.sensors.base import BaseSensorOperator

    class DummySensor(BaseSensorOperator):
        def poke(self, context):
            return False

    op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
    blob = SerializedBaseOperator.serialize_operator(op)

    # "deps" appears in the blob only for the reschedule case
    if expect_custom_deps:
        assert "deps" in blob
    else:
        assert "deps" not in blob

    serialized_op = SerializedBaseOperator.deserialize_operator(blob)
    assert op.deps == serialized_op.deps
@parameterized.expand(
    [
        ({"on_success_callback": lambda x: print("hi")}, True),
        ({}, False),
    ]
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
    """
    Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.

    When the callback is not set, has_on_success_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # Only the boolean flag appears in the blob, never the callable itself
    if expected_value:
        assert "has_on_success_callback" in serialized_dag["dag"]
    else:
        assert "has_on_success_callback" not in serialized_dag["dag"]

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)

    assert deserialized_dag.has_on_success_callback is expected_value
@parameterized.expand(
    [
        ({"on_failure_callback": lambda x: print("hi")}, True),
        ({}, False),
    ]
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
    """
    Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.

    When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # Only the boolean flag appears in the blob, never the callable itself
    if expected_value:
        assert "has_on_failure_callback" in serialized_dag["dag"]
    else:
        assert "has_on_failure_callback" not in serialized_dag["dag"]

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)

    assert deserialized_dag.has_on_failure_callback is expected_value
@parameterized.expand(
    [
        (
            ['task_1', 'task_5', 'task_2', 'task_4'],
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        # Sets serialize in sorted order
        (
            {'task_1', 'task_5', 'task_2', 'task_4'},
            ['task_1', 'task_2', 'task_4', 'task_5'],
        ),
        # Tuples keep their order (serialized as lists)
        (
            ('task_1', 'task_5', 'task_2', 'task_4'),
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        (
            {
                "staging_schema": [
                    {"key:": "foo", "value": "bar"},
                    {"key:": "this", "value": "that"},
                    "test_conf",
                ]
            },
            {
                "staging_schema": [
                    {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
                    {
                        "__type": "dict",
                        "__var": {"key:": "this", "value": "that"},
                    },
                    "test_conf",
                ]
            },
        ),
        # Dict keys are emitted in sorted order
        (
            {"task3": "test3", "task2": "test2", "task1": "test1"},
            {"task1": "test1", "task2": "test2", "task3": "test3"},
        ),
        (
            ('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
            ['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
        ),
    ]
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
    """Test Serialized Sets are sorted while list and tuple preserve order"""
    serialized_obj = SerializedDAG._serialize(object_to_serialized)
    # Encoded containers are wrapped as {"__type": ..., "__var": ...}; compare the payload
    if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
        serialized_obj = serialized_obj["__var"]
    assert serialized_obj == expected_output
def test_kubernetes_optional():
    """Serialisation / deserialisation continues to work without kubernetes installed"""

    def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
        # Fail only absolute imports of the top-level `kubernetes` package
        if level == 0 and name.partition('.')[0] == 'kubernetes':
            raise ImportError("No module named 'kubernetes'")
        return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)

    with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
        # load module from scratch, this does not replace any already imported
        # airflow.serialization.serialized_objects module in sys.modules
        spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        # if we got this far, the module did not try to load kubernetes, but
        # did it try to access airflow.kubernetes.*?
        imported_airflow = {
            c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
        }
        assert "kubernetes" not in imported_airflow

        # pod loading is not supported when kubernetes is not available
        pod_override = {
            '__type': 'k8s.V1Pod',
            '__var': PodGenerator.serialize_pod(executor_config_pod),
        }

        with pytest.raises(RuntimeError):
            module.BaseSerialization.from_dict(pod_override)

        # basic serialization should succeed
        module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
slave.py | from platform import node, version, system, processor, machine
from shutil import disk_usage
from time import sleep
from argparse import ArgumentParser
from datetime import datetime
from socket import socket, gethostbyname, gethostname
from threading import Thread
from re import findall
from uuid import getnode
from psutil import cpu_count, virtual_memory
from os import remove
from requests import get
from requests.exceptions import ConnectionError
class Slave:
    """Remote agent that connects to a master and executes text orders.

    Orders arrive as space-separated strings:
      * ``start_log`` / ``stop_log``   -- toggle periodic system-info logging
      * ``get_log <n>``                -- send back the last n log lines
      * ``ddos <url> <date> <time>``   -- fire one HTTP GET at the given moment
      * ``exit``                       -- clean up and stop
    """

    def __init__(self, address: str, port: int):
        self.address = address          # Master host
        self.port = port                # Master port
        self.socket = socket()
        self.stop = False               # Global exit flag for all loops
        self.recording = False          # True while the log loop is running
        self.connected = False
        self.file_name = "slave.log"
        self.encoding = "UTF-8"

    def start(self):
        """Main loop: (re)connect to the master and dispatch incoming orders."""
        while not self.stop:  # While exit flag is False
            if self.connected:
                print(">> Waiting order from master...")
                msg = ""
                try:
                    msg = self.socket.recv(1024).decode(self.encoding).split(" ")
                    len_msg = len(msg)  # Number of tokens received
                    if len_msg == 1:
                        if msg[0] == "start_log":
                            record = Thread(target=self.__start_log)
                            record.start()
                        elif msg[0] == "stop_log":
                            self.__stop_log()
                        elif msg[0] == "exit":
                            self.__exit()
                        else:
                            # Empty or unknown token: assume the link is dead
                            self.connected = False
                    elif len_msg == 2:
                        if msg[0] == "get_log":  # arg must be an int
                            self.__get_log(int(msg[1]))
                    elif len_msg == 4:
                        if msg[0] == "ddos":
                            # Example: "ddos http://192.168.2.159 2019-11-10 19:52:28"
                            ip = msg[1]
                            date = msg[2].split("-")   # date[0..2] = Y, M, D
                            hour = msg[3].split(":")   # hour[0..2] = h, m, s
                            date_time = datetime(int(date[0]), int(date[1]), int(date[2]),
                                                 int(hour[0]), int(hour[1]), int(hour[2]))
                            ddos = Thread(target=self.__ddos, args=(ip, date_time,))
                            ddos.start()
                except ConnectionResetError:
                    print("/!\\ Disconnected...")
                    self.socket.close()
                    self.connected = False
                    self.__connection()
            else:
                self.__connection()

    def __connection(self):
        """Attempt a single connect to the master; start() drives the retries."""
        try:
            print(">> Connection ...")
            self.socket.connect((self.address, self.port))
            print(f">> Connected to {self.address}:{self.port}")
            self.connected = True
        except ConnectionRefusedError:
            self.connected = False
            print(f">> Unable to connect to {self.address}:{self.port}, retrying in 5 seconds...")
            # FIX: slept 4s while the message announced 5 -- match the message.
            sleep(5)
        except OSError:
            # A socket that failed a connect can be left unusable: rebuild it.
            print(">> OSError, creating new socket...")
            self.socket.close()
            self.socket = socket()

    def __start_log(self):
        """Record system info to the log file every ~10s until stopped (runs in a Thread)."""
        if self.recording:
            self.__send_message("Error, still recording.")
        else:
            print(">> Starting log record...")
            self.recording = True
            self.__send_message("Log started!")
            while self.recording:
                timer = 10
                with open(self.file_name, 'w') as file:
                    file.write(self.__get_sys_info())
                # Sleep in 1s slices so stop_log takes effect quickly
                while timer > 0 and self.recording:
                    sleep(1)
                    timer -= 1

    def __stop_log(self):
        """Stop the recording loop, notifying the master either way."""
        if self.recording:
            print(">> Stopping logs record...")
            self.__send_message("Log stopped!")
            self.recording = False
        else:
            self.__send_message("Error, not recording.")

    def __get_log(self, nbr_lines: int):
        """Send the last *nbr_lines* lines of the log file to the master."""
        if self.recording:
            self.__stop_log()  # Stopping log before sending
        to_send, nbr_lines = self.__get_log_into_str(nbr_lines)
        if to_send is not None:
            self.__send_message("\n" + to_send)
            print(f">> Sending the {nbr_lines} last lines...")
        else:
            print("/!\\ File doesn't exist!")
            self.__send_message("Log file doesn't exist.")

    def __ddos(self, ip: str, date_time: datetime):
        """Wait until *date_time*, then issue one HTTP GET to *ip* (runs in a Thread)."""
        print(f">> Attack planned on {ip} at {date_time}...")
        self.__send_message(f"Attack planned on {ip} at {date_time}...")
        time_left = int((date_time - datetime.now()).total_seconds())
        while not self.stop and time_left > 0:
            sleep(1)
            time_left -= 1
        if not self.stop and time_left <= 0:
            try:
                request = get(ip, timeout=3)  # HTTP request to target url
                self.__send_message(f"request_code : {request.status_code}")
            except ConnectionError:
                print(">> Host unreachable!")
                self.__send_message("Host unreachable!")
        else:  # Stop flag raised before the deadline
            print(">> Attack cancelled.")

    def __remove_log(self):
        """Delete the log file if it exists."""
        try:
            remove(self.file_name)
            print(">> File removed!")
        except FileNotFoundError:
            pass

    def __send_message(self, data: str):
        """Send *data* to the master; on a reset link, reconnect."""
        try:
            self.socket.send(data.encode(self.encoding))
        except ConnectionResetError:
            print("/!\\ Disconnected...")
            self.socket.close()
            self.connected = False
            self.__connection()

    def __get_log_into_str(self, nbr_lines: int):
        """Return ``(text_of_last_lines, effective_line_count)``.

        FIX: previously returned a bare ``None`` when the log file was missing,
        which crashed the two-value unpacking in ``__get_log`` with a TypeError;
        it now returns a ``(None, nbr_lines)`` pair.
        """
        try:
            with open(self.file_name, "r") as log_file:
                lines_list = log_file.readlines()
            ret, length = "", len(lines_list)
            if nbr_lines > length:  # Clamp request to the file size
                nbr_lines = length
            for line in range(length - nbr_lines, length):
                ret += str(lines_list[line])
            return ret, nbr_lines
        except FileNotFoundError:
            return None, nbr_lines

    @classmethod
    def __get_sys_info(cls):
        """Return one timestamped multi-line snapshot of host hardware/OS info."""
        total, used, free = disk_usage("/")  # Disk usage in bytes
        mem = virtual_memory()
        return f"{datetime.now()} # computer_name = {node()}\n" \
               f"{datetime.now()} # system = {system()}\n" \
               f"{datetime.now()} # os_version = {version()}\n" \
               f"{datetime.now()} # processor = {processor()}\n" \
               f"{datetime.now()} # architecture = {machine()}\n" \
               f"{datetime.now()} # processor_core = {cpu_count(logical=False)}\n" \
               f"{datetime.now()} # ip_address = {gethostbyname(gethostname())}\n" \
               f"{datetime.now()} # mac_address = {':'.join(findall('..', '%012x' % getnode()))}\n" \
               f"{datetime.now()} # main_disk_usage = {round(used / 2 ** 30, 1)}/{round(total / 2 ** 30, 1)} GB\n" \
               f"{datetime.now()} # ram = {round(mem[0] / 2 ** 30)} GB"

    def __exit(self):
        """Stop everything, notify the master, and clean up local state."""
        self.stop = True
        if self.recording:
            self.__stop_log()
        self.__send_message("exit")
        self.socket.close()
        self.__remove_log()
        print(">> Exiting...")
def main():
    """Parse the master's address/port from the command line and run the slave."""
    parser = ArgumentParser(add_help=False)
    parser.add_argument('address', type=str, action='store', help='Address of master')
    parser.add_argument('port', type=int, action='store', help='Listening port of master')
    cli_args = parser.parse_args()
    Slave(cli_args.address, cli_args.port).start()


if __name__ == '__main__':
    main()
|
gui.py | import eel
import os
import battlecode_cli as cli
import threading
import sys
import json
import signal
import psutil
import player_plain
import battlecode as bc
import zipfile
import requests
import base64
import shutil
# Run relative to this file so the 'web' assets directory resolves predictably.
target_dir = os.path.abspath(os.path.dirname(__file__))
print('Moving into', target_dir)
os.chdir(target_dir)

# eel server options; bind all interfaces except on Windows.
options = {'host':'0.0.0.0', 'port':6147, 'mode':'default'}
if sys.platform == 'win32':
    options['host'] = 'localhost'

print('Starting eel')
eel.init('web')

# OAuth client credential used for the password grant in get_token().
# NOTE(review): appears to be base64 "client:secret" — treat as sensitive.
CLIENT_ID = 'YmF0dGxlY29kZXdlYmFwcDpKQlVZOVZFNjkyNDNCWUM5MDI0Mzg3SEdWWTNBUUZL'

# Currently running game instance; None until run_game() starts one.
game = None
def get_token(username, password):
    """Exchange a username/password for an OAuth token from battlecode.org.

    Returns the raw ``requests`` response; callers inspect ``status_code``
    and parse ``access_token`` from the body themselves.
    """
    headers = {}
    headers['authorization'] = "Basic " + CLIENT_ID
    data = {}
    data['grant_type'] = 'password'
    data['username'] = username
    data['password'] = password
    data['client_id'] = CLIENT_ID
    # NOTE(review): credentials are posted over plain http:// — confirm this
    # endpoint should not be https.
    req = requests.post("http://www.battlecode.org/oauth/token", headers=headers, data=data)
    print(req.text)
    return req
@eel.expose
def upload_scrim_server(return_args):
    """Zip the chosen player folder and submit it to the scrim server.

    ``return_args`` needs 'file_name' (player dir), 'username', 'password'
    and 'player' (submission label). Returns "success" or an error string.
    """
    cwd = os.getcwd()
    # Player folders live one level up without docker, under /player inside it.
    if 'NODOCKER' in os.environ:
        os.chdir('..')
    else:
        os.chdir('/player')

    os.chdir(return_args['file_name'])
    zip_file_name = os.path.abspath(os.path.join('../',
                                                 return_args['file_name']))
    shutil.make_archive(zip_file_name, 'zip', '.')
    # make_archive appends .zip itself; mirror that in the local name.
    if not zip_file_name.endswith('.zip'):
        zip_file_name += '.zip'
    os.chdir(cwd)

    username = return_args['username']
    password = return_args['password']
    req = get_token(username, password)
    if req.status_code != 200:
        print("Error authenticating.")
        return "Error authenticating."
    token = json.loads(req.text)['access_token']

    headers = {}
    headers['Authorization'] = 'Bearer ' + token
    data = {}
    data['label'] = return_args['player']
    with open(zip_file_name, 'rb') as image_file:
        encoded_string = base64.b64encode(image_file.read())
        data['src'] = encoded_string
    # NOTE(review): res.status_code is never checked — upload failures are
    # still reported as "success".
    res = requests.post("https://battlecode.org/apis/submissions", headers=headers, data=data)
    return "success"
@eel.expose
def save_logs(file_name):
    """Dump all players' logs into *file_name*; return "" on success, else the error text.

    The path is resolved against the scaffold root: '..' without docker,
    '/player' inside it.
    """
    # BUG FIX: was `if 'NODOCKER':` — a non-empty string literal, always truthy —
    # so docker users' logs were written relative to the wrong directory.
    if 'NODOCKER' in os.environ:
        file_name = os.path.abspath(os.path.join('..', file_name))
    else:
        file_name = os.path.abspath(os.path.join('/player/', file_name))

    output_string = ""
    if game is not None:
        if all('logger' in player for player in game.players):
            for i in range(len(game.players)):
                player = game.players[i]
                # Banner identifying team (even=Red, odd=Blue) and planet
                # (first two players=Earth, last two=Mars).
                log_header = "\n\n\n\n\n\n======================================\n"
                if i % 2 == 0:
                    log_header += "Red "
                else:
                    log_header += "Blue "
                if i < 2:
                    log_header += "Earth"
                else:
                    log_header += "Mars"
                log_header += "\n\n"
                logs = log_header + player['logger'].logs.getvalue()
                output_string += logs
        else:
            # This should never run. Game needs to be started to call this modal
            return ""

    try:
        with open(file_name, 'w') as f:
            f.write(output_string)
        return ""
    except Exception as e:
        print("There was an error dumping the logs")
        print(e)
        return str(e)
def start_game(return_args):
    """Resolve the map and player dirs, run one game to completion, and notify the frontend.

    Runs on a worker thread (see run_game); the global `lock` is held for the
    duration and released before the end-of-game trigger fires.
    """
    global WINNER
    WINNER = 0

    return_args['map_name'] = return_args['map']

    # check mountpoint for maps first
    c2 = os.path.abspath(os.path.join('/player/battlecode-maps', return_args['map']))
    if 'NODOCKER' not in os.environ and os.path.exists(c2):
        return_args['map'] = cli.get_map(c2)
    else:
        c1 = os.path.abspath(os.path.join('..', 'battlecode-maps', return_args['map']))
        if os.path.exists(c1):
            return_args['map'] = cli.get_map(c1)
        else:
            if 'testmap' not in return_args['map']:
                # BUG FIX: print() does not interpolate '{}' placeholders;
                # format the message explicitly.
                print("Can't find map {} in {}, falling back to test map..".format(
                    return_args['map'],
                    os.path.abspath(os.path.join('..', 'battlecode-maps'))
                ))
                if 'NODOCKER' not in os.environ:
                    print('(Also looked in /player/battlecode-maps, which should be mounted to the battlecode-maps directory of your scaffold)')
            return_args['map'] = bc.GameMap.test_map()

    if 'NODOCKER' in os.environ:
        return_args['docker'] = False
        return_args['dir_p1'] = os.path.abspath(os.path.join('..', return_args['dir_p1']))
        return_args['dir_p2'] = os.path.abspath(os.path.join('..', return_args['dir_p2']))
    else:
        return_args['docker'] = True
        return_args['dir_p1'] = os.path.abspath(os.path.join('/player', return_args['dir_p1']))
        return_args['dir_p2'] = os.path.abspath(os.path.join('/player', return_args['dir_p2']))

    return_args['terminal_viewer'] = False
    return_args['extra_delay'] = 0

    global game
    (game, dockers, sock_file) = cli.create_game(return_args)

    winner = None
    try:
        print("Running game...")
        winner = cli.run_game(game, dockers, return_args, sock_file)
    finally:
        # Always clean up and free the run lock, even if the game crashed.
        cli.cleanup(dockers, return_args, sock_file)
        lock.release()

    if winner == 'player1':
        eel.trigger_end_game(1)()
    # BUG FIX: the comparison used ' player2' (leading space), so a player-2
    # win always fell through to the "no winner" branch below.
    elif winner == 'player2':
        eel.trigger_end_game(2)()
    else:
        eel.trigger_end_game(0)()
    print("Ready to run next game.")
@eel.expose
def get_viewer_data(turn):
    """Return the viewer message for *turn*, clamped to the last available one.

    An empty board dict is returned when no game data exists yet.
    """
    turn = int(turn)
    messages = game.manager_viewer_messages if game is not None else []
    if not messages:
        return {'width':0, 'height': 0, 'earth' : [], 'mars': [], 'turn':0}
    # -1 (or anything past the end) means "latest turn".
    if turn >= len(messages) or turn == -1:
        turn = len(messages) - 1
    message = json.loads(messages[turn])
    message['turn'] = turn
    return message
@eel.expose
def run_game(return_args):
    """Start a game on a worker thread; refuse if one is already running."""
    if not lock.acquire(blocking=False):
        return "Fail"
    worker = threading.Thread(target=start_game, args=(return_args,))
    worker.start()
    return "success"
@eel.expose
def get_maps():
    """Return the list of available map file names (always includes the test map)."""
    if 'NODOCKER' in os.environ:
        map_dir = os.path.abspath('../battlecode-maps')
    else:
        map_dir = '/battlecode/battlecode-maps'

    maps = [o for o in os.listdir(map_dir)
            if 'bc18map' in o or 'bc18t' in o]
    maps.append('testmap.bc18map')

    if 'NODOCKER' not in os.environ:
        # The user's scaffold maps may also be mounted at /player.
        try:
            for o in os.listdir('/player/battlecode-maps'):
                if o not in maps:
                    maps.append(o)
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only a missing/unreadable mount should be ignored.
        except OSError:
            pass
    return maps
@eel.expose
def get_player_dirs():
    """List player directories: subdirs of the scaffold root containing a run.sh."""
    if 'NODOCKER' in os.environ:
        base_dir = os.path.abspath('..')
    else:
        base_dir = '/player'

    found = []
    for entry in os.listdir(base_dir):
        # Skip hidden entries and the framework's own directories.
        if entry.startswith('.') or entry in ('battlecode', 'battlecode-manager'):
            continue
        full_path = os.path.join(base_dir, entry)
        if os.path.isdir(full_path) and os.path.exists(os.path.join(full_path, 'run.sh')):
            found.append(entry)
    return found
# Winner codes used by trigger_end_game: 0 = not ended, 1 = red, 2 = blue.
@eel.expose
def get_player_logs():
    """Return the four players' accumulated log text.

    Empty strings while loggers are not attached yet; "NULL" entries when no
    game has been started at all (the frontend distinguishes the two).
    """
    if game != None:
        if all('logger' in player for player in game.players):
            logs = [player['logger'].logs.getvalue() for player in game.players]
            return logs
        else:
            return ["", "", "", ""]
    return ["NULL", "NULL", "NULL", "NULL"]
@eel.expose
def end_game():
    """Force the current game (if any) to finish; 'player3' marks a manual stop."""
    global game
    if game is None:
        return ""
    game.winner = 'player3'
    game.disconnected = True
    game.game_over = True
    return ""
def reap_children(timeout=3):
    """Tries hard to terminate and ultimately kill all the children of this process.

    First sends SIGTERM to every descendant, waits up to *timeout* seconds,
    then SIGKILLs survivors and waits again.
    """

    def on_terminate(proc):
        # wait_procs callback; handy place for a debug print when needed.
        pass

    procs = psutil.Process().children(recursive=True)
    # send SIGTERM
    for p in procs:
        p.terminate()
    gone, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
    if alive:
        # send SIGKILL to whatever ignored SIGTERM
        for p in alive:
            p.kill()
        gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
        if alive:
            # give up
            for p in alive:
                # BUG FIX: the original mixed a '{}' placeholder with the '%'
                # operator ("..." % p.pid), which raised TypeError here.
                print("process {} survived SIGKILL; giving up".format(p.pid))
@eel.expose
def stop_manager():
    # Shut the whole manager process tree down, then kill this process.
    # NOTE(review): `player_plain.reap` is presumably a helper that reaps the
    # children of the given process -- confirm; `reap_children` above looks
    # like the intended local equivalent.
    print("Shutting manager down.")
    player_plain.reap(psutil.Process())
    # Kill our own process; `kill()` returns None, so `procs` is unused.
    procs = psutil.Process().kill()
# Manager start-up banner and main loop.
print("=== Ready! ===")
print("To play games open http://localhost:6147/run.html in your browser on Mac/Linux/WindowsPro, or http://192.168.99.100:6147/run.html on Windows10Home.")
# Guards against two games running concurrently (see run_game above).
lock = threading.Lock()
# Serve the web UI without blocking so the eel event loop runs below.
eel.start('run.html', options=options, block=False, suppress_error=True)
while True:
    eel.sleep(1.0)
|
dask_util.py | # -*- coding: utf-8 -*-
from collections import defaultdict
from timeit import default_timer
from threading import Event, Thread, Lock
import os
import time
import sys
try:
from dask.callbacks import Callback
from dask.utils import ignoring
except ImportError as e:
opt_import_err = e
Callback = object
else:
opt_import_err = None
from africanus.util.docs import DefaultOut
from africanus.util.requirements import requires_optional
def format_time(t):
    """Format seconds into a human readable form.

    Shows the two most significant units, e.g. " 1h 1m" or "   30s".
    """
    minutes, seconds = divmod(t, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    if weeks:
        return "{0:2.0f}w{1:2.0f}d".format(weeks, days)
    if days:
        return "{0:2.0f}d{1:2.0f}h".format(days, hours)
    if hours:
        return "{0:2.0f}h{1:2.0f}m".format(hours, minutes)
    if minutes:
        return "{0:2.0f}m{1:2.0f}s".format(minutes, seconds)
    return "{0:5.0f}s".format(seconds)
def key_bin(key):
    """Reduce a dask graph key to its task-name bin, as a string."""
    # Keys are usually ("name", idx, ...) tuples; bin by the name part.
    # Exact type checks on purpose: tuple/bytes subclasses pass through.
    name = key[0] if type(key) is tuple else key
    if type(name) is bytes:
        name = name.decode()
    try:
        return str(name)
    except Exception:
        # Anything that refuses to stringify lands in a catch-all bin.
        return "other"
class TaskData(object):
    """Per-bin accumulator: completed count, total count and time spent."""

    __slots__ = ("total", "completed", "time_sum")

    def __init__(self, completed=0, total=0, time_sum=0.0):
        self.completed = completed
        self.total = total
        self.time_sum = time_sum

    def __iadd__(self, other):
        # Accumulate in place; augmented assignment must return self.
        self.completed += other.completed
        self.total += other.total
        self.time_sum += other.time_sum
        return self

    def __add__(self, other):
        return TaskData(completed=self.completed + other.completed,
                        total=self.total + other.total,
                        time_sum=self.time_sum + other.time_sum)

    def __repr__(self):
        return "TaskData(%s, %s, %s)" % (self.completed,
                                         self.total,
                                         self.time_sum)

    __str__ = __repr__
def update_bar(elapsed, prev_completed, prev_estimated, pb):
    """Recompute the time estimate and redraw the progress bar.

    Parameters
    ----------
    elapsed : float
        Seconds since the computation started.
    prev_completed : int
        Completed-task count at the previous update.
    prev_estimated : float
        Estimate produced by the previous update.
    pb : EstimatingProgressBar
        Owning callback; supplies task data, lock, width and output file.

    Returns
    -------
    (completed, estimated) : (int, float)
        Values to feed back in as the prev_* arguments next call.
    """
    total = 0
    completed = 0
    estimated = 0.0
    time_guess = 0.0

    # Snapshot the per-bin counters under the lock.
    with pb._lock:
        for v in pb.task_data.values():
            total += v.total
            completed += v.completed

            if v.completed > 0:
                avg_time = v.time_sum / v.completed
                estimated += avg_time * v.total
                time_guess += v.time_sum

    # If we've completed some new tasks, update our estimate,
    # otherwise use the previous estimate. This prevents jumps
    # relative to the elapsed time.
    # (Bug fix: also require time_guess > 0 -- instantaneous tasks could
    # previously trigger a ZeroDivisionError here.)
    if completed != prev_completed and time_guess > 0.0:
        estimated = estimated * elapsed / time_guess
    else:
        estimated = prev_estimated

    # For the first 10 seconds, tell the user estimates improve over time,
    # then display the bar.
    if elapsed < 10.0:
        fraction = 0.0
        bar = " estimate improves over time"
    else:
        # Print out the progress bar
        fraction = elapsed / estimated if estimated > 0.0 else 0.0
        bar = "#" * int(pb._width * fraction)

    percent = int(100 * fraction)
    msg = "\r[{0:{1}.{1}}] | {2}% Complete (Estimate) | {3} / ~{4}".format(
        bar, pb._width, percent,
        format_time(elapsed),
        "???" if estimated == 0.0 else format_time(estimated))

    # A closed/odd file object raises ValueError; drawing is best-effort.
    with ignoring(ValueError):
        pb._file.write(msg)
        pb._file.flush()

    return completed, estimated
def timer_func(pb):
    """Background loop that periodically redraws pb's progress bar."""
    start = default_timer()
    # Carry the previous update's results across iterations so update_bar
    # can hold the estimate steady when no new tasks have completed.
    # (Bug fix: these were reset to zero on every loop iteration, which
    # defeated the anti-jump logic in update_bar.)
    prev_completed = 0
    prev_estimated = 0.0

    while pb.running.is_set():
        elapsed = default_timer() - start

        if elapsed > pb._minimum:
            prev_completed, prev_estimated = update_bar(elapsed,
                                                        prev_completed,
                                                        prev_estimated,
                                                        pb)

        time.sleep(pb._dt)
default_out = DefaultOut("sys.stdout")
class EstimatingProgressBar(Callback):
    """
    Progress Bar that displays elapsed time as well as an
    estimate of total time taken.

    When starting a dask computation,
    the bar examines the graph and determines
    the number of chunks contained by a dask collection.

    During computation the number of completed chunks and
    the total time taken to complete them are
    tracked. The average derived from these numbers is
    used to estimate total compute time, relative to
    the current elapsed time.

    The bar is not particularly accurate and will
    underestimate near the beginning of computation
    and seems to slightly overestimate during the
    bulk of computation. However, it may be more accurate
    than the default dask task bar which tracks
    number of tasks completed by total tasks.

    Parameters
    ----------
    minimum : int, optional
        Minimum time threshold in seconds before displaying a progress bar.
        Default is 0 (always display)
    width : int, optional
        Width of the bar, default is 42 characters.
    dt : float, optional
        Update resolution in seconds, default is 1.0 seconds.
    """
    @requires_optional("dask", opt_import_err)
    def __init__(self, minimum=0, width=42, dt=1.0, out=default_out):
        if out is None:
            # Explicit None silences the bar entirely.
            out = open(os.devnull, "w")
        elif out is default_out:
            out = sys.stdout

        self._minimum = minimum
        self._width = width
        self._dt = dt
        self._file = out
        self._lock = Lock()

    def _start(self, dsk):
        # Bin the graph's tasks by name so per-bin averages can be kept.
        self.task_start = {}
        self.task_data = defaultdict(TaskData)

        for k in dsk:
            self.task_data[key_bin(k)].total += 1

        self.running = Event()
        self.running.set()
        self.thread = Thread(target=timer_func, args=(self,))
        # Bug fix: the daemon flag must be set on the thread object (it was
        # previously set on the callback itself, leaving a non-daemon timer
        # thread that could keep the interpreter alive at exit).
        self.thread.daemon = True
        self.thread.start()

    def _finish(self, dsk, state, errored):
        # Stop the timer thread and drop per-run state.
        self.running.clear()
        self.task_data.clear()
        self.task_start.clear()

    def _pretask(self, key, dsk, state):
        with self._lock:
            self.task_start[key] = default_timer()

    def _posttask(self, key, result, dsk, state, worker_id):
        with self._lock:
            td = self.task_data[key_bin(key)]
            td.time_sum += default_timer() - self.task_start.pop(key)
            td.completed += 1
|
backend.py | # -*- coding: utf-8 -*-
# This code is based on AjaxTerm/Web-Shell which included a fairly complete
# vt100 implementation as well as a stable process multiplexer.
# I made some small fixes, improved some small parts and added a Session class
# which can be used by the widget.
# License: GPL2
import sys
import os
import fcntl
import array
import threading
import time
import termios
import pty
import signal
import struct
import select
import subprocess
__version__ = "0.1"
class Terminal(object):
def __init__(self, w, h):
self.w = w
self.h = h
self.vt100_charset_graph = [
0x25ca, 0x2026, 0x2022, 0x3f,
0xb6, 0x3f, 0xb0, 0xb1,
0x3f, 0x3f, 0x2b, 0x2b,
0x2b, 0x2b, 0x2b, 0xaf,
0x2014, 0x2014, 0x2014, 0x5f,
0x2b, 0x2b, 0x2b, 0x2b,
0x7c, 0x2264, 0x2265, 0xb6,
0x2260, 0xa3, 0xb7, 0x7f
]
self.vt100_esc = {
'#8': self.esc_DECALN,
'(A': self.esc_G0_0,
'(B': self.esc_G0_1,
'(0': self.esc_G0_2,
'(1': self.esc_G0_3,
'(2': self.esc_G0_4,
')A': self.esc_G1_0,
')B': self.esc_G1_1,
')0': self.esc_G1_2,
')1': self.esc_G1_3,
')2': self.esc_G1_4,
'7': self.esc_DECSC,
'8': self.esc_DECRC,
'=': self.esc_DECKPAM,
'>': self.esc_DECKPNM,
'D': self.esc_IND,
'E': self.esc_NEL,
'H': self.esc_HTS,
'M': self.esc_RI,
'N': self.esc_SS2,
'O': self.esc_SS3,
'P': self.esc_DCS,
'X': self.esc_SOS,
'Z': self.esc_DECID,
'[': self.esc_CSI,
'\\': self.esc_ST,
']': self.esc_OSC,
'^': self.esc_PM,
'_': self.esc_APC,
'c': self.reset_hard,
}
self.vt100_csi = {
'@': self.csi_ICH,
'A': self.csi_CUU,
'B': self.csi_CUD,
'C': self.csi_CUF,
'D': self.csi_CUB,
'E': self.csi_CNL,
'F': self.csi_CPL,
'G': self.csi_CHA,
'H': self.csi_CUP,
'I': self.csi_CHT,
'J': self.csi_ED,
'K': self.csi_EL,
'L': self.csi_IL,
'M': self.csi_DL,
'P': self.csi_DCH,
'S': self.csi_SU,
'T': self.csi_SD,
'W': self.csi_CTC,
'X': self.csi_ECH,
'Z': self.csi_CBT,
'`': self.csi_HPA,
'a': self.csi_HPR,
'b': self.csi_REP,
'c': self.csi_DA,
'd': self.csi_VPA,
'e': self.csi_VPR,
'f': self.csi_HVP,
'g': self.csi_TBC,
'h': self.csi_SM,
'l': self.csi_RM,
'm': self.csi_SGR,
'n': self.csi_DSR,
'r': self.csi_DECSTBM,
's': self.csi_SCP,
'u': self.csi_RCP,
'x': self.csi_DECREQTPARM,
'!p': self.csi_DECSTR,
}
self.vt100_keyfilter_ansikeys = {
'~': '~',
'A': '\x1b[A',
'B': '\x1b[B',
'C': '\x1b[C',
'D': '\x1b[D',
'F': '\x1b[F',
'H': '\x1b[H',
'1': '\x1b[5~',
'2': '\x1b[6~',
'3': '\x1b[2~',
'4': '\x1b[3~',
'a': '\x1bOP',
'b': '\x1bOQ',
'c': '\x1bOR',
'd': '\x1bOS',
'e': '\x1b[15~',
'f': '\x1b[17~',
'g': '\x1b[18~',
'h': '\x1b[19~',
'i': '\x1b[20~',
'j': '\x1b[21~',
'k': '\x1b[23~',
'l': '\x1b[24~',
}
self.vt100_keyfilter_appkeys = {
'~': '~',
'A': '\x1bOA',
'B': '\x1bOB',
'C': '\x1bOC',
'D': '\x1bOD',
'F': '\x1bOF',
'H': '\x1bOH',
'1': '\x1b[5~',
'2': '\x1b[6~',
'3': '\x1b[2~',
'4': '\x1b[3~',
'a': '\x1bOP',
'b': '\x1bOQ',
'c': '\x1bOR',
'd': '\x1bOS',
'e': '\x1b[15~',
'f': '\x1b[17~',
'g': '\x1b[18~',
'h': '\x1b[19~',
'i': '\x1b[20~',
'j': '\x1b[21~',
'k': '\x1b[23~',
'l': '\x1b[24~',
}
self.reset_hard()
# Reset functions
def reset_hard(self):
# Attribute mask: 0x0XFB0000
# X: Bit 0 - Underlined
# Bit 1 - Negative
# Bit 2 - Concealed
# F: Foreground
# B: Background
self.attr = 0x00fe0000
# UTF-8 decoder
self.utf8_units_count = 0
self.utf8_units_received = 0
self.utf8_char = 0
# Key filter
self.vt100_keyfilter_escape = False
# Last char
self.vt100_lastchar = 0
# Control sequences
self.vt100_parse_len = 0
self.vt100_parse_state = ""
self.vt100_parse_func = ""
self.vt100_parse_param = ""
# Buffers
self.vt100_out = ""
# Invoke other resets
self.reset_screen()
self.reset_soft()
def reset_soft(self):
# Attribute mask: 0x0XFB0000
# X: Bit 0 - Underlined
# Bit 1 - Negative
# Bit 2 - Concealed
# F: Foreground
# B: Background
self.attr = 0x00fe0000
# Scroll parameters
self.scroll_area_y0 = 0
self.scroll_area_y1 = self.h
# Character sets
self.vt100_charset_is_single_shift = False
self.vt100_charset_is_graphical = False
self.vt100_charset_g_sel = 0
self.vt100_charset_g = [0, 0]
# Modes
self.vt100_mode_insert = False
self.vt100_mode_lfnewline = False
self.vt100_mode_cursorkey = False
self.vt100_mode_column_switch = False
self.vt100_mode_inverse = False
self.vt100_mode_origin = False
self.vt100_mode_autowrap = True
self.vt100_mode_cursor = True
self.vt100_mode_alt_screen = False
self.vt100_mode_backspace = False
# Init DECSC state
self.esc_DECSC()
self.vt100_saved2 = self.vt100_saved
self.esc_DECSC()
def reset_screen(self):
# Screen
self.screen = array.array('i', [self.attr | 0x20] * self.w * self.h)
self.screen2 = array.array('i', [self.attr | 0x20] * self.w * self.h)
# Scroll parameters
self.scroll_area_y0 = 0
self.scroll_area_y1 = self.h
# Cursor position
self.cx = 0
self.cy = 0
# Tab stops
self.tab_stops = range(0, self.w, 8)
# UTF-8 functions
def utf8_decode(self, d):
    """Incrementally decode byte string *d*, returning the decoded text.

    Decoder state (utf8_units_count/received, utf8_char) persists across
    calls so a multi-byte sequence may span two reads.
    NOTE(review): relies on the Python 2-only ``unichr`` builtin -- this
    module is not Python 3 compatible as written; confirm target runtime.
    """
    o = ''
    for c in d:
        char = ord(c)
        if self.utf8_units_count != self.utf8_units_received:
            # Mid-sequence: expecting a continuation byte (10xxxxxx).
            self.utf8_units_received += 1
            if (char & 0xc0) == 0x80:
                self.utf8_char = (self.utf8_char << 6) | (char & 0x3f)
                if self.utf8_units_count == self.utf8_units_received:
                    # Sequence complete; only BMP code points are kept.
                    if self.utf8_char < 0x10000:
                        o += unichr(self.utf8_char)
                    self.utf8_units_count = self.utf8_units_received = 0
            else:
                # Malformed sequence: emit '?' for every byte consumed.
                o += '?'
                while self.utf8_units_received:
                    o += '?'
                    self.utf8_units_received -= 1
                self.utf8_units_count = 0
        else:
            if (char & 0x80) == 0x00:
                # Plain ASCII byte.
                o += c
            elif (char & 0xe0) == 0xc0:
                # Lead byte of a 2-byte sequence.
                self.utf8_units_count = 1
                self.utf8_char = char & 0x1f
            elif (char & 0xf0) == 0xe0:
                # Lead byte of a 3-byte sequence.
                self.utf8_units_count = 2
                self.utf8_char = char & 0x0f
            elif (char & 0xf8) == 0xf0:
                # Lead byte of a 4-byte sequence.
                self.utf8_units_count = 3
                self.utf8_char = char & 0x07
            else:
                # Invalid lead byte.
                o += '?'
    return o
def utf8_charwidth(self, char):
if char >= 0x2e80:
return 2
else:
return 1
# Low-level terminal functions
def peek(self, y0, x0, y1, x1):
return self.screen[self.w * y0 + x0:self.w * (y1 - 1) + x1]
def poke(self, y, x, s):
pos = self.w * y + x
self.screen[pos:pos + len(s)] = s
def fill(self, y0, x0, y1, x1, char):
n = self.w * (y1 - y0 - 1) + (x1 - x0)
self.poke(y0, x0, array.array('i', [char] * n))
def clear(self, y0, x0, y1, x1):
self.fill(y0, x0, y1, x1, self.attr | 0x20)
# Scrolling functions
def scroll_area_up(self, y0, y1, n=1):
n = min(y1 - y0, n)
self.poke(y0, 0, self.peek(y0 + n, 0, y1, self.w))
self.clear(y1 - n, 0, y1, self.w)
def scroll_area_down(self, y0, y1, n=1):
n = min(y1 - y0, n)
self.poke(y0 + n, 0, self.peek(y0, 0, y1 - n, self.w))
self.clear(y0, 0, y0 + n, self.w)
def scroll_area_set(self, y0, y1):
y0 = max(0, min(self.h - 1, y0))
y1 = max(1, min(self.h, y1))
if y1 > y0:
self.scroll_area_y0 = y0
self.scroll_area_y1 = y1
def scroll_line_right(self, y, x, n=1):
if x < self.w:
n = min(self.w - self.cx, n)
self.poke(y, x + n, self.peek(y, x, y + 1, self.w - n))
self.clear(y, x, y + 1, x + n)
def scroll_line_left(self, y, x, n=1):
if x < self.w:
n = min(self.w - self.cx, n)
self.poke(y, x, self.peek(y, x + n, y + 1, self.w))
self.clear(y, self.w - n, y + 1, self.w)
# Cursor functions
def cursor_line_width(self, next_char):
wx = self.utf8_charwidth(next_char)
lx = 0
for x in range(min(self.cx, self.w)):
char = self.peek(self.cy, x, self.cy + 1, x + 1)[0] & 0xffff
wx += self.utf8_charwidth(char)
lx += 1
return wx, lx
def cursor_up(self, n=1):
self.cy = max(self.scroll_area_y0, self.cy - n)
def cursor_down(self, n=1):
self.cy = min(self.scroll_area_y1 - 1, self.cy + n)
def cursor_left(self, n=1):
self.cx = max(0, self.cx - n)
def cursor_right(self, n=1):
self.cx = min(self.w - 1, self.cx + n)
def cursor_set_x(self, x):
self.cx = max(0, x)
def cursor_set_y(self, y):
self.cy = max(0, min(self.h - 1, y))
def cursor_set(self, y, x):
self.cursor_set_x(x)
self.cursor_set_y(y)
# Dumb terminal
def ctrl_BS(self):
delta_y, cx = divmod(self.cx - 1, self.w)
cy = max(self.scroll_area_y0, self.cy + delta_y)
self.cursor_set(cy, cx)
def ctrl_HT(self, n=1):
if n > 0 and self.cx >= self.w:
return
if n <= 0 and self.cx == 0:
return
ts = 0
for i in range(len(self.tab_stops)):
if self.cx >= self.tab_stops[i]:
ts = i
ts += n
if ts < len(self.tab_stops) and ts >= 0:
self.cursor_set_x(self.tab_stops[ts])
else:
self.cursor_set_x(self.w - 1)
def ctrl_LF(self):
if self.vt100_mode_lfnewline:
self.ctrl_CR()
if self.cy == self.scroll_area_y1 - 1:
self.scroll_area_up(self.scroll_area_y0, self.scroll_area_y1)
else:
self.cursor_down()
def ctrl_CR(self):
self.cursor_set_x(0)
def dumb_write(self, char):
if char < 32:
if char == 8:
self.ctrl_BS()
elif char == 9:
self.ctrl_HT()
elif char >= 10 and char <= 12:
self.ctrl_LF()
elif char == 13:
self.ctrl_CR()
return True
return False
def dumb_echo(self, char):
# Check right bound
wx, cx = self.cursor_line_width(char)
# Newline
if wx > self.w:
if self.vt100_mode_autowrap:
self.ctrl_CR()
self.ctrl_LF()
else:
self.cx = cx - 1
if self.vt100_mode_insert:
self.scroll_line_right(self.cy, self.cx)
if self.vt100_charset_is_single_shift:
self.vt100_charset_is_single_shift = False
elif self.vt100_charset_is_graphical and (char & 0xffe0) == 0x0060:
char = self.vt100_charset_graph[char - 0x60]
self.poke(self.cy, self.cx, array.array('i', [self.attr | char]))
self.cursor_set_x(self.cx + 1)
# VT100 CTRL, ESC, CSI handlers
def vt100_charset_update(self):
self.vt100_charset_is_graphical = (
self.vt100_charset_g[self.vt100_charset_g_sel] == 2)
def vt100_charset_set(self, g):
# Invoke active character set
self.vt100_charset_g_sel = g
self.vt100_charset_update()
def vt100_charset_select(self, g, charset):
# Select charset
self.vt100_charset_g[g] = charset
self.vt100_charset_update()
def vt100_setmode(self, p, state):
# Set VT100 mode
p = self.vt100_parse_params(p, [], False)
for m in p:
if m == '4':
# Insertion replacement mode
self.vt100_mode_insert = state
elif m == '20':
# Linefeed/new line mode
self.vt100_mode_lfnewline = state
elif m == '?1':
# Cursor key mode
self.vt100_mode_cursorkey = state
elif m == '?3':
# Column mode
if self.vt100_mode_column_switch:
if state:
self.w = 132
else:
self.w = 80
self.reset_screen()
elif m == '?5':
# Screen mode
self.vt100_mode_inverse = state
elif m == '?6':
# Region origin mode
self.vt100_mode_origin = state
if state:
self.cursor_set(self.scroll_area_y0, 0)
else:
self.cursor_set(0, 0)
elif m == '?7':
# Autowrap mode
self.vt100_mode_autowrap = state
elif m == '?25':
# Text cursor enable mode
self.vt100_mode_cursor = state
elif m == '?40':
# Column switch control
self.vt100_mode_column_switch = state
elif m == '?47':
# Alternate screen mode
if ((state and not self.vt100_mode_alt_screen) or
(not state and self.vt100_mode_alt_screen)):
self.screen, self.screen2 = self.screen2, self.screen
self.vt100_saved, self.vt100_saved2 = self.vt100_saved2, self.vt100_saved
self.vt100_mode_alt_screen = state
elif m == '?67':
# Backspace/delete
self.vt100_mode_backspace = state
def ctrl_SO(self):
# Shift out
self.vt100_charset_set(1)
def ctrl_SI(self):
# Shift in
self.vt100_charset_set(0)
def esc_CSI(self):
# CSI start sequence
self.vt100_parse_reset('csi')
def esc_DECALN(self):
# Screen alignment display
self.fill(0, 0, self.h, self.w, 0x00fe0045)
def esc_G0_0(self):
self.vt100_charset_select(0, 0)
def esc_G0_1(self):
self.vt100_charset_select(0, 1)
def esc_G0_2(self):
self.vt100_charset_select(0, 2)
def esc_G0_3(self):
self.vt100_charset_select(0, 3)
def esc_G0_4(self):
self.vt100_charset_select(0, 4)
def esc_G1_0(self):
self.vt100_charset_select(1, 0)
def esc_G1_1(self):
self.vt100_charset_select(1, 1)
def esc_G1_2(self):
self.vt100_charset_select(1, 2)
def esc_G1_3(self):
self.vt100_charset_select(1, 3)
def esc_G1_4(self):
self.vt100_charset_select(1, 4)
def esc_DECSC(self):
# Store cursor
self.vt100_saved = {}
self.vt100_saved['cx'] = self.cx
self.vt100_saved['cy'] = self.cy
self.vt100_saved['attr'] = self.attr
self.vt100_saved['charset_g_sel'] = self.vt100_charset_g_sel
self.vt100_saved['charset_g'] = self.vt100_charset_g[:]
self.vt100_saved['mode_autowrap'] = self.vt100_mode_autowrap
self.vt100_saved['mode_origin'] = self.vt100_mode_origin
def esc_DECRC(self):
# Retore cursor
self.cx = self.vt100_saved['cx']
self.cy = self.vt100_saved['cy']
self.attr = self.vt100_saved['attr']
self.vt100_charset_g_sel = self.vt100_saved['charset_g_sel']
self.vt100_charset_g = self.vt100_saved['charset_g'][:]
self.vt100_charset_update()
self.vt100_mode_autowrap = self.vt100_saved['mode_autowrap']
self.vt100_mode_origin = self.vt100_saved['mode_origin']
def esc_DECKPAM(self):
# Application keypad mode
pass
def esc_DECKPNM(self):
# Numeric keypad mode
pass
def esc_IND(self):
# Index
self.ctrl_LF()
def esc_NEL(self):
# Next line
self.ctrl_CR()
self.ctrl_LF()
def esc_HTS(self):
# Character tabulation set
self.csi_CTC('0')
def esc_RI(self):
# Reverse line feed
if self.cy == self.scroll_area_y0:
self.scroll_area_down(self.scroll_area_y0, self.scroll_area_y1)
else:
self.cursor_up()
def esc_SS2(self):
# Single-shift two
self.vt100_charset_is_single_shift = True
def esc_SS3(self):
# Single-shift three
self.vt100_charset_is_single_shift = True
def esc_DCS(self):
# Device control string
self.vt100_parse_reset('str')
def esc_SOS(self):
# Start of string
self.vt100_parse_reset('str')
def esc_DECID(self):
# Identify terminal
self.csi_DA('0')
def esc_ST(self):
# String terminator
pass
def esc_OSC(self):
# Operating system command
self.vt100_parse_reset('str')
def esc_PM(self):
# Privacy message
self.vt100_parse_reset('str')
def esc_APC(self):
# Application program command
self.vt100_parse_reset('str')
def csi_ICH(self, p):
# Insert character
p = self.vt100_parse_params(p, [1])
self.scroll_line_right(self.cy, self.cx, p[0])
def csi_CUU(self, p):
# Cursor up
p = self.vt100_parse_params(p, [1])
self.cursor_up(max(1, p[0]))
def csi_CUD(self, p):
# Cursor down
p = self.vt100_parse_params(p, [1])
self.cursor_down(max(1, p[0]))
def csi_CUF(self, p):
# Cursor right
p = self.vt100_parse_params(p, [1])
self.cursor_right(max(1, p[0]))
def csi_CUB(self, p):
# Cursor left
p = self.vt100_parse_params(p, [1])
self.cursor_left(max(1, p[0]))
def csi_CNL(self, p):
# Cursor next line
self.csi_CUD(p)
self.ctrl_CR()
def csi_CPL(self, p):
# Cursor preceding line
self.csi_CUU(p)
self.ctrl_CR()
def csi_CHA(self, p):
# Cursor character absolute
p = self.vt100_parse_params(p, [1])
self.cursor_set_x(p[0] - 1)
def csi_CUP(self, p):
# Set cursor position
p = self.vt100_parse_params(p, [1, 1])
if self.vt100_mode_origin:
self.cursor_set(self.scroll_area_y0 + p[0] - 1, p[1] - 1)
else:
self.cursor_set(p[0] - 1, p[1] - 1)
def csi_CHT(self, p):
# Cursor forward tabulation
p = self.vt100_parse_params(p, [1])
self.ctrl_HT(max(1, p[0]))
def csi_ED(self, p):
# Erase in display
p = self.vt100_parse_params(p, ['0'], False)
if p[0] == '0':
self.clear(self.cy, self.cx, self.h, self.w)
elif p[0] == '1':
self.clear(0, 0, self.cy + 1, self.cx + 1)
elif p[0] == '2':
self.clear(0, 0, self.h, self.w)
def csi_EL(self, p):
# Erase in line
p = self.vt100_parse_params(p, ['0'], False)
if p[0] == '0':
self.clear(self.cy, self.cx, self.cy + 1, self.w)
elif p[0] == '1':
self.clear(self.cy, 0, self.cy + 1, self.cx + 1)
elif p[0] == '2':
self.clear(self.cy, 0, self.cy + 1, self.w)
def csi_IL(self, p):
# Insert line
p = self.vt100_parse_params(p, [1])
if (self.cy >= self.scroll_area_y0 and self.cy < self.scroll_area_y1):
self.scroll_area_down(self.cy, self.scroll_area_y1, max(1, p[0]))
def csi_DL(self, p):
# Delete line
p = self.vt100_parse_params(p, [1])
if (self.cy >= self.scroll_area_y0 and self.cy < self.scroll_area_y1):
self.scroll_area_up(self.cy, self.scroll_area_y1, max(1, p[0]))
def csi_DCH(self, p):
# Delete characters
p = self.vt100_parse_params(p, [1])
self.scroll_line_left(self.cy, self.cx, max(1, p[0]))
def csi_SU(self, p):
# Scroll up
p = self.vt100_parse_params(p, [1])
self.scroll_area_up(
self.scroll_area_y0, self.scroll_area_y1, max(1, p[0]))
def csi_SD(self, p):
# Scroll down
p = self.vt100_parse_params(p, [1])
self.scroll_area_down(
self.scroll_area_y0, self.scroll_area_y1, max(1, p[0]))
def csi_CTC(self, p):
# Cursor tabulation control
p = self.vt100_parse_params(p, ['0'], False)
for m in p:
if m == '0':
try:
ts = self.tab_stops.index(self.cx)
except ValueError:
tab_stops = self.tab_stops
tab_stops.append(self.cx)
tab_stops.sort()
self.tab_stops = tab_stops
elif m == '2':
try:
self.tab_stops.remove(self.cx)
except ValueError:
pass
elif m == '5':
self.tab_stops = [0]
def csi_ECH(self, p):
# Erase character
p = self.vt100_parse_params(p, [1])
n = min(self.w - self.cx, max(1, p[0]))
self.clear(self.cy, self.cx, self.cy + 1, self.cx + n)
def csi_CBT(self, p):
# Cursor backward tabulation
p = self.vt100_parse_params(p, [1])
self.ctrl_HT(1 - max(1, p[0]))
def csi_HPA(self, p):
# Character position absolute
p = self.vt100_parse_params(p, [1])
self.cursor_set_x(p[0] - 1)
def csi_HPR(self, p):
# Character position forward
self.csi_CUF(p)
def csi_REP(self, p):
# Repeat
p = self.vt100_parse_params(p, [1])
if self.vt100_lastchar < 32:
return
n = min(2000, max(1, p[0]))
while n:
self.dumb_echo(self.vt100_lastchar)
n -= 1
self.vt100_lastchar = 0
def csi_DA(self, p):
# Device attributes
p = self.vt100_parse_params(p, ['0'], False)
if p[0] == '0':
self.vt100_out = "\x1b[?1;2c"
elif p[0] == '>0' or p[0] == '>':
self.vt100_out = "\x1b[>0;184;0c"
def csi_VPA(self, p):
# Line position absolute
p = self.vt100_parse_params(p, [1])
self.cursor_set_y(p[0] - 1)
def csi_VPR(self, p):
# Line position forward
self.csi_CUD(p)
def csi_HVP(self, p):
# Character and line position
self.csi_CUP(p)
def csi_TBC(self, p):
# Tabulation clear
p = self.vt100_parse_params(p, ['0'], False)
if p[0] == '0':
self.csi_CTC('2')
elif p[0] == '3':
self.csi_CTC('5')
def csi_SM(self, p):
# Set mode
self.vt100_setmode(p, True)
def csi_RM(self, p):
# Reset mode
self.vt100_setmode(p, False)
def csi_SGR(self, p):
# Select graphic rendition
p = self.vt100_parse_params(p, [0])
for m in p:
if m == 0:
# Reset
self.attr = 0x00fe0000
elif m == 4:
# Underlined
self.attr |= 0x01000000
elif m == 7:
# Negative
self.attr |= 0x02000000
elif m == 8:
# Concealed
self.attr |= 0x04000000
elif m == 24:
# Not underlined
self.attr &= 0x7eff0000
elif m == 27:
# Positive
self.attr &= 0x7dff0000
elif m == 28:
# Revealed
self.attr &= 0x7bff0000
elif m >= 30 and m <= 37:
# Foreground
self.attr = (self.attr & 0x7f0f0000) | ((m - 30) << 20)
elif m == 39:
# Default fg color
self.attr = (self.attr & 0x7f0f0000) | 0x00f00000
elif m >= 40 and m <= 47:
# Background
self.attr = (self.attr & 0x7ff00000) | ((m - 40) << 16)
elif m == 49:
# Default bg color
self.attr = (self.attr & 0x7ff00000) | 0x000e0000
def csi_DSR(self, p):
# Device status report
p = self.vt100_parse_params(p, ['0'], False)
if p[0] == '5':
self.vt100_out = "\x1b[0n"
elif p[0] == '6':
x = self.cx + 1
y = self.cy + 1
self.vt100_out = '\x1b[%d;%dR' % (y, x)
elif p[0] == '7':
self.vt100_out = 'WebShell'
elif p[0] == '8':
self.vt100_out = __version__
elif p[0] == '?6':
x = self.cx + 1
y = self.cy + 1
self.vt100_out = '\x1b[?%d;%dR' % (y, x)
elif p[0] == '?15':
self.vt100_out = '\x1b[?13n'
elif p[0] == '?25':
self.vt100_out = '\x1b[?20n'
elif p[0] == '?26':
self.vt100_out = '\x1b[?27;1n'
elif p[0] == '?53':
self.vt100_out = '\x1b[?53n'
def csi_DECSTBM(self, p):
# Set top and bottom margins
p = self.vt100_parse_params(p, [1, self.h])
self.scroll_area_set(p[0] - 1, p[1])
if self.vt100_mode_origin:
self.cursor_set(self.scroll_area_y0, 0)
else:
self.cursor_set(0, 0)
def csi_SCP(self, p):
# Save cursor position
self.vt100_saved_cx = self.cx
self.vt100_saved_cy = self.cy
def csi_RCP(self, p):
# Restore cursor position
self.cx = self.vt100_saved_cx
self.cy = self.vt100_saved_cy
def csi_DECREQTPARM(self, p):
# Request terminal parameters
p = self.vt100_parse_params(p, [], False)
if p[0] == '0':
self.vt100_out = "\x1b[2;1;1;112;112;1;0x"
elif p[0] == '1':
self.vt100_out = "\x1b[3;1;1;112;112;1;0x"
def csi_DECSTR(self, p):
# Soft terminal reset
self.reset_soft()
# VT100 Parser
def vt100_parse_params(self, p, d, to_int=True):
# Process parameters (params p with defaults d)
# Add prefix to all parameters
prefix = ''
if len(p) > 0:
if p[0] >= '<' and p[0] <= '?':
prefix = p[0]
p = p[1:]
p = p.split(';')
else:
p = ''
# Process parameters
n = max(len(p), len(d))
o = []
for i in range(n):
value_def = False
if i < len(p):
value = prefix + p[i]
value_def = True
if to_int:
try:
value = int(value)
except ValueError:
value_def = False
if (not value_def) and i < len(d):
value = d[i]
o.append(value)
return o
def vt100_parse_reset(self, vt100_parse_state=""):
self.vt100_parse_state = vt100_parse_state
self.vt100_parse_len = 0
self.vt100_parse_func = ""
self.vt100_parse_param = ""
def vt100_parse_process(self):
if self.vt100_parse_state == 'esc':
# ESC mode
f = self.vt100_parse_func
try:
self.vt100_esc[f]()
except KeyError:
pass
if self.vt100_parse_state == 'esc':
self.vt100_parse_reset()
else:
# CSI mode
f = self.vt100_parse_func
p = self.vt100_parse_param
try:
self.vt100_csi[f](p)
except KeyError:
pass
if self.vt100_parse_state == 'csi':
self.vt100_parse_reset()
def vt100_write(self, char):
if char < 32:
if char == 27:
self.vt100_parse_reset('esc')
return True
elif char == 14:
self.ctrl_SO()
elif char == 15:
self.ctrl_SI()
elif (char & 0xffe0) == 0x0080:
self.vt100_parse_reset('esc')
self.vt100_parse_func = chr(char - 0x40)
self.vt100_parse_process()
return True
if self.vt100_parse_state:
if self.vt100_parse_state == 'str':
if char >= 32:
return True
self.vt100_parse_reset()
else:
if char < 32:
if char == 24 or char == 26:
self.vt100_parse_reset()
return True
else:
self.vt100_parse_len += 1
if self.vt100_parse_len > 32:
self.vt100_parse_reset()
else:
char_msb = char & 0xf0
if char_msb == 0x20:
# Intermediate bytes (added to function)
self.vt100_parse_func += unichr(char)
elif char_msb == 0x30 and self.vt100_parse_state == 'csi':
# Parameter byte
self.vt100_parse_param += unichr(char)
else:
# Function byte
self.vt100_parse_func += unichr(char)
self.vt100_parse_process()
return True
self.vt100_lastchar = char
return False
# External interface
def set_size(self, w, h):
if w < 2 or w > 256 or h < 2 or h > 256:
return False
self.w = w
self.h = h
self.reset_screen()
return True
def read(self):
d = self.vt100_out
self.vt100_out = ""
return d
def write(self, d):
d = self.utf8_decode(d)
for c in d:
char = ord(c)
if self.vt100_write(char):
continue
if self.dumb_write(char):
continue
if char <= 0xffff:
self.dumb_echo(char)
return True
def pipe(self, d):
o = ''
for c in d:
char = ord(c)
if self.vt100_keyfilter_escape:
self.vt100_keyfilter_escape = False
try:
if self.vt100_mode_cursorkey:
o += self.vt100_keyfilter_appkeys[c]
else:
o += self.vt100_keyfilter_ansikeys[c]
except KeyError:
pass
elif c == '~':
self.vt100_keyfilter_escape = True
elif char == 127:
if self.vt100_mode_backspace:
o += chr(8)
else:
o += chr(127)
else:
o += c
if self.vt100_mode_lfnewline and char == 13:
o += chr(10)
return o
def dump(self):
screen = []
attr_ = -1
cx, cy = min(self.cx, self.w - 1), self.cy
for y in range(0, self.h):
wx = 0
line = [""]
for x in range(0, self.w):
d = self.screen[y * self.w + x]
char = d & 0xffff
attr = d >> 16
# Cursor
if cy == y and cx == x and self.vt100_mode_cursor:
attr = attr & 0xfff0 | 0x000c
# Attributes
if attr != attr_:
if attr_ != -1:
line.append("")
bg = attr & 0x000f
fg = (attr & 0x00f0) >> 4
# Inverse
inv = attr & 0x0200
inv2 = self.vt100_mode_inverse
if (inv and not inv2) or (inv2 and not inv):
fg, bg = bg, fg
# Concealed
if attr & 0x0400:
fg = 0xc
# Underline
if attr & 0x0100:
ul = True
else:
ul = False
line.append((fg, bg, ul))
line.append("")
attr_ = attr
wx += self.utf8_charwidth(char)
if wx <= self.w:
line[-1] += unichr(char)
screen.append(line)
return (cx, cy), screen
def synchronized(func):
    """Decorator serialising calls to *func* through a per-instance RLock.

    The lock is created lazily on first use and stored as ``self.lock``.
    """
    def wrapper(self, *args, **kwargs):
        try:
            lock = self.lock
        except AttributeError:
            lock = self.lock = threading.RLock()
        with lock:
            return func(self, *args, **kwargs)
    return wrapper
class Multiplexer(object):
def __init__(self, cmd="/bin/bash", env_term="xterm-color", timeout=60 * 60 * 24):
# Set Linux signal handler
if sys.platform in ("linux2", "linux3"):
self.sigchldhandler = signal.signal(signal.SIGCHLD, signal.SIG_IGN)
# Session
self.session = {}
self.cmd = cmd
self.env_term = env_term
self.timeout = timeout
# Supervisor thread
self.signal_stop = 0
self.thread = threading.Thread(target=self.proc_thread)
self.thread.start()
def stop(self):
# Stop supervisor thread
self.signal_stop = 1
self.thread.join()
def proc_resize(self, sid, w, h):
fd = self.session[sid]['fd']
# Set terminal size
try:
fcntl.ioctl(fd,
struct.unpack('i',
struct.pack('I', termios.TIOCSWINSZ)
)[0],
struct.pack("HHHH", h, w, 0, 0))
except (IOError, OSError):
pass
self.session[sid]['term'].set_size(w, h)
self.session[sid]['w'] = w
self.session[sid]['h'] = h
@synchronized
def proc_keepalive(self, sid, w, h, cmd=None):
if not sid in self.session:
# Start a new session
self.session[sid] = {
'state': 'unborn',
'term': Terminal(w, h),
'time': time.time(),
'w': w,
'h': h}
return self.proc_spawn(sid, cmd)
elif self.session[sid]['state'] == 'alive':
self.session[sid]['time'] = time.time()
# Update terminal size
if self.session[sid]['w'] != w or self.session[sid]['h'] != h:
self.proc_resize(sid, w, h)
return True
else:
return False
def proc_spawn(self, sid, cmd=None):
# Session
self.session[sid]['state'] = 'alive'
w, h = self.session[sid]['w'], self.session[sid]['h']
# Fork new process
try:
pid, fd = pty.fork()
except (IOError, OSError):
self.session[sid]['state'] = 'dead'
return False
if pid == 0:
cmd = cmd or self.cmd
# Safe way to make it work under BSD and Linux
try:
ls = os.environ['LANG'].split('.')
except KeyError:
ls = []
if len(ls) < 2:
ls = ['en_US', 'UTF-8']
try:
os.putenv('COLUMNS', str(w))
os.putenv('LINES', str(h))
os.putenv('TERM', self.env_term)
os.putenv('PATH', os.environ['PATH'])
os.putenv('LANG', ls[0] + '.UTF-8')
# os.system(cmd)
p = subprocess.Popen(cmd, shell=False)
# print "called with subprocess", p.pid
child_pid, sts = os.waitpid(p.pid, 0)
# print "child_pid", child_pid, sts
except (IOError, OSError):
pass
# self.proc_finish(sid)
os._exit(0)
else:
# Store session vars
self.session[sid]['pid'] = pid
self.session[sid]['fd'] = fd
# Set file control
fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
# Set terminal size
self.proc_resize(sid, w, h)
return True
def proc_waitfordeath(self, sid):
    """Close the pty, reap the child and mark session `sid` as dead."""
    entry = self.session.get(sid, {})
    # Release the master side of the pty; it may already be closed.
    try:
        os.close(entry['fd'])
    except (KeyError, IOError, OSError):
        pass
    entry.pop('fd', None)
    # Reap the child so it does not linger as a zombie.
    try:
        os.waitpid(entry['pid'], 0)
    except (KeyError, IOError, OSError):
        pass
    entry.pop('pid', None)
    self.session[sid]['state'] = 'dead'
    return True
def proc_bury(self, sid):
    """Forcefully terminate session `sid` and drop it from the table."""
    if self.session[sid]['state'] == 'alive':
        # Ask the child to terminate; ignore races where it already exited.
        try:
            os.kill(self.session[sid]['pid'], signal.SIGTERM)
        except (IOError, OSError):
            pass
    self.proc_waitfordeath(sid)
    self.session.pop(sid, None)
    return True
@synchronized
def proc_buryall(self):
    """Terminate and remove every known session."""
    # BUGFIX: iterate over a snapshot -- proc_bury() deletes entries from
    # self.session, and deleting from a dict while iterating its live
    # keys view raises RuntimeError on Python 3.
    for sid in list(self.session):
        self.proc_bury(sid)
@synchronized
def proc_read(self, sid):
    """
    Read from process

    Pump pending output from the pty into the Terminal emulator and
    write back any response the emulator produced (e.g. answers to
    terminal queries).  Returns False when the session is unknown,
    dead, or found to have just exited.
    """
    if sid not in self.session:
        return False
    elif self.session[sid]['state'] != 'alive':
        return False
    try:
        fd = self.session[sid]['fd']
        d = os.read(fd, 65536)
        if not d:
            # Process finished, BSD
            self.proc_waitfordeath(sid)
            return False
    except (IOError, OSError):
        # Process finished, Linux
        self.proc_waitfordeath(sid)
        return False
    term = self.session[sid]['term']
    term.write(d)
    # Read terminal response
    d = term.read()
    if d:
        try:
            os.write(fd, d)
        except (IOError, OSError):
            return False
    return True
@synchronized
def proc_write(self, sid, d):
    """
    Write to process

    Feed input `d` through the terminal filter into the child's pty.
    Returns False for unknown/dead sessions or on a write error.
    """
    entry = self.session.get(sid)
    if entry is None or entry['state'] != 'alive':
        return False
    try:
        # The Terminal may translate the input (key encodings etc.)
        # before it reaches the pty master.
        os.write(entry['fd'], entry['term'].pipe(d))
    except (IOError, OSError):
        return False
    return True
@synchronized
def proc_dump(self, sid):
    """
    Dump terminal output

    Returns the rendered screen of session `sid`, or False if the
    session is unknown.
    """
    entry = self.session.get(sid)
    if entry is None:
        return False
    return entry['term'].dump()
@synchronized
def proc_getalive(self):
    """
    Get alive sessions, bury timed out ones

    Returns (fds, fd2sid): the pty fds of live sessions and a mapping
    from fd back to session id.
    """
    fds = []
    fd2sid = {}
    now = time.time()
    # BUGFIX: snapshot the keys -- proc_bury() deletes entries from
    # self.session, which raises RuntimeError when done while iterating
    # the live dict view on Python 3.
    for sid in list(self.session):
        then = self.session[sid]['time']
        if (now - then) > self.timeout:
            self.proc_bury(sid)
        else:
            if self.session[sid]['state'] == 'alive':
                fds.append(self.session[sid]['fd'])
                fd2sid[self.session[sid]['fd']] = sid
    return (fds, fd2sid)
def proc_thread(self):
    """
    Supervisor thread

    Polls all live ptys until stop() raises `signal_stop`, pumping any
    available output into the corresponding Terminal emulators.
    """
    while not self.signal_stop:
        # Read fds (this also buries sessions that timed out)
        (fds, fd2sid) = self.proc_getalive()
        try:
            i, o, e = select.select(fds, [], [], 1.0)
        except (IOError, OSError):
            i = []
        for fd in i:
            sid = fd2sid[fd]
            self.proc_read(sid)
            # Record when this session's screen last changed, so clients
            # can poll cheaply.
            self.session[sid]["changed"] = time.time()
        if len(i):
            # Brief pause so bursts of output are batched together.
            time.sleep(0.002)
    # Thread is exiting: clean up every remaining session.
    self.proc_buryall()
def ssh_command(login, executable="ssh"):
    """Build an ssh command line that logs `login` into localhost."""
    options = (
        ' -oPreferredAuthentications=keyboard-interactive,password'
        ' -oNoHostAuthenticationForLocalhost=yes'
        ' -oLogLevel=FATAL'
        ' -F/dev/null -l'
    )
    return executable + options + login + ' localhost'
class Session(object):
    """High-level handle for one terminal session backed by the shared
    Multiplexer instance."""
    _mux = None

    @classmethod
    def close_all(cls):
        """Shut down the shared multiplexer (and with it every session)."""
        Session._mux.stop()

    def __init__(self, cmd=None, width=80, height=24):
        if not Session._mux:
            Session._mux = Multiplexer()
        # BUGFIX: `cmd` was accepted but never stored, so calling start()
        # without an explicit command crashed with AttributeError on
        # `self.cmd`.
        self.cmd = cmd
        self._session_id = "%s-%s" % (time.time(), id(self))
        self._width = width
        self._height = height
        self._started = False

    def resize(self, width, height):
        """Record the new size; applied immediately if already started."""
        self._width = width
        self._height = height
        if self._started:
            self.keepalive()

    def start(self, cmd=None):
        """Spawn the session's process; returns True on success."""
        self._started = Session._mux.proc_keepalive(
            self._session_id, self._width, self._height, cmd or self.cmd)
        return self._started

    def close(self):
        """Terminate the session."""
        return Session._mux.proc_bury(self._session_id)

    # Alias kept for API compatibility.
    stop = close

    def is_alive(self):
        """True while the child process is running."""
        return Session._mux.session.get(self._session_id, {}).get('state') == 'alive'

    def keepalive(self):
        """Refresh the session timeout (and apply pending resizes)."""
        return Session._mux.proc_keepalive(self._session_id, self._width, self._height)

    def dump(self):
        """Return the current screen contents, or None when not alive."""
        if self.keepalive():
            return Session._mux.proc_dump(self._session_id)

    def write(self, data):
        """Send keyboard input to the session."""
        if self.keepalive():
            Session._mux.proc_write(self._session_id, data)

    def last_change(self):
        """Timestamp of the last screen change, or None."""
        return Session._mux.session.get(self._session_id, {}).get("changed", None)

    def pid(self):
        """Child process id, or None."""
        return Session._mux.session.get(self._session_id, {}).get("pid", None)
if __name__ == "__main__":
    # Smoke test: run a single command in a session and dump its screen.
    w, h = (80, 24)
    cmd = "/bin/ls --color=yes"
    multiplex = Multiplexer(cmd)
    sid = "session-id-%s"
    if multiplex.proc_keepalive(sid, w, h):
        # multiplex.proc_write(sid, k)
        time.sleep(1)
        # BUGFIX: the print call had been split across two lines during a
        # Python 2 -> 3 conversion, which printed a blank line and then
        # evaluated a useless tuple instead of the dump.
        print("Output:", multiplex.proc_dump(sid))
    multiplex.stop()
|
benchmarks.py | #!/usr/bin/env python
"""Compare new solver `gr1x` to `slugs`."""
import argparse
import datetime
import pprint
import logging
import multiprocessing as mp
import sys
import time
from dd import cudd
from omega.logic import bitvector as bv
from omega.symbolic import symbolic
from openpromela import slugs
import psutil
from tugs import solver
from tugs import utils
GR1X_LOG = 'tugs.solver'
def run_parallel():
    """Spawn batches of solver runs, one per CPU core, for a fixed problem.

    Runs `repetitions` rounds; each round launches one process per entry
    in `all_cpus`, pinned to that core, then waits for the round to join.
    """
    first = 2
    problem = 'synt15'
    # Renamed from `solver` to avoid shadowing the module-level
    # `tugs.solver` import.
    solver_name = 'gr1x'
    if solver_name == 'slugs':
        output = 'runs_slugs'
        target = run_slugs
    elif solver_name == 'gr1x':
        output = 'runs'
        target = run_gr1x
    else:
        raise ValueError('unknown solver "{s}"'.format(s=solver_name))
    i_str = '{i}'
    slugsin_path = '{problem}/slugsin/{problem}_{i}.txt'.format(
        problem=problem, i=i_str)
    details_path = '{problem}/{output}/details_{i}.txt'.format(
        problem=problem, output=output, i=i_str)
    # NOTE(review): this template has no {i} placeholder, so every run
    # shares a single strategy file -- confirm that is intentional.
    strategy_path = '{problem}/{output}/strategy.txt'.format(
        problem=problem, output=output, i=i_str)
    psutil_path = '{problem}/{output}/psutil_{i}.txt'.format(
        problem=problem, output=output, i=i_str)
    n_cpus = psutil.cpu_count(logical=False)
    # all_cpus = range(n_cpus)
    all_cpus = [0, 3, 4, 5, 6, 7]
    concurrent = len(all_cpus)
    repetitions = 10
    final = first + concurrent * repetitions
    print('will run from {first} to {final}'.format(
        first=first, final=final))
    assert concurrent <= n_cpus, (concurrent, n_cpus)
    n = first
    # BUGFIX: `xrange` does not exist on Python 3; `range` behaves the
    # same here (and also works on Python 2).
    for j in range(repetitions):
        m = n + concurrent
        print('spawn {n} to {last}'.format(n=n, last=m - 1))
        jobs = list()
        for i in range(n, m):
            d = dict(
                slugsin_file=slugsin_path.format(i=i),
                details_file=details_path.format(i=i),
                strategy_file=strategy_path,
                psutil_file=psutil_path.format(i=i))
            jobs.append(d)
        n = m
        procs = list()
        for d, cpu in zip(jobs, all_cpus):
            # Each worker gets pinned to exactly one core.
            d['affinity'] = [cpu]
            p = mp.Process(target=target, kwargs=d)
            procs.append(p)
        for p in procs:
            p.start()
        for p in procs:
            p.join()
    print('all joined')
def run_slugs(slugsin_file, strategy_file,
              psutil_file, details_file, affinity=None):
    """Run `slugs` for problem define in `slugsin_file`.

    Parameters:
        slugsin_file: input problem in slugsin format
        strategy_file: where the synthesized strategy is written
        psutil_file: log file receiving resource-usage records
        details_file: log file for the solver's own output
        affinity: optional list of CPU ids to pin the solver to
    """
    print('Starting: {fname}'.format(fname=slugsin_file))
    # other_options = ['--fixedPointRecycling']
    other_options = list()
    # config logging
    h_psutil = utils.add_logfile(psutil_file, 'openpromela.slugs')
    log = logging.getLogger('openpromela.slugs')
    log.setLevel('DEBUG')
    # capture execution environment
    versions = utils.snapshot_versions(check=False)
    log.info(pprint.pformat(versions))
    # run
    t0 = time.time()
    r = slugs._call_slugs(
        filename=slugsin_file,
        symbolic=True,
        strategy_file=strategy_file,
        affinity=affinity,
        logfile=details_file,
        other_options=other_options)
    t1 = time.time()
    dt = datetime.timedelta(seconds=t1 - t0)
    # close log files
    utils.close_logfile(h_psutil, 'openpromela.slugs')
    # `None` result means the specification was not realizable.
    assert r is not None, 'NOT REALISABLE !!!'
    print('Done with: {fname} in {dt}'.format(
        fname=slugsin_file, dt=dt))
def run_gr1x(slugsin_file, strategy_file,
             details_file, affinity=None, **kw):
    """Run `gr1x` for problem define in `slugsin_file`.

    Parameters:
        slugsin_file: input problem in slugsin format
        strategy_file: where the synthesized strategy is written
        details_file: file that receives the solver log
        affinity: optional list of CPU ids to pin this process to
        **kw: ignored (keeps the signature interchangeable with run_slugs)
    """
    win_set_file = 'winning_set'
    # Pin this worker process to the requested cores.
    proc = psutil.Process()
    proc.cpu_affinity(affinity)
    # log verbosity
    level = logging.INFO
    log = logging.getLogger(GR1X_LOG)
    log.setLevel(level)
    # dump log
    h = logging.FileHandler(details_file, mode='w')
    log.addHandler(h)
    # log versions
    versions = utils.snapshot_versions(check=False)
    log.info(pprint.pformat(versions))
    # synthesize
    with open(slugsin_file, 'r') as f:
        s = f.read()
    t0 = time.time()
    solver.solve_game(
        s,
        win_set_fname=win_set_file,
        strategy_fname=strategy_file)
    t1 = time.time()
    dt = datetime.timedelta(seconds=t1 - t0)
    print('Done with: {fname} in {dt}'.format(
        fname=slugsin_file, dt=dt))
    # close log file so the next run starts clean
    log.removeHandler(h)
    h.close()
    sys.stdout.flush()
def run_gr1x_slugs_comparison(slugsin_file):
    """Check that both solvers return same winning set.

    Runs gr1x and slugs on the same slugsin problem, compares the winning
    sets, then delegates strategy comparison to `compare_strategies`.
    """
    print('compare for: {f}'.format(f=slugsin_file))
    slugs_winning_set_file = 'winning_set_bdd.txt'
    slugs_strategy_file = 'slugs_strategy.txt'
    gr1x_strategy_file = 'gr1x_strategy.txt'
    utils.snapshot_versions()
    # call gr1x
    # BUGFIX: was open('slugsin_file', ...) -- the literal string instead
    # of the argument, so this always tried to read a file literally
    # named "slugsin_file".
    with open(slugsin_file, 'r') as f:
        slugsin = f.read()
    d = solver.parse_slugsin(slugsin)
    bdd = cudd.BDD()
    aut = solver.make_automaton(d, bdd)
    z = solver.compute_winning_set(aut)
    t = solver.construct_streett_transducer(z, aut)
    t.bdd.dump(t.action['sys'][0], gr1x_strategy_file)
    # call slugs
    symb = True
    slugs._call_slugs(slugsin_file, symb, slugs_strategy_file)
    # compare winning sets
    z_ = bdd.load(slugs_winning_set_file)
    assert z == z_, (z, z_)
    compare_strategies(
        slugsin, slugs_strategy_file, gr1x_strategy_file)
def compare_strategies(s, slugs_file, gr1x_file):
    """Check that both solvers return same strategy.

    `s` is the slugsin problem text; the file arguments are BDD dumps of
    the strategies produced by slugs and gr1x respectively.  Raises
    AssertionError when the strategies differ for any goal.
    """
    print('++ compare strategies')
    COUNTER = solver.COUNTER
    d = solver.parse_slugsin(s)
    n_goals = len(d['sys_win'])
    # BUGFIX: `xrange` does not exist on Python 3; `range` behaves the
    # same here (and also works on Python 2).
    aux_vars = ['{c}_{i}'.format(c=COUNTER, i=i)
                for i in range(n_goals)]
    aux_vars.extend(
        ['{c}{i}'.format(c=COUNTER, i=i)
         for i in range(n_goals)])
    aux_vars.append(solver.SELECTOR)
    dvars = d['input'] + d['output'] + aux_vars
    # add primed
    dvars.extend(["{var}'".format(var=var) for var in dvars])
    bdd = cudd.BDD()
    for var in dvars:
        bdd.add_var(var)
    print('load slugs file')
    p = bdd.load(slugs_file)
    print('load gr1x file')
    q = bdd.load(gr1x_file)
    print('compare')
    # Rename the slugs counter bits onto gr1x's naming convention.
    dvars = {
        '{c}{i}'.format(c=COUNTER, i=i):
            '{c}_{i}'.format(c=COUNTER, i=i)
        for i in range(n_goals)}
    p = cudd.rename(p, bdd, dvars)
    table = {COUNTER: dict(dom=(0, n_goals), type='int', owner='sys')}
    table, _, _ = bv.bitblast_table(table)
    # The strategies must agree goal by goal.
    for j in range(n_goals):
        u = symbolic.cofactor(p, COUNTER, j, bdd, table)
        v = symbolic.cofactor(q, COUNTER, j, bdd, table)
        assert u == v
    print('-- done comparing strategies')
def main():
    """Parse command-line options and dispatch solver runs."""
    p = argparse.ArgumentParser()
    p.add_argument('--min', default=2, type=int,
                   help='from this # of masters')
    p.add_argument('--max', default=2, type=int,
                   help='to this # of masters')
    p.add_argument('--debug', type=int, default=logging.ERROR,
                   help='python logging level')
    p.add_argument('--repeat', default=1, type=int,
                   help='multiple runs from min to max')
    # BUGFIX: `args.plot` was read below but `--plot` was never declared,
    # so the assertion always raised AttributeError.
    p.add_argument('--plot', action='store_true',
                   help='plot results (only allowed with --repeat 1)')
    p.add_argument('--solver', default='slugs', type=str,
                   choices=['slugs', 'gr1x', 'compare'])
    args = p.parse_args()
    # multiple runs should be w/o plots
    assert args.repeat == 1 or not args.plot
    # multiple runs
    # NOTE(review): the early return makes the dispatch below dead code;
    # kept as in the original -- it looks like a debugging shortcut.
    run_parallel()
    return
    for i in range(args.repeat):
        print('run: {i}'.format(i=i))
        if args.solver == 'slugs':
            run_slugs(args)
        elif args.solver == 'gr1x':
            run_gr1x(args)
        elif args.solver == 'compare':
            run_gr1x_slugs_comparison(args)
        else:
            raise Exception(
                'unknown solver: {s}'.format(s=args.solver))
if __name__ == '__main__':
main()
|
game_controller.py | import os
import threading
import time
import cv2
from utils.auto_settings import check_settings
from bot import Bot
from config import Config
from death_manager import DeathManager
from game_recovery import GameRecovery
from game_stats import GameStats
from health_manager import HealthManager
from logger import Logger
from messages import Messenger
from screen import grab, get_offset_state
from utils.restart import restart_game, kill_game
from utils.misc import kill_thread, set_d2r_always_on_top, restore_d2r_window_visibility
class GameController:
    """Orchestrates a botty run: owns the bot worker thread plus the
    health, death and recovery managers, and decides when to recover,
    restart D2R, or quit."""

    def __init__(self):
        # All threads and managers are created lazily in start();
        # None means "not running yet".
        self.is_running = False
        self.health_monitor_thread = None
        self.health_manager = None
        self.death_manager = None
        self.death_monitor_thread = None
        self.game_recovery = None
        self.game_stats = None
        self.game_controller_thread = None
        self.bot_thread = None
        self.bot = None

    def run_bot(self):
        """Run bot sessions in a loop; supervise for death/chicken/max game
        length / too many fails and either recover, restart, or exit."""
        # Start bot thread
        self.bot = Bot(self.game_stats)
        self.bot_thread = threading.Thread(target=self.bot.start)
        self.bot_thread.daemon = True
        self.bot_thread.start()
        # Register that thread to the death and health manager so they can stop the bot thread if needed
        self.death_manager.set_callback(lambda: self.bot.stop() or kill_thread(self.bot_thread))
        self.health_manager.set_callback(lambda: self.bot.stop() or kill_thread(self.bot_thread))
        do_restart = False
        messenger = Messenger()
        # Supervision loop: poll twice a second until a stop condition hits.
        while 1:
            self.health_manager.update_location(self.bot.get_curr_location())
            max_game_length_reached = self.game_stats.get_current_game_length() > Config().general["max_game_length_s"]
            max_consecutive_fails_reached = False if not Config().general["max_consecutive_fails"] else self.game_stats.get_consecutive_runs_failed() >= Config().general["max_consecutive_fails"]
            if max_game_length_reached or max_consecutive_fails_reached or self.death_manager.died() or self.health_manager.did_chicken():
                # Some debug and logging
                if max_game_length_reached:
                    Logger.info(f"Max game length reached. Attempting to restart {Config().general['name']}!")
                    if Config().general["info_screenshots"]:
                        cv2.imwrite("./info_screenshots/info_max_game_length_reached_" + time.strftime("%Y%m%d_%H%M%S") + ".png", grab())
                elif self.death_manager.died():
                    self.game_stats.log_death(self.death_manager._last_death_screenshot)
                elif self.health_manager.did_chicken():
                    self.game_stats.log_chicken(self.health_manager._last_chicken_screenshot)
                self.bot.stop()
                kill_thread(self.bot_thread)
                # Try to recover from whatever situation we are and go back to hero selection
                if max_consecutive_fails_reached:
                    msg = f"Consecutive fails {self.game_stats.get_consecutive_runs_failed()} >= Max {Config().general['max_consecutive_fails']}. Quitting botty."
                    Logger.error(msg)
                    if Config().general["custom_message_hook"]:
                        messenger.send_message(msg)
                    self.safe_exit(1)
                else:
                    do_restart = self.game_recovery.go_to_hero_selection()
                break
            time.sleep(0.5)
        self.bot_thread.join()
        if do_restart:
            # Reset flags before running a new bot
            self.death_manager.reset_death_flag()
            self.health_manager.reset_chicken_flag()
            self.game_stats.log_end_game(failed=max_game_length_reached)
            # Recursion starts the next game session.
            return self.run_bot()
        else:
            if Config().general["info_screenshots"]:
                cv2.imwrite("./info_screenshots/info_could_not_recover_" + time.strftime("%Y%m%d_%H%M%S") + ".png", grab())
            if Config().general['restart_d2r_when_stuck']:
                Logger.error("Could not recover from a max game length violation. Restarting the Game.")
                if Config().general["custom_message_hook"]:
                    messenger.send_message("Got stuck and will now restart D2R")
                if restart_game(Config().general["d2r_path"]):
                    self.game_stats.log_end_game(failed=max_game_length_reached)
                    # Re-initialize monitors against the freshly started game.
                    if self.setup_screen():
                        self.start_health_manager_thread()
                        self.start_death_manager_thread()
                        self.game_recovery = GameRecovery(self.death_manager)
                        return self.run_bot()
                Logger.error("Could not restart the game. Quitting.")
                messenger.send_message("Got stuck and could not restart the game. Quitting.")
            else:
                Logger.error("Could not recover from a max game length violation. Quitting botty.")
                if Config().general["custom_message_hook"]:
                    messenger.send_message("Got stuck and will now quit botty")
            self.safe_exit(1)

    def start(self):
        """Wire up all managers/threads and start the bot supervisor."""
        # Check if we user should update the d2r settings
        diff = check_settings()
        if len(diff) > 0:
            Logger.warning("Your D2R settings differ from the requiered ones. Please use Auto Settings to adjust them. The differences are:")
            Logger.warning(f"{diff}")
        set_d2r_always_on_top()
        self.setup_screen()
        self.start_health_manager_thread()
        self.start_death_manager_thread()
        self.game_recovery = GameRecovery(self.death_manager)
        self.game_stats = GameStats()
        self.start_game_controller_thread()
        self.is_running = True

    def stop(self):
        """Kill all worker threads and mark the controller stopped."""
        restore_d2r_window_visibility()
        if self.death_monitor_thread: kill_thread(self.death_monitor_thread)
        if self.health_monitor_thread: kill_thread(self.health_monitor_thread)
        if self.bot_thread: kill_thread(self.bot_thread)
        if self.game_controller_thread: kill_thread(self.game_controller_thread)
        self.is_running = False

    def setup_screen(self):
        """Return True when the screen offsets could be determined."""
        if get_offset_state():
            return True
        return False

    def start_health_manager_thread(self):
        """Spawn the daemon thread that watches health/chicken state."""
        # Run health monitor thread
        self.health_manager = HealthManager()
        self.health_monitor_thread = threading.Thread(target=self.health_manager.start_monitor)
        self.health_monitor_thread.daemon = True
        self.health_monitor_thread.start()

    def start_death_manager_thread(self):
        """Spawn the daemon thread that watches for character death."""
        # Run death monitor thread
        self.death_manager = DeathManager()
        self.death_monitor_thread = threading.Thread(target=self.death_manager.start_monitor)
        self.death_monitor_thread.daemon = True
        self.death_monitor_thread.start()

    def start_game_controller_thread(self):
        """Spawn the (non-daemon) thread running run_bot()."""
        # Run game controller thread
        self.game_controller_thread = threading.Thread(target=self.run_bot)
        self.game_controller_thread.daemon = False
        self.game_controller_thread.start()

    def toggle_pause_bot(self):
        """Pause/resume the bot if one is running."""
        if self.bot:
            self.bot.toggle_pause()

    def safe_exit(self, error_code=0):
        """Kill the game process and hard-exit the interpreter."""
        kill_game()
        os._exit(error_code)
|
perception.py | __author__ = 'Mehmet Mert Yildiran, mert.yildiran@bil.omu.edu.tr'
import datetime # Supplies classes for manipulating dates and times in both simple and complex ways.
import imutils # A series of convenience functions to make basic image processing functions such as translation, rotation, resizing, skeletonization etc.
import time # Provides various time-related functions.
import cv2 # (Open Source Computer Vision) is a library of programming functions mainly aimed at real-time computer vision.
import numpy # The fundamental package for scientific computing with Python.
import os # Provides a portable way of using operating system dependent functionality.
import multiprocessing # A package that supports spawning processes using an API similar to the threading module.
from cerebrum.vision.utilities import VisionMemoryUtil # BUILT-IN Memory operations package
import random # Pseudo-random number generators for various distributions.
import tkinter
STABILIZATION_DETECTION = 5 # Number of frames to detect stabilization
NON_STATIONARY_PERCENTAGE = 70 # Percentage of frame for detecting NON-STATIONARY CAMERA. Like: ( height * width * float(X) / float(100) )
NON_ZERO_PERCENTAGE = 0 # Percentage of frame(threshold) for detecting unnecessary movement
TARGET_HEIGHT = 360 # Number of horizontal lines for target video and processing. Like 720p, 360p etc.
MIN_AREA = 500 # Minimum area in square pixels to detect a motion
# NOTE(review): creating the Tk root at import time is a side effect --
# it requires a display server to be available when this module loads.
root = tkinter.Tk()
SCREEN_WIDTH = root.winfo_screenwidth()
SCREEN_HEIGHT = root.winfo_screenheight()
class VisionPerception():
    """Motion-based visual perception.

    Watches a camera or video stream, detects motion against a reference
    frame, and hands stabilized motion episodes to VisionMemoryUtil in a
    background process.
    """
    # MAIN CODE BLOCK
    @staticmethod
    def start(video_input, vision_perception_stimulated):
        """Run the perception loop until the stream ends or ESC/q is pressed.

        Parameters:
            video_input: the string "0" for the webcam, otherwise a
                video file path
            vision_perception_stimulated: shared flag object (has a
                `.value`) set to 1 while motion is ongoing, 0 otherwise
        """
        if video_input == "0": # If the video_input is None, then we are reading from webcam
            camera = cv2.VideoCapture(0)
            time.sleep(0.25)
        else: # Otherwise, we are reading from a video file
            time.sleep(0.25)
            camera = cv2.VideoCapture(video_input)
        referenceFrame = None # Initialize the reference frame in the video stream
        starting_time = None
        memory_data_thresh = []
        memory_data_frameDeltaColored = []
        (grabbed, first_frame) = camera.read() # Grab the first frame
        height, width = first_frame.shape[:2] # Get video height and width from first frame(size)
        #if not height == 720 or not width == 1280:
        if float(width) / float(height) != float(16) / float(9):
            if video_input == "0":
                # There is a STUPIDTY in here
                pass
            else:
                raise ValueError('Aspect ratio of input stream must be [16:9]')
                #warnings.warn("Aspect ratio of input stream must be [16:9]")
        frame_counter = 1 # Define frame counter variable
        motion_detected = 0 # Delta situation checking variable
        delta_value_stack = [] # List of delta values
        non_stationary_camera = 0
        motion_counter = 0
        nonzero_toolow = 0
        beginning_of_stream = datetime.datetime.now()
        while True: # Loop over the frames of the video
            (grabbed, frame) = camera.read() # Grab the current frame and initialize the occupied/unoccupied
            if not grabbed: # If the frame could not be grabbed, then we have reached the end of the video
                break
            frame_counter += 1 # Increase frame counter's value
            if video_input == "0":
                # If we are capturing from camera fuck Time Correction, there is also a STUPIDTY in here
                pass
            else:
                # -------------------- TIME CORRECTION --------------------
                # Keep playback of a file in step with wall-clock time:
                # sleep when ahead, drop a frame when behind.
                # NOTE(review): cv2.cv.CV_CAP_PROP_FPS is the OpenCV 2.x
                # constant; modern cv2 exposes cv2.CAP_PROP_FPS -- confirm
                # the targeted OpenCV version.
                time_delta = datetime.datetime.now() - beginning_of_stream
                current_time_of_realworld = time_delta.seconds + time_delta.microseconds / float(1000000)
                current_time_of_stream = frame_counter / camera.get(cv2.cv.CV_CAP_PROP_FPS)
                diff_of_time = current_time_of_stream - current_time_of_realworld
                if abs(diff_of_time) > (1 / camera.get(cv2.cv.CV_CAP_PROP_FPS)):
                    if diff_of_time > 0:
                        time.sleep(1 / camera.get(cv2.cv.CV_CAP_PROP_FPS))
                    else:
                        (grabbed, frame) = camera.read() # Grab the current frame and initialize the occupied/unoccupied
                        if not grabbed: # If the frame could not be grabbed, then we have reached the end of the video
                            break
                        frame_counter += 1 # Increase frame counter's value
                        continue
                # -------------------- TIME CORRECTION --------------------
            delta_value = 0 # Delta Value for storing max continuous contour area for current frame
            frame = imutils.resize(frame, height=TARGET_HEIGHT) # Resize frame to 360p. Alternative resizing method:
            height, width = frame.shape[:2] # Get video height and width from first frame(size)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert frame to grayscale
            gray = cv2.bilateralFilter(gray,9,75,75) # Blur current frame with Bilateral Filter for noise reduction
            if referenceFrame is None: # If Reference Frame is None, initialize it
                referenceFrame = gray
                continue
            frameDelta = cv2.absdiff(referenceFrame, gray) # Compute the absolute difference between the current frame and reference frame
            thresh = cv2.threshold(frameDelta, 12, 255, cv2.THRESH_BINARY)[1] # Apply OpenCV's threshold function to get binary frame
            thresh = cv2.dilate(thresh, None, iterations=1) # Dilate the thresholded image to fill in holes
            frameDeltaColored = cv2.bitwise_and(frame,frame, mask= thresh) # Bitwise and - to get delta frame
            # Find contours on thresholded image
            # NOTE(review): the two-value unpack matches OpenCV 2.x/4.x;
            # OpenCV 3.x returns three values here -- confirm version.
            (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
            contour_area_stack = [] # List of contour areas's values
            # Loop over the contours
            if cnts:
                for c in cnts: # Contour in Contours
                    contour_area_stack.append(cv2.contourArea(c)) # Calculate contour area and append to contour stack
                    if cv2.contourArea(c) > MIN_AREA: # If contour area greater than min area
                        (x, y, w, h) = cv2.boundingRect(c) # Compute the bounding box for this contour
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) # Draw it on the frame
                delta_value = max(contour_area_stack) # Assign max contour area to delta value
                if delta_value > MIN_AREA: # If max contour area (delta value) greater than min area
                    motion_detected = 1 # Initialize delta situation
                if delta_value > (height * width * float(NON_STATIONARY_PERCENTAGE) / float(100)): # If delta value is too much
                    non_stationary_camera = 1
                    status_text = "WARNING: NON-STATIONARY CAMERA"
                    frameDeltaColored = numpy.zeros_like(frame)
                else:
                    non_stationary_camera = 0
            if cv2.countNonZero(thresh) < (height * width * float(NON_ZERO_PERCENTAGE) / float(100)): # If Non Zero count is too low
                nonzero_toolow = 1
                status_text = "WARNING: NON-ZERO TOO LOW"
                frameDeltaColored = numpy.zeros_like(frame)
            else:
                nonzero_toolow = 0
            if motion_detected: # If we are on delta situation
                if starting_time is None:
                    starting_time = datetime.datetime.now() # Starting time of the memory
                vision_perception_stimulated.value = 1 # Vision perception stimulated
                # Randomly subsample roughly a third of the motion frames.
                if random.randint(0,2) == 1: # IMPORTANT
                    memory_data_thresh.append(thresh.tostring())
                    memory_data_frameDeltaColored.append(frameDeltaColored.tostring())
                #print type(memory_data_thresh[0])
                if not non_stationary_camera:
                    status_text = "MOTION DETECTED"
                delta_value_stack.append(delta_value) # Append max contour area (delta value) to delta value stack
                if len(delta_value_stack) >= STABILIZATION_DETECTION: # If length of delta value stack is greater than or equal to STABILIZATION_DETECTION constant
                    delta_value_stack.pop(0) # Pop first element of delta value stack
                    # If minimum delta value is greater than (mean of last 5 frame - minimum area / 2) and maximum delta value is less than (mean of last 5 frame + minimum area / 2)
                    if min(delta_value_stack) > (numpy.mean(delta_value_stack) - MIN_AREA / 2) and max(delta_value_stack) < (numpy.mean(delta_value_stack) + MIN_AREA / 2):
                        ending_time = datetime.datetime.now() # Ending time of the memory
                        vision_perception_stimulated.value = 0 # Vision perception NOT stimulated
                        if memory_data_thresh and memory_data_frameDeltaColored:
                            # Persist the finished episode off the hot loop.
                            process4 = multiprocessing.Process(target=VisionMemoryUtil.add_memory, args=(memory_data_thresh, memory_data_frameDeltaColored, starting_time, ending_time)) # Define write memory process
                            process4.start() # Start write memory process
                            memory_data_thresh = []
                            memory_data_frameDeltaColored = []
                            starting_time = None
                        motion_detected = 0 # Then video STABILIZED
                        delta_value_stack = [] # Empty delta value stack
                        referenceFrame = None # Clear reference frame
                        if not non_stationary_camera and not nonzero_toolow:
                            motion_counter += 1
            else:
                if not non_stationary_camera and not nonzero_toolow:
                    status_text = "MOTION UNDETECTED"
                frameDeltaColored = numpy.zeros_like(frame)
            # Draw the text and timestamp on the frame
            cv2.putText(frame, "Diff : {}".format(delta_value), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            cv2.putText(frame, "Thresh : {}".format(MIN_AREA), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            cv2.putText(frame, "Frame : {}".format(frame_counter), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            cv2.putText(frame, "Status : {}".format(status_text), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
            cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            # Show the frames and record if the user presses ESC or q
            # NOTE(review): the window offsets use `/` division -- on
            # Python 3 this yields floats; confirm cv2.moveWindow accepts
            # them, or this code targets Python 2.
            cv2.imshow("Original Frame", frame)
            cv2.moveWindow("Original Frame",50 * SCREEN_WIDTH / 1920,100 * SCREEN_HEIGHT / 1080)
            cv2.imshow("Frame Threshhold", thresh)
            cv2.moveWindow("Frame Threshhold",50 * SCREEN_WIDTH / 1920,550 * SCREEN_HEIGHT / 1080)
            cv2.imshow("Frame Delta", frameDelta)
            cv2.moveWindow("Frame Delta",1200 * SCREEN_WIDTH / 1920,550 * SCREEN_HEIGHT / 1080)
            cv2.imshow("Frame Delta Colored", frameDeltaColored)
            cv2.moveWindow("Frame Delta Colored",1200 * SCREEN_WIDTH / 1920,100 * SCREEN_HEIGHT / 1080)
            key = cv2.waitKey(1) & 0xFF
            # if the `ESC` or `q` key is pressed, break the loop
            if key == ord("q") or key == ord("\x1b"):
                os.system("killall python") # Temporary line for practicality in DEVELOPMENT
                break
        cv2.destroyAllWindows() # Close any open windows
        camera.release() # Release the capture device
|
multinode_runner.py | import fabric
from fabric import Connection
from .hostinfo import HostInfo, HostInfoList
from multiprocessing import Pipe, Process
from multiprocessing import connection as mp_connection
import click
def run_on_host(hostinfo: HostInfo, workdir: str, recv_conn: mp_connection.Connection,
                send_conn: mp_connection.Connection, env: dict) -> None:
    """
    Use fabric connection to execute command on local or remote hosts.
    Args:
        hostinfo (HostInfo): host information
        workdir (str): the directory to execute the command
        recv_conn (multiprocessing.connection.Connection): receive messages from the master sender
        send_conn (multiprocessing.connection.Connection): send messages to the master receiver
        env (dict): a dictionary for environment variables
    """
    fab_conn = fabric.Connection(hostinfo.hostname, port=hostinfo.port)
    finish = False
    env_msg = ' '.join([f'{k}=\"{v}\"' for k, v in env.items()])
    # keep listening until exit
    while not finish:
        # receive cmd
        cmds = recv_conn.recv()
        if cmds == 'exit':
            # exit from the loop
            finish = True
            break
        else:
            # execute the commands
            try:
                # cd to execute directory
                with fab_conn.cd(workdir):
                    # propagate the runtime environment
                    with fab_conn.prefix(f"export {env_msg}"):
                        if hostinfo.is_local_host:
                            # execute on the local machine
                            fab_conn.local(cmds, hide=False)
                        else:
                            # execute on the remote machine
                            fab_conn.run(cmds, hide=False)
                send_conn.send('success')
            # BUGFIX: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit and hid the failure cause; catch Exception and
            # include the error in the report.
            except Exception as e:
                click.echo(f"Error: failed to run {cmds} on {hostinfo.hostname}: {e}")
                send_conn.send('failure')
    # shutdown
    send_conn.send("finish")
    fab_conn.close()
class MultiNodeRunner:
    """
    A runner to execute commands on an array of machines. This runner
    is inspired by Nezha (https://github.com/zhuzilin/NeZha).
    """

    def __init__(self):
        # All three maps are keyed by hostname.
        self.processes = {}
        self.master_send_conns = {}
        self.master_recv_conns = {}

    def connect(self, host_info_list: HostInfoList, workdir: str, env: dict) -> None:
        """
        Establish connections to a list of hosts

        Args:
            host_info_list (HostInfoList): a list of HostInfo objects
            workdir (str): the directory where command is executed
            env (dict): environment variables to propagate to hosts
        """
        for info in host_info_list:
            # One duplex pipe per direction: master->worker and worker->master.
            send_end, worker_recv = Pipe()
            recv_end, worker_send = Pipe()
            worker = Process(target=run_on_host,
                             args=(info, workdir, worker_recv, worker_send, env))
            worker.start()
            key = info.hostname
            self.processes[key] = worker
            self.master_recv_conns[key] = recv_end
            self.master_send_conns[key] = send_end

    def send(self, hostinfo: HostInfo, cmd: str) -> None:
        """
        Send a command to a local/remote host.

        Args:
            hostinfo (HostInfo): host information
            cmd (str): the command to execute
        """
        assert hostinfo.hostname in self.master_send_conns, \
            f'{hostinfo} is not found in the current connections'
        self.master_send_conns[hostinfo.hostname].send(cmd)

    def stop_all(self) -> None:
        """
        Stop connections to all hosts.
        """
        for pipe in self.master_send_conns.values():
            pipe.send('exit')

    def recv_from_all(self) -> dict:
        """
        Receive messages from all hosts

        Returns:
            msg_from_node (dict): a dictionry which contains messages from each node
        """
        return {host: pipe.recv() for host, pipe in self.master_recv_conns.items()}
|
cli.py | #!/usr/bin/env python3
import sys
import time
import threading
import requests as rq
import json
from base64 import b64decode
import click
import six
import questionary
from questionary import ValidationError, Validator, Choice
from prompt_toolkit.styles import Style
from pyfiglet import figlet_format
try:
import colorama
colorama.init()
except ImportError:
colorama = None
try:
from termcolor import colored
except ImportError:
colored = None
class Spinner:
    """Context manager that shows a rotating ASCII cursor on stdout while
    a long-running operation is in progress."""
    busy = False
    delay = 0.1

    @staticmethod
    def spinning_cursor():
        """Endlessly cycle through the four spinner glyphs."""
        while 1:
            for cursor in '|/-\\':
                yield cursor

    def __init__(self, delay=None):
        self.spinner_generator = self.spinning_cursor()
        # Worker thread handle, set by __enter__.
        self._thread = None
        if delay and float(delay):
            self.delay = delay

    def spinner_task(self):
        """Worker loop: draw a glyph, wait, erase it, repeat while busy."""
        while self.busy:
            sys.stdout.write(next(self.spinner_generator))
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b')
            sys.stdout.flush()

    def __enter__(self):
        self.busy = True
        # BUGFIX: keep a reference to the thread so it can be joined, and
        # make it a daemon so an aborted caller cannot hang interpreter
        # shutdown on a stray spinner thread.
        self._thread = threading.Thread(target=self.spinner_task, daemon=True)
        self._thread.start()
        return self

    def __exit__(self, exception, value, tb):
        self.busy = False
        # Wait for the worker's final erase instead of sleeping blindly.
        if self._thread is not None:
            self._thread.join()
        else:
            time.sleep(self.delay)
        if exception is not None:
            return False
# custom style for question properties (questionary/prompt_toolkit theme)
custom_style = Style([
    ('qmark', 'fg:#fac731 bold'), # token in front of the question
    ('question', ''), # question text
    # pointer used in select and checkbox prompts
    ('pointer', 'fg:#673ab7 bold'),
    # pointed-at choice in select and checkbox prompts
    ('highlighted', 'fg:#673ab7 bold'),
    ('selected', 'fg:#0abf5b'), # style for a selected item of a checkbox
    ('separator', 'fg:#cc5454'), # separator in lists
    # user instructions for select, rawselect, checkbox
    ('instruction', ''),
    ('text', 'fg:#4688f1 bold'), # plain text
    # disabled choices for select and checkbox prompts
    ('disabled', 'fg:#858585 italic'),
    # submitted answer text behind the question
    ('answer', 'fg:#f44336 bold'),
])
# api routes/endpoints
# Static description of the Equatorial Energia customer API: auth token
# endpoint, bills listing, and second-copy PDF retrieval.  The body dict
# under 'auth' is copied and filled in per request by getToken().
routes = {
    'auth': {
        'route': 'https://api-pa-cliente.equatorialenergia.com.br/auth/connect/token',
        'headers': {
            'Authorization': 'Basic Y2VtYXI6RXF0QENlbWFy'
        },
        'body': {
            'grant_type': 'password',
            'username': '',
            'password': '',
            'navegador': 'browser',
            'dispositivo': 'device',
            'empresaId': '+'
        }
    },
    'bills': {
        'route': 'https://api-pa-cliente.equatorialenergia.com.br/api/v1/debitos/',
        'options': {
            'open_bills': '?listarEmAberto=true',
            'all_bills': '?listarEmAberto=false'
        }
    },
    'pdf': {
        'route': 'https://api-pa-cliente.equatorialenergia.com.br/api/v1/faturas/segunda-via/',
        'options': {
            'show_url': '?showUrl=true'
        }
    }
}
def getToken(username, password):
    """
    getToken - Get authentication bearer token.

    Posts 'username' and 'password' to the auth endpoint and returns the
    parsed token object.

    Parameters:
        username (str): Username to login (cpf).
        password (str): Password of given username (born date).

    Returns:
        token (list): List object of token
    """
    auth = routes['auth']
    # build the request body from the template, overriding the credentials;
    # the '1:' prefix is the company id expected by the API
    body = dict(auth['body'], username='1:' + username, password=password)
    response = rq.post(
        url=auth['route'],
        data=body,
        headers=auth['headers']
    )
    # parse and return the JSON token payload
    return json.loads(response.text)
def extractUserDataFromToken(token):
    """Decode the JWT payload of the access token and return its user data.

    Parameters:
        token (dict): Token object containing an 'access_token' JWT string.

    Returns:
        dict: The 'userData' object embedded in the token payload.
    """
    # the JWT payload is the second dot-separated segment
    payload = token['access_token'].split('.')[1]
    # JWTs strip base64 '=' padding; restore it so b64decode accepts it
    payload += '=' * ((4 - len(payload) % 4) % 4)
    decoded = b64decode(payload)
    return json.loads(decoded)['userData']
def getUcs(personal_data):
    """Build the list of contract accounts (UCs) from the user's data.

    Parameters:
        personal_data (dict): Decoded token user data with 'ContasContrato'.

    Returns:
        list: One dict per contract with 'numero' and a combined 'endereco'
        string ("street, district, city").
    """
    return [
        {
            'numero': contrato['Numero'],
            'endereco': ', '.join(
                (contrato['Endereco'], contrato['Bairro'], contrato['Cidade'])
            ),
        }
        for contrato in personal_data['ContasContrato']
    ]
def getOpenBills(ucs):
    """Fetch the open (unpaid) bills for each contract account.

    NOTE(review): this hits the bare debits route without the
    routes['bills']['options']['open_bills'] query string — presumably the
    endpoint defaults to open bills; confirm against the API.

    Parameters:
        ucs (list): Contract account numbers.

    Returns:
        dict: UC number -> list of bills, or a message string when the
        response is not a successful JSON payload.
    """
    open_bills = {}
    for uc in ucs:
        response = rq.get(
            url=routes['bills']['route'] + uc
        )
        ok = (response.status_code == 200
              and 'application/json' in response.headers['Content-Type'])
        if ok:
            open_bills[uc] = json.loads(response.text)['data']['faturas']
        else:
            open_bills[uc] = 'Não há faturas em aberto para esta conta contrato.'
    return open_bills
def getAllBills(uc):
    """Fetch the full bill history (open and paid) for a single UC.

    Parameters:
        uc (str): Contract account number.

    Returns:
        dict: Parsed JSON response from the debits endpoint.
    """
    url = ''.join((
        routes['bills']['route'],
        uc,
        routes['bills']['options']['all_bills'],
    ))
    return json.loads(rq.get(url=url).text)
def getBillPdf(bill_num, token):
    """Request the PDF ("segunda via") payload for a single bill.

    Parameters:
        bill_num (str): Bill number ('numeroFatura').
        token (dict): Auth token object with 'token_type' and 'access_token'.

    Returns:
        dict: Parsed JSON response containing the bill PDF data.
    """
    # BUG FIX: the query-string option lives at routes['pdf']['options'],
    # not under the route *string* — the previous
    # routes['pdf']['route']['options'] indexed a str and raised TypeError.
    get_bill_pdf_url = (routes['pdf']['route'] + bill_num +
                        routes['pdf']['options']['show_url'])
    get_bill_pdf_headers = {
        'Authorization': token['token_type'] + ' ' + token['access_token']
    }
    bill_text = rq.get(url=get_bill_pdf_url, headers=get_bill_pdf_headers).text
    return json.loads(bill_text)
def saveBillPdf(bill_data, period, name='fatura_equatorial'):
"""
saveBillPdf - Save bills as '.pdf' file.
Get 'bill_data', transform in bytes and save it as pdf with 'name' + month|year based on 'period'.
i.e: saveBillPdf(bill_data, period='04/2020', name='bill_test') will generate a file named 'bill_test - 04/2020.pdf'
Parameters:
bill_data (list): List object describing bill.
period (str): Bill period.
name (str): Desired filename.
"""
# encode bill_data as bytes
bytes = b64decode(bill_data['data']['base64'], validate=True)
# get month and year vars for file naming
year, month = period.split('/')
# set path str to save file
path = '{n} - {m}|{y}.pdf'.format(n=name, m=month, y=year)
# try to save file
try:
f = open(path, 'wb')
f.write(bytes)
f.close()
print('Arquivo {path} salvo com sucesso!'.format(path=path))
except Exception as e:
raise(e)
def log(string, color, font="slant", figlet=False):
    """Print `string` to stdout, colorized when `colored` is available.

    When `figlet` is True the text is rendered as figlet ASCII art first.
    """
    # presumably `colored` is falsy when the optional dependency is
    # missing — verify at the import site
    if not colored:
        six.print_(string)
    elif figlet:
        six.print_(colored(figlet_format(string, font=font), color))
    else:
        six.print_(colored(string, color))
class EmptyValidator(Validator):
    """questionary validator that rejects empty answers."""

    def validate(self, value):
        # guard clause: empty input is a validation failure
        if not value.text:
            raise ValidationError(
                message="Esse campo é de preenchimento obrigatório.",
                cursor_position=len(value.text))
        return True
def askUcs(personal_data):
    """Prompt the user to pick one contract account, or all of them.

    Parameters:
        personal_data (dict): Decoded token user data.

    Returns:
        list: Selected UC numbers (every UC when 'all' is chosen).
    """
    ucs = getUcs(personal_data)
    uc_choices = [
        Choice(
            title='{num} - {end}'.format(num=uc['numero'], end=uc['endereco']),
            value=uc['numero']
        )
        for uc in ucs
    ]
    # sentinel entry that selects every contract at once
    uc_choices.append(
        Choice(
            title='Todos os contratos',
            value='all'
        )
    )
    selected_uc = questionary.select(
        message='Selecione um contrato para emissão de faturas pendentes:',
        choices=uc_choices,
        style=custom_style
    ).ask()
    if selected_uc == 'all':
        # every real UC, i.e. all choice values except the trailing 'all'
        return [choice.value for choice in uc_choices[:-1]]
    return [selected_uc]
def askPersonalData():
    """Prompt for the account holder's CPF and birth date.

    Returns:
        tuple: (cpf, born_date), both stripped of surrounding whitespace.
    """
    prompts = (
        'Insira o CPF do titular',
        'Insira a Data de Nascimento do Titular (AAAA-MM-DD)',
    )
    answers = [
        questionary.text(
            message=message,
            validate=EmptyValidator,
            style=custom_style
        ).ask().strip()
        for message in prompts
    ]
    return tuple(answers)
def saveOpenBills(uc_bills_dict, token):
    """Download and save the PDF of every open bill in `uc_bills_dict`.

    Parameters:
        uc_bills_dict (dict): UC number -> bill list, or a message string
            when the UC has no open bills (see getOpenBills).
        token (dict): Auth token used by the PDF endpoint.

    Raises:
        Exception: When a bill PDF cannot be saved to disk.
    """
    # iterate items() instead of indexing by key on every access
    for uc, bills in uc_bills_dict.items():
        # a non-list value is the "no open bills" message from getOpenBills
        if not isinstance(bills, list):
            print('Sem faturas abertas para a uc {uc}!'.format(uc=uc))
            continue
        for bill in bills:
            bill_data = getBillPdf(bill['numeroFatura'], token)
            try:
                saveBillPdf(
                    bill_data=bill_data,
                    period=bill['competencia'],
                    name='fatura_equatorial_{uc}'.format(uc=uc)
                )
            except Exception as e:
                # chain the cause explicitly for a clearer traceback
                raise Exception(
                    "Ocorreu um erro ao salvar a fatura: %s" % (e)) from e
@click.command()
def main():
    """
    Cli básica para exibição/emissão de faturas da Equatorial Energia - Pará
    """
    # (docstring above is the click --help text; kept verbatim)
    log("Equatorial/PA CLI", color="blue", figlet=True)
    log("Bem Vindo ao Equatorial/PA CLI", "green")
    cpf, born_date = askPersonalData()
    # authenticate and decode the user's contract data from the JWT
    with Spinner():
        token = getToken(cpf, born_date)
        personal_data = extractUserDataFromToken(token)
    selected_uc = askUcs(personal_data)
    with Spinner():
        uc_bills_dict = getOpenBills(selected_uc)
    # the previous `try: ... except Exception as e: raise(e)` was a no-op
    # wrapper; errors now propagate unchanged
    with Spinner():
        saveOpenBills(uc_bills_dict, token)


if __name__ == '__main__':
    main()
|
test_random.py | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy import random
import sys
import warnings
class TestSeed(TestCase):
    """Seeding behaviour of np.random.RandomState."""

    def test_scalar(self):
        # first draw is pinned for scalar seeds at both ends of the range
        for seed, expected in ((0, 684), (4294967295, 419)):
            rs = np.random.RandomState(seed)
            assert_equal(rs.randint(1000), expected)

    def test_array(self):
        # array-like seeds: ranges, ndarrays and lists all hash the same way
        cases = (
            (range(10), 468),
            (np.arange(10), 468),
            ([0], 973),
            ([4294967295], 265),
        )
        for seed, expected in cases:
            rs = np.random.RandomState(seed)
            assert_equal(rs.randint(1000), expected)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, -0.5)
        assert_raises(ValueError, np.random.RandomState, -1)

    def test_invalid_array(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, [-0.5])
        for bad_seed in ([-1], [4294967296], [1, 2, 4294967296],
                         [1, -2, 4294967296]):
            assert_raises(ValueError, np.random.RandomState, bad_seed)
class TestBinomial(TestCase):
    """Corner cases of the binomial distribution."""

    def test_n_zero(self):
        # binomial(0, p) should be zero for any p in [0, 1].
        # This test addresses issue #3480.
        zeros = np.zeros(2, dtype='int')
        for p in (0, .5, 1):
            assert_(random.binomial(0, p) == 0)
            assert_array_equal(random.binomial(zeros, p), zeros)

    def test_p_is_nan(self):
        # Issue #4571: a NaN probability must be rejected.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
    """Basic behaviour and size handling of np.random.multinomial."""

    def test_basic(self):
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # zero-probability categories are allowed
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        assert_(-5 <= random.randint(-5, -1) < -1)
        draws = random.randint(-5, -1, 5)
        assert_(np.all(draws >= -5))
        assert_(np.all(draws < -1))

    def test_size(self):
        # gh-3173: `size` accepts integer-like scalars and shape sequences
        p = [0.5, 0.5]
        for _ in range(3):
            assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape,
                         (1, 2))
        for shape in ([2, 2], (2, 2), np.array((2, 2))):
            assert_equal(np.random.multinomial(1, p, shape).shape, (2, 2, 2))
        # NOTE(review): np.float is a deprecated alias removed in newer
        # NumPy releases; this targets the NumPy version under test.
        assert_raises(TypeError, np.random.multinomial, 1, p,
                      np.float(1))
class TestSetState(TestCase):
    """get_state/set_state round-trip behaviour of RandomState."""

    def setUp(self):
        self.seed = 1234567890
        self.prng = random.RandomState(self.seed)
        # snapshot taken immediately after seeding; tests restore it to
        # replay the stream
        self.state = self.prng.get_state()

    def test_basic(self):
        # restoring the state must reproduce the exact same draws
        old = self.prng.tomaxint(16)
        self.prng.set_state(self.state)
        new = self.prng.tomaxint(16)
        assert_(np.all(old == new))

    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        old = self.prng.standard_normal(size=3)
        self.prng.set_state(self.state)
        new = self.prng.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.prng.standard_normal()
        state = self.prng.get_state()
        old = self.prng.standard_normal(size=3)
        self.prng.set_state(state)
        new = self.prng.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_backwards_compatibility(self):
        # Make sure we can accept old state tuples that do not have the
        # cached Gaussian value.
        old_state = self.state[:-2]
        x1 = self.prng.standard_normal(size=16)
        self.prng.set_state(old_state)
        x2 = self.prng.standard_normal(size=16)
        self.prng.set_state(self.state)
        x3 = self.prng.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))

    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
    """Bounds checking, dtype handling and reproducibility of randint."""

    # function under test
    rfunc = np.random.randint

    # valid integer/boolean types
    itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]

    def test_unsupported_type(self):
        # NOTE(review): np.float is a deprecated alias removed in newer
        # NumPy releases; this suite targets an older NumPy.
        assert_raises(TypeError, self.rfunc, 1, dtype=np.float)

    def test_bounds_checking(self):
        # out-of-range bounds and empty/inverted ranges must all raise
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)

    def test_rng_zero_and_extremes(self):
        # a width-1 range must always return its single value, at both
        # extremes and at the midpoint of each dtype's range
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = (lbnd + ubnd)//2
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)

    def test_full_range(self):
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            try:
                self.rfunc(lbnd, ubnd, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_in_bounds_fuzz(self):
        # Don't use fixed seed
        np.random.seed()
        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)
        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)

    def test_repeatability(self):
        import hashlib
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but np.bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
               'int16': '1b7741b80964bb190c50d541dca1cac1',
               'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'int64': '17db902806f448331b5a758d7d2ee672',
               'int8': '27dd30c4e08a797063dffac2490b0be6',
               'uint16': '1b7741b80964bb190c50d541dca1cac1',
               'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'uint64': '17db902806f448331b5a758d7d2ee672',
               'uint8': '27dd30c4e08a797063dffac2490b0be6'}
        for dt in self.itype[1:]:
            np.random.seed(1234)
            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianess
        np.random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(np.bool).name] == res)

    def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is casted
        # as np.int64, and `ubnd` is casted as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.
        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
        # None of these function calls should
        # generate a ValueError now.
        actual = np.random.randint(lbnd, ubnd, dtype=dt)
        assert_equal(actual, tgt)

    def test_respect_dtype_singleton(self):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            self.assertEqual(sample.dtype, np.dtype(dt))
        # NOTE(review): np.bool/np.int/np.long are deprecated aliases
        # removed in newer NumPy releases.
        for dt in (np.bool, np.int, np.long):
            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            self.assertFalse(hasattr(sample, 'dtype'))
            self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
    """Standard Cauchy draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.standard_cauchy(size=(3, 2))
    desired = np.array([[0.77127660196445336, -6.55601161955910605],
                        [0.93582023391158309, -2.07479293013759447],
                        [-4.74601644297011926, 0.18338989290760804]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
    """Standard exponential draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.standard_exponential(size=(3, 2))
    desired = np.array([[0.96441739162374596, 0.89556604882105506],
                        [2.1953785836319808, 2.22243285392490542],
                        [0.6116915921431676, 1.50592546727413201]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
    """Standard gamma draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.standard_gamma(shape=3, size=(3, 2))
    desired = np.array([[5.50841531318455058, 6.62953470301903103],
                        [5.93988484943779227, 2.31044849402133989],
                        [7.54838614231317084, 8.012756093271868]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
    """Standard normal draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.standard_normal(size=(3, 2))
    desired = np.array([[1.34016345771863121, 1.73759122771936081],
                        [1.498988344300628, -0.2286433324536169],
                        [2.031033998682787, 2.17032494605655257]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
    """Student's t draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.standard_t(df=10, size=(3, 2))
    desired = np.array([[0.97140611862659965, -0.08830486548450577],
                        [1.36311143689505321, -0.55317463909867071],
                        [-0.18473749069684214, 0.61181537341755321]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
    """Triangular draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
                                  size=(3, 2))
    desired = np.array([[12.68117178949215784, 12.4129206149193152],
                        [16.20131377335158263, 16.25692138747600524],
                        [11.20400690911820263, 14.4978144835829923]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
    """Uniform draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
    desired = np.array([[6.99097932346268003, 6.73801597444323974],
                        [9.50364421400426274, 9.53130618907631089],
                        [5.48995325769805476, 8.47493103280052118]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
    """Von Mises draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
    desired = np.array([[2.28567572673902042, 2.89163838442285037],
                        [0.38198375564286025, 2.57638023113890746],
                        [1.19153771588353052, 1.83509849681825354]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
    """Tiny kappa must not hang and must produce finite samples."""
    # check infinite loop, gh-4720
    np.random.seed(self.seed)
    r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
    np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
    """Wald (inverse Gaussian) draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
    desired = np.array([[3.82935265715889983, 5.13125249184285526],
                        [0.35045403618358717, 1.50832396872003538],
                        [0.24124319895843183, 0.22031101461955038]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
    """Weibull draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.weibull(a=1.23, size=(3, 2))
    desired = np.array([[0.97097342648766727, 0.91422896443565516],
                        [1.89517770034962929, 1.91414357960479564],
                        [0.67057783752390987, 1.39494046635066793]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
    """Zipf draws from the fixed seed must match pinned values."""
    np.random.seed(self.seed)
    actual = np.random.zipf(a=1.23, size=(3, 2))
    desired = np.array([[66, 29],
                        [1, 1],
                        [3, 13]])
    assert_array_equal(actual, desired)
class TestBroadcast(TestCase):
    """Checks that distribution functions broadcast non-scalar arguments.

    Every test follows the same pattern: re-seed, draw with one parameter
    expanded to length 3 (the others left length-1) and compare against a
    pinned reference sequence, then repeat with a different parameter
    expanded; invalid parameter values must raise ValueError in every
    broadcast orientation as well.  The pinned arrays depend on the exact
    sequence of draws, so call order must not be changed.
    """
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    def setUp(self):
        # Fixed seed so each pinned `desired` array below is reproducible.
        self.seed = 123456789

    def setSeed(self):
        # Re-seed before each draw so both broadcast orientations of a
        # test consume the identical random stream.
        np.random.seed(self.seed)

    # TODO: Include test for randint once it can broadcast
    # Can steal the test written in PR #6938
    def test_uniform(self):
        low = [0]
        high = [1]
        uniform = np.random.uniform
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)

        self.setSeed()
        actual = uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        normal = np.random.normal
        desired = np.array([2.2129019979039612,
                            2.1283977976520019,
                            1.8417114045748335])

        self.setSeed()
        actual = normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc * 3, bad_scale)

        self.setSeed()
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)

    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        beta = np.random.beta
        desired = np.array([0.19843558305989056,
                            0.075230336409423643,
                            0.24976865978980844])

        self.setSeed()
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)

        self.setSeed()
        actual = beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a, b * 3)
        assert_raises(ValueError, beta, a, bad_b * 3)

    def test_exponential(self):
        scale = [1]
        bad_scale = [-1]
        exponential = np.random.exponential
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = exponential(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, exponential, bad_scale * 3)

    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [-1]
        std_gamma = np.random.standard_gamma
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = std_gamma(shape * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, bad_shape * 3)

    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        gamma = np.random.gamma
        desired = np.array([1.5221370731769048,
                            1.5277256455738331,
                            1.4248762625178359])

        self.setSeed()
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)

        self.setSeed()
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)

    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        f = np.random.f
        desired = np.array([0.80038951638264799,
                            0.86768719635363512,
                            2.7251095168386801])

        self.setSeed()
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)

        self.setSeed()
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)

    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        nonc_f = np.random.noncentral_f
        desired = np.array([9.1393943263705211,
                            13.025456344595602,
                            8.8018098359100545])

        self.setSeed()
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)

        self.setSeed()
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)

        self.setSeed()
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)

    def test_chisquare(self):
        df = [1]
        bad_df = [-1]
        chisquare = np.random.chisquare
        desired = np.array([0.57022801133088286,
                            0.51947702108840776,
                            0.1320969254923558])

        self.setSeed()
        actual = chisquare(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, chisquare, bad_df * 3)

    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        nonc_chi = np.random.noncentral_chisquare
        desired = np.array([9.0015599467913763,
                            4.5804135049718742,
                            6.0872302432834564])

        self.setSeed()
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)

        self.setSeed()
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)

    def test_standard_t(self):
        df = [1]
        bad_df = [-1]
        t = np.random.standard_t
        desired = np.array([3.0702872575217643,
                            5.8560725167361607,
                            1.0274791436474273])

        self.setSeed()
        actual = t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, t, bad_df * 3)

    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        vonmises = np.random.vonmises
        desired = np.array([2.9883443664201312,
                            -2.7064099483995943,
                            -1.8672476700665914])

        self.setSeed()
        actual = vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)

        self.setSeed()
        actual = vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)

    def test_pareto(self):
        a = [1]
        bad_a = [-1]
        pareto = np.random.pareto
        desired = np.array([1.1405622680198362,
                            1.1465519762044529,
                            1.0389564467453547])

        self.setSeed()
        actual = pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, pareto, bad_a * 3)

    def test_weibull(self):
        a = [1]
        bad_a = [-1]
        weibull = np.random.weibull
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, weibull, bad_a * 3)

    def test_power(self):
        a = [1]
        bad_a = [-1]
        power = np.random.power
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, power, bad_a * 3)

    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        laplace = np.random.laplace
        desired = np.array([0.067921356028507157,
                            0.070715642226971326,
                            0.019290950698972624])

        self.setSeed()
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)

        self.setSeed()
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)

    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        gumbel = np.random.gumbel
        desired = np.array([0.2730318639556768,
                            0.26936705726291116,
                            0.33906220393037939])

        self.setSeed()
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)

        self.setSeed()
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)

    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        logistic = np.random.logistic
        desired = np.array([0.13152135837586171,
                            0.13675915696285773,
                            0.038216792802833396])

        self.setSeed()
        actual = logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc * 3, bad_scale)

        self.setSeed()
        actual = logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc, bad_scale * 3)

    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        lognormal = np.random.lognormal
        desired = np.array([9.1422086044848427,
                            8.4013952870126261,
                            6.3073234116578671])

        self.setSeed()
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)

        self.setSeed()
        actual = lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)

    def test_rayleigh(self):
        scale = [1]
        bad_scale = [-1]
        rayleigh = np.random.rayleigh
        desired = np.array([1.2337491937897689,
                            1.2360119924878694,
                            1.1936818095781789])

        self.setSeed()
        actual = rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, rayleigh, bad_scale * 3)

    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        wald = np.random.wald
        desired = np.array([0.11873681120271318,
                            0.12450084820795027,
                            0.9096122728408238])

        self.setSeed()
        actual = wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean * 3, scale)
        assert_raises(ValueError, wald, mean * 3, bad_scale)

        self.setSeed()
        actual = wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean, scale * 3)
        assert_raises(ValueError, wald, mean, bad_scale * 3)

    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # right == [3], so this unpacks two scalar 3s: left/mode values
        # that are invalid because they equal/exceed `right`.
        bad_left_two, bad_mode_two = right * 2
        triangular = np.random.triangular
        desired = np.array([2.03339048710429,
                            2.0347400359389356,
                            2.0095991069536208])

        self.setSeed()
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)

        self.setSeed()
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)

        self.setSeed()
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)

    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        binom = np.random.binomial
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)

        self.setSeed()
        actual = binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)

    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        neg_binom = np.random.negative_binomial
        desired = np.array([1, 0, 1])

        self.setSeed()
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)

        self.setSeed()
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)

    def test_poisson(self):
        # lam values above poisson_lam_max must be rejected too.
        max_lam = np.random.RandomState().poisson_lam_max

        lam = [1]
        bad_lam_one = [-1]
        bad_lam_two = [max_lam * 2]
        poisson = np.random.poisson
        desired = np.array([1, 1, 0])

        self.setSeed()
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)

    def test_zipf(self):
        a = [2]
        bad_a = [0]
        zipf = np.random.zipf
        desired = np.array([2, 2, 1])

        self.setSeed()
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)

    def test_geometric(self):
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        geom = np.random.geometric
        desired = np.array([2, 2, 2])

        self.setSeed()
        actual = geom(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geom, bad_p_one * 3)
        assert_raises(ValueError, geom, bad_p_two * 3)

    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [0]
        bad_nsample_two = [4]
        hypergeom = np.random.hypergeometric
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = hypergeom(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)

    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        logseries = np.random.logseries
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(TestCase):
    """Each RandomState must produce the same sequence threaded or serial."""
    # make sure each state produces the same sequence even in threads
    def setUp(self):
        # One independent seed per worker thread.
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run `function(state, out)` once per seed, threaded and serial, and compare."""
        from threading import Thread
        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation
        t = [Thread(target=function, args=(np.random.RandomState(s), o))
             for s, o in zip(self.seeds, out1)]
        [x.start() for x in t]
        [x.join() for x in t]

        # the same serial
        for s, o in zip(self.seeds, out2):
            function(np.random.RandomState(s), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1/6.]*6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(TestCase):
    """Length-1 array arguments must yield shape-(1,) outputs, not scalars."""
    def setUp(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        """Single-parameter distributions keep the (1,) shape."""
        funcs = (np.random.exponential, np.random.standard_gamma,
                 np.random.chisquare, np.random.standard_t,
                 np.random.pareto, np.random.weibull,
                 np.random.power, np.random.rayleigh,
                 np.random.poisson, np.random.zipf,
                 np.random.geometric, np.random.logseries)

        probfuncs = (np.random.geometric, np.random.logseries)

        for func in funcs:
            if func in probfuncs:  # p < 1.0
                out = func(np.array([0.5]))
            else:
                out = func(self.argOne)

            self.assertEqual(out.shape, self.tgtShape)

    def test_two_arg_funcs(self):
        """Two-parameter distributions keep the (1,) shape, scalar-mixed too."""
        funcs = (np.random.uniform, np.random.normal,
                 np.random.beta, np.random.gamma,
                 np.random.f, np.random.noncentral_chisquare,
                 np.random.vonmises, np.random.laplace,
                 np.random.gumbel, np.random.logistic,
                 np.random.lognormal, np.random.wald,
                 np.random.binomial, np.random.negative_binomial)

        probfuncs = (np.random.binomial, np.random.negative_binomial)

        for func in funcs:
            if func in probfuncs:  # p <= 1
                argTwo = np.array([0.5])
            else:
                argTwo = self.argTwo

            out = func(self.argOne, argTwo)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne[0], argTwo)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne, argTwo[0])
            self.assertEqual(out.shape, self.tgtShape)

# TODO: Uncomment once randint can broadcast arguments
#    def test_randint(self):
#        itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
#                 np.int32, np.uint32, np.int64, np.uint64]
#        func = np.random.randint
#        high = np.array([1])
#        low = np.array([0])
#
#        for dt in itype:
#            out = func(low, high, dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)
#
#            out = func(low[0], high, dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)
#
#            out = func(low, high[0], dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)

    def test_three_arg_funcs(self):
        """Three-parameter distributions keep the (1,) shape, scalar-mixed too."""
        funcs = [np.random.noncentral_f, np.random.triangular,
                 np.random.hypergeometric]

        for func in funcs:
            out = func(self.argOne, self.argTwo, self.argThree)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne[0], self.argTwo, self.argThree)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne, self.argTwo[0], self.argThree)
            self.assertEqual(out.shape, self.tgtShape)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_module_suite()
|
multiprocess_demo.py | #import multiprocessing
from pymongo import MongoClient
import pandas as pd
import numpy as np
import string
import re
from multiprocessing import Lock, Process
from datetime import datetime
def process_data(maxx, l):
    # Stub: intended to process one batch of documents (`maxx`) while
    # holding/using the shared lock `l`.  Currently does nothing and
    # returns None, which the caller prints as the job status.
    pass
def process_cursor(skip_n, limit_n, l):
    """Fetch one partition of the collection and hand it to process_data.

    Parameters
    ----------
    skip_n : int
        Document offset for this worker (multiple of the batch size).
    limit_n : int
        Number of documents this worker should read.
    l : multiprocessing.Lock
        Lock shared with the other worker processes, forwarded to
        ``process_data``.
    """
    print('Starting process', skip_n // limit_n, '...')
    # NOTE(review): `uri` is not defined anywhere in this file; it must be
    # supplied as a module-level global before these workers start -- confirm.
    client = MongoClient(uri)
    db = client.cs_ml
    # Renamed from `max`, which shadowed the builtin of the same name.
    cursor = db.coll.find({}).skip(skip_n).limit(limit_n)
    batch = list(cursor)
    status = process_data(batch, l)
    print('job done for partitions', skip_n // limit_n, status)
    print('Completed process', skip_n // limit_n, '...')
if __name__ == '__main__':
    # Split the collection into `n_cores` contiguous partitions and give
    # each its own worker process.
    n_cores = 1  # number of splits (logical cores of the CPU-1)
    # collection_size = 5284733-13 5284720 for 16core # your collection size in mongo
    collection_size = 5781408
    batch_size = round(collection_size / n_cores)
    # One skip offset per worker: 0, batch_size, 2*batch_size, ...
    skips = range(0, n_cores * batch_size, batch_size)
    print(collection_size, batch_size, skips, len(skips))
    l = Lock()
    processes = [Process(target=process_cursor, args=(skip_n, batch_size, l)) for skip_n in skips]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
|
test_executor.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import time
import mesos
import mesos_pb2
class MyExecutor(mesos.Executor):
    """Minimal Mesos executor: runs each task in its own thread.

    NOTE: this file is Python 2 (print statements); keep it that way
    unless the whole module is ported.
    """
    def launchTask(self, driver, task):
        # Create a thread to run the task. Tasks should always be run in new
        # threads or processes, rather than inside launchTask itself.
        def run_task():
            print "Running task %s" % task.task_id.value
            # Report TASK_RUNNING before doing the (simulated) work.
            update = mesos_pb2.TaskStatus()
            update.task_id.value = task.task_id.value
            update.state = mesos_pb2.TASK_RUNNING
            driver.sendStatusUpdate(update)

            # This is where one would perform the requested task.
            time.sleep(1)

            print "Sending status update..."
            update = mesos_pb2.TaskStatus()
            update.task_id.value = task.task_id.value
            update.state = mesos_pb2.TASK_FINISHED
            driver.sendStatusUpdate(update)
            print "Sent status update"

        thread = threading.Thread(target=run_task)
        thread.start()
if __name__ == "__main__":
print "Starting executor"
driver = mesos.MesosExecutorDriver(MyExecutor())
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
recovery_2i.py | import logging
from threading import Thread
import time
from .base_2i import BaseSecondaryIndexingTests
from couchbase.n1ql import CONSISTENCY_REQUEST
from couchbase_helper.query_definitions import QueryDefinition
from lib.memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
from membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteMachineShellConnection
log = logging.getLogger(__name__)
class SecondaryIndexingRecoveryTests(BaseSecondaryIndexingTests):
def setUp(self):
    """Create the base cluster plus a pool of simple secondary indexes."""
    # Use replica indexes so recovery scenarios can fall back to them.
    self.use_replica = True
    super(SecondaryIndexingRecoveryTests, self).setUp()
    self.load_query_definitions = []
    # Number of indexes to pre-create (test param, default 10).
    self.initial_index_number = self.input.param("initial_index_number", 10)
    for x in range(self.initial_index_number):
        index_name = "index_name_" + str(x)
        query_definition = QueryDefinition(
            index_name=index_name, index_fields=["VMs"],
            query_template="SELECT * FROM %s ", groups=["simple"],
            index_where_clause=" VMs IS NOT NULL ")
        self.load_query_definitions.append(query_definition)
    if self.load_query_definitions:
        self.multi_create_index(buckets=self.buckets,
                                query_definitions=self.load_query_definitions)
def tearDown(self):
    """Drop indexes created by the test, then run the base teardown."""
    if hasattr(self, 'query_definitions') and not self.skip_cleanup:
        try:
            self.log.info("<<<<<< WILL DROP THE INDEXES >>>>>")
            tasks = self.async_multi_drop_index(
                buckets=self.buckets, query_definitions=self.query_definitions)
            for task in tasks:
                task.result()
            # NOTE(review): the tasks returned here are never awaited, so
            # the drop of the pre-loaded indexes is best-effort -- confirm
            # this is intentional.
            self.async_multi_drop_index(
                buckets=self.buckets, query_definitions=self.load_query_definitions)
        except Exception as ex:
            # Cleanup is best-effort: log and continue with base teardown.
            log.info(ex)
    super(SecondaryIndexingRecoveryTests, self).tearDown()
'''Test that checks if indexes that are ready during index warmup can be used'''
def test_use_index_during_warmup(self):
    """Kill the indexer mid-load and verify a ready index stays queryable during warmup."""
    index_node = self.get_nodes_from_services_map(service_type="index",
                                                  get_all_nodes=False)
    rest = RestConnection(index_node)
    # Change indexer snapshot for a recovery point
    doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
    rest.set_index_settings(doc)
    create_index_query = "CREATE INDEX idx ON default(age)"
    create_index_query2 = "CREATE INDEX idx1 ON default(age)"
    create_index_query3 = "CREATE INDEX idx2 ON default(age)"
    create_index_query4 = "CREATE INDEX idx3 ON default(age)"
    create_index_query5 = "CREATE INDEX idx4 ON default(age)"
    try:
        self.n1ql_helper.run_cbq_query(query=create_index_query,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=create_index_query2,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=create_index_query3,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=create_index_query4,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=create_index_query5,
                                       server=self.n1ql_node)
    except Exception as ex:
        self.log.info(str(ex))
        self.fail(
            "index creation failed with error : {0}".format(str(ex)))
    self.wait_until_indexes_online()
    # Shrink the indexer quota so the warmup after the kill is non-trivial.
    rest.set_service_memoryQuota(service='indexMemoryQuota',
                                 memoryQuota=256)
    master_rest = RestConnection(self.master)
    # Load 700k docs so there is data to recover.
    self.shell.execute_cbworkloadgen(master_rest.username, master_rest.password, 700000, 100, "default", 1024, '-j')
    index_stats = rest.get_indexer_stats()
    self.log.info(index_stats["indexer_state"])
    self.assertTrue(index_stats["indexer_state"].lower() != 'warmup')
    # Sleep for 60 seconds to allow a snapshot to be created
    self.sleep(60)
    # Watch for a usable index in the background, then kill the indexer.
    t1 = Thread(target=self.monitor_index_stats, name="monitor_index_stats", args=([index_node, 60]))
    t1.start()
    shell = RemoteMachineShellConnection(index_node)
    output1, error1 = shell.execute_command("killall -9 indexer")
    t1.join()
    use_index_query = "select * from default where age > 30"
    # Results are not garunteed to be accurate so the query successfully running is all we can check
    try:
        results = self.n1ql_helper.run_cbq_query(query=use_index_query, server=self.n1ql_node)
    except Exception as ex:
        self.log.info(str(ex))
        self.fail("query should run correctly, an index is available for use")
def monitor_index_stats(self, index_node=None, timeout=600):
    """Ensure that the indexer is in warmup but an index is ready for use.

    Polls the indexer stats on `index_node` and returns as soon as any
    index in the `default` bucket reports status 'Ready' while the
    indexer state is 'warmup', or after `timeout` seconds — whichever
    comes first.
    """
    rest = RestConnection(index_node)
    init_time = time.time()
    while True:
        index_stats = rest.get_indexer_stats()
        self.log.info(index_stats["indexer_state"])
        index_map = self.get_index_map()
        if index_stats["indexer_state"].lower() == 'warmup':
            if any(index_map['default'][index]['status'] == 'Ready'
                   for index in index_map['default']):
                return
        # Bug fix: check the deadline on every iteration.  The original
        # only refreshed its clock when the state was NOT 'warmup', so the
        # loop could spin forever while the indexer stayed in warmup with
        # no ready index.
        if time.time() - init_time > timeout:
            return
def test_rebalance_in(self):
    """Run index operations while new nodes are rebalanced into the cluster."""
    pre_recovery_tasks = self.async_run_operations(phase="before")
    self._run_tasks([pre_recovery_tasks])
    self.get_dgm_for_plasma()
    kvOps_tasks = self._run_kvops_tasks()
    try:
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init],
            self.nodes_in_list,
            [], services=self.services_in)
        # Exercise index operations concurrently with the rebalance.
        mid_recovery_tasks = self.async_run_operations(
            phase="in_between")
        rebalance.result()
        self._run_tasks([kvOps_tasks, mid_recovery_tasks])
        # check if the nodes in cluster are healthy
        msg = "Cluster not in Healthy state"
        self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
        log.info("==== Cluster in healthy state ====")
        self._check_all_bucket_items_indexed()
        post_recovery_tasks = self.async_run_operations(phase="after")
        self._run_tasks([post_recovery_tasks])
    except Exception as ex:
        log.info(str(ex))
        raise
def test_rebalance_out(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
# self._create_replica_indexes()
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], self.nodes_out_list)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_rebalance_in_out(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
# self._create_replica_indexes()
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], self.nodes_in_list,
self.nodes_out_list, services=self.services_in)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
    def test_rebalance_in_out_multi_nodes(self):
        """
        MB-16220
        1. Create cluster + Indexes
        2. Run Queries
        3. Rebalance out Data node and Rebalance in Data node.
        4. Rebalance out Index node and Rebalance in Index node.
        """
        try:
            # spare servers not yet in the cluster; we need one KV and one index
            extra_nodes = self.servers[self.nodes_init:]
            self.assertGreaterEqual(
                len(extra_nodes), 2,
                "Sufficient nodes not available for rebalance")
            # phase 1: swap one KV node
            self.nodes_out = 1
            self.nodes_in_list = [extra_nodes[0]]
            self.nodes_out_dist = "kv:1"
            self.services_in = ["kv"]
            self.targetMaster = False
            self.generate_map_nodes_out_dist()
            pre_recovery_tasks = self.async_run_operations(phase="before")
            self._run_tasks([pre_recovery_tasks])
            self.get_dgm_for_plasma()
            kvOps_tasks = self._run_kvops_tasks()
            rebalance = self.cluster.async_rebalance(
                self.servers[:self.nodes_init],
                self.nodes_in_list,
                self.nodes_out_list,
                services=self.services_in)
            mid_recovery_tasks = self.async_run_operations(phase="in_between")
            rebalance.result()
            # check if the nodes in cluster are healthy
            msg = "Cluster not in Healthy state"
            self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
            log.info("==== Cluster in healthy state ====")
            # phase 2: swap one index node
            self.nodes_out_dist = "index:1"
            self.services_in = ["index"]
            self.nodes_in_list = [extra_nodes[1]]
            self.generate_map_nodes_out_dist()
            # self._create_replica_indexes()
            rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                     self.nodes_in_list,
                                                     self.nodes_out_list, services=self.services_in)
            rebalance.result()
            self._run_tasks([kvOps_tasks, mid_recovery_tasks])
            # check if the nodes in cluster are healthy
            msg = "Cluster not in Healthy state"
            self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
            log.info("==== Cluster in healthy state ====")
            self._check_all_bucket_items_indexed()
            post_recovery_tasks = self.async_run_operations(phase="after")
            self._run_tasks([post_recovery_tasks])
        except Exception as ex:
            log.info(str(ex))
            raise
    def test_rebalance_with_stop_start(self):
        """Start a swap rebalance, stop it mid-flight, then rerun it to completion."""
        pre_recovery_tasks = self.async_run_operations(phase="before")
        self._run_tasks([pre_recovery_tasks])
        self.get_dgm_for_plasma()
        kvOps_tasks = self._run_kvops_tasks()
        try:
            # self._create_replica_indexes()
            rebalance = self.cluster.async_rebalance(
                self.servers[:self.nodes_init],
                self.nodes_in_list,
                self.nodes_out_list, services=self.services_in)
            # interrupt the in-flight rebalance after a fraction of the timeout
            stopped = RestConnection(self.master).stop_rebalance(
                wait_timeout=self.wait_timeout // 3)
            self.assertTrue(stopped, msg="Unable to stop rebalance")
            rebalance.result()
            self.sleep(100)
            # rerun the same rebalance; it must succeed after the interruption
            rebalance = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], self.nodes_in_list,
                self.nodes_out_list, services=self.services_in)
            mid_recovery_tasks = self.async_run_operations(phase="in_between")
            rebalance.result()
            self._run_tasks([kvOps_tasks, mid_recovery_tasks])
            # check if the nodes in cluster are healthy
            msg = "Cluster not in Healthy state"
            self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
            log.info("==== Cluster in healthy state ====")
            self._check_all_bucket_items_indexed()
            post_recovery_tasks = self.async_run_operations(phase="after")
            self._run_tasks([post_recovery_tasks])
        except Exception as ex:
            log.info(str(ex))
            raise
def test_server_crash(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self.use_replica = False
self._create_replica_indexes()
self.targetProcess = self.input.param("targetProcess", 'memcached')
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
if self.targetProcess == "memcached":
remote.kill_memcached()
else:
remote.terminate_process(process_name=self.targetProcess)
self.sleep(60)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_server_stop(self):
if self.doc_ops:
return
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self._create_replica_indexes()
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.stop_server()
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
finally:
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.start_server()
self.sleep(20)
def test_server_restart(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.stop_server()
self.sleep(30)
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.start_server()
self.sleep(30)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_failover(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self._create_replica_indexes()
servr_out = self.nodes_out_list
failover_task = self.cluster.async_failover(
[self.master],
failover_nodes=servr_out,
graceful=self.graceful)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
failover_task.result()
if self.graceful:
# Check if rebalance is still running
msg = "graceful failover failed for nodes"
check_rblnc = RestConnection(self.master).monitorRebalance(
stop_if_loop=True)
self.assertTrue(check_rblnc, msg=msg)
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], [], servr_out)
rebalance.result()
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
    def test_failover_add_back(self):
        """Fail over the out-nodes, add them back with the chosen recovery type,
        rebalance, then verify cluster health and index completeness."""
        try:
            rest = RestConnection(self.master)
            # "full" or "delta" recovery for the re-added nodes
            recoveryType = self.input.param("recoveryType", "full")
            servr_out = self.nodes_out_list
            failover_task = self.cluster.async_failover([self.master],
                    failover_nodes=servr_out, graceful=self.graceful)
            failover_task.result()
            pre_recovery_tasks = self.async_run_operations(phase="before")
            self._run_tasks([pre_recovery_tasks])
            self.get_dgm_for_plasma()
            kvOps_tasks = self._run_kvops_tasks()
            # map the failed-over servers back to their otp node entries;
            # on a localhost cluster nodes differ by port, otherwise by ip
            nodes_all = rest.node_statuses()
            nodes = []
            if servr_out[0].ip == "127.0.0.1":
                for failover_node in servr_out:
                    nodes.extend([node for node in nodes_all
                                  if (str(node.port) == failover_node.port)])
            else:
                for failover_node in servr_out:
                    nodes.extend([node for node in nodes_all
                                  if node.ip == failover_node.ip])
            for node in nodes:
                log.info("Adding Back: {0}".format(node))
                rest.add_back_node(node.id)
                rest.set_recovery_type(otpNode=node.id,
                                       recoveryType=recoveryType)
            # empty in/out lists: this rebalance just re-integrates the added-back nodes
            rebalance = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], [], [])
            mid_recovery_tasks = self.async_run_operations(phase="in_between")
            rebalance.result()
            self._run_tasks([kvOps_tasks, mid_recovery_tasks])
            # check if the nodes in cluster are healthy
            msg = "Cluster not in Healthy state"
            self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
            log.info("==== Cluster in healthy state ====")
            self._check_all_bucket_items_indexed()
            post_recovery_tasks = self.async_run_operations(phase="after")
            self._run_tasks([post_recovery_tasks])
        except Exception as ex:
            log.info(str(ex))
            raise
    def test_failover_indexer_add_back(self):
        """
        Indexer add back scenarios: fail over index nodes, add them back
        with the chosen recovery type, rebalance, and verify health.
        :return:
        """
        rest = RestConnection(self.master)
        # "full" or "delta" recovery for the re-added nodes
        recoveryType = self.input.param("recoveryType", "full")
        indexer_out = int(self.input.param("nodes_out", 0))
        nodes = self.get_nodes_from_services_map(service_type="index",
                                                 get_all_nodes=True)
        self.assertGreaterEqual(len(nodes), indexer_out,
                                "Existing Indexer Nodes less than Indexer out nodes")
        pre_recovery_tasks = self.async_run_operations(phase="before")
        self._run_tasks([pre_recovery_tasks])
        self.get_dgm_for_plasma()
        kvOps_tasks = self._run_kvops_tasks()
        try:
            self.use_replica = False
            self._create_replica_indexes()
            servr_out = nodes[:indexer_out]
            failover_task = self.cluster.async_failover(
                [self.master], failover_nodes=servr_out,
                graceful=self.graceful)
            failover_task.result()
            # map the failed-over servers back to their otp node entries;
            # on a localhost cluster nodes differ by port, otherwise by ip
            nodes_all = rest.node_statuses()
            nodes = []
            if servr_out[0].ip == "127.0.0.1":
                for failover_node in servr_out:
                    nodes.extend([node for node in nodes_all
                                  if (str(node.port) == failover_node.port)])
            else:
                for failover_node in servr_out:
                    nodes.extend([node for node in nodes_all
                                  if node.ip == failover_node.ip])
            for node in nodes:
                log.info("Adding back {0} with recovery type {1}...".format(
                    node.ip, recoveryType))
                rest.add_back_node(node.id)
                rest.set_recovery_type(otpNode=node.id,
                                       recoveryType=recoveryType)
            log.info("Rebalancing nodes in...")
            mid_recovery_tasks = self.async_run_operations(phase="in_between")
            # empty in/out lists: this rebalance re-integrates the added-back nodes
            rebalance = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], [], [])
            rebalance.result()
            self._run_tasks([mid_recovery_tasks, kvOps_tasks])
            # check if the nodes in cluster are healthy
            msg = "Cluster not in Healthy state"
            self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
            log.info("==== Cluster in healthy state ====")
            self._check_all_bucket_items_indexed()
            post_recovery_tasks = self.async_run_operations(phase="after")
            self._run_tasks([post_recovery_tasks])
        except Exception as ex:
            log.info(str(ex))
            raise
    def test_failover_indexer_restart(self):
        """
        CBQE-3153
        Stop an index node's server and verify that queries either succeed
        or fail only with queryport.indexNotFound while the node is down.
        :return:
        """
        index_servers = self.get_nodes_from_services_map(service_type="index",
                                                         get_all_nodes=True)
        self.multi_create_index(self.buckets, self.query_definitions)
        self.get_dgm_for_plasma()
        self.sleep(30)
        kvOps_tasks = self._run_kvops_tasks()
        # take the first index node down
        remote = RemoteMachineShellConnection(index_servers[0])
        remote.stop_server()
        self.sleep(20)
        for bucket in self.buckets:
            for query in self.query_definitions:
                try:
                    self.query_using_index(bucket=bucket,
                                           query_definition=query)
                except Exception as ex:
                    # indexNotFound is expected while the node is down;
                    # anything else is logged and aborts this bucket's queries
                    msg = "queryport.indexNotFound"
                    if msg in str(ex):
                        continue
                    else:
                        log.info(str(ex))
                        break
        remote.start_server()
        self.sleep(20)
        self._run_tasks([kvOps_tasks])
def test_autofailover(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
autofailover_timeout = 30
conn = RestConnection(self.master)
status = conn.update_autofailover_settings(True, autofailover_timeout)
self.assertTrue(status, 'failed to change autofailover_settings!')
try:
self._create_replica_indexes()
servr_out = self.nodes_out_list
remote = RemoteMachineShellConnection(servr_out[0])
remote.stop_server()
self.sleep(10)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self.sleep(autofailover_timeout + 10, "Wait for autofailover")
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], [], servr_out)
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
finally:
remote.start_server()
self.sleep(30)
def test_network_partitioning(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self._create_replica_indexes()
for node in self.nodes_out_list:
self.start_firewall_on_node(node)
self.sleep(60)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
finally:
for node in self.nodes_out_list:
self.stop_firewall_on_node(node)
self.sleep(30)
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
def test_couchbase_bucket_compaction(self):
"""
Run Compaction Here
Run auto-compaction to remove the tomb stones
"""
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
compact_tasks = []
for bucket in self.buckets:
compact_tasks.append(self.cluster.async_compact_bucket(
self.master, bucket))
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
for task in compact_tasks:
task.result()
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
def test_warmup(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
for server in self.nodes_out_list:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
mid_recovery_tasks = self.async_run_operations(phase="in_between")
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
    def test_couchbase_bucket_flush(self):
        """Flush every bucket mid-workload, wait for buckets to return to
        'healthy', then verify cluster health and index completeness."""
        pre_recovery_tasks = self.async_run_operations(phase="before")
        self._run_tasks([pre_recovery_tasks])
        self.get_dgm_for_plasma()
        kvOps_tasks = self._run_kvops_tasks()
        # Flush the bucket
        for bucket in self.buckets:
            log.info("Flushing bucket {0}...".format(bucket.name))
            rest = RestConnection(self.master)
            rest.flush_bucket(bucket.name)
            # poll up to ~100s (10 x 10s) for the bucket to become healthy
            count = 0
            while rest.get_bucket_status(bucket.name) != "healthy" and \
                    count < 10:
                log.info("Bucket {0} Status is {1}. Sleeping...".format(
                    bucket.name, rest.get_bucket_status(bucket.name)))
                count += 1
                self.sleep(10)
            log.info("Bucket {0} is {1}".format(
                bucket.name, rest.get_bucket_status(bucket.name)))
        mid_recovery_tasks = self.async_run_operations(phase="in_between")
        self._run_tasks([kvOps_tasks, mid_recovery_tasks])
        # check if the nodes in cluster are healthy
        msg = "Cluster not in Healthy state"
        self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
        log.info("==== Cluster in healthy state ====")
        # give the indexer time to observe the flush before validating
        self.sleep(180)
        self._check_all_bucket_items_indexed()
        post_recovery_tasks = self.async_run_operations(phase="after")
        self.sleep(180)
        self._run_tasks([post_recovery_tasks])
def test_robust_rollback_handling_in_failure_scenario(self):
"""
MB-36582
TODO:
"https://issues.couchbase.com/browse/MB-37586
https://issues.couchbase.com/browse/MB-37588
Will wait on the stats to be available
https://issues.couchbase.com/browse/MB-37594
"""
data_nodes = self.get_kv_nodes()
self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
bucket_name = self.buckets[0].name
index_name = self.get_index_map()[bucket_name].keys()[0]
index_node = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
rest = RestConnection(index_node)
# Change indexer snapshot for a recovery point
doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
rest.set_index_settings(doc)
# Deleting bucket as there is no easy way in testrunner to crate index before loading data
for bucket in self.buckets:
self.cluster.bucket_delete(self.master, bucket=bucket)
# Create default bucket
default_params = self._create_bucket_params(
server=self.master, size=self.bucket_size,
replicas=self.num_replicas, bucket_type=self.bucket_type,
enable_replica_index=self.enable_replica_index,
eviction_policy=self.eviction_policy, lww=self.lww,
maxttl=self.maxttl, compression_mode=self.compression_mode)
self.cluster.create_default_bucket(default_params)
# loading data to bucket
gens_load = self.generate_docs(num_items=self.docs_per_day)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# creating Index
query_definition = QueryDefinition(
index_name=index_name, index_fields=["VMs"],
query_template="SELECT * FROM %s ", groups=["simple"],
index_where_clause=" VMs IS NOT NULL ")
self.load_query_definitions.append(query_definition)
self.create_index(bucket="default", query_definition=query_definition)
node_b, node_c = (None, None)
for node in data_nodes:
if node.ip == self.master.ip:
continue
if not node_b:
node_b = node
else:
node_c = node
break
# Blocking Node C from Node B
try:
self.block_incoming_network_from_node(node_b, node_c)
# Killing Memcached on Node C so that disk snapshots have vbuuid not available with Node B
for _ in range(2):
# Killing memcached on node C
num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
remote_client = RemoteMachineShellConnection(node_c)
remote_client.kill_memcached()
remote_client.disconnect()
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
if new_num_snapshot > num_snapshot:
self.log.info("New Disk Snapshot is available")
break
sleep_count += 1
# Restarting Indexer to clear in-memory snapshots
remote_client = RemoteMachineShellConnection(index_node)
remote_client.execute_command("kill -9 $(ps aux | pgrep 'indexer')")
self.sleep(timeout=10, message="Allowing time for indexer to restart")
# Fail over Node C so that replica takes over on Node B
self.cluster.failover(servers=self.servers, failover_nodes=[node_c])
self.sleep(timeout=30, message="Waiting for rollback to kick in")
# Get rollback count
num_rollback = rest.get_num_rollback_stat(bucket="default")
self.assertEqual(num_rollback, 1, "Failed to rollback in failure scenario")
# Todo: add validation that the rollback has happened from snapshot not from Zero
finally:
self.resume_blocked_incoming_network_from_node(node_b, node_c)
    def test_discard_disk_snapshot_after_kv_persisted(self):
        """
        MB-36554
        Partition a 2-KV-node cluster, mutate data so the indexer builds extra
        disk snapshots, heal the partition, and verify the indexer catches up
        (validated via items_count until better stats exist).
        Todo: https://issues.couchbase.com/browse/MB-37586
        Will wait on the stats to be available
        https://issues.couchbase.com/browse/MB-37587
        """
        data_nodes = self.get_kv_nodes()
        self.assertTrue(len(data_nodes) == 2, "This test require a cluster of 2 nodes")
        bucket_name = self.buckets[0].name
        index_name = list(self.get_index_map()[bucket_name])[0]
        index_node = self.get_nodes_from_services_map(service_type="index",
                                                      get_all_nodes=False)
        rest = RestConnection(index_node)
        # Change indexer snapshot for a recovery point
        doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
        rest.set_index_settings(doc)
        # Deleting bucket as there is no easy way in testrunner to create index before loading data
        for bucket in self.buckets:
            self.cluster.bucket_delete(self.master, bucket=bucket)
        # Create default bucket
        default_params = self._create_bucket_params(
            server=self.master, size=self.bucket_size,
            replicas=self.num_replicas, bucket_type=self.bucket_type,
            enable_replica_index=self.enable_replica_index,
            eviction_policy=self.eviction_policy, lww=self.lww,
            maxttl=self.maxttl, compression_mode=self.compression_mode)
        self.cluster.create_default_bucket(default_params)
        # loading data to bucket
        gens_load = self.generate_docs(num_items=self.docs_per_day)
        self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
        # creating Index
        query_definition = QueryDefinition(
            index_name=index_name, index_fields=["VMs"],
            query_template="SELECT * FROM %s ", groups=["simple"],
            index_where_clause=" VMs IS NOT NULL ")
        self.load_query_definitions.append(query_definition)
        self.create_index(bucket="default", query_definition=query_definition)
        # Blocking node B firewall
        node_b, node_c = data_nodes
        try:
            self.block_incoming_network_from_node(node_b, node_c)
            # Performing doc mutation
            num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
            gens_load = self.generate_docs(self.docs_per_day * 2)
            self.load(gens_load, flag=self.item_flag, verify_data=False, batch_size=self.batch_size)
            # wait (up to ~100s) for a new disk snapshot, tracked via num_commits
            sleep_count = 0
            while sleep_count < 10:
                self.sleep(10, "Waiting for Disk Snapshot/s to be available")
                new_num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
                if new_num_snapshot > num_snapshot:
                    self.log.info("New Disk Snapshot is available")
                    break
                sleep_count += 1
            # Performing doc mutation
            num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
            gens_load = self.generate_docs(self.docs_per_day * 3)
            self.load(gens_load, flag=self.item_flag, verify_data=False, batch_size=self.batch_size)
            sleep_count = 0
            while sleep_count < 10:
                self.sleep(10, "Waiting for Disk Snapshot/s to be available")
                new_num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
                if new_num_snapshot > num_snapshot:
                    self.log.info("New Disk Snapshot is available")
                    break
                sleep_count += 1
            # resume the communication between node B and node C
        finally:
            self.resume_blocked_incoming_network_from_node(node_b, node_c)
        # TODO: Need to add validation based on stat that the Disk Snapshot has catch up and extra snapshots are deleted
        # Meanwhile we will validate based on the item_count
        self.sleep(timeout=2 * 60, message="Giving some time to indexer to recover after resuming communication "
                                           "between node A and node B")
        item_count_after_checking_kv_persisted_seq_num = rest.get_index_stats()[bucket_name][index_name]["items_count"]
        # NOTE(review): 2016 appears to be the docs-per-day multiplier used by
        # the doc generator — confirm against generate_docs
        self.assertEqual(item_count_after_checking_kv_persisted_seq_num, self.docs_per_day * 3 * 2016,
                         "Indexer failed to index all the items in bucket.\nExpected indexed item {}"
                         "\n Actual indexed item {}".format(item_count_after_checking_kv_persisted_seq_num,
                                                            self.docs_per_day * 3 * 2016))
    def test_rollback_to_zero_preceded_by_rollback_from_disk_snapshot(self):
        """
        MB36444
        Force a first rollback from a disk snapshot, recover the failed node,
        then repeat the partition/failover and verify the indexer rolls back
        again from a disk snapshot rather than from zero.
        """
        bucket_name = self.buckets[0].name
        index_name = list(self.get_index_map()[bucket_name])[0]
        data_nodes = self.get_kv_nodes()
        self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
        # pick the first two non-master KV nodes as B and C
        node_b, node_c = (None, None)
        for node in data_nodes:
            if node.ip == self.master.ip:
                continue
            if not node_b:
                node_b = node
            else:
                node_c = node
                break
        try:
            # Blocking communication between Node B and Node C
            conn = RestConnection(self.master)
            self.block_incoming_network_from_node(node_b, node_c)
            # Doing some mutation which replica on Node C won't see
            gens_load = self.generate_docs(num_items=self.docs_per_day * 2)
            self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
            # Failing over Node C
            self.cluster.failover(servers=self.servers, failover_nodes=[node_c])
            # poll up to ~150s for the first rollback (from disk snapshot)
            sleep_count = 0
            while sleep_count < 15:
                num_rollback = conn.get_num_rollback_stat(bucket=bucket_name)
                if num_rollback == 1:
                    self.log.info("Indexer has rolled back from disk snapshot")
                    break
                self.sleep(10, "Waiting for rollback to disk snapshot")
                sleep_count += 1
            self.assertNotEqual(sleep_count, 15, "Rollback to disk snapshot didn't happen")
            # Change indexer snapshot for a recovery point
            doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
            conn.set_index_settings(doc)
            # Doing some mutation so that two new disk snapshots are generated
            num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
            gens_load = self.generate_docs(num_items=self.docs_per_day * 3)
            self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
            sleep_count = 0
            while sleep_count < 10:
                self.sleep(10, "Waiting for Disk Snapshot/s to be available")
                new_num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
                if new_num_snapshot > num_snapshot:
                    self.log.info("New Disk Snapshot is available")
                    break
                sleep_count += 1
            self.assertNotEqual(sleep_count, 10, "No new Disk Snapshot is available")
            num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
            gens_load = self.generate_docs(num_items=self.docs_per_day * 4)
            self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
            sleep_count = 0
            while sleep_count < 10:
                self.sleep(10, "Waiting for Disk Snapshot/s to be available")
                new_num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
                if new_num_snapshot > num_snapshot:
                    self.log.info("New Disk Snapshot is available")
                    break
                sleep_count += 1
            self.assertNotEqual(sleep_count, 10, "No new Disk Snapshot is available")
            # Performing full recovery for fail over Node C
            self.resume_blocked_incoming_network_from_node(node_b, node_c)
            conn.set_recovery_type(otpNode='ns_1@' + node_c.ip, recoveryType="full")
            self.cluster.rebalance(self.servers, [], [])
            # Blocking communication between Node B and Node C
            conn = RestConnection(self.master)
            self.block_incoming_network_from_node(node_b, node_c)
            # Doing some mutation which replica on Node C won't see
            gens_load = self.generate_docs(num_items=self.docs_per_day * 5)
            self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
            # Killing memcached on node C
            remote_client = RemoteMachineShellConnection(node_c)
            remote_client.kill_memcached()
            remote_client.disconnect()
            # Failing over Node C
            num_rollback = conn.get_num_rollback_stat(bucket=bucket_name)
            self.cluster.failover(servers=self.servers, failover_nodes=[node_c])
            # wait for exactly one more rollback to be recorded
            sleep_count = 0
            while sleep_count < 10:
                self.sleep(10, "Waiting for Disk Snapshot/s to be available")
                new_num_rollback = conn.get_num_rollback_stat(bucket=bucket_name)
                if new_num_rollback == num_rollback + 1:
                    self.log.info("Rollbacked to Disk Snapshot")
                    break
                sleep_count += 1
            self.assertNotEqual(sleep_count, 10, "Indexer failed to rollback")
            # Todo: add the assert to check the rollback happened from disk snapshot not from zero
        finally:
            self.resume_blocked_incoming_network_from_node(node_b, node_c)
    def test_restart_timestamp_calculation_for_rollback(self):
        """
        MB-35880
        Case B:
        Can't reproduce it consistently.
        Build several indexes, partition two KV nodes, kill memcached and fail
        over the node the master can still see, then verify all indexes catch
        back up to the full item count after rollback.
        """
        data_nodes = self.get_kv_nodes()
        self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
        # Deleting bucket as there is no easy way in testrunner to create index before loading data
        for bucket in self.buckets:
            self.cluster.bucket_delete(self.master, bucket=bucket)
        # Create default bucket
        default_params = self._create_bucket_params(
            server=self.master, size=self.bucket_size,
            replicas=self.num_replicas, bucket_type=self.bucket_type,
            enable_replica_index=self.enable_replica_index,
            eviction_policy=self.eviction_policy, lww=self.lww,
            maxttl=self.maxttl, compression_mode=self.compression_mode)
        self.cluster.create_default_bucket(default_params)
        # creating Index idx_0
        query_definition = QueryDefinition(
            index_name="idx_0", index_fields=["VMs"],
            query_template="SELECT * FROM %s ", groups=["simple"],
            index_where_clause=" VMs IS NOT NULL ")
        self.load_query_definitions.append(query_definition)
        self.create_index(bucket="default", query_definition=query_definition)
        # loading data to bucket
        gens_load = self.generate_docs(num_items=self.docs_per_day)
        self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
        # creating few more indexes (idx_1 .. idx_3)
        for item in range(1, 4):
            query_definition = QueryDefinition(
                index_name="idx_{0}".format(item), index_fields=["VMs"],
                query_template="SELECT * FROM %s ", groups=["simple"],
                index_where_clause=" VMs IS NOT NULL ")
            self.load_query_definitions.append(query_definition)
            self.create_index(bucket="default", query_definition=query_definition)
        # Checking item_count in all indexes
        self.sleep(timeout=10, message="Allowing indexes to index all item in bucket")
        rest = RestConnection(self.master)
        for item in range(4):
            indexed_item = rest.get_index_stats()["default"]["idx_{0}".format(item)]["items_count"]
            self.assertEqual(indexed_item, self.docs_per_day * 2016, "Failed to index all the item in bucket")
        # pick the first two non-master KV nodes as B and C
        data_nodes = self.get_kv_nodes()
        node_b, node_c = (None, None)
        for node in data_nodes:
            if node.ip == self.master.ip:
                continue
            if not node_b:
                node_b = node
            else:
                node_c = node
                break
        try:
            # Blocking communication between Node B and Node C
            self.block_incoming_network_from_node(node_b, node_c)
            # Mutating docs so that replica on Node C don't see changes on Node B
            gens_load = self.generate_docs(num_items=self.docs_per_day)
            self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
            # killing Memcached on Node B
            remote_client = RemoteMachineShellConnection(node_b)
            remote_client.kill_memcached()
            remote_client.disconnect()
            # Failing over Node B
            self.cluster.failover(servers=self.servers, failover_nodes=[node_b])
            self.sleep(timeout=10, message="Allowing indexer to rollback")
            # Validating that indexer has indexed item after rollback and catch up with items in bucket
            for item in range(4):
                indexed_item = rest.get_index_stats()["default"]["idx_{0}".format(item)]["items_count"]
                # NOTE(review): the '{}' placeholder below is never formatted
                # (missing .format(item)) — message prints a literal '{}'
                self.assertEqual(indexed_item, self.docs_per_day * 2016, "Index {} has failed to index items after"
                                                                         " rollback")
        finally:
            self.resume_blocked_incoming_network_from_node(node_b, node_c)
    def test_recover_index_from_in_memory_snapshot(self):
        """Verify the indexer recovers from its in-memory snapshot (not a disk
        rollback) after a KV node failover: the num_rollback stat must stay
        unchanged, and the index must keep up with new mutations afterwards.

        MB-32102
        MB-35663
        """
        bucket_name = self.buckets[0].name
        index_name = list(self.get_index_map()[bucket_name])[0]
        # Blocking node B firewall
        data_nodes = self.get_kv_nodes()
        self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
        # pick two non-master KV nodes: B will be failed over, C is its peer
        node_b, node_c = (None, None)
        for node in data_nodes:
            if node.ip == self.master.ip:
                continue
            if not node_b:
                node_b = node
            else:
                node_c = node
                break
        # get num_rollback stats before triggering in-memory recovery
        conn = RestConnection(self.master)
        num_rollback_before_recovery = conn.get_num_rollback_stat(bucket=bucket_name)
        try:
            self.block_incoming_network_from_node(node_b, node_c)
            # killing Memcached on Node B
            remote_client = RemoteMachineShellConnection(node_b)
            remote_client.kill_memcached()
            remote_client.disconnect()
            # Failing over Node B
            self.cluster.failover(servers=self.servers, failover_nodes=[node_b])
        finally:
            # resume the communication between node B and node C
            self.resume_blocked_incoming_network_from_node(node_b, node_c)
        # get num_rollback stats after in-memory recovery of indexes;
        # equality means no disk rollback was needed
        num_rollback_after_recovery = conn.get_num_rollback_stat(bucket=bucket_name)
        self.assertEqual(num_rollback_before_recovery, num_rollback_after_recovery,
                         "Recovery didn't happen from in-memory snapshot")
        self.log.info("Node has recovered from in-memory snapshots")
        # Loading few more docs so that indexer will index updated as well as new docs
        gens_load = self.generate_docs(num_items=self.docs_per_day * 2)
        self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
        use_index_query = "select Count(*) from {0} USE INDEX ({1})".format(bucket_name, index_name)
        # consistency_request forces the scan to wait for all mutations
        result = self.n1ql_helper.run_cbq_query(query=use_index_query, server=self.n1ql_node,
                                                scan_consistency=CONSISTENCY_REQUEST)["results"][0]["$1"]
        # 2016 appears to be the docs-per-day multiplier used elsewhere in this
        # suite -- TODO confirm against generate_docs()
        expected_result = self.docs_per_day * 2 * 2016
        self.assertEqual(result, expected_result, "Indexer hasn't recovered properly from in-memory as"
                                                  " indexes haven't catch up with "
                                                  "request_plus/consistency_request")
        self.log.info("Indexer continues to index as expected")
    def test_partial_rollback(self):
        """Force a partial KV rollback (stop persistence, mutate, kill the
        active node's memcached, fail over its peer) and verify the index item
        count still matches the bucket afterwards."""
        self.multi_create_index()
        self.sleep(30)
        self.log.info("Stopping persistence on NodeA & NodeB")
        data_nodes = self.get_nodes_from_services_map(service_type="kv",
                                                      get_all_nodes=True)
        # stop persistence everywhere so the upcoming mutations stay memory-only
        for data_node in data_nodes:
            for bucket in self.buckets:
                mem_client = MemcachedClientHelper.direct_client(data_node, bucket.name)
                mem_client.stop_persistence()
        self.run_doc_ops()
        self.sleep(10)
        # Get count before rollback
        bucket_before_item_counts = {}
        for bucket in self.buckets:
            bucket_count_before_rollback = self.get_item_count(self.master, bucket.name)
            bucket_before_item_counts[bucket.name] = bucket_count_before_rollback
            # NOTE(review): uses the module-level `log` while the rest of this
            # method uses self.log -- presumably both reach the same logger
            log.info("Items in bucket {0} before rollback = {1}".format(
                bucket.name, bucket_count_before_rollback))
        # Index rollback count before rollback
        self._verify_bucket_count_with_index_count()
        self.multi_query_using_index()
        # Kill memcached on Node A so that Node B becomes master
        self.log.info("Kill Memcached process on NodeA")
        shell = RemoteMachineShellConnection(data_nodes[0])
        shell.kill_memcached()
        # Start persistence on Node B
        self.log.info("Starting persistence on NodeB")
        for bucket in self.buckets:
            mem_client = MemcachedClientHelper.direct_client(data_nodes[1], bucket.name)
            mem_client.start_persistence()
        # Failover Node B
        self.log.info("Failing over NodeB")
        self.sleep(10)
        failover_task = self.cluster.async_failover(
            self.servers[:self.nodes_init], [data_nodes[1]], self.graceful,
            wait_for_pending=120)
        failover_task.result()
        # Wait for a couple of mins to allow rollback to complete
        # self.sleep(120)
        bucket_after_item_counts = {}
        for bucket in self.buckets:
            bucket_count_after_rollback = self.get_item_count(self.master, bucket.name)
            bucket_after_item_counts[bucket.name] = bucket_count_after_rollback
            log.info("Items in bucket {0} after rollback = {1}".format(
                bucket.name, bucket_count_after_rollback))
        # equal counts only *suggest* no rollback happened; informational only
        for bucket in self.buckets:
            if bucket_after_item_counts[bucket.name] == bucket_before_item_counts[bucket.name]:
                log.info("Looks like KV rollback did not happen at all.")
        self._verify_bucket_count_with_index_count()
        self.multi_query_using_index()
    def _create_replica_indexes(self):
        """Recreate, on a surviving index node, the indexes that live on the
        index nodes being taken out; updates self.query_definitions in place.

        No-op (returns []) when replicas are disabled or no index nodes are out.
        """
        query_definitions = []
        if not self.use_replica:
            return []
        if not self.index_nodes_out:
            return []
        index_nodes = self.get_nodes_from_services_map(service_type="index",
                                                       get_all_nodes=True)
        # surviving index nodes = all index nodes minus the ones going out
        for node in self.index_nodes_out:
            if node in index_nodes:
                index_nodes.remove(node)
        if index_nodes:
            ops_map = self.generate_operation_map("in_between")
            # only rebuild replicas when the in-between phase won't already
            # be creating indexes itself
            if ("create_index" not in ops_map):
                indexes_lost = self._find_index_lost_when_indexer_down()
                # target the first surviving index node for the replicas
                deploy_node_info = ["{0}:{1}".format(index_nodes[0].ip,
                                                     index_nodes[0].port)]
                for query_definition in self.query_definitions:
                    if query_definition.index_name in indexes_lost:
                        # NOTE(review): mutates the shared definition object's
                        # name before re-creating it on the surviving node
                        query_definition.index_name = query_definition.index_name + "_replica"
                        query_definitions.append(query_definition)
                        for bucket in self.buckets:
                            self.create_index(bucket=bucket,
                                              query_definition=query_definition,
                                              deploy_node_info=deploy_node_info)
                    else:
                        query_definitions.append(query_definition)
        # NOTE(review): if "create_index" IS in ops_map this overwrites
        # self.query_definitions with an empty list -- confirm intended
        self.query_definitions = query_definitions
def _find_index_lost_when_indexer_down(self):
lost_indexes = []
rest = RestConnection(self.master)
index_map = rest.get_index_status()
log.info("index_map: {0}".format(index_map))
for index_node in self.index_nodes_out:
host = "{0}:8091".format(index_node.ip)
for index in index_map.values():
for keys, vals in index.items():
if vals["hosts"] == host:
lost_indexes.append(keys)
log.info("Lost Indexes: {0}".format(lost_indexes))
return lost_indexes
def _run_kvops_tasks(self):
tasks_ops = []
if self.doc_ops:
tasks_ops = self.async_run_doc_ops()
return tasks_ops
def _run_tasks(self, tasks_list):
for tasks in tasks_list:
for task in tasks:
task.result()
|
test_index.py | import os
import multiprocessing as mp
import pytest
import numpy as np
from jina.enums import FlowOptimizeLevel
from jina.executors.indexers.vector import NumpyIndexer
from jina.flow import Flow
from jina.parser import set_flow_parser
from jina.proto import jina_pb2
from jina import Document
from tests import random_docs
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='function')
def test_workspace_index(tmpdir):
    """Expose tmpdir through the JINA_TEST_INDEX env var for the test's duration."""
    os.environ['JINA_TEST_INDEX'] = str(tmpdir)
    yield os.environ['JINA_TEST_INDEX']
    del os.environ['JINA_TEST_INDEX']
@pytest.fixture(scope='function')
def test_workspace_joint(tmpdir):
    """Expose tmpdir through the JINA_TEST_JOINT env var for the test's duration."""
    os.environ['JINA_TEST_JOINT'] = str(tmpdir)
    yield os.environ['JINA_TEST_JOINT']
    del os.environ['JINA_TEST_JOINT']
def get_result(resp):
    """Assert the search response holds exactly 2 docs with 50 matches each."""
    match_ids = np.array([[match.id for match in doc.matches]
                          for doc in resp.search.docs])
    # each doc should return a list of top-100
    np.testing.assert_equal(match_ids.shape[0], 2)
    np.testing.assert_equal(match_ids.shape[1], 50)
class DummyIndexer(NumpyIndexer):
    # add() is a deliberate no-op (it is NOT a copy of NumpyIndexer.add),
    # so tests can exercise save() semantics without writing any vectors
    def add(self, *args, **kwargs):
        pass
class DummyIndexer2(NumpyIndexer):
    # add() reproduces NumpyIndexer's validating add: checks vector rank,
    # dim/dtype consistency and key/vector row counts before writing bytes
    def add(self, keys: 'np.ndarray', vectors: 'np.ndarray', *args, **kwargs):
        # vectors must be 2-D: (num_rows, num_dim)
        if len(vectors.shape) != 2:
            raise ValueError(f'vectors shape {vectors.shape} is not valid, expecting "vectors" to have rank of 2')
        if not self.num_dim:
            # first call: record dimensionality/dtype. NOTE: because this is an
            # elif chain, the keys/row-count and key-dtype checks below are
            # skipped entirely on this first call
            self.num_dim = vectors.shape[1]
            self.dtype = vectors.dtype.name
        elif self.num_dim != vectors.shape[1]:
            raise ValueError(
                "vectors' shape [%d, %d] does not match with indexers's dim: %d" %
                (vectors.shape[0], vectors.shape[1], self.num_dim))
        elif self.dtype != vectors.dtype.name:
            raise TypeError(
                f"vectors' dtype {vectors.dtype.name} does not match with indexers's dtype: {self.dtype}")
        elif keys.shape[0] != vectors.shape[0]:
            raise ValueError('number of key %d not equal to number of vectors %d' % (keys.shape[0], vectors.shape[0]))
        elif self.key_dtype != keys.dtype.name:
            # NOTE(review): key_dtype is only assigned below, so this check
            # compares against the *previous* batch's key dtype -- confirm
            raise TypeError(
                f"keys' dtype {keys.dtype.name} does not match with indexers keys's dtype: {self.key_dtype}")
        # append raw vector bytes and record key bookkeeping
        self.write_handler.write(vectors.tobytes())
        self.key_bytes += keys.tobytes()
        self.key_dtype = keys.dtype.name
        self._size += keys.shape[0]
def test_doc_iters():
    """random_docs() must yield Document instances."""
    for doc in random_docs(3, 5):
        assert isinstance(doc, Document)
def test_simple_route():
    """Index through a single default pod to confirm basic routing works."""
    flow = Flow().add()
    with flow:
        flow.index(input_fn=random_docs(10))
def test_update_method(test_metas):
    """save() must be a no-op until add() marks the indexer as modified."""
    cases = (
        (DummyIndexer, 'testa.bin', ()),
        (DummyIndexer2, 'testb.bin',
         (np.array([1, 2, 3]), np.array([[1, 1, 1], [2, 2, 2]]))),
    )
    for indexer_cls, filename, add_args in cases:
        with indexer_cls(index_filename=filename, metas=test_metas) as indexer:
            # before any add(), saving must not create files
            indexer.save()
            assert not os.path.exists(indexer.save_abspath)
            assert not os.path.exists(indexer.index_abspath)
            indexer.add(*add_args)
            indexer.save()
            # after add(), both artifacts must exist
            assert os.path.exists(indexer.save_abspath)
            assert os.path.exists(indexer.index_abspath)
@pytest.mark.skipif('GITHUB_WORKFLOW' in os.environ, reason='skip the network test on github workflow')
def test_two_client_route_parallel():
    """Two concurrent clients indexing into parallel flows (with and without gateway optimization)."""
    fa1 = set_flow_parser().parse_args(['--optimize-level', str(FlowOptimizeLevel.NONE)])
    f1 = Flow(fa1).add(parallel=3)
    f2 = Flow(optimize_level=FlowOptimizeLevel.IGNORE_GATEWAY).add(parallel=3)

    def start_client(fl):
        fl.index(input_fn=random_docs(10))

    def spawn_two_clients(fl):
        # fire-and-forget daemon clients; the flow context manages teardown
        for _ in range(2):
            proc = mp.Process(target=start_client, args=(fl,))
            proc.daemon = True
            proc.start()

    with f1:
        assert f1.num_peas == 6
        spawn_two_clients(f1)
    with f2:
        # no optimization can be made because we ignored the gateway
        assert f2.num_peas == 6
        spawn_two_clients(f2)
@pytest.mark.skipif('GITHUB_WORKFLOW' in os.environ, reason='skip the network test on github workflow')
def test_two_client_route():
    """Two concurrent clients indexing into one single-pod flow."""
    def start_client(fl):
        fl.index(input_fn=random_docs(10))

    with Flow().add() as f:
        clients = [mp.Process(target=start_client, args=(f,)) for _ in range(2)]
        for client in clients:
            client.daemon = True
            client.start()
def test_index(test_workspace_index):
    """Index with 3 parallel peas using separated workspaces and verify each
    pea produced its own index artifacts on disk."""
    f = Flow().add(uses=os.path.join(cur_dir, 'yaml/test-index.yml'), parallel=3, separated_workspace=True)
    with f:
        f.index(input_fn=random_docs(50))
    for j in range(3):
        # BUGFIX: the first assert used to be `assert os.path.join(...)`,
        # which is a non-empty string and therefore always truthy -- it never
        # checked anything. Check for file existence like the line below does.
        assert os.path.exists(os.path.join(test_workspace_index, f'test2-{j + 1}/test2.bin'))
        assert os.path.exists(os.path.join(test_workspace_index, f'test2-{j + 1}/tmp2'))
def test_compound_idx(test_workspace_joint, mocker):
    """Index then search through a compound indexer, validating every response."""
    def validate(req):
        assert req.status.code < jina_pb2.StatusProto.ERROR
        assert req.search.docs[0].matches[0].score.op_name == 'NumpyIndexer'

    with Flow().add(uses=os.path.join(cur_dir, 'yaml/test-joint.yml')) as f:
        f.index(random_docs(100, chunks_per_doc=0))
    # BUGFIX: `Mock(wrap=validate)` merely sets a `.wrap` attribute on the
    # mock (Mock treats unknown kwargs as attributes), so validate() was never
    # executed. The correct keyword is `wraps=`, which makes calls to the mock
    # actually run the validation function.
    response_mock = mocker.Mock(wraps=validate)
    with Flow().add(uses=os.path.join(cur_dir, 'yaml/test-joint.yml')) as g:
        g.search(random_docs(10, chunks_per_doc=0), on_done=response_mock)
    response_mock.assert_called()
|
penv.py | from multiprocessing import Process, Pipe
import gym
import numpy as np
def worker(conn, env):
    """Serve "step"/"reset" commands arriving on conn against env, forever.

    A "step" whose episode ends resets the env and reports the fresh
    observation in place of the terminal one. Any other command raises
    NotImplementedError.
    """
    while True:
        cmd, payload = conn.recv()
        if cmd == "reset":
            conn.send(env.reset())
        elif cmd == "step":
            obs, reward, done, info = env.step(payload)
            if done:
                obs = env.reset()
            conn.send((obs, reward, done, info))
        else:
            raise NotImplementedError
class ParallelEnv(gym.Env):
    """A concurrent execution of environments in multiple processes.

    The first env in `envs` runs in the calling process; each remaining env
    gets its own worker process reachable through a Pipe in self.locals.
    """
    def __init__(self, envs):
        assert len(envs) >= 1, "No environment given."
        self.envs = envs
        # spaces are taken from the first env; assumes all envs share them
        self.observation_space = self.envs[0].observation_space
        self.action_space = self.envs[0].action_space
        self.locals = []
        for env in self.envs[1:]:
            local, remote = Pipe()
            p = Process(target=worker, args=(remote, env))
            # NOTE(review): daemon flag intentionally (?) disabled -- worker
            # processes will outlive the parent unless terminated explicitly
            # p.daemon = True
            p.start()
            # parent closes its copy of the remote end so only the worker holds it
            remote.close()
            self.locals.append(local)
    def reset(self):
        # ask all workers to reset, then reset the local env and gather replies
        for local in self.locals:
            local.send(("reset", None))
        results = [self.envs[0].reset()] + [local.recv()
                                            for local in self.locals]
        return results
    def step(self, joint_actions):
        # send all actions and envs to the worker except the first
        # (joint_actions is indexed so that column i holds env i's actions --
        # presumably shape (agents, envs); TODO confirm with callers)
        for local, actions in zip(self.locals, (np.array(joint_actions).T)[1:]):
            local.send(("step", actions))
        # execute the first action on the first env manually
        obs, reward, done, info = self.envs[0].step(
            np.array(joint_actions)[:, 0])
        if done:
            obs = self.envs[0].reset()
        # combine parallel envs back into results
        # (zip(*...) transposes per-env tuples into per-field tuples)
        results = zip(*[(obs, reward, done, info)] + [local.recv()
                                                      for local in self.locals])
        return results
    def render(self):
        raise NotImplementedError
|
exe.py | """WizardKit: Execution functions"""
#vim: sts=2 sw=2 ts=2
import json
import logging
import os
import re
import subprocess
import time
from threading import Thread
from queue import Queue, Empty
import psutil
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
# Classes
class NonBlockingStreamReader():
    """Class to allow non-blocking reads from a stream.

    A background daemon thread drains the stream one byte at a time into a
    Queue; read() pulls from the queue without ever blocking on the stream.
    """
    # pylint: disable=too-few-public-methods
    # Credits:
    ## https://gist.github.com/EyalAr/7915597
    ## https://stackoverflow.com/a/4896288
    def __init__(self, stream):
        # stream: a file-like object, typically a Popen stdout/stderr pipe
        self.stream = stream
        self.queue = Queue()
        def populate_queue(stream, queue):
            """Collect lines from stream and put them in queue."""
            # byte-at-a-time so partial output is visible immediately;
            # loop ends once the stream has been closed
            while not stream.closed:
                try:
                    line = stream.read(1)
                except ValueError:
                    # Assuming the stream was closed
                    line = None
                if line:
                    queue.put(line)
        # reader runs as a daemon thread (see start_thread below)
        self.thread = start_thread(
            populate_queue,
            args=(self.stream, self.queue),
        )
    def stop(self):
        """Stop reading from input stream."""
        self.stream.close()
    def read(self, timeout=None):
        """Read from queue if possible, returns item from queue.

        Returns None when nothing arrives within timeout seconds
        (immediately when timeout is None).
        """
        try:
            return self.queue.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
    def save_to_file(self, proc, out_path):
        """Continuously save output to file while proc is running."""
        LOG.debug('Saving process %s output to %s', proc, out_path)
        while proc.poll() is None:
            out = b''
            out_bytes = b''
            # drain everything currently queued, polling 0.1s per chunk
            while out is not None:
                out = self.read(0.1)
                if out:
                    out_bytes += out
            with open(out_path, 'a', encoding='utf-8') as _f:
                _f.write(out_bytes.decode('utf-8', errors='ignore'))
        # Close stream to prevent 100% CPU usage
        self.stream.close()
# Functions
def build_cmd_kwargs(cmd, minimized=False, pipe=True, shell=False, **kwargs):
    """Build kwargs for use by subprocess functions, returns dict.

    Specifically subprocess.run() and subprocess.Popen().

    NOTE: If no encoding specified then UTF-8 will be used.
    """
    LOG.debug(
        'cmd: %s, minimized: %s, pipe: %s, shell: %s, kwargs: %s',
        cmd, minimized, pipe, shell, kwargs,
    )
    cmd_kwargs = {'args': cmd, 'shell': shell}
    # Strip a leading 'sudo' when we are already running as root
    if cmd[0] == 'sudo' and os.name == 'posix' and os.geteuid() == 0:  # pylint: disable=no-member
        cmd.pop(0)
    # Forward the whitelisted pass-through options
    cmd_kwargs.update(
        (key, kwargs[key])
        for key in ('check', 'cwd', 'encoding', 'errors', 'stderr', 'stdin', 'stdout')
        if key in kwargs
    )
    # Default to UTF-8 text mode unless the caller picked an encoding/error policy
    if 'encoding' not in cmd_kwargs and 'errors' not in cmd_kwargs:
        cmd_kwargs['encoding'] = 'utf-8'
        cmd_kwargs['errors'] = 'ignore'
    # Start minimized (Windows-only STARTUPINFO; wShowWindow 6 == SW_MINIMIZE)
    if minimized:
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = 6
        cmd_kwargs['startupinfo'] = startupinfo
    # Pipe output
    if pipe:
        cmd_kwargs['stderr'] = subprocess.PIPE
        cmd_kwargs['stdout'] = subprocess.PIPE
    LOG.debug('cmd_kwargs: %s', cmd_kwargs)
    return cmd_kwargs
def get_json_from_command(cmd, check=True, encoding='utf-8', errors='ignore'):
    """Capture JSON content from cmd output, returns dict.

    If the data can't be decoded then either an exception is raised
    or an empty dict is returned depending on errors.
    """
    LOG.debug('Loading JSON data from cmd: %s', cmd)
    try:
        proc = run_program(cmd, check=check, encoding=encoding, errors=errors)
        return json.loads(proc.stdout)
    except (subprocess.CalledProcessError, json.decoder.JSONDecodeError):
        # NOTE: `errors` doubles as the decode-error policy and the
        # raise-vs-ignore switch for command/JSON failures
        if errors != 'ignore':
            raise
    return {}
def get_procs(name, exact=True, try_again=True):
    """Get process object(s) based on name, returns list of proc objects."""
    LOG.debug('name: %s, exact: %s', name, exact)
    pattern = f'^{name}$' if exact else name
    processes = [proc for proc in psutil.process_iter()
                 if re.search(pattern, proc.name(), re.IGNORECASE)]
    # One retry after a short pause, in case the process is still spawning
    if not processes and try_again:
        time.sleep(1)
        processes = get_procs(name, exact, try_again=False)
    return processes
def kill_procs(name, exact=True, force=False, timeout=30):
    """Kill all processes matching name (case-insensitively).

    NOTE: Under Posix systems this will send SIGINT to allow processes
    to gracefully exit.

    If force is True then it will wait until timeout specified and then
    send SIGKILL to any processes still alive.
    """
    LOG.debug(
        'name: %s, exact: %s, force: %s, timeout: %s',
        name, exact, force, timeout,
    )
    target_procs = get_procs(name, exact=exact)
    for proc in target_procs:
        proc.terminate()
    if not force:
        return
    # Force kill whatever survived the grace period
    _gone, alive = psutil.wait_procs(target_procs, timeout=timeout)
    for proc in alive:
        proc.kill()
def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
    """Run program and return a subprocess.Popen object."""
    LOG.debug(
        'cmd: %s, minimized: %s, pipe: %s, shell: %s',
        cmd, minimized, pipe, shell,
    )
    LOG.debug('kwargs: %s', kwargs)
    popen_kwargs = build_cmd_kwargs(
        cmd, minimized=minimized, pipe=pipe, shell=shell, **kwargs)
    try:
        # pylint: disable=consider-using-with
        proc = subprocess.Popen(**popen_kwargs)
    except FileNotFoundError:
        LOG.error('Command not found: %s', cmd)
        raise
    LOG.debug('proc: %s', proc)
    return proc
def run_program(cmd, check=True, pipe=True, shell=False, **kwargs):
    # pylint: disable=subprocess-run-check
    """Run program and return a subprocess.CompletedProcess object."""
    LOG.debug(
        'cmd: %s, check: %s, pipe: %s, shell: %s',
        cmd, check, pipe, shell,
    )
    LOG.debug('kwargs: %s', kwargs)
    run_kwargs = build_cmd_kwargs(
        cmd, check=check, pipe=pipe, shell=shell, **kwargs)
    try:
        proc = subprocess.run(**run_kwargs)
    except FileNotFoundError:
        LOG.error('Command not found: %s', cmd)
        raise
    LOG.debug('proc: %s', proc)
    return proc
def start_thread(function, args=None, daemon=True):
    """Run function as thread in background, returns Thread object."""
    LOG.debug(
        'Starting background thread for function: %s, args: %s, daemon: %s',
        function, args, daemon,
    )
    thread = Thread(target=function, args=args or [], daemon=daemon)
    thread.start()
    return thread
def stop_process(proc, graceful=True):
    """Stop process.

    NOTES: proc should be a subprocess.Popen obj.
    If graceful is True then a SIGTERM is sent before SIGKILL.
    """
    # Non-root posix users go through sudo kill; everyone else uses Popen methods
    need_sudo = os.name == 'posix' and os.geteuid() != 0  # pylint: disable=no-member
    if graceful:
        # Polite request first, then give the process a moment to exit
        if need_sudo:
            run_program(['sudo', 'kill', str(proc.pid)], check=False)
        else:
            proc.terminate()
        time.sleep(2)
    # Force exit
    if need_sudo:
        run_program(['sudo', 'kill', '-9', str(proc.pid)], check=False)
    else:
        proc.kill()
def wait_for_procs(name, exact=True, timeout=None):
    """Wait for all process matching name."""
    LOG.debug('name: %s, exact: %s, timeout: %s', name, exact, timeout)
    _gone, alive = psutil.wait_procs(get_procs(name, exact=exact), timeout=timeout)
    # Raise if anything is still running after the timeout
    if alive:
        raise psutil.TimeoutExpired(name=name, seconds=timeout)
# Library module: running it directly only prints a notice.
if __name__ == '__main__':
    print("This file is not meant to be called directly.")
|
Thread_join.py | #join() blocks indefinitely
import threading
import time
import logging
def daemon():
    """Print a start marker, sleep 2s, then print an exit marker.

    Intended to run on a daemon thread so the sleep can be abandoned at
    interpreter exit.
    """
    # BUGFIX: `print 'Daemon'` is Python-2-only syntax (SyntaxError on
    # Python 3); the parenthesized call form behaves the same on both.
    print('Daemon')
    time.sleep(2)
    print('Exit Daemon')
def worker():
    """Print start/exit markers and return immediately (non-daemon thread body)."""
    # BUGFIX: converted Python-2 print statements to the call form, which is
    # valid on both Python 2 and 3.
    print('Starting Worker')
    print('Exiting Worker')
# Demonstrates that join() blocks until each thread finishes: the daemon
# join waits out its 2s sleep even though daemon threads would otherwise be
# abandoned at interpreter exit.
d = threading.Thread(name='Daemon', target=daemon)
# daemon attribute replaces the deprecated setDaemon() call
d.daemon = True
# BUGFIX: thread name was misspelled 'Wokr'
w = threading.Thread(name='Worker', target=worker)
d.start()
w.start()
d.join()
w.join()
|
model.py | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import re
import threading
import time
import gocept.cache.method as cache
import requests
log = logging.getLogger()
# we only want data for these transport types; error entries are skipped
TYPES = set(['Buses', 'Metros', 'Trains', 'Trams'])
# the per-departure fields extracted from the trafiklab response
VALUES_OF_INTEREST = (u'TransportMode',
                      u'StationName',
                      u'LineNumber',
                      u'Destination',
                      u'DisplayTime',
                      u'GroupOfLine')
# combined regex for metro-style and train-style displayrows
# matches both HH:MM and MM min style times
# groups: (1) line number, (2) destination word, (3) time expression
DISPLAY_NAME_RE = re.compile(
    r'^([0-9]+) +([a-zA-ZåäöÅÄÖ\.]+) *([0-9]+[:0-9]* ?[min]*\.?) *,?')
DEPARTURE_URL_TEMPLATE = 'http://api.sl.se/api2/realtimedeparturesV4.json?key=%s&siteid=%s&timewindow=60'
STATION_URL_TEMPLATE = 'https://api.sl.se/api2/typeahead.json?key=%s&searchstring=%s&stationsonly=true&maxresults=1'
# long-distance trains ("Fjärrtåg") are filtered out of results
BANNED_DESTINATIONS = set([u'Fjärrtåg'])
# station id -> (timestamp, departures); entries evicted by reap_cache()
cached_data = {}
def reap_cache():
    """
    Cache eviction loop: every minute, delete cached station entries older
    than 15 minutes so cached_data cannot grow without bound.
    """
    while True:
        time.sleep(60)
        now = get_now()
        count = 0
        # snapshot with list(): Python 3 raises RuntimeError when a dict is
        # mutated (del below) while being iterated
        for station, (timestamp, _data) in list(cached_data.items()):
            # BUGFIX: age is `now - timestamp`; the original computed
            # `timestamp - now`, which is always negative, so no entry was
            # ever evicted and the cache leaked indefinitely.
            if now - timestamp > datetime.timedelta(minutes=15):
                count += 1
                del cached_data[station]
        log.info('Reaper evicted %d entries from cache' % count)
# Start the eviction loop at import time; daemon=True so the thread never
# blocks interpreter shutdown.
reaper = threading.Thread(target=reap_cache)
reaper.daemon = True
reaper.start()
class ApiException(Exception):
    """Raised when a trafiklab API call fails or returns unusable data."""
    pass
def compile_whitelist(args):
    """
    Helper function to compile a whitelist from the argument dict.

    The whitelist maps transport_type => set([lines]):
        {'Trams': set(['10', '20'])}
    """
    return {
        transport: set(args[transport.lower()].split(','))
        for transport in TYPES
        if transport.lower() in args
    }
def parse_displayrow(text):
    """
    Helper function to parse the text of the actual SL displays.

    Examples:
        10 Hjulsta 8 min. => {u'linenumber': '10',
                              u'destination': 'Hjulsta',
                              u'displaytime': '8 min.'}

    Returns a (possibly empty) list of such dicts; an empty list when the
    displayrow payload is an (empty) dict instead of a string.
    """
    # sometimes displayrows are empty dicts
    if isinstance(text, dict):
        return []
    data = []
    # each line can contain more than one line/destination/time tuple:
    # iterate over each match, strip the matched prefix and try again
    while len(text) > 0:
        match = DISPLAY_NAME_RE.match(text)
        if not match:
            # CONSISTENCY FIX: use the module logger `log` like the rest of
            # this module (was the root logger via logging.debug)
            log.debug('Display row mismatch: %s' % text)
            break
        data.append({
            u'linenumber': match.group(1),
            u'destination': match.group(2),
            u'displaytime': match.group(3).strip(),
        })
        text = text[len(match.group(0)):].strip()
    return data
def get_now():
    """Return the current local time as a datetime ("now", isolated for testing)."""
    return datetime.datetime.now()
def convert_time(time):
    """
    Helper function to convert the displaytime strings to
    an actual integer minute representation.

    Examples:
        'Nu' => 0
        '8 min.' => 8
        '12:22' (at the time of 12:18) => 4
        '9' => 9
        unparsable input (e.g. '-') => -1
    """
    if 'min' in time:
        candidate = time.replace('min', '').replace('.', '').strip()
    elif 'Nu' in time:
        candidate = 0
    elif ':' in time:
        # floor "now" to the whole minute before diffing
        now = get_now().replace(second=0, microsecond=0)
        hour, minute = time.split(':')
        departure = now.replace(hour=int(hour), minute=int(minute))
        # 00.00 wraparound?
        if departure < now:
            departure += datetime.timedelta(days=1)
        candidate = round((departure - now).total_seconds() / 60.0)
    else:
        candidate = time
    try:
        return int(candidate)
    except ValueError:
        return -1
def parse_json_response(text, whitelist=None):
    """
    Parses the JSON response from trafiklab and returns
    a normalized list of departures as dictioneries.
    Each dictionary contains fields for each departure.
    The order of the list is undefined.
    Note that the time field is an integer and in minutes.

    Example output:
     [{ u'destination': u'Kungsträdg.',
        u'displaytime': u'5 min',
        u'groupofline': u'Tunnelbanans blå linje',
        u'linenumber': u'10',
        u'stationname': u'Sundbybergs centrum',
        u'time': 5,
        u'transportmode': u'METRO'},
       {u'destination': u'Kungsträdg.',
        u'displaytime': u'5 min',
        u'groupofline': u'Tunnelbanans blå linje',
        u'linenumber': u'10',
        u'stationname': u'Sundbybergs centrum',
        u'time': 5}]
    """
    if whitelist is None:
        whitelist = {}
    jdata = json.loads(text)
    data = []
    # iterate over buses, trains, trams etc
    for transport_type, transport in jdata.get(u'ResponseData', {}).items():
        # Metros/Metro sub iteration
        if transport_type in TYPES and transport:
            for item in transport:
                # copy only the whitelisted fields, lower-cased keys
                row = {}
                for value in VALUES_OF_INTEREST:
                    if value in item:
                        row[value.lower()] = item[value]
                # NOTE(review): the lookups below assume LineNumber,
                # Destination and DisplayTime are always present in each
                # item; missing fields would raise KeyError -- confirm
                # against the trafiklab API contract
                # if we have a whitelist, skip if not in it
                if transport_type in whitelist and \
                   row[u'linenumber'] not in whitelist[transport_type]:
                    continue
                # filter out banned destinations
                if row[u'destination'] in BANNED_DESTINATIONS:
                    continue
                row[u'time'] = convert_time(row[u'displaytime'])
                # there's no point in displaying these is there?
                # (also drops the -1 sentinel from unparsable times)
                if 0 <= row[u'time'] < 100:
                    data.append(row)
    return data
def parse_json_site_response(text):
    """
    Helper function to parse and extract the station name from the
    trafiklab JSON site response.
    """
    sites = json.loads(text).get(u'ResponseData', {})
    return [{u'name': site['Name']}
            for site in sites
            if site.get(u'Type') == 'Station']
@cache.Memoize(60)
def query_trafiklab(url):
    """
    Helper function for querying the trafiklab HTTP APIs.
    Responses are memoized for 60 seconds.
    """
    response = requests.get(url)
    if response.status_code != 200:
        raise ApiException('Error while querying the trafiklab API')
    return response.text
def get_departure(url_template, station, key, whitelist=None):
    """
    Helper function to get the parsed response for the given
    URL, station and departure API key.
    """
    raw = query_trafiklab(url_template % (key, station))
    return parse_json_response(raw, whitelist)
def handle_flapping_displays(station, data, cached_data):
    """
    Function for finding out which (if any) of the cached departures
    should actually be in the current data list, but are hidden
    since their displayrows flapped with warnings about pickpockets.

    Returns the cached METRO departures (with ages adjusted) that have no
    matching entry in the fresh `data` list.
    """
    timestamp, old_data = cached_data.get(station, (None, None))
    keep = []
    def calc_dt(ts):
        # whole minutes elapsed since ts
        return int(round((get_now() - ts).total_seconds() / 60.0))
    if timestamp is not None:
        # convert and round the time diff to minute integer
        dt = calc_dt(timestamp)
        for old_d in old_data:
            # if the departure has already left, lets not care
            if old_d[u'transportmode'] == u'METRO' and old_d[u'time'] > dt:
                # calculate deltatime more or less accurately?
                # lists are expected to be very short, (2-4 elements)
                # so it's ok to O(n^2) here
                # find the cached departure in the new list:
                for d in data:
                    # we have the departure in our new list, dont do anything
                    # allow +/- 1 minute difference since we'll accumulate
                    # errors in the delta calc
                    if d[u'destination'] == old_d[u'destination'] and \
                       d[u'linenumber'] == old_d[u'linenumber'] and \
                       -2 < d[u'time'] + dt - old_d[u'time'] < 2:
                        break
                else:
                    # for-else: no live match found, so this display flapped --
                    # resurrect the cached entry with its age re-derived from
                    # when we first saw it
                    # hey, we used to have this and now we dont
                    if u'firstseen' not in old_d:
                        old_d[u'firstseen'] = timestamp
                        old_d[u'firsttime'] = old_d[u'time']
                    old_d[u'time'] = old_d[u'firsttime'] - \
                        calc_dt(old_d[u'firstseen'])
                    keep.append(old_d)
    return keep
def get_departures(station, key, whitelist=None):
    """
    Returns a list of all departures for the given station.
    Each element describes a departure, encoded as a dictionary.
    The list is ordered by departure time, ascending order.
    The API key needs to be a valid trafiklab departure API key.
    """
    departures = get_departure(DEPARTURE_URL_TEMPLATE, station, key, whitelist)
    # see if we have cached older entries which are still relevant
    departures += handle_flapping_displays(station, departures, cached_data)
    # sort on time to departure
    departures.sort(key=lambda d: d['time'])
    cached_data[station] = (get_now(), departures)
    return departures
@cache.Memoize(24 * 60 * 60)
def get_station_name(station, key):
    """
    Returns the name of the given station ID (memoized for 24h).
    The API key needs to be a valid trafiklab platsuppslagnings API key.
    """
    resp = query_trafiklab(STATION_URL_TEMPLATE % (key, station))
    sites = parse_json_site_response(resp)
    if not sites:
        raise ApiException('Site name response from trafiklab was empty')
    return sites[0][u'name']
|
wxStocks_gui.py | #################################################################
# Most of the wxPython in wxStocks is located here. #
# Table of Contents: #
# 1: Imports and line_number function #
# 2: Main Frame, where tab order is placed. #
# 3: Tabs #
# 4: Grid objects #
#################################################################
import wx, numpy
import config, threading, logging, sys, time, os, math, webbrowser, calendar, datetime
import inspect
import pprint as pp
from collections import namedtuple
from wx.lib import sheet
from wxStocks_modules.wxStocks_classes import Stock, Account, SpreadsheetCell, SpreadsheetRow, PageReference, FunctionPage, ResearchPageRowDataList, StockBuyDialog
from wxStocks_modules.wxStocks_default_functions import default_function_page_object_config as functions_config
from wxStocks_modules import wxStocks_db_functions as db
from wxStocks_modules import wxStocks_utilities as utils
from wxStocks_modules import wxStocks_scrapers as scrape
from wxStocks_modules import wxStocks_meta_functions as meta
from wxStocks_modules import wxStocks_gui_position_index as gui_position
from wxStocks_modules import wxStocks_functions_that_process_user_functions as process_user_function
import user_data.user_functions.wxStocks_screen_functions as screen
import AAII.wxStocks_aaii_xls_data_importer as aaii
class MainFrame(wx.Frame): # reorder tab postions here
    """Top-level wxStocks frame.

    Builds the notebook with every page tab, then registers each page in
    config.GLOBAL_PAGES_DICT keyed by a decimal index and by its title.
    """
    def __init__(self, *args, **kwargs):
        start = time.time()
        wx.Frame.__init__(self, parent = None, id = wx.ID_ANY, title="wxStocks", pos = wx.DefaultPosition, size = gui_position.MainFrame_size)
        self.SetSizeHints(gui_position.MainFrame_SetSizeHints[0],gui_position.MainFrame_SetSizeHints[1])
        self.title = "wxStocks"
        self.uid = config.MAIN_FRAME_UNIQUE_ID
        # Here we create a panel and a notebook on the panel
        main_frame = wx.Panel(self)
        self.notebook = wx.Notebook(main_frame)
        # create the page windows as children of the notebook
        # add the pages to the notebook with the label to show on the tab
        self.welcome_page = WelcomePage(self.notebook)
        self.notebook.AddPage(self.welcome_page, self.welcome_page.title)
        self.get_data_page = GetDataPage(self.notebook)
        self.notebook.AddPage(self.get_data_page, self.get_data_page.title)
        self.portfolio_page = PortfolioPage(self.notebook)
        self.notebook.AddPage(self.portfolio_page, self.portfolio_page.title)
        self.view_data_page = ViewDataPage(self.notebook)
        self.notebook.AddPage(self.view_data_page, self.view_data_page.title)
        self.analyse_page = AnalysisPage(self.notebook)
        self.notebook.AddPage(self.analyse_page, self.analyse_page.title)
        self.research_page = ResearchPage(self.notebook)
        self.notebook.AddPage(self.research_page, self.research_page.title)
        self.sale_prep_page = SalePrepPage(self.notebook)
        self.notebook.AddPage(self.sale_prep_page, self.sale_prep_page.title)
        self.trade_page = TradePage(self.notebook)
        self.notebook.AddPage(self.trade_page, self.trade_page.title)
        self.user_functions_page = UserFunctionsMetaPage(self.notebook)
        self.notebook.AddPage(self.user_functions_page, self.user_functions_page.title)
        # finally, put the notebook in a sizer for the panel to manage
        # the layout
        sizer = wx.BoxSizer()
        sizer.Add(self.notebook, 1, wx.EXPAND)
        main_frame.SetSizer(sizer)
        # here we add all pages to the config.GLOBAL_PAGES_DICT, adding both the index and the title as key values.
        self.set_config_GLOBAL_PAGES_DICT_key_value_pairs(self.notebook)
        config.GLOBAL_PAGES_DICT[self.uid] = self
        config.GLOBAL_PAGES_DICT["0"] = self
        config.GLOBAL_PAGES_DICT[self.title] = self
        finish = time.time()
        self.startup_time = finish-start
        logging.info("done.\n\n------------------------- wxStocks startup complete: {} seconds -------------------------\n".format(round(self.startup_time)))
        db.load_GLOBAL_STOCK_DICT_into_active_memory()
    def set_config_GLOBAL_PAGES_DICT_key_value_pairs(self, notebook, parent_index = None):
        """Recursively register notebook pages in config.GLOBAL_PAGES_DICT.

        Pages are keyed by a dotted decimal index (tab 2's third sub-tab
        becomes "2.3") and by their title; main pages additionally by uid.
        """
        for child in notebook.Children:
            # sadly, have to use ordinals which is why + 1.
            page_index = notebook.Children.index(child) + 1.
            if parent_index:
                # BUGFIX: the `> 100` guard used to come *after* `> 10` in the
                # elif chain, making it unreachable; check the larger bound first.
                if len(notebook.Children) > 100:
                    logging.error("Error: far too many tabs in {}".format(notebook))
                    sys.exit()
                elif len(notebook.Children) > 10:
                    denominator = 100.
                else:
                    denominator = 10.
                # create formatted indexes
                if float(parent_index).is_integer():
                    page_index = float(int(parent_index) + (page_index/denominator))
                else: # it already has decimals
                    page_index = float(str(parent_index) + str(page_index/denominator).replace("0.", ""))
            if float(page_index).is_integer():
                page_index = int(page_index)
            page = PageReference(child.title, index = page_index, obj = child)
            config.GLOBAL_PAGES_DICT[str(page_index)] = page
            page_index = float(page_index)
            config.GLOBAL_PAGES_DICT[child.title] = page
            try: # main page (find these in the config file)
                config.GLOBAL_PAGES_DICT[child.uid] = page
                page.uid = child.uid
            except AttributeError: # user created subsection: no .uid attribute
                pass
            #config.PAGES_DICT[child.title] = child
            # recurse into nested notebooks (panel -> notebook children)
            if child.Children:
                for grandchild in child.Children:
                    if type(grandchild) is wx._core.Panel:
                        for great_grandchild in grandchild.Children:
                            if type(great_grandchild) is wx._core.Notebook:
                                self.set_config_GLOBAL_PAGES_DICT_key_value_pairs(great_grandchild, parent_index = page_index)
class Tab(wx.Panel):
    """Minimal common base class for every notebook page in the app."""

    def __init__(self, parent):
        super().__init__(parent)
# ###################### wx tabs #########################################################
class WelcomePage(Tab):
    """Landing tab: shows the work-flow instructions, an optional
    password-reset form (created only when encryption is available), and a
    button that deletes all saved stock data.

    Fixes vs. the previous revision: user-facing typos corrected
    ("insturction", "it's", "your prefer", "many view", "successfully
    change"), the bare except around int() narrowed to ValueError, and the
    four-fold repeated field-clearing code factored into
    _clear_password_fields().
    """
    def __init__(self, parent):
        self.title = "Welcome"
        self.uid = config.WELCOME_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        welcome_page_text = wx.StaticText(self, -1,
            "Welcome to wxStocks",
            gui_position.WelcomePage.welcome_page_text
            )
        instructions_text = '''
Instructions: this program is essentially a work-flow following the tabs above.
---------------------------------------------------------------------------------------------------------------------------------
Welcome:\t\t\t\t\t\tGeneral instruction and password reset.
Import Data:
\tDownload Tickers:\t\t\tThis page is where you download ticker .CSV files to create a list of tickers to scrape.
\tScrape YQL:\t\t\t\t\tThis page takes all tickers, and then scrapes current stock data using them.
\tImport Data Spreadsheets:\t\tThis page allows you to import your own spreadsheets. You must first create functions in its Edit Functions tab.
Portfolios:\t\t\t\t\t\tThis page allows you to load your portfolios from which you plan on making trades.
\t\t\t\t\t\t\t\tIf you have more than one portfolio you plan on working from, you may add more.
View Data:
\tView All Stocks:\t\t\t\tThis page generates a list of all stocks that have been scraped and presents all the data about them.
\t\t\t\t\t\t\t\t- Use this page to double check your data to make sure it's accurate and up to date.
\tView One Stock:\t\t\t\tThis page allows you to look at all the data associated with one stock.
\t\t\t\t\t\t\t\t- Here you will find the attributes you may use in programming your own functions involving individual stocks.
Analyse Data:
\tScreen:\t\t\t\t\t\tThis page allows you to screen for stocks that fit your criteria, and save them for later.
\tSaved Screens:\t\t\t\tThis page allows you to recall old screens you've saved.
\tRank:\t\t\t\t\t\tThis page allows you to rank stocks along certain criteria.
\tCustom Analysis:\t\t\t\tThis page allows you to execute your own custom analysis.
\t\t\t\t\t\t\t\t- You can learn about programming and interfacing with wxStocks to do your own analysis in the Edit Functions section.
Research:\t\t\t\t\t\tDo your homework! This page allows you to easily access data for stocks you intend to buy or sell.
\t\t\t\t\t\t\t\tYou can add different buttons in the config page if you have other sources you prefer.
Sale Prep:\t\t\t\t\t\tThis page allows you to estimate the amount of funds generated from a potential stock sale.
Trade:\t\t\t\t\t\t\tThis page (currently not functional) takes the stocks you plan to sell, estimates the amount of money generated,
\t\t\t\t\t\t\t\tand lets you estimate the volume of stocks to buy to satisfy your diversification requirements.
Edit Functions:\t\t\t\t\tHere you may view/edit/restore user created functions. You may also edit them in your own text editor.
'''
        self.instructions = wx.StaticText(self, -1,
            instructions_text,
            gui_position.WelcomePage.instructions
            )
        self.reset_password_button = None
        self.reset_password_button_horizontal_position = gui_position.WelcomePage.reset_password_button[0]
        self.reset_password_button_vertical_position = gui_position.WelcomePage.reset_password_button[1]
        # NOTE(review): the password-reset widgets below are assumed to be
        # created only when encryption is possible (the handlers that use
        # them are bound inside this branch) — confirm against upstream.
        if config.ENCRYPTION_POSSIBLE:
            self.reset_password_button = wx.Button(self, label="Reset Password", pos=(self.reset_password_button_horizontal_position, self.reset_password_button_vertical_position), size=(-1,-1))
            self.reset_password_button.Bind(wx.EVT_BUTTON, self.resetPasswordPrep, self.reset_password_button)
            # Layout offsets for the (initially hidden) password form.
            text_field_offset = gui_position.WelcomePage.text_field_offset
            text_field_vertical_offset = gui_position.WelcomePage.text_field_vertical_offset
            text_field_vertical_offset_small_bump = gui_position.WelcomePage.text_field_vertical_offset_small_bump
            text_field_vertical_offset_medium_bump = gui_position.WelcomePage.text_field_vertical_offset_medium_bump
            text_field_vertical_offset_large_bump = gui_position.WelcomePage.text_field_vertical_offset_large_bump
            current_password_text = "Current Password:"
            self.current_password_static_text = wx.StaticText(self, -1, current_password_text,
                (self.reset_password_button_horizontal_position,
                self.reset_password_button_vertical_position + text_field_vertical_offset_small_bump))
            self.current_password_field = wx.TextCtrl(self, -1, "",
                (self.reset_password_button_horizontal_position + text_field_offset,
                self.reset_password_button_vertical_position + text_field_vertical_offset + text_field_vertical_offset_small_bump),
                style=wx.TE_PASSWORD ) #| wx.TE_PROCESS_ENTER)
            new_password_text = "New Password:"
            self.new_password_static_text = wx.StaticText(self, -1, new_password_text,
                (self.reset_password_button_horizontal_position,
                self.reset_password_button_vertical_position + text_field_vertical_offset_medium_bump))
            self.new_password_field = wx.TextCtrl(self, -1, "",
                (self.reset_password_button_horizontal_position + text_field_offset,
                self.reset_password_button_vertical_position + text_field_vertical_offset + text_field_vertical_offset_medium_bump),
                style=wx.TE_PASSWORD ) #| wx.TE_PROCESS_ENTER)
            confirm_password_text = "Confirm New Password:"
            self.confirm_password_static_text = wx.StaticText(self, -1, confirm_password_text,
                (self.reset_password_button_horizontal_position,
                self.reset_password_button_vertical_position + text_field_vertical_offset_large_bump))
            self.confirm_new_password_field = wx.TextCtrl(self, -1, "",
                (self.reset_password_button_horizontal_position + text_field_offset,
                self.reset_password_button_vertical_position + text_field_vertical_offset + text_field_vertical_offset_large_bump),
                style=wx.TE_PASSWORD ) #| wx.TE_PROCESS_ENTER)
            encryption_hardness_text = "Optional:\nEncryption Strength (1-24):"
            encryption_bump = gui_position.WelcomePage.text_field_vertical_offset_encryption_bump
            optional_offset = gui_position.WelcomePage.text_field_vertical_offset_optional_bump
            self.encryption_hardness_static_text = wx.StaticText(self, -1, encryption_hardness_text,
                (self.reset_password_button_horizontal_position,
                self.reset_password_button_vertical_position + encryption_bump))
            self.encryption_hardness_field = wx.TextCtrl(self, -1, "",
                (self.reset_password_button_horizontal_position + text_field_offset,
                self.reset_password_button_vertical_position + text_field_vertical_offset + encryption_bump + optional_offset),
                ) #style=wx.TE_PASSWORD | wx.TE_PROCESS_ENTER)
            self.encryption_hardness_field.SetHint("default = 8")
            # The whole form stays hidden until the user clicks "Reset Password".
            self.current_password_static_text.Hide()
            self.current_password_field.Hide()
            self.new_password_static_text.Hide()
            self.new_password_field.Hide()
            self.confirm_password_static_text.Hide()
            self.confirm_new_password_field.Hide()
            self.encryption_hardness_static_text.Hide()
            self.encryption_hardness_field.Hide()
            reset_password_bump = gui_position.WelcomePage.reset_password_bump
            self.reset_password_submit_button = wx.Button(self,
                label="Submit",
                pos=(
                    self.reset_password_button_horizontal_position + text_field_offset + reset_password_bump,
                    self.reset_password_button_vertical_position
                    ),
                size=(-1,-1))
            self.reset_password_submit_button.Bind(wx.EVT_BUTTON, self.resetPassword, self.reset_password_submit_button)
            self.reset_password_submit_button.Hide()
            reset_password_negative_vertical_bump = gui_position.WelcomePage.reset_password_negative_vertical_bump
            # Status line used to report validation errors / success.
            self.password_reset_status_static_text = wx.StaticText(self, -1, "",
                (self.reset_password_button_horizontal_position,
                self.reset_password_button_vertical_position - reset_password_negative_vertical_bump))
        self.delete_all_stock_data_horizontal_position = gui_position.WelcomePage.delete_all_stock_data[0]
        self.delete_all_stock_data_vertical_position = gui_position.WelcomePage.delete_all_stock_data[1]
        self.delete_all_stock_data = wx.Button(self,
            label="Delete All Stock Data",
            pos=(self.delete_all_stock_data_horizontal_position,
                self.delete_all_stock_data_vertical_position),
            size=(-1,-1))
        self.delete_all_stock_data.Bind(wx.EVT_BUTTON,
            self.deleteAllStockData,
            self.delete_all_stock_data)
        logging.info("WelcomePage loaded")

    def testFunction(self, event):
        """Debug helper: run self.function_to_test and log any failure."""
        try:
            self.function_to_test()
        except Exception as e:
            logging.error(e)

    def _clear_password_fields(self):
        """Blank out all four password-form text fields."""
        self.current_password_field.Clear()
        self.new_password_field.Clear()
        self.confirm_new_password_field.Clear()
        self.encryption_hardness_field.Clear()

    def resetPasswordPrep(self, event):
        """Swap the instructions out for the (cleared) password-reset form."""
        self.reset_password_button.Hide()
        self.instructions.Hide()
        self._clear_password_fields()
        self.current_password_static_text.Show()
        self.current_password_field.Show()
        self.new_password_static_text.Show()
        self.new_password_field.Show()
        self.confirm_password_static_text.Show()
        self.confirm_new_password_field.Show()
        self.encryption_hardness_static_text.Show()
        self.encryption_hardness_field.Show()
        self.reset_password_submit_button.Show()

    def resetPassword(self, event):
        """Validate the reset form and re-encrypt all files under the new password.

        On any validation failure the form is cleared and a status message is
        shown; on success the form is closed.
        """
        old_password = self.current_password_field.GetValue()
        new_password = self.new_password_field.GetValue()
        confirm_password = self.confirm_new_password_field.GetValue()
        encryption_strength = self.encryption_hardness_field.GetValue()
        if encryption_strength:
            try:
                encryption_strength = int(encryption_strength)
            except ValueError:
                self.password_reset_status_static_text.SetLabel("Encryption strength must be an integer or blank.")
                self._clear_password_fields()
                return
        saved_hash = db.is_saved_password_hash()
        if new_password != confirm_password:
            self.password_reset_status_static_text.SetLabel("Your confirmation did not match your new password.")
            self._clear_password_fields()
            return
        if not db.valid_pw(old_password, saved_hash):
            self.password_reset_status_static_text.SetLabel("The password you submitted is incorrect.")
            self._clear_password_fields()
            return
        # Success! Reset password and re-encrypt all relevant files.
        db.reset_all_encrypted_files_with_new_password(old_password, new_password, encryption_strength)
        self.password_reset_status_static_text.SetLabel("You have successfully changed your password.")
        self.closePasswordResetFields()

    def closePasswordResetFields(self):
        """Hide and clear the password form, restoring the reset button."""
        self.reset_password_submit_button.Hide()
        self._clear_password_fields()
        self.current_password_static_text.Hide()
        self.current_password_field.Hide()
        self.new_password_static_text.Hide()
        self.new_password_field.Hide()
        self.confirm_password_static_text.Hide()
        self.confirm_new_password_field.Hide()
        self.encryption_hardness_static_text.Hide()
        self.encryption_hardness_field.Hide()
        self.reset_password_button.Show()

    def deleteAllStockData(self, event):
        """Confirm with the user, then wipe all stock data and exit."""
        confirm = wx.MessageDialog(None,
            "Caution! You are about to delete all saved stock data. Your portfolio and screen data will remain, but all stock data will be deleted. To avoid errors, the program will then update basic stock data, and shut down.",
            'Delete All Stock Data?',
            style = wx.YES_NO
            )
        # NOTE: the labels are deliberately swapped — the "Yes" slot shows
        # "Cancel" and the "No" slot shows the destructive action, so
        # wx.ID_NO below is the user confirming deletion.
        confirm.SetYesNoLabels(("&Cancel"), ("&Yes, Delete All Stock Data and Restart"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer == wx.ID_NO:
            self.deleteAllStockDataConfirm()

    def deleteAllStockDataConfirm(self):
        """Delete all saved stock data, then shut the program down."""
        db.deleteAllStockDataConfirmed()
        sys.exit()
##
class GetDataPage(Tab):
    """Container tab holding the data-import workflow as a nested notebook
    (ticker download, XBRL import, YQL scrape, spreadsheet import)."""
    def __init__(self, parent):
        self.title = "Import Data"
        self.uid = config.GET_DATA_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        # Inner panel hosting the nested notebook of import pages.
        self.get_data_page_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
        self.get_data_notebook = wx.Notebook(self.get_data_page_panel)
        self.ticker_page = TickerPage(self.get_data_notebook)
        self.xbrl_import_page = XbrlImportPage(self.get_data_notebook)
        self.yql_scrape_page = YqlScrapePage(self.get_data_notebook)
        self.spreadsheet_import_page = SpreadsheetImportPage(self.get_data_notebook)
        # Tab order mirrors the intended work-flow, left to right.
        for sub_page in (self.ticker_page, self.xbrl_import_page,
                         self.yql_scrape_page, self.spreadsheet_import_page):
            self.get_data_notebook.AddPage(sub_page, sub_page.title)
        notebook_sizer = wx.BoxSizer()
        notebook_sizer.Add(self.get_data_notebook, 1, wx.EXPAND)
        self.SetSizer(notebook_sizer)
####
class TickerPage(Tab):
    """Tab for downloading ticker/CIK listings and displaying every saved ticker.

    Fixes vs. the previous revision: the Python-2-only `unicode` builtin is no
    longer referenced (NameError on Python 3); `self.file_display.Hide()` no
    longer crashes on a fresh install before any tickers exist; the resize
    branch in displayTickers referenced an undefined bare name and always fell
    through to the fallback size; an unused "exchanges" string build was
    removed as dead code.
    """
    def __init__(self, parent):
        self.title = "Download Ticker Data"
        self.uid = config.TICKER_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddSpacer(gui_position.TickerPage.AddSpacer) # vertical offset
        self.SetSizer(self.sizer)
        text = wx.StaticText(self, -1,
            "Welcome to the ticker page.",
            gui_position.TickerPage.text
            )
        download_button = wx.Button(self, label="NYSE and Nasdaq", pos=gui_position.TickerPage.download_button, size=(-1,-1))
        download_button.Bind(wx.EVT_BUTTON, self.confirmDownloadTickers, download_button)
        cik_button = wx.Button(self, label="CIK Numbers", pos=gui_position.TickerPage.cik_button, size=(-1,-1))
        cik_button.Bind(wx.EVT_BUTTON, self.confirmCikDownload, cik_button)
        refresh_button = wx.Button(self, label="Refresh", pos=gui_position.TickerPage.refresh_button, size=(-1,-1))
        refresh_button.Bind(wx.EVT_BUTTON, self.refreshTickers, refresh_button)
        self.showAllTickers()
        logging.info("TickerPage loaded")

    def confirmCikDownload(self, event):
        """Ask the user to confirm, then fetch CIK numbers on a worker thread."""
        confirm = wx.MessageDialog(None,
            "You are about to make a request from RankandFiled.com. If you do this too often they may block your IP address.",
            'Confirm Download',
            style = wx.YES_NO
            )
        confirm.SetYesNoLabels(("&Download"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        # The ticker display does not exist yet on a fresh install with no
        # saved tickers, so guard before hiding it.
        if hasattr(self, "file_display"):
            self.file_display.Hide()
        if yesNoAnswer == wx.ID_YES:
            download_cik = threading.Thread(name="cik download", target=self.downloadCikNumbers)
            download_cik.start()

    def downloadCikNumbers(self):
        """Worker: download CIK↔ticker mappings, save, and refresh the views."""
        logging.info("Begin cik number download...")
        scrape.download_and_save_cik_ticker_mappings()
        db.save_GLOBAL_STOCK_DICT()
        self.showAllTickers()
        # Update view all stocks
        view_all_stocks_page = config.GLOBAL_PAGES_DICT.get(config.ALL_STOCKS_PAGE_UNIQUE_ID).obj
        view_all_stocks_page.spreadSheetFillAllStocks("event")

    def confirmDownloadTickers(self, event):
        """Ask the user to confirm, then download tickers on a worker thread."""
        confirm = wx.MessageDialog(None,
            "You are about to make a request from Nasdaq.com. If you do this too often they may block your IP address.",
            'Confirm Download',
            style = wx.YES_NO
            )
        confirm.SetYesNoLabels(("&Download"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        # Guard: no display exists before the first successful ticker load.
        if hasattr(self, "file_display"):
            self.file_display.Hide()
        if yesNoAnswer == wx.ID_YES:
            download_tickers = threading.Thread(name="download tickers", target=self.downloadTickers)
            download_tickers.start()

    def downloadTickers(self):
        """Worker: download the full Nasdaq ticker list and store each listing."""
        logging.info("Begin ticker download...")
        ticker_list_without_prices = scrape.nasdaq_full_ticker_list_downloader()
        uppercase_exchange_list = [x.upper() for x in config.STOCK_EXCHANGE_LIST]
        for data in ticker_list_without_prices:
            # Bug fix: original tested `type(data[2]) in [str, unicode]`;
            # `unicode` does not exist on Python 3 and raised NameError.
            if isinstance(data[2], str):
                if data[2].upper() in uppercase_exchange_list:
                    stock = db.create_new_Stock_if_it_doesnt_exist(data[0])
                    stock.firm_name = data[1]
                    stock.Exchange_na = data[2]
                    stock.etf_na = data[3]
            else:
                # Unexpected row shape: log it rather than fail silently.
                logging.info(data)
        logging.info("Begin price data download...")
        scrape.convert_nasdaq_csv_to_stock_objects()
        db.save_GLOBAL_STOCK_DICT()
        self.showAllTickers()
        # Update view all stocks
        view_all_stocks_page = config.GLOBAL_PAGES_DICT.get(config.ALL_STOCKS_PAGE_UNIQUE_ID).obj
        view_all_stocks_page.spreadSheetFillAllStocks("event")

    # no longer used — retained for reference only
    def saveTickerDataAsStocks(self, ticker_data_from_download):
        """(Deprecated) Save each downloaded (ticker, firm_name) pair as a Stock."""
        # first check for stocks that have fallen off the stock exchanges
        ticker_list = []
        # create a list of tickers
        for ticker_data_sublist in ticker_data_from_download:
            logging.info("{}: {}".format(ticker_data_sublist[0] ,ticker_data_sublist[1]))
            ticker_symbol_upper = utils.strip_string_whitespace(ticker_data_sublist[0]).upper()
            ticker_list.append(ticker_symbol_upper)
        # save stocks if new
        for ticker_data_sublist in ticker_data_from_download:
            ticker_symbol = utils.strip_string_whitespace(ticker_data_sublist[0])
            firm_name = ticker_data_sublist[1]
            if "$" in ticker_symbol:
                logging.info('Ticker {} with "$" symbol found, not sure if legitimate, so not saving it.'.format(ticker_symbol))
                continue
            stock = db.create_new_Stock_if_it_doesnt_exist(ticker_symbol)
            stock.firm_name = firm_name
            logging.info("Saving: {} {}".format(stock.ticker, stock.firm_name))
        db.save_GLOBAL_STOCK_DICT()
    # end no longer used

    def refreshTickers(self, event):
        """Button handler: rebuild the ticker display from the saved data."""
        self.showAllTickers()

    def showAllTickers(self):
        """(Re)build the scrollable text display of every saved ticker."""
        logging.info("Loading Tickers")
        ticker_list = list(config.GLOBAL_STOCK_DICT.keys())
        if ticker_list:
            ticker_list.sort()
            self.displayTickers(ticker_list)
            self.sizer.Add(self.file_display, 1, wx.ALL|wx.EXPAND)
            self.file_display.Show()

    def displayTickers(self, ticker_list):
        """Render ticker symbols as one comma-separated read-only text block."""
        ticker_list = sorted(ticker_list)
        # Trailing ", " matches the historical output format.
        ticker_list_massive_str = "".join(ticker + ", " for ticker in ticker_list)
        display_tickers_position_vertical_offset = gui_position.TickerPage.display_tickers_position_vertical_offset
        size = gui_position.TickerPage.display_tickers_size_if_resize_errors
        try:
            width, height = gui_position.main_frame_size()
            # Bug fix: the horizontal adjustment was referenced as a bare
            # undefined name, so this branch always raised and the fallback
            # size was used. TODO confirm the attribute name in gui_position.
            horizontal_adjustment = gui_position.TickerPage.display_tickers_size_horizontal_adjustment
            size = ( width - horizontal_adjustment , height - display_tickers_position_vertical_offset) # find the difference between the Frame and the grid size
        except Exception:
            pass
        try:
            self.file_display.Destroy()
        except Exception:
            # First call: no previous display to destroy.
            pass
        self.file_display = wx.TextCtrl(self, -1,
            ticker_list_massive_str,
            (2, display_tickers_position_vertical_offset),
            size = size,
            style = wx.TE_READONLY | wx.TE_MULTILINE ,
            )
class XbrlImportPage(Tab):
    """Tab for importing XBRL filings, either downloaded from the SEC (by
    year/month or year range) or read from local files/folders.

    Fixes vs. the previous revision: abortImport referenced
    `self.import_button` and `self.calculate_import_times()`, neither of which
    exists on this page (copy-paste from YqlScrapePage), so pressing Cancel
    raised AttributeError; placeholder ints that were immediately overwritten
    by StaticText widgets were removed.
    """
    def __init__(self, parent):
        self.title = "XBRL import"
        self.uid = config.XBRL_IMPORT_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "XBRL import page",
            gui_position.XbrlImportPage.text
            )
        self.sec_download_button = wx.Button(self, label="Download XBRL files from the SEC", pos=gui_position.XbrlImportPage.sec_download_button, size=(-1,-1))
        self.sec_download_button.Bind(wx.EVT_BUTTON, self.confirmSecDownload, self.sec_download_button)
        # Two mutually exclusive download modes: a single year+month, or a
        # from-year..to-year range. Focusing a field selects its mode.
        self.radio_year_month = wx.RadioButton(self, pos=gui_position.XbrlImportPage.radio_year_month)
        self.radio_year_month.SetValue(True)
        self.radio_from_year_to_year = wx.RadioButton(self, pos=gui_position.XbrlImportPage.radio_from_year_to_year)
        self.checkbox_dont_save_sec_files = wx.CheckBox(self, pos=gui_position.XbrlImportPage.checkbox_dont_save_sec_files, label="Download without backups")
        self.checkbox_dont_save_sec_files.SetValue(True)
        now = datetime.datetime.now()
        this_month = int(now.month)
        this_year = int(now.year)
        self.xbrl_year_input = wx.TextCtrl(self, -1,
            str(this_year),
            gui_position.XbrlImportPage.xbrl_year_input,
            )
        self.xbrl_year_input.SetHint("year")
        self.xbrl_year_input.Bind(wx.EVT_SET_FOCUS, lambda event: self.set_radio_button(event, self.radio_year_month))
        self.xbrl_month_dropdown = wx.ComboBox(self, pos=gui_position.XbrlImportPage.xbrl_month_dropdown, choices= calendar.month_name[1:])
        self.xbrl_month_dropdown.SetSelection(this_month-1) # month_name is 1-based; selection is 0-based
        self.xbrl_month_dropdown.Bind(wx.EVT_SET_FOCUS, lambda event: self.set_radio_button(event, self.radio_year_month))
        self.xbrl_from_year_input = wx.TextCtrl(self, pos=gui_position.XbrlImportPage.xbrl_from_year_input)
        self.xbrl_from_year_input.SetHint("from year")
        self.xbrl_from_year_input.Bind(wx.EVT_SET_FOCUS, lambda event: self.set_radio_button(event, self.radio_from_year_to_year))
        self.xbrl_to_year_input = wx.TextCtrl(self, pos=gui_position.XbrlImportPage.xbrl_to_year_input)
        self.xbrl_to_year_input.SetHint("to year")
        self.xbrl_to_year_input.Bind(wx.EVT_SET_FOCUS, lambda event: self.set_radio_button(event, self.radio_from_year_to_year))
        self.from_file_button = wx.Button(self, label="Import XBRL file", pos=gui_position.XbrlImportPage.from_file_button, size=(-1,-1))
        self.from_file_button.Bind(wx.EVT_BUTTON, self.import_XBRL_files, self.from_file_button)
        self.from_folder_button = wx.Button(self, label="Import XBRL folder", pos=gui_position.XbrlImportPage.from_folder_button, size=(-1,-1))
        self.from_folder_button.Bind(wx.EVT_BUTTON, self.import_XBRL_folder, self.from_folder_button)
        self.abort_import_button = wx.Button(self, label="Cancel Import", pos=gui_position.XbrlImportPage.abort_import_button, size=(-1,-1))
        self.abort_import_button.Bind(wx.EVT_BUTTON, self.abortImport, self.abort_import_button)
        self.abort_import_button.Hide()
        # NOTE(review): this flag is set by abortImport but nothing on this
        # page currently polls it — confirm the scrape module checks it.
        self.abort_import = False
        self.progress_bar = wx.Gauge(self, -1, 100, size=gui_position.XbrlImportPage.progress_bar_size, pos = gui_position.XbrlImportPage.progress_bar)
        self.progress_bar.Hide()
        self.num_of_imported_stocks = 0
        self.number_of_tickers_to_import = 0
        self.number_of_nonimported_stocks = 0
        self.total_relevant_tickers = wx.StaticText(self, -1,
            label = "Total number of tickers = %d" % (self.num_of_imported_stocks + self.number_of_tickers_to_import),
            pos = gui_position.XbrlImportPage.total_relevant_tickers
            )
        self.tickers_to_import = wx.StaticText(self, -1,
            label = "Tickers that need to be scraped = %d" % self.number_of_tickers_to_import,
            pos = gui_position.XbrlImportPage.tickers_to_import
            )
        sleep_time = config.SCRAPE_SLEEP_TIME
        # Rough estimate: two sleeps per chunk of tickers.
        import_time_secs = (self.number_of_tickers_to_import/config.SCRAPE_CHUNK_LENGTH) * sleep_time * 2
        import_time = utils.time_from_epoch(import_time_secs)
        self.import_time_text = wx.StaticText(self, -1,
            label = "Time = %s" % import_time,
            pos = gui_position.XbrlImportPage.import_time_text
            )
        logging.info("XbrlImportPage loaded")

    def set_radio_button(self, event, radio_button=None):
        """Select the given mode radio button when one of its fields gains focus."""
        if not radio_button.GetValue():
            radio_button.SetValue(True)

    def import_XBRL_files(self, event):
        """Prompt for a single XBRL file and import it."""
        xbrl_data_folder_dialogue = wx.FileDialog(self, "Choose a File:")
        if xbrl_data_folder_dialogue.ShowModal() == wx.ID_OK:
            path = xbrl_data_folder_dialogue.GetPath()
            logging.info(path)
        else:
            path = None
        xbrl_data_folder_dialogue.Destroy()
        if path:
            scrape.scrape_xbrl_from_file(path)

    def import_XBRL_folder(self, event):
        """Prompt for a folder and import every .zip XBRL archive inside it."""
        xbrl_data_folder_dialogue = wx.DirDialog(self, "Choose a directory:")
        if xbrl_data_folder_dialogue.ShowModal() == wx.ID_OK:
            path = xbrl_data_folder_dialogue.GetPath()
            logging.info(path)
        else:
            path = None
        xbrl_data_folder_dialogue.Destroy()
        if path:
            logging.warning(path)
            for file in os.listdir(path):
                logging.warning(file)
                file_path = os.path.join(path, file)
                logging.warning(file_path)
                if file.endswith(".zip"):
                    logging.warning("Importing from: {}".format(os.path.join(path, file)))
                    scrape.scrape_xbrl_from_file(file_path)

    def confirmSecDownload(self, event):
        """Warn about download size/etiquette, then launch the SEC scrape."""
        confirm = wx.MessageDialog(None,
            "You are about to import XBRL data from the SEC's database. Please don't do this during business hours, do it at night. Also, these downloads can be very, very large. Make sure you have a large amount of memory available.\n\nData saved limited for memory purpose. You can edit the scraper section to expand this.",
            'Import stock data?',
            style = wx.YES_NO
            )
        confirm.SetYesNoLabels(("&Import"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.scrapeSEC()

    def scrapeSEC(self):
        """Collect the selected date mode's inputs and launch the SEC download."""
        year_month = self.radio_year_month.GetValue()
        from_to = self.radio_from_year_to_year.GetValue()
        year, month, from_year, to_year = None, None, None, None
        if year_month:
            year = self.xbrl_year_input.GetValue()
            month = self.xbrl_month_dropdown.GetValue()
            # Convert the month name back to its 1-based number.
            month = list(calendar.month_name).index(month)
        elif from_to:
            from_year = self.xbrl_from_year_input.GetValue()
            to_year = self.xbrl_to_year_input.GetValue()
        else:
            return
        # NOTE(review): checkbox label says "Download without backups" but its
        # checked state is passed as add_to_wxStocks_database — confirm the
        # intended semantics against the scrape module.
        add_to_wxStocks_database = self.checkbox_dont_save_sec_files.IsChecked()
        scrape.sec_xbrl_download_launcher(year=year, month=month, from_year=from_year, to_year=to_year, add_to_wxStocks_database = add_to_wxStocks_database)

    def abortImport(self, event):
        """Flag the in-progress import for cancellation and restore the UI.

        Bug fix: the original showed a nonexistent self.import_button and
        called a nonexistent self.calculate_import_times(), raising
        AttributeError; the UI is now restored using this page's own widgets.
        """
        logging.info("Canceling import... this may take up to {} seconds.".format(config.SCRAPE_SLEEP_TIME))
        if self.abort_import == False:
            self.abort_import = True
        self.abort_import_button.Hide()
        self.sec_download_button.Show()
        self.progress_bar.Hide()
class YqlScrapePage(Tab):
def __init__(self, parent):
self.title = "Scrape YQL"
self.uid = config.YQL_SCRAPE_PAGE_UNIQUE_ID
wx.Panel.__init__(self, parent)
text = wx.StaticText(self, -1,
"Welcome to the scrape page",
gui_position.YqlScrapePage.text
)
self.time_button = wx.Button(self, label="Calculate Scrape Time", pos=gui_position.YqlScrapePage.time_button, size=(-1,-1))
self.time_button.Bind(wx.EVT_BUTTON, self.calculate_scrape_times, self.time_button)
self.scrape_button = wx.Button(self, label="Scrape YQL", pos=gui_position.YqlScrapePage.scrape_button, size=(-1,-1))
self.scrape_button.Bind(wx.EVT_BUTTON, self.confirmScrape, self.scrape_button)
self.abort_scrape_button = wx.Button(self, label="Cancel Scrape", pos=gui_position.YqlScrapePage.abort_scrape_button, size=(-1,-1))
self.abort_scrape_button.Bind(wx.EVT_BUTTON, self.abortScrape, self.abort_scrape_button)
self.abort_scrape_button.Hide()
self.abort_scrape = False
self.progress_bar = wx.Gauge(self, -1, 100, size=gui_position.YqlScrapePage.progress_bar_size, pos = gui_position.YqlScrapePage.progress_bar)
self.progress_bar.Hide()
self.numScrapedStocks = 0
self.number_of_tickers_to_scrape = 0
self.total_relevant_tickers = 0
self.tickers_to_scrape = 0
self.scrape_time_text = 0
self.number_of_unscraped_stocks = 0
self.total_relevant_tickers = wx.StaticText(self, -1,
label = "Total number of tickers = %d" % (self.numScrapedStocks + self.number_of_tickers_to_scrape),
pos = gui_position.YqlScrapePage.total_relevant_tickers
)
self.tickers_to_scrape = wx.StaticText(self, -1,
label = "Tickers that need to be scraped = %d" % self.number_of_tickers_to_scrape,
pos = gui_position.YqlScrapePage.tickers_to_scrape
)
sleep_time = config.SCRAPE_SLEEP_TIME
scrape_time_secs = (self.number_of_tickers_to_scrape/config.SCRAPE_CHUNK_LENGTH) * sleep_time * 2
scrape_time = utils.time_from_epoch(scrape_time_secs)
self.scrape_time_text = wx.StaticText(self, -1,
label = "Time = %s" % scrape_time,
pos = gui_position.YqlScrapePage.scrape_time_text
)
logging.info("YqlScrapePage loaded")
def calculate_scrape_times(self, event=None):
scrape_thread = threading.Thread(target=self.calculate_scrape_times_worker)
scrape_thread.start()
def calculate_scrape_times_worker(self):
logging.info("Calculating scrape times...")
sleep_time = config.SCRAPE_SLEEP_TIME
# calculate number of stocks and stuff to scrape
self.numScrapedStocks = 0
self.number_of_tickers_to_scrape = 0
for stock in config.GLOBAL_STOCK_DICT:
self.number_of_tickers_to_scrape += 1
current_time = float(time.time())
time_since_update = current_time - config.GLOBAL_STOCK_DICT[stock].last_yql_basic_scrape_update
if (int(time_since_update) < int(config.TIME_ALLOWED_FOR_BEFORE_RECENT_UPDATE_IS_STALE) ):
self.numScrapedStocks += 1
self.number_of_unscraped_stocks = self.number_of_tickers_to_scrape - self.numScrapedStocks
total_ticker_len = len(config.GLOBAL_STOCK_DICT)
scrape_time_secs = (self.number_of_unscraped_stocks/config.SCRAPE_CHUNK_LENGTH) * sleep_time * 2
scrape_time = utils.time_from_epoch(scrape_time_secs)
self.total_relevant_tickers.SetLabel("Total number of tickers = %d" % self.number_of_tickers_to_scrape)
self.tickers_to_scrape.SetLabel("Tickers that need to be scraped = %d" % self.number_of_unscraped_stocks)
self.scrape_time_text.SetLabel("Time = %s" % scrape_time)
logging.info("Calculation done")
def confirmScrape(self, event):
confirm = wx.MessageDialog(None,
"You are about to scrape of Yahoo's YQL database. If you do this too often Yahoo may temporarily block your IP address.",
'Scrape stock data?',
style = wx.YES_NO
)
confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
yesNoAnswer = confirm.ShowModal()
#try:
# confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
#except AttributeError:
# pass
confirm.Destroy()
if yesNoAnswer == wx.ID_YES:
self.scrapeYQL()
def scrapeYQL(self):
chunk_list_and_percent_of_full_scrape_done_and_number_of_tickers_to_scrape = scrape.prepareYqlScrape()
chunk_list = chunk_list_and_percent_of_full_scrape_done_and_number_of_tickers_to_scrape[0]
percent_of_full_scrape_done = chunk_list_and_percent_of_full_scrape_done_and_number_of_tickers_to_scrape[1]
self.number_of_tickers_to_scrape = chunk_list_and_percent_of_full_scrape_done_and_number_of_tickers_to_scrape[2]
self.progress_bar.SetValue(percent_of_full_scrape_done)
self.progress_bar.Show()
self.scrape_button.Hide()
self.abort_scrape_button.Show()
# Process the scrape while updating a progress bar
timer = threading.Timer(0, self.executeScrapePartOne, [chunk_list, 0])
timer.start()
#scrape_thread = threading.Thread(target=self.executeOneScrape, args = (ticker_chunk,))
#scrape_thread.daemon = True
#scrape_thread.start()
#while scrape_thread.isAlive():
# # Every two sleep times execute a new scrape
# full_scrape_sleep = float(sleep_time * 2)
# scrape_thread.join(full_scrape_sleep)
# cont, skip = progress_dialog.Update(self.numScrapedStocks)
# if not cont:
# progress_dialog.Destroy()
# return
def executeScrapePartOne(self, ticker_chunk_list, position_of_this_chunk):
if self.abort_scrape == True:
self.abort_scrape = False
self.progress_bar.Hide()
logging.info("Scrape canceled.")
return
data = scrape.executeYqlScrapePartOne(ticker_chunk_list, position_of_this_chunk)
sleep_time = config.SCRAPE_SLEEP_TIME
timer = threading.Timer(sleep_time, self.executeScrapePartTwo, [ticker_chunk_list, position_of_this_chunk, data])
timer.start()
def executeScrapePartTwo(self, ticker_chunk_list, position_of_this_chunk, successful_pyql_data):
if self.abort_scrape == True:
self.abort_scrape = False
self.progress_bar.Hide()
logging.info("Scrape canceled.")
return
scrape.executeYqlScrapePartTwo(ticker_chunk_list, position_of_this_chunk, successful_pyql_data)
sleep_time = config.SCRAPE_SLEEP_TIME
logging.warning("Sleeping for %d seconds before the next task" % sleep_time)
#time.sleep(sleep_time)
#self.numScrapedStocks += number_of_stocks_in_this_scrape
#cont, skip = self.progress_dialog.Update(self.numScrapedStocks)
#if not cont:
# self.progress_dialog.Destroy()
# return
number_of_tickers_in_chunk_list = 0
for chunk in ticker_chunk_list:
for ticker in chunk:
number_of_tickers_in_chunk_list += 1
number_of_tickers_previously_updated = self.number_of_tickers_to_scrape - number_of_tickers_in_chunk_list
number_of_tickers_done_in_this_scrape = 0
for i in range(len(ticker_chunk_list)):
if i > position_of_this_chunk:
continue
for ticker in ticker_chunk_list[i]:
number_of_tickers_done_in_this_scrape += 1
total_number_of_tickers_done = number_of_tickers_previously_updated + number_of_tickers_done_in_this_scrape
percent_of_full_scrape_done = round( 100 * float(total_number_of_tickers_done) / float(self.number_of_tickers_to_scrape))
position_of_this_chunk += 1
percent_done = round( 100 * float(position_of_this_chunk) / float(len(ticker_chunk_list)) )
logging.info("{}% done this scrape execution.".format(percent_done))
logging.info("{}% done of all tickers.".format(percent_of_full_scrape_done))
self.progress_bar.SetValue(percent_of_full_scrape_done)
self.calculate_scrape_times()
if position_of_this_chunk >= len(ticker_chunk_list):
# finished
self.abort_scrape_button.Hide()
self.scrape_button.Show()
self.progress_bar.SetValue(100)
return
else:
logging.info("ready to loop again")
timer = threading.Timer(sleep_time, self.executeScrapePartOne, [ticker_chunk_list, position_of_this_chunk])
timer.start()
def abortScrape(self, event):
logging.info("Canceling scrape... this may take up to {} seconds.".format(config.SCRAPE_SLEEP_TIME))
if self.abort_scrape == False:
self.abort_scrape = True
self.abort_scrape_button.Hide()
self.scrape_button.Show()
self.calculate_scrape_times()
class SpreadsheetImportPage(Tab):
    """Container tab holding the .xls and .csv import sub-tabs in a notebook."""
    def __init__(self, parent):
        self.title = "Import Data Spreadsheets"
        self.uid = config.SPREADSHEET_IMPORT_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        ####
        # NOTE(review): wx.EXPAND used as a pixel size is unusual — presumably
        # the sizer below is what actually controls geometry; confirm.
        spreadsheet_page_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
        spreadsheet_notebook = wx.Notebook(spreadsheet_page_panel)
        # One sub-tab per spreadsheet format.
        self.xls_import_page = XlsImportPage(spreadsheet_notebook)
        spreadsheet_notebook.AddPage(self.xls_import_page, self.xls_import_page.title)
        self.csv_import_page = CsvImportPage(spreadsheet_notebook)
        spreadsheet_notebook.AddPage(self.csv_import_page, self.csv_import_page.title)
        sizer2 = wx.BoxSizer()
        sizer2.Add(spreadsheet_notebook, 1, wx.EXPAND)
        self.SetSizer(sizer2)
        ####
class CsvImportPage(Tab):
    """Tab for importing stock data from a .csv file via a user-selected
    import function (user functions are defined on the function tab).
    """
    def __init__(self, parent):
        self.title = "Import .CSV Data"
        self.uid = config.CSV_IMPORT_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Welcome to the CSV data import page page.\nYou can make your own import functions under the function tab.",
            gui_position.CsvImportPage.text
        )
        default_button_position = gui_position.CsvImportPage.default_button_position
        default_button_horizontal_position = default_button_position[0]
        default_button_vertical_position = default_button_position[1]
        default_dropdown_offset = gui_position.CsvImportPage.default_dropdown_offset
        default_dropdown_horizontal_offset = default_dropdown_offset[0]
        default_dropdown_vertical_offset = default_dropdown_offset[1]
        import_button = wx.Button(self, label="import .csv", pos=(default_button_horizontal_position, default_button_vertical_position), size=(-1,-1))
        import_button.Bind(wx.EVT_BUTTON, self.importCSV, import_button)
        # Drop-down is populated with the short names of user-defined import functions.
        self.csv_import_name_list = meta.return_csv_import_function_short_names()
        self.drop_down = wx.ComboBox(self, pos=(default_button_horizontal_position + default_dropdown_horizontal_offset, default_button_vertical_position + default_dropdown_vertical_offset), choices=self.csv_import_name_list)
        # (doc, name, function) triples used to map the screen name back to a callable.
        self.triple_list = meta.return_csv_import_function_triple()
        self.csv_import_name = None

    def importCSV(self, event):
        """Run the selected CSV import function and report the result in a dialog."""
        self.csv_import_name = self.drop_down.GetValue()
        # Identify the function mapped to the on-screen name; the doc string is
        # preferred, the short name is the fallback when the doc is too long.
        csv_import_function = None  # bug fix: was unbound when no triple matched
        for triple in self.triple_list:
            if self.csv_import_name in (triple.doc, triple.name):
                csv_import_function = triple.function
        if not csv_import_function:
            logging.error("Error, somthing went wrong locating the correct import function to use.")
            # bug fix: previously fell through and raised UnboundLocalError below.
            return
        # Run the selected import function against the user's CSV file.
        success = process_user_function.import_csv_via_user_created_function(self, csv_import_function)
        if not success:
            return
        if success == "fail":
            title_string = "Error"
            success_string = "This import has failed, please check make sure your function conforms to the import protocols."
            message_style = wx.ICON_ERROR
        elif success == "some":
            title_string = "Some Errors"
            success_string = "There were some errors with your import, please review your CSV file and make sure that your functions conform to the protocols, and that the ticker symbols in your csv files are the same format as wxStocks'."
            message_style = wx.ICON_EXCLAMATION
        elif success == "success":
            title_string = "Success"
            success_string = "Success! You're file has been successfully imported."
            message_style = wx.OK
        else:
            logging.error("Error in importCSV title and success strings")
            return
        logging.info("importCSV done")
        confirm = wx.MessageDialog(None,
            success_string,
            title_string,
            style = message_style
        )
        confirm.ShowModal()
class XlsImportPage(Tab):
    """Tab for importing stock data from a .xls file via a user-selected
    import function, with an extra shortcut for bulk-importing AAII files.
    """
    def __init__(self, parent):
        self.title = "Import .XLS Data"
        self.uid = config.XLS_IMPORT_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Welcome to the XLS data import page page.\nYou can make your own import functions under the function tab.",
            (10,10)
        )
        default_button_position = gui_position.XlsImportPage.default_button_position
        default_button_horizontal_position = default_button_position[0]
        default_button_vertical_position = default_button_position[1]
        default_dropdown_offset = gui_position.XlsImportPage.default_dropdown_offset
        default_dropdown_horizontal_offset = default_dropdown_offset[0]
        default_dropdown_vertical_offset = default_dropdown_offset[1]
        aaii_offset = gui_position.XlsImportPage.aaii_offset # if aaii files in aaii import folder, this button will appear below the import dropdown
        import_button = wx.Button(self, label="import .xls", pos=(default_button_horizontal_position, default_button_vertical_position), size=(-1,-1))
        import_button.Bind(wx.EVT_BUTTON, self.importXLS, import_button)
        import_all_aaii_files_button = wx.Button(self, label="import aaii files from folder", pos=(default_button_horizontal_position, default_button_vertical_position + aaii_offset), size=(-1,-1))
        import_all_aaii_files_button.Bind(wx.EVT_BUTTON, self.import_AAII_files, import_all_aaii_files_button)
        # Drop-down is populated with the short names of user-defined import functions.
        self.xls_import_name_list = meta.return_xls_import_function_short_names()
        self.drop_down = wx.ComboBox(self, pos=(default_button_horizontal_position + default_dropdown_horizontal_offset, default_button_vertical_position + default_dropdown_vertical_offset), choices=self.xls_import_name_list)
        # (doc, name, function) triples used to map the screen name back to a callable.
        self.triple_list = meta.return_xls_import_function_triple()
        self.xls_import_name = None

    def importXLS(self, event):
        """Run the selected XLS import function and report the result in a dialog."""
        self.xls_import_name = self.drop_down.GetValue()
        # Identify the function mapped to the on-screen name; the doc string is
        # preferred, the short name is the fallback when the doc is too long.
        xls_import_function = None  # bug fix: was unbound when no triple matched
        for triple in self.triple_list:
            if self.xls_import_name in (triple.doc, triple.name):
                xls_import_function = triple.function
        if not xls_import_function:
            logging.error("Error, somthing went wrong locating the correct import function to use.")
            # bug fix: previously fell through and raised UnboundLocalError below.
            return
        # Run the selected import function against the user's XLS file.
        success = process_user_function.import_xls_via_user_created_function(self, xls_import_function)
        if not success:
            return
        if success == "fail":
            title_string = "Error"
            success_string = "This import has failed, please check make sure your function conforms to the import protocols."
            message_style = wx.ICON_ERROR
        elif success == "some":
            title_string = "Some Errors"
            success_string = "There were some errors with your import, please review your XLS file and make sure that your functions conform to the protocols, and that the ticker symbols in your xls files are the same format as wxStocks'."
            message_style = wx.ICON_EXCLAMATION
        elif success == "success":
            title_string = "Success"
            success_string = "Success! You're file has been successfully imported."
            message_style = wx.OK
        else:
            logging.error("Error in importXLS title and success strings")
            return
        logging.info("importXLS done")
        confirm = wx.MessageDialog(None,
            success_string,
            title_string,
            style = message_style
        )
        confirm.ShowModal()

    def import_AAII_files(self, event):
        """Prompt for a folder and bulk-import every AAII data file found in it."""
        aaii_data_folder_dialogue = wx.DirDialog(self, "Choose a directory:")
        if aaii_data_folder_dialogue.ShowModal() == wx.ID_OK:
            path = aaii_data_folder_dialogue.GetPath()
            logging.info(path)
        else:
            path = None
        aaii_data_folder_dialogue.Destroy()
        if path:
            aaii.import_aaii_files_from_data_folder(path=path)
##
class PortfolioPage(Tab):
    """Top-level tab holding one PortfolioAccountTab per portfolio in a notebook.

    On first run (no saved portfolios) it creates the default set of accounts;
    otherwise it builds a tab for each persisted portfolio object.
    """
    def __init__(self, parent):
        self.title = "Portfolios"
        self.uid = config.PORTFOLIO_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        ####
        self.portfolio_page_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
        self.portfolio_account_notebook = wx.Notebook(self.portfolio_page_panel)
        portfolios_that_already_exist = []
        if config.PORTFOLIO_OBJECTS_DICT:
            # Display tabs in stable id order regardless of dict ordering.
            portfolios_that_already_exist = [obj for key, obj in config.PORTFOLIO_OBJECTS_DICT.items()]
            portfolios_that_already_exist.sort(key = lambda x: x.id_number)
        default_portfolio_names = ["Primary", "Secondary", "Tertiary"]
        if not portfolios_that_already_exist:
            # First launch: create the default number of empty portfolio accounts.
            config.NUMBER_OF_PORTFOLIOS = config.NUMBER_OF_DEFAULT_PORTFOLIOS
            for i in range(config.NUMBER_OF_PORTFOLIOS):
                portfolio_name = None
                if config.NUMBER_OF_PORTFOLIOS < 10:
                    portfolio_name = "Portfolio %d" % (i+1)
                else:
                    portfolio_name = "%dth" % (i+1)
                # The first few portfolios get friendly default names instead.
                if i in range(len(default_portfolio_names)):
                    portfolio_name = default_portfolio_names[i]
                portfolio_obj = db.create_new_Account_if_one_doesnt_exist(i+1, name=portfolio_name)
                logging.info("Portfolio: {} {}, created at startup".format(portfolio_obj.id_number, portfolio_obj.name))
                portfolio_account = PortfolioAccountTab(self.portfolio_account_notebook, (i+1), portfolio_name)
                portfolio_account.title = portfolio_name
                self.portfolio_account_notebook.AddPage(portfolio_account, portfolio_name)
        else: # portfolios already exist
            # NOTE(review): need_to_save is never set True and portfolios_to_save
            # is never populated, so the save loop below is dead code — confirm
            # whether a migration step was removed here.
            need_to_save = False
            portfolios_to_save = []
            for portfolio_obj in portfolios_that_already_exist:
                portfolio_account = PortfolioAccountTab(self.portfolio_account_notebook, portfolio_obj.id_number, portfolio_obj.name)
                portfolio_account.title = portfolio_obj.name
                self.portfolio_account_notebook.AddPage(portfolio_account, portfolio_obj.name)
            if need_to_save == True:
                for portfolio_obj in portfolios_to_save:
                    db.save_portfolio_object(portfolio_obj)
        sizer2 = wx.BoxSizer()
        sizer2.Add(self.portfolio_account_notebook, 1, wx.EXPAND)
        self.SetSizer(sizer2)
        ####
        logging.info("PortfolioPage loaded")
class PortfolioAccountTab(Tab):
    """One notebook tab showing a single portfolio account: a spreadsheet of
    holdings plus controls to import, edit, rename, delete, and re-price it.
    """
    def __init__(self, parent, tab_number, portfolio_name):
        self.title = None
        tab_panel = wx.Panel.__init__(self, parent, tab_number)
        # tab_number doubles as the portfolio's persistent id.
        self.portfolio_id = tab_number
        self.name = portfolio_name
        self.portfolio_obj = config.PORTFOLIO_OBJECTS_DICT.get(str(tab_number))
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddSpacer(gui_position.PortfolioAccountTab.AddSpacer)
        self.SetSizer(self.sizer)
        if not self.portfolio_obj:
            # NOTE(review): the loaded object is not assigned to
            # self.portfolio_obj here — presumably load_portfolio_object
            # registers it in config.PORTFOLIO_OBJECTS_DICT as a side effect;
            # confirm.
            try:
                db.load_portfolio_object(self.portfolio_id)
            except Exception as e:
                logging.error(e)
                self.portfolio_obj = None
        self.add_button = wx.Button(self, label="Update from file", pos=gui_position.PortfolioAccountTab.add_button, size=(-1,-1))
        self.add_button.Bind(wx.EVT_BUTTON, self.addAccountCSV, self.add_button)
        # User-defined portfolio import functions drive the drop-down choices.
        self.portfolio_import_name_list = meta.return_portfolio_import_function_short_names()
        self.drop_down = wx.ComboBox(self, pos=gui_position.PortfolioAccountTab.drop_down, choices=self.portfolio_import_name_list)
        self.triple_list = meta.return_portfolio_import_function_triple()
        self.portfolio_import_name = None
        self.delete_button = wx.Button(self, label="Delete this portfolio", pos=gui_position.PortfolioAccountTab.delete_button, size=(-1,-1))
        self.delete_button.Bind(wx.EVT_BUTTON, self.confirmDeleteAccount, self.delete_button)
        self.rename_button = wx.Button(self, label="Rename this portfolio", pos=gui_position.PortfolioAccountTab.rename_button, size=(-1,-1))
        self.rename_button.Bind(wx.EVT_BUTTON, self.changeTabName, self.rename_button)
        self.add_a_portfolio_button = wx.Button(self, label="Add new portfolio", pos=gui_position.PortfolioAccountTab.add_a_portfolio_button, size=(-1,-1))
        self.add_a_portfolio_button.Bind(wx.EVT_BUTTON, self.addPortfolio, self.add_a_portfolio_button)
        #print_portfolio_data_button = wx.Button(self, label="p", pos=(730,0), size=(-1,-1))
        #print_portfolio_data_button.Bind(wx.EVT_BUTTON, self.printData, print_portfolio_data_button)
        self.current_account_spreadsheet = None
        if self.portfolio_obj:
            self.spreadSheetFill(self.portfolio_obj)
        self.screen_grid = None
        # Manual-entry fields: ticker / share count / cash-or-cost-basis.
        self.ticker_input = wx.TextCtrl(self, -1, "", gui_position.PortfolioAccountTab.ticker_input)
        self.ticker_input.SetHint("ticker")
        self.share_input = wx.TextCtrl(self, -1, "", gui_position.PortfolioAccountTab.share_input)
        self.share_input.SetHint("# shares")
        self.cost_basis_input = wx.TextCtrl(self, -1, "", gui_position.PortfolioAccountTab.cost_basis_input)
        self.cost_basis_input.SetHint("Cash/Cost")
        self.update_button = wx.Button(self, label="Update Data", pos=gui_position.PortfolioAccountTab.update_button, size=(-1,-1))
        self.update_button.Bind(wx.EVT_BUTTON, self.updateManually, self.update_button)
        self.update_prices_button = wx.Button(self, label="Update Prices", pos=gui_position.PortfolioAccountTab.update_prices_button, size=(-1,-1))
        self.update_prices_button.Bind(wx.EVT_BUTTON, self.confirmUpdatePrices, self.update_prices_button)
        self.remove_data_button = wx.Button(self, label="Remove Data", pos=gui_position.PortfolioAccountTab.remove_data_button, size = (-1,-1))
        self.remove_data_button.Bind(wx.EVT_BUTTON, self.confirmRemoveData, self.remove_data_button)
        logging.info("PortfolioAccountTab {} loaded".format(self.name))

    def confirmRemoveData(self, event):
        """Validate the manual-entry fields and ask the user to confirm removing
        shares and/or cost-basis data (or a whole position) from the portfolio.
        """
        ticker = self.ticker_input.GetValue()
        cost_basis = self.cost_basis_input.GetValue()
        shares = self.share_input.GetValue()
        if not ticker:
            return
        if shares:
            try:
                shares = float(shares)
            except:
                logging.info("Shares must be a number.")
                return
        stock = utils.return_stock_by_symbol(ticker)
        if not stock:
            logging.info("Stock {} does not appear to exist. If you want to delete cash, you must set it to zero. It cannot be None".format(ticker))
            return
        # Build a confirmation message matching what will actually be removed.
        if ticker and not (cost_basis or shares):
            confirm_message = "You are about to remove %s from your portfolio." % stock.symbol
        elif ticker and shares and cost_basis:
            confirm_message = "You are about to remove %s's share and cost basis data from your portfolio." % stock.symbol
        elif ticker and shares:
            # Can't remove more shares than the portfolio currently holds.
            if shares <= self.portfolio_obj.stock_shares_dict.get(stock.symbol):
                confirm_message = "You are about to remove " + str(shares) + " of %s's shares from your portfolio." % stock.symbol
            else:
                logging.error("Error: invalid number of shares.")
                logging.info("You currently have {} shares.".format(str(self.portfolio_obj.stock_shares_dict.get(stock.symbol))))
                logging.info("You tried to remove {}.".format(str(shares)))
                return
        elif ticker and cost_basis:
            confirm_message = "You are about to remove %s's cost basis data from your portfolio." % stock.symbol
        else:
            logging.warning("Invalid input")
            return
        confirm = wx.MessageDialog(None,
            confirm_message,
            'Delete Data',
            style = wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Delete"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        #try:
        #	confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
        #except AttributeError:
        #	pass
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.removeData(self.portfolio_obj, stock, shares, cost_basis)

    def removeData(self, Account_object, stock, shares_to_remove = None, cost_basis = None):
        """Remove shares and/or cost-basis data for one stock from the account,
        persist the change, refresh dependent grids, and clear the input fields.

        With neither shares_to_remove nor cost_basis given, the whole position
        is removed.
        """
        if cost_basis or shares_to_remove:
            if cost_basis:
                Account_object.cost_basis_dict.pop(stock.symbol, None)
            if shares_to_remove:
                current_shares = Account_object.stock_shares_dict.get(stock.symbol)
                left_over_shares = float(current_shares) - float(shares_to_remove)
                if not left_over_shares:
                    # Position fully closed: drop both entries.
                    Account_object.cost_basis_dict.pop(stock.symbol, None)
                    Account_object.stock_shares_dict.pop(stock.symbol, None)
                else:
                    Account_object.stock_shares_dict[stock.symbol] = left_over_shares
        else: # remove stock
            Account_object.cost_basis_dict.pop(stock.symbol, None)
            Account_object.stock_shares_dict.pop(stock.symbol, None)
        db.save_portfolio_object(Account_object)
        utils.update_all_dynamic_grids()
        self.ticker_input.SetValue("")
        self.cost_basis_input.SetValue("")
        self.share_input.SetValue("")

    def addPortfolio(self, event):
        """Create and persist a new portfolio account and add a tab for it.
        The user is warned that a restart is required for full effect.
        """
        confirm = wx.MessageDialog(self,
            "You are about to add a new portfolio. The change will be applied the next time you launch this program.",
            'Restart Required',
            wx.OK | wx.CANCEL
        )
        if confirm.ShowModal() != wx.ID_OK:
            confirm.Destroy()
            return
        confirm.Destroy()
        config.NUMBER_OF_PORTFOLIOS = config.NUMBER_OF_PORTFOLIOS + 1
        id_number = config.NUMBER_OF_PORTFOLIOS
        portfolio_name = "Portfolio {}".format(id_number)
        portfolio_obj = db.create_new_Account_if_one_doesnt_exist(config.NUMBER_OF_PORTFOLIOS, name=portfolio_name)
        logging.info("Portfolio: {} {}, created".format(portfolio_obj.id_number, portfolio_obj.name))
        portfolio_account_notebook = config.GLOBAL_PAGES_DICT.get(config.PORTFOLIO_PAGE_UNIQUE_ID).obj.portfolio_account_notebook
        portfolio_account = PortfolioAccountTab(portfolio_account_notebook, (id_number), portfolio_name)
        portfolio_account.title = portfolio_name
        portfolio_account_notebook.AddPage(portfolio_account, portfolio_name)
        db.save_portfolio_object(portfolio_obj)
        return

    def fillSpreadsheetWithCurrentPortfolio(self):
        """Refresh the spreadsheet from this tab's current portfolio object."""
        portfolio_obj = self.portfolio_obj
        if portfolio_obj:
            self.spreadSheetFill(portfolio_obj = portfolio_obj)

    def spreadSheetFill(self, portfolio_obj):
        """Destroy any existing account spreadsheet and rebuild it from
        portfolio_obj, sized to the main frame when possible.
        """
        if self.current_account_spreadsheet:
            self.current_account_spreadsheet.Destroy()
        size = gui_position.PortfolioAccountTab.portfolio_page_spreadsheet_size_position_tuple[0]
        spreadsheet_fill_vertical_offset = gui_position.PortfolioAccountTab.spreadsheet_fill_vertical_offset
        # NOTE(review): spreadsheet_fill_horizontal_offset is not defined in this
        # scope, so this try block appears to always raise NameError and fall
        # back to the default size — confirm the intended offset source.
        try:
            width, height = gui_position.main_frame_size()
            size = ( width - spreadsheet_fill_horizontal_offset , height - spreadsheet_fill_vertical_offset) # find the difference between the Frame and the grid size
        except:
            pass
        self.current_account_spreadsheet = create_account_spread_sheet(self, portfolio_obj, size = size)
        # Double-clicking a cell loads that row's data into the edit fields.
        self.current_account_spreadsheet.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, self.loadStockDataFromGridIntoUpdateSection, self.current_account_spreadsheet)
        if self.current_account_spreadsheet:
            self.sizer.Add(self.current_account_spreadsheet, 1, wx.ALL|wx.EXPAND)
            self.current_account_spreadsheet.Show()
        return

    def loadStockDataFromGridIntoUpdateSection(self, event):
        """Copy the double-clicked grid row's ticker/shares/cost-basis (or the
        cash row's value) into the manual-entry fields for editing.
        """
        row = event.GetRow()
        total_rows = self.current_account_spreadsheet.GetNumberRows()
        column = event.GetCol()
        total_cols = self.current_account_spreadsheet.GetNumberCols()
        value = self.current_account_spreadsheet.GetCellValue(row, column)
        ticker = None
        shares = None
        cost_basis = None
        cash = None
        if row <= (total_rows - 6): # 5 extra nonequity rows below, editble
            # Equity row: columns 0/2/5 hold ticker, share count, cost basis.
            ticker = self.current_account_spreadsheet.GetCellValue(row, 0)
            shares = self.current_account_spreadsheet.GetCellValue(row, 2)
            cost_basis = self.current_account_spreadsheet.GetCellValue(row, 5)
        elif row == (total_rows - 3): # cash row
            cash = self.current_account_spreadsheet.GetCellValue(row, 4)
        else: # Nothing relevant selected
            self.ticker_input.SetValue("")
            self.share_input.SetValue("")
            self.cost_basis_input.SetValue("")
        if ticker:
            self.ticker_input.SetValue(ticker)
        else:
            self.ticker_input.SetValue("")
        if shares:
            self.share_input.SetValue(shares)
        else:
            self.share_input.SetValue("")
        if cost_basis and not cash:
            self.cost_basis_input.SetValue(cost_basis)
        else:
            self.cost_basis_input.SetValue("")
        if cash:
            # Cash amount is edited through the cost-basis field.
            self.ticker_input.SetValue("")
            self.share_input.SetValue("")
            self.cost_basis_input.SetValue(cash)

    def addAccountCSV(self, event):
        '''append a csv to current ticker list'''
        self.portfolio_import_name = self.drop_down.GetValue()
        # Identify the function mapped to screen name
        for triple in self.triple_list:
            if self.portfolio_import_name == triple.doc:
                portfolio_import_function = triple.function
            # in case doc string is too many characters...
            elif self.portfolio_import_name == triple.name:
                portfolio_import_function = triple.function
        if not portfolio_import_function:
            logging.error("Error, somthing went wrong locating the correct import function to use.")
        self.account_obj = process_user_function.import_portfolio_via_user_created_function(self, self.portfolio_id, portfolio_import_function)
        logging.info(type(self.account_obj))
        self.spreadSheetFill(self.account_obj)
        # this is used in sale prep page:
        config.PORTFOLIO_OBJECTS_DICT[str(self.portfolio_id)] = self.account_obj
        utils.update_all_dynamic_grids()
        logging.info("Portfolio CSV import complete.")

    def updateAccountViaCSV(self, event):
        """Look up the selected import function and refresh dependent grids.

        NOTE(review): this reads self.portfolio_import_name (set by
        addAccountCSV) rather than the freshly fetched
        self.portfolio_update_name, and the located function is never called —
        looks unfinished; confirm intent.
        """
        self.portfolio_update_name = self.drop_down.GetValue()
        # Identify the function mapped to screen name
        for triple in self.triple_list:
            if self.portfolio_import_name == triple.doc:
                portfolio_import_function = triple.function
            # in case doc string is too many characters...
            elif self.portfolio_import_name == triple.name:
                portfolio_import_function = triple.function
        utils.update_all_dynamic_grids()

    def updateManually(self, event):
        """Apply the manual-entry fields to the portfolio: either set the
        account's cash (cost-basis field alone) or update one stock's shares
        and/or cost basis, creating the stock on confirmation if missing.
        """
        ticker = utils.strip_string_whitespace(self.ticker_input.GetValue())
        cost_basis_or_cash = self.cost_basis_input.GetValue()
        shares = str(self.share_input.GetValue()).replace(",", "")
        # if (not (ticker or cost_basis_or_cash)) or ((cost_basis_or_cash and shares) and not ticker) or (ticker and not (cost_basis_or_cash or shares)):
        if not (ticker or cost_basis_or_cash):
            # need a ticker or cost_basis to update
            logging.warning("invalid entry: not (ticker or cost_basis_or_cash)")
            logging.warning("ticker: {}".format(ticker))
            logging.warning("cost_basis_or_cash: {}".format(cost_basis_or_cash))
            return
        elif (cost_basis_or_cash and shares) and not ticker:
            # if no ticker, can't update shares
            logging.warning("invalid entry: (cost_basis_or_cash and shares) and not ticker")
            logging.warning("cost_basis_or_cash: {}".format(cost_basis_or_cash))
            logging.warning("shares: {}".format(shares))
            logging.warning("ticker: {}".format(ticker))
            return
        elif ticker and not (cost_basis_or_cash or shares):
            # if ticker, but not cost basis or shares, it's just sitting there being a ticker
            logging.warning("invalid entry: ticker or not (cost_basis_or_cash or shares)")
            logging.warning("ticker: {}".format(ticker))
            logging.warning("cost_basis_or_cash: {}".format(cost_basis_or_cash))
            logging.warning("shares: {}".format(shares))
            return
        else:
            # correct entry
            pass
        if cost_basis_or_cash and not (ticker or shares):
            # User is updating cash in account
            cash = cost_basis_or_cash
            cash = utils.money_text_to_float(cash)
            if not self.portfolio_obj:
                self.portfolio_obj = db.create_new_Account_if_one_doesnt_exist(self.portfolio_id, name = self.name)
            self.portfolio_obj.available_cash = cash
        else:
            # updating an individual stock
            if not self.portfolio_obj:
                self.portfolio_obj = db.create_new_Account_if_one_doesnt_exist(self.portfolio_id, name = self.name)
            cost_basis = cost_basis_or_cash
            try:
                ticker = ticker.upper()
            except Exception as e:
                logging.error(e)
                logging.info("invalid ticker: %s" % ticker)
            stock = utils.return_stock_by_symbol(ticker)
            if not stock:
                logging.info("stock with ticker %s does not appear to exist, do you want to create it?" % ticker)
                confirm_update = self.confirmCreateMissingStock(ticker)
                if confirm_update:
                    db.create_new_Stock_if_it_doesnt_exist(ticker)
                    stock = utils.return_stock_by_symbol(ticker)
                else:
                    # cancel adding stock
                    logging.info("Canceling portfolio update")
                    return
            if shares:
                try:
                    shares = float(shares)
                    self.portfolio_obj.stock_shares_dict[ticker] = shares
                except Exception as e:
                    logging.error(e)
                    logging.info("Error: shares data is improperly formatted")
            if cost_basis:
                cost_basis = utils.money_text_to_float(cost_basis)
                self.portfolio_obj.cost_basis_dict[ticker] = cost_basis
        db.save_portfolio_object(self.portfolio_obj)
        self.spreadSheetFill(self.portfolio_obj)
        # Reset the entry fields for the next edit.
        self.ticker_input.SetValue("")
        self.cost_basis_input.SetValue("")
        self.share_input.SetValue("")
        self.ticker_input.SetHint("ticker")
        self.share_input.SetHint("# shares")
        self.cost_basis_input.SetHint("Cash/Cost")
        utils.update_all_dynamic_grids()

    def confirmCreateMissingStock(self, ticker):
        """Ask the user whether to create a Stock entry for an unknown ticker.
        Returns True when the user confirms.
        """
        ticker = ticker.upper()
        confirm = wx.MessageDialog(None,
            "Stock with ticker %s does not appear to exist, do you want to create a stock with ticker %s?" % (ticker, ticker),
            'Create Stock With Ticker %s?' % ticker,
            style = wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Create"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        #try:
        #	confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
        #except AttributeError:
        #	pass
        confirm.Destroy()
        return yesNoAnswer == wx.ID_YES

    def confirmUpdateMissingStock(self):
        """Warn about Yahoo Finance rate limiting before a single-stock update.
        Returns True when the user confirms.
        """
        confirm = wx.MessageDialog(None,
            "You are about to make a request from Yahoo Finance. If you do this too often they may temporarily block your IP address. This will require an update delay to prevent rate limiting.",
            'Confirm Update Stock Data?',
            style = wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Download"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        #try:
        #	confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
        #except AttributeError:
        #	pass
        confirm.Destroy()
        return yesNoAnswer == wx.ID_YES

    def confirmUpdateMultipleMissingStocks(self):
        """Warn about Yahoo Finance rate limiting before a multi-stock update.
        Returns True when the user confirms.
        """
        confirm = wx.MessageDialog(None,
            "Some of the stocks you are updating cannot be updated via Nasdaq. You are about to make a request from Yahoo Finance. If you do this too often they may temporarily block your IP address. This will require an update delay to prevent rate limiting.",
            'Confirm Update Stock Data?',
            style = wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Download"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        #try:
        #	confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
        #except AttributeError:
        #	pass
        confirm.Destroy()
        return yesNoAnswer == wx.ID_YES

    def confirmUpdatePrices(self, event):
        """Confirm with the user before downloading prices from Nasdaq, then
        run updatePrices on a yes.
        """
        confirm = wx.MessageDialog(None,
            "You are about to make a request from Nasdaq.com. If you do this too often they may temporarily block your IP address.",
            'Confirm Download',
            style = wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Download"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        #try:
        #	confirm.SetYesNoLabels(("&Scrape"), ("&Cancel"))
        #except AttributeError:
        #	pass
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.updatePrices()

    def updatePrices(self):
        """Refresh prices for this portfolio's holdings: pull Nasdaq data, then
        collect tickers with stale (or missing) last-close timestamps and, on
        user confirmation, scrape those via the fallback loop.
        """
        logging.info("Begin ticker download...")
        ticker_data = scrape.convert_nasdaq_csv_to_stock_objects()
        current_time = time.time()
        tickers_that_need_yql_update = []
        for ticker in self.portfolio_obj.stock_shares_dict:
            stock = config.GLOBAL_STOCK_DICT.get(ticker)
            if stock:
                last_update_for_last_close = utils.return_last_close_and_last_update_tuple(stock)[1]
                if last_update_for_last_close:
                    if (current_time - last_update_for_last_close) < config.PORTFOLIO_PRICE_REFRESH_TIME:
                        #fresh data
                        pass
                    else:
                        #update
                        tickers_that_need_yql_update.append(stock.symbol)
                else:
                    #update
                    tickers_that_need_yql_update.append(stock.symbol)
        if tickers_that_need_yql_update:
            confirm = self.confirmUpdateMultipleMissingStocks()
            logging.info(tickers_that_need_yql_update)
            if confirm:
                scrape.scrape_loop_for_missing_portfolio_stocks(ticker_list = tickers_that_need_yql_update, update_regardless_of_recent_updates = True)
        utils.update_all_dynamic_grids()
        logging.info("Not sure if necessary, but saving here after update.")
        db.save_GLOBAL_STOCK_DICT()

    def changeTabName(self, event, name=None):
        """Rename this portfolio (prompting the user when name is not given),
        enforcing name uniqueness across portfolios; persists on success.
        """
        old_name = self.portfolio_obj.name
        if not name:
            rename_popup = wx.TextEntryDialog(None,
                "What would you like to call this portfolio?",
                "Rename tab",
                str(self.name)
            )
            rename_popup.ShowModal()
            new_name = str(rename_popup.GetValue())
            rename_popup.Destroy()
        else:
            new_name = name
        portfolio_name_list = [obj.name for key, obj in config.PORTFOLIO_OBJECTS_DICT.items()]
        new_portfolio_names = []
        if new_name != old_name:
            logging.info(new_name)
            logging.info(portfolio_name_list)
            # Portfolio names must be unique across all accounts.
            if new_name not in portfolio_name_list:
                self.name = new_name
                self.portfolio_obj.name = new_name
                logging.info("This file opening needs to be removed.")
                # password = ""
                # if config.ENCRYPTION_POSSIBLE:
                # 	password = self.get_password()
                db.save_portfolio_object(self.portfolio_obj)
                confirm = wx.MessageDialog(self,
                    "This portfolio's name has been changed. The change will be applied the next time you launch this program.",
                    'Restart Required',
                    style = wx.ICON_EXCLAMATION
                )
                confirm.ShowModal()
                confirm.Destroy()
            else:
                error = wx.MessageDialog(self,
                    'Each portfolio must have a unique name.',
                    'Name Error',
                    style = wx.ICON_ERROR
                )
                error.ShowModal()
                error.Destroy()
        else:
            logging.info("portfolio name not changed")

    def confirmDeleteAccount(self, event):
        """Confirm with the user, then delete this portfolio's data."""
        confirm = wx.MessageDialog(None,
            "You are about to delete your current account data. Are you sure you want to delete this data?",
            'Delete Portfolio Data?',
            wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Delete"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.deleteAccountList()

    def deleteAccountList(self):
        '''delete account'''
        # Blank the tab label now; the tab itself only disappears on restart.
        portfolio_account_notebook = config.GLOBAL_PAGES_DICT.get(config.PORTFOLIO_PAGE_UNIQUE_ID).obj.portfolio_account_notebook
        portfolio_account_notebook.SetPageText(self.portfolio_id - 1, " ")
        deleted = db.delete_portfolio_object(self.portfolio_id) #, password = password)
        if not deleted:
            logging.info("Something weird is going on with deleting a portfolio.")
        if deleted:
            confirm = wx.MessageDialog(self,
                "Portfolio Deleted.",
                'Portfolio Deleted',
                style = wx.ICON_EXCLAMATION
            )
            confirm.ShowModal()
            confirm.Destroy()
            if self.current_account_spreadsheet:
                self.current_account_spreadsheet.Destroy()
            # Hide every control on the now-dead tab.
            self.add_button.Hide()
            self.add_button.Show()
            self.add_button.Hide() # seems to still appear after deletion
            self.drop_down.Hide()
            self.delete_button.Hide()
            self.rename_button.Hide()
            self.add_a_portfolio_button.Hide()
            self.ticker_input.Hide()
            self.share_input.Hide()
            self.cost_basis_input.Hide()
            self.update_button.Hide()
            self.update_prices_button.Hide()
            self.remove_data_button.Hide()
            text = wx.StaticText(self, -1,
                "This tab will disappear on restart",
                (wx.ALIGN_CENTRE_HORIZONTAL, 10)
            )
        return
###
class ViewDataPage(Tab):
    """Container tab holding the data-viewing sub-tabs (all stocks, one stock,
    and data fields) in a notebook.
    """
    def __init__(self, parent):
        self.title = "View Data"
        self.uid = config.VIEW_DATA_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        ####
        view_data_page_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
        view_data_notebook = wx.Notebook(view_data_page_panel)
        self.all_stocks_page = AllStocksPage(view_data_notebook)
        view_data_notebook.AddPage(self.all_stocks_page, self.all_stocks_page.title)
        self.stock_data_page = StockDataPage(view_data_notebook)
        view_data_notebook.AddPage(self.stock_data_page, self.stock_data_page.title)
        self.data_field_page = DataFieldPage(view_data_notebook)
        view_data_notebook.AddPage(self.data_field_page, self.data_field_page.title)
        sizer2 = wx.BoxSizer()
        sizer2.Add(view_data_notebook, 1, wx.EXPAND)
        self.SetSizer(sizer2)
        ####
class AllStocksPage(Tab):
    """Tab showing a mega-grid of every stock in the database, with a refresh
    button and a control to reset which attributes are displayed.
    """
    def __init__(self, parent):
        self.title = "View All Stocks"
        self.uid = config.ALL_STOCKS_PAGE_UNIQUE_ID
        self.parent = parent
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Full Stock List",
            gui_position.AllStocksPage.text
        )
        self.spreadsheet = None
        refresh_button = wx.Button(self, label="refresh", pos=gui_position.AllStocksPage.refresh_button, size=(-1,-1))
        refresh_button.Bind(wx.EVT_BUTTON, self.spreadSheetFillAllStocks, refresh_button)
        reset_attribute_button = wx.Button(self, label="reset attributes displayed", pos=gui_position.AllStocksPage.reset_attribute_button, size=(-1,-1))
        reset_attribute_button.Bind(wx.EVT_BUTTON, self.resetGlobalAttributeSet, reset_attribute_button)
        # The grid is built lazily on first refresh rather than at startup.
        self.first_spread_sheet_load = True
        # commented out below to speed up testing
        #self.spreadSheetFillAllStocks("event")
        logging.info("AllStocksPage loaded")

    def spreadSheetFillAllStocks(self, event):
        """(Re)build the all-stocks mega-grid, destroying any previous grid and
        sizing the new one to the main frame when possible.
        """
        if self.first_spread_sheet_load:
            self.first_spread_sheet_load = False
        else:
            # Destroy the previous grid before building a replacement.
            try:
                self.spreadsheet.Destroy()
            except Exception as exception:
                logging.info(exception)
        # Find all attribute names
        stock_list = utils.return_all_stocks()
        #You need this code to resize
        size = gui_position.full_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            #logging.info("{}, {}".format(width, height))
            size = (width-20, height-128) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        # NOTE(review): self.sizer is created but only ever receives self
        # (self.sizer.Add(self, ...)) and is never set on a window — looks
        # vestigial; the inner_sizer is what actually lays out the grid.
        self.sizer = None
        self.inner_sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer.AddSpacer(gui_position.full_spreadsheet_size_position_tuple[1][1])
        new_grid = create_megagrid_from_stock_list(stock_list, self, size = size)
        self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
        self.SetSizer(self.inner_sizer)
        self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
        ##
        self.spreadsheet = new_grid
        self.spreadsheet.Show()

    def resetGlobalAttributeSet(self, event):
        'adds SHARED attribute to GLOBAL_ATTRIBUTE_SET'
        db.reset_GLOBAL_ATTRIBUTE_SET()
class StockDataPage(Tab):
    """Tab that shows the full data set for one stock, looked up by ticker,
    with optional text filtering and per-stock re-scrape buttons."""
    def __init__(self, parent):
        self.title = "View One Stock"
        self.uid = config.STOCK_DATA_PAGE_UNIQUE_ID
        self.parent = parent
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Data for stock:",
            gui_position.StockDataPage.text
            )
        self.ticker_input = wx.TextCtrl(self, -1,
            "",
            gui_position.StockDataPage.ticker_input,
            style=wx.TE_PROCESS_ENTER
            )
        # Enter in the ticker box triggers the same lookup as the button.
        self.ticker_input.Bind(wx.EVT_TEXT_ENTER, self.createOneStockSpreadSheet)
        self.ticker_input.SetHint("ticker")
        look_up_button = wx.Button(self,
            label="look up",
            pos=gui_position.StockDataPage.look_up_button,
            size=(-1,-1)
            )
        look_up_button.Bind(wx.EVT_BUTTON, self.createOneStockSpreadSheet, look_up_button)
        self.search_data = wx.TextCtrl(self, -1,
            "",
            gui_position.StockDataPage.search_data,
            style=wx.TE_PROCESS_ENTER
            )
        self.search_data.SetHint("search data")
        # EVT_KEY_UP: the filter re-runs on every keystroke, not just Enter.
        self.search_data.Bind(wx.EVT_KEY_UP, self.searchData)
        self.search_button = wx.Button(self,
            label="search",
            pos=gui_position.StockDataPage.search_button,
            size=(-1,-1)
            )
        self.search_button.Bind(wx.EVT_BUTTON, self.searchData, self.search_button)
        self.update_yql_basic_data_button = wx.Button(self,
            label="update basic data",
            pos=gui_position.StockDataPage.update_yql_basic_data_button,
            size=(-1,-1)
            )
        self.update_yql_basic_data_button.Bind(wx.EVT_BUTTON, self.update_yql_basic_data, self.update_yql_basic_data_button)
        # Hidden until a stock is actually displayed (shown in
        # createOneStockSpreadSheet).
        self.update_yql_basic_data_button.Hide()
        #update_annual_data_button = wx.Button(self,
        #	label="update annual data",
        #	pos=(430,5),
        #	size=(-1,-1)
        #	)
        #update_analyst_estimates_button = wx.Button(self,
        #	label="update analyst estimates",
        #	pos=(570,5),
        #	size=(-1,-1)
        #	)
        self.update_additional_data_button = wx.Button(self,
            label="update additional data",
            pos=gui_position.StockDataPage.update_additional_data_button,
            size=(-1,-1)
            )
        self.update_additional_data_button.Bind(wx.EVT_BUTTON, self.updateAdditionalDataForOneStock, self.update_additional_data_button)
        self.update_additional_data_button.Hide()
        # Ticker currently shown in the grid (None until a first lookup).
        self.current_ticker_viewed = None
        # Last filter string applied via searchData.
        self.current_search_term = None
        #update_annual_data_button.Bind(wx.EVT_BUTTON, self.update_annual_data, update_annual_data_button)
        #update_analyst_estimates_button.Bind(wx.EVT_BUTTON, self.update_analyst_estimates_data, update_analyst_estimates_button)
        logging.info("StockDataPage loaded")
    def searchData(self, event, search_term = None):
        """Re-render the currently viewed stock filtered by the search box text."""
        current_ticker_viewed = self.current_ticker_viewed
        if not current_ticker_viewed:
            # Nothing displayed yet; a filter has nothing to act on.
            return
        if not search_term:
            search_term = self.search_data.GetValue() # if loading via text input
        self.current_search_term = search_term
        self.createOneStockSpreadSheet("event", current_ticker_viewed = current_ticker_viewed, search_term = search_term)
    def updateAdditionalDataForOneStock(self, event):
        """Scrape additional data for the entered ticker and redraw the grid."""
        ticker = self.ticker_input.GetValue()
        # "ticker" is the hint text; treat it like an empty input.
        if str(ticker) == "ticker" or not ticker:
            return
        scrape.scrape_all_additional_data_prep([ticker])
        self.createOneStockSpreadSheet("event")
    def createOneStockSpreadSheet(self, event, current_ticker_viewed = None, search_term = None):
        """Build (or rebuild) the single-stock grid, sized to the main frame.

        current_ticker_viewed: reuse this ticker instead of reading the input
        box (set when reloading, e.g. after a search).
        search_term: optional filter passed through to the grid builder.
        """
        if not current_ticker_viewed:
            ticker = self.ticker_input.GetValue() # if loading via text input
        else:
            ticker = current_ticker_viewed # if reloading spreadsheet
        if str(ticker) == "ticker" or not ticker:
            return
        #You need this code to resize
        size = gui_position.full_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            #logging.info("{}, {}".format(width, height))
            size = (width-20, height-128) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        self.sizer = None
        self.inner_sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer.AddSpacer(60)
        new_grid = create_spread_sheet_for_one_stock(self, str(ticker).upper(), size = size, search_term = search_term)
        self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
        self.SetSizer(self.inner_sizer)
        self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
        ##
        self.screen_grid = new_grid
        self.current_ticker_viewed = ticker.upper()
        # A stock is on screen now, so expose the update buttons.
        self.update_yql_basic_data_button.Show()
        self.update_additional_data_button.Show()
    def update_yql_basic_data(self, event):
        """Run the basic YQL scrape for the entered ticker, then redraw."""
        ticker = self.ticker_input.GetValue()
        if str(ticker) == "ticker":
            return
        logging.info("basic yql scrape")
        chunk_list_and_percent_of_full_scrape_done_and_number_of_tickers_to_scrape = scrape.prepareYqlScrape([str(ticker).upper()])
        chunk_list = chunk_list_and_percent_of_full_scrape_done_and_number_of_tickers_to_scrape[0]
        data = scrape.executeYqlScrapePartOne(chunk_list, 0)
        scrape.executeYqlScrapePartTwo(chunk_list, 0, data)
        self.createOneStockSpreadSheet(event = "")
    def update_annual_data(self, event):
        """Scrape annual financial statements for the entered ticker, then redraw.

        NOTE(review): currently unreachable from the UI — the button that bound
        this handler is commented out in __init__.
        """
        ticker = self.ticker_input.GetValue()
        if str(ticker) == "ticker":
            return
        logging.info("scraping yahoo and morningstar annual data, you'll need to keep an eye on the terminal until this finishes.")
        scrape_balance_sheet_income_statement_and_cash_flow( [str(ticker).upper()] )
        self.createOneStockSpreadSheet(event = "")
    def update_analyst_estimates_data(self, event):
        """Scrape analyst estimates for the entered ticker, then redraw.

        NOTE(review): currently unreachable from the UI — the button that bound
        this handler is commented out in __init__.
        """
        ticker = self.ticker_input.GetValue()
        if str(ticker) == "ticker":
            return
        logging.info("about to scrape")
        scrape_analyst_estimates( [str(ticker).upper()] )
        self.createOneStockSpreadSheet(event = "")
class DataFieldPage(Tab):
    """Tab listing every known data field (attribute) name in a grid."""

    def __init__(self, parent):
        self.title = "View Data Types"
        self.uid = config.DATA_FIELD_PAGE_UNIQUE_ID
        self.parent = parent
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Data types:",
            gui_position.DataFieldPage.text
            )
        button = wx.Button(self,
            label="look up",
            pos=gui_position.DataFieldPage.button,
            size=(-1,-1)
            )
        button.Bind(wx.EVT_BUTTON, self.createDataSpreadSheet, button)
        # Current grid widget; replaced on each look-up (None until first build).
        self.screen_grid = None
        logging.info("DataFieldPage loaded")

    def resetGlobalAttributeSet(self, event):
        'adds SHARED attribute to GLOBAL_ATTRIBUTE_SET'
        db.reset_GLOBAL_ATTRIBUTE_SET()

    def createDataSpreadSheet(self, event):
        """Build (or rebuild) the data-field grid, sized to the main frame."""
        # Bug fix: destroy any previous grid so repeated look-ups don't stack
        # widgets (matches the pattern used by AllStocksPage/StockDataPage).
        if self.screen_grid:
            try:
                self.screen_grid.Destroy()
            except Exception as e:
                logging.error(e)
        #You need this code to resize
        size = gui_position.full_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            #logging.info("{}, {}".format(width, height))
            size = (width-20, height-128) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        # NOTE(review): `size` is computed but create_spread_sheet_for_data_fields
        # takes no size argument — confirm whether the helper should accept it.
        self.sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddSpacer(60)
        new_grid = create_spread_sheet_for_data_fields(self)
        self.screen_grid = new_grid
        self.sizer.Add(new_grid, 1, wx.EXPAND)
        # Bug fix: the original built the sizer but never attached it, so the
        # grid never participated in layout/resizing like the sibling pages do.
        self.SetSizer(self.sizer)
        ##
####
#####
class AnalysisPage(Tab):
    """Container tab: hosts the four analysis sub-pages in a wx.Notebook."""

    def __init__(self, parent):
        self.title = "Analyse Data"
        self.uid = config.ANALYSE_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        ####
        host_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
        notebook = wx.Notebook(host_panel)
        # Build each sub-page, keep a handle on self, then register them with
        # the notebook in creation order.
        self.screen_page = ScreenPage(notebook)
        self.saved_screen_page = SavedScreenPage(notebook)
        self.rank_page = RankPage(notebook)
        self.custom_analyse_meta_page = CustomAnalysisMetaPage(notebook)
        for sub_page in (self.screen_page, self.saved_screen_page,
                         self.rank_page, self.custom_analyse_meta_page):
            notebook.AddPage(sub_page, sub_page.title)
        outer_sizer = wx.BoxSizer()
        outer_sizer.Add(notebook, 1, wx.EXPAND)
        self.SetSizer(outer_sizer)
class ScreenPage(Tab):
    """Tab that runs a user-selected screen function over all stocks and shows
    the conforming stocks in a grid; results can be saved as named screens."""

    def __init__(self, parent):
        self.title = "Screen"
        self.uid = config.SCREEN_PAGE_UNIQUE_ID
        self.parent = parent
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Screen Stocks",
            gui_position.ScreenPage.text
            )
        screen_button = wx.Button(self, label="screen", pos=gui_position.ScreenPage.screen_button, size=(-1,-1))
        screen_button.Bind(wx.EVT_BUTTON, self.screenStocks, screen_button)
        self.screen_name_list = meta.return_screen_function_short_names()
        self.drop_down = wx.ComboBox(self, pos=gui_position.ScreenPage.drop_down, choices=self.screen_name_list)
        # (doc-or-name, function) triples used to resolve the drop-down choice.
        self.triple_list = meta.return_screen_function_triple()
        self.save_screen_button = wx.Button(self, label="save", pos=gui_position.ScreenPage.save_screen_button, size=(-1,-1))
        self.save_screen_button.Bind(wx.EVT_BUTTON, self.saveScreen, self.save_screen_button)
        # Hidden until a screen has produced results worth saving.
        self.save_screen_button.Hide()
        self.screen_grid = None
        self.first_spread_sheet_load = True
        # Grid column holding ticker symbols; double-click there adds the
        # ticker to the research page.
        self.ticker_col = 0
        logging.info("ScreenPage loaded")

    def screenStocks(self, event):
        """Run the screen selected in the drop-down and display matching stocks."""
        screen_name = self.drop_down.GetValue()
        # Identify the function mapped to screen name.
        # Bug fix: screen_function was previously unbound when nothing matched,
        # raising NameError; initialize it and bail out early instead.
        screen_function = None
        for triple in self.triple_list:
            if screen_name == triple.doc:
                screen_function = triple.function
            # in case doc string is too many characters...
            elif screen_name == triple.name:
                screen_function = triple.function
        if not screen_function:
            logging.error("Error, somthing went wrong locating the correct screen to use.")
            return  # bug fix: don't fall through and call a missing function
        # run screen
        conforming_stocks = []
        for ticker in sorted(config.GLOBAL_STOCK_DICT.keys()):
            stock = config.GLOBAL_STOCK_DICT.get(ticker)
            result = screen_function(stock)
            if result is True:
                conforming_stocks.append(stock)
        config.CURRENT_SCREEN_LIST = conforming_stocks
        self.createSpreadsheet(conforming_stocks)
        self.save_screen_button.Show()

    def createSpreadsheet(self, stock_list):
        """Replace the current grid with one built from stock_list."""
        if self.first_spread_sheet_load:
            self.first_spread_sheet_load = False
        else:
            try:
                self.screen_grid.Destroy()
            except Exception as e:
                logging.error(e)
        # Sort None entries last. Bug fix: the original key evaluated x.symbol
        # even for None entries, which would raise AttributeError.
        stock_list.sort(key = lambda x: (x is None, x.symbol if x is not None else ""))
        #You need this code to resize
        size = gui_position.full_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            #logging.info("{}, {}".format(width, height))
            spreadsheet_width_height_offset = gui_position.ScreenPage.spreadsheet_width_height_offset
            size = (width-spreadsheet_width_height_offset[0], height-spreadsheet_width_height_offset[1]) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        self.sizer = None
        self.inner_sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer.AddSpacer(gui_position.full_spreadsheet_size_position_tuple[1][1])
        new_grid = create_megagrid_from_stock_list(stock_list, self, size = size)
        self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
        self.SetSizer(self.inner_sizer)
        self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
        ##
        self.screen_grid = new_grid
        self.screen_grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, lambda event: self.addStockToResearchPage(event), self.screen_grid)

    def saveScreen(self, event):
        """Prompt for a name and persist the current screen results.

        Recurses (once per rename attempt) when the chosen name collides with
        an existing saved screen.
        """
        current_screen_name_displayed = self.drop_down.GetValue()
        current_screen_dict = db.root.GLOBAL_STOCK_SCREEN_DICT
        screen_name_tuple_list = config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST
        existing_screen_names = [i[0] for i in screen_name_tuple_list]
        if not current_screen_dict:
            db.load_GLOBAL_STOCK_SCREEN_DICT()
            current_screen_dict = db.root.GLOBAL_STOCK_SCREEN_DICT
            # current_screen_dict must at least be {}
        save_popup = wx.TextEntryDialog(None,
            "What would you like to name this group?",
            "Save Screen",
            "{screen} saved on {time}".format(screen = current_screen_name_displayed, time = str(time.strftime("%m-%d %I:%M%p")))
            )
        if save_popup.ShowModal() == wx.ID_OK:
            saved_screen_name = str(save_popup.GetValue())
            # files save with replace(" ", "_") so you need to check if any permutation of this patter already exists.
            if saved_screen_name in existing_screen_names or saved_screen_name.replace(" ", "_") in existing_screen_names or saved_screen_name.replace("_", " ") in existing_screen_names:
                save_popup.Destroy()
                error = wx.MessageDialog(self,
                    'Each saved screen must have a unique name. Would you like to try saving again with a different name.',
                    'Error: Name already exists',
                    style = wx.YES_NO
                    )
                error.SetYesNoLabels(("&Rename"), ("&Don't Save"))
                yesNoAnswer = error.ShowModal()
                error.Destroy()
                if yesNoAnswer == wx.ID_YES:
                    # Recursion
                    logging.info("Recursion event in saveScreen")
                    self.saveScreen(event="event")
                return
            else:
                config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST.append([saved_screen_name, float(time.time())])
                # Save global dict
                db.save_GLOBAL_STOCK_STREEN_DICT()
                # Save screen name results
                db.save_named_screen(saved_screen_name, config.CURRENT_SCREEN_LIST)
                # Save SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST
                db.save_SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST()
                utils.update_all_screen_dropdowns_after_saving_a_new_screen()
                self.save_screen_button.Hide()
        save_popup.Destroy()
        return

    def addStockToResearchPage(self, event):
        """Double-click handler: forward the clicked ticker to the research page."""
        row = event.GetRow()
        col = event.GetCol()
        if int(col) == self.ticker_col:
            ticker = self.screen_grid.GetCellValue(row, col)
            utils.add_ticker_to_research_page(str(ticker))
class SavedScreenPage(Tab):
    """Tab for loading, viewing and deleting previously saved screen results."""
    def __init__(self, parent):
        self.title = "View Saved Screens"
        self.uid = config.SAVED_SCREEN_PAGE_UNIQUE_ID
        self.parent = parent
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            "Saved screens",
            gui_position.SavedScreenPage.text
            )
        refresh_screen_button = wx.Button(self, label="refresh list", pos=gui_position.SavedScreenPage.refresh_screen_button, size=(-1,-1))
        refresh_screen_button.Bind(wx.EVT_BUTTON, self.refreshScreens, refresh_screen_button)
        load_screen_button = wx.Button(self, label="load screen", pos=gui_position.SavedScreenPage.load_screen_button, size=(-1,-1))
        load_screen_button.Bind(wx.EVT_BUTTON, self.loadScreen, load_screen_button)
        self.existing_screen_name_list = []
        if config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST:
            # reversed(): newest saved screens appear first in the drop-down.
            self.existing_screen_name_list = [i[0] for i in reversed(config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST)]
        self.drop_down = wx.ComboBox(self, value="",
            pos=gui_position.SavedScreenPage.drop_down,
            choices=self.existing_screen_name_list,
            style = wx.TE_READONLY
            )
        # Name of the screen whose grid is on display (None until a load).
        self.currently_viewed_screen = None
        self.delete_screen_button = wx.Button(self, label="delete", pos=gui_position.SavedScreenPage.delete_screen_button, size=(-1,-1))
        self.delete_screen_button.Bind(wx.EVT_BUTTON, self.deleteScreen, self.delete_screen_button)
        # Hidden until a screen is actually displayed.
        self.delete_screen_button.Hide()
        self.first_spread_sheet_load = True
        self.spreadsheet = None
        # Grid column holding ticker symbols; double-click adds to research page.
        self.ticker_col = 0
        logging.info("SavedScreenPage loaded")
    def deleteScreen(self, event):
        """Confirm, then delete the currently viewed saved screen from disk."""
        confirm = wx.MessageDialog(None,
            "You are about to delete this screen.",
            'Are you sure?',
            wx.YES_NO
            )
        confirm.SetYesNoLabels(("&Delete"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        logging.info(self.spreadsheet)
        if yesNoAnswer != wx.ID_YES:
            return
        try:
            logging.info(self.currently_viewed_screen)
            db.delete_named_screen(self.currently_viewed_screen)
            self.spreadsheet.Destroy()
            self.currently_viewed_screen = None
        except Exception as exception:
            logging.error(exception)
            error = wx.MessageDialog(self,
                "Something went wrong. File was not deleted, because this file doesn't seem to exist.",
                'Error: File Does Not Exist',
                style = wx.ICON_ERROR
                )
            error.ShowModal()
            error.Destroy()
            return
        self.refreshScreens('event')
    def refreshScreens(self, event):
        """Rebuild the saved-screen drop-down from the current global list."""
        self.drop_down.Hide()
        self.drop_down.Destroy()
        # NOTE(review): purpose of this sleep is unknown — possibly waiting for
        # the ComboBox Destroy to settle; confirm before removing.
        time.sleep(2)
        self.existing_screen_name_list = [i[0] for i in reversed(config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST)]
        # NOTE(review): hardcoded pos here, unlike __init__ which uses
        # gui_position.SavedScreenPage.drop_down — confirm they should match.
        self.drop_down = wx.ComboBox(self,
            pos=(305, 6),
            choices=self.existing_screen_name_list,
            style = wx.TE_READONLY
            )
    def loadScreen(self, event):
        """Load the selected saved screen from disk and display it as a grid."""
        selected_screen_name = self.drop_down.GetStringSelection()
        try:
            config.CURRENT_SAVED_SCREEN_LIST = db.load_named_screen(selected_screen_name)
        except Exception as exception:
            logging.error(exception)
            error = wx.MessageDialog(self,
                "Something went wrong. This file doesn't seem to exist.",
                'Error: File Does Not Exist',
                style = wx.ICON_ERROR
                )
            error.ShowModal()
            error.Destroy()
            return
        self.currently_viewed_screen = selected_screen_name
        if self.first_spread_sheet_load:
            self.first_spread_sheet_load = False
        else:
            # Tear down the previous grid so widgets do not stack up.
            if self.spreadsheet:
                try:
                    self.spreadsheet.Destroy()
                except Exception as exception:
                    logging.error(exception)
        self.spreadSheetFill()
    def spreadSheetFill(self):
        """Build the grid for config.CURRENT_SAVED_SCREEN_LIST, sized to the frame."""
        #You need this code to resize
        size = gui_position.full_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            spreadsheet_width_height_offset = gui_position.SavedScreenPage.spreadsheet_width_height_offset
            size = (width-spreadsheet_width_height_offset[0], height-spreadsheet_width_height_offset[1]) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        self.sizer = None
        self.inner_sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer.AddSpacer(gui_position.full_spreadsheet_size_position_tuple[1][1])
        new_grid = create_megagrid_from_stock_list(config.CURRENT_SAVED_SCREEN_LIST, self, size = size)
        self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
        self.SetSizer(self.inner_sizer)
        self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
        ##
        self.spreadsheet = new_grid
        self.spreadsheet.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, lambda event: self.addStockToResearchPage(event), self.spreadsheet)
        self.spreadsheet.Show()
        self.delete_screen_button.Show()
    def addStockToResearchPage(self, event):
        """Double-click handler: forward the clicked ticker to the research page."""
        row = event.GetRow()
        col = event.GetCol()
        if int(col) == self.ticker_col:
            ticker = self.spreadsheet.GetCellValue(row, col)
            utils.add_ticker_to_research_page(str(ticker))
class RankPage(Tab):
    """Tab that aggregates stocks from saved screens and portfolio accounts,
    then ranks or sorts them by attribute or user-defined rank function."""

    def __init__(self, parent):
        self.title = "Rank"
        self.uid = config.RANK_PAGE_UNIQUE_ID
        self.parent = parent
        wx.Panel.__init__(self, parent)
        self.full_ticker_list = [] # this should hold all tickers in any spreadsheet displayed
        self.held_ticker_list = [] # not sure if this is relevant anymore
        self.full_attribute_list = list(config.GLOBAL_ATTRIBUTE_SET)
        rank_page_text = wx.StaticText(self, -1,
            self.title,
            gui_position.RankPage.rank_page_text
            )
        refresh_screen_button = wx.Button(self, label="refresh", pos=gui_position.RankPage.refresh_screen_button, size=(-1,-1))
        refresh_screen_button.Bind(wx.EVT_BUTTON, self.refreshScreens, refresh_screen_button)
        load_screen_button = wx.Button(self, label="add screen", pos=gui_position.RankPage.load_screen_button, size=(-1,-1))
        load_screen_button.Bind(wx.EVT_BUTTON, self.loadScreen, load_screen_button)
        load_portfolio_button = wx.Button(self, label="add account", pos=gui_position.RankPage.load_portfolio_button, size=(-1,-1))
        load_portfolio_button.Bind(wx.EVT_BUTTON, self.loadAccount, load_portfolio_button)
        update_additional_data_button = wx.Button(self, label="update additional data", pos=gui_position.RankPage.update_additional_data_button, size=(-1,-1))
        update_additional_data_button.Bind(wx.EVT_BUTTON, self.updateAdditionalData, update_additional_data_button)
        #update_annual_data_button = wx.Button(self, label="update annual data", pos=(5,5), size=(-1,-1))
        #update_annual_data_button.Bind(wx.EVT_BUTTON, self.updateAnnualData, update_annual_data_button)
        #update_analyst_estimates_button = wx.Button(self, label="update analysts estimates", pos=(5,30), size=(-1,-1))
        #update_analyst_estimates_button.Bind(wx.EVT_BUTTON, self.updateAnalystEstimates, update_analyst_estimates_button)
        self.existing_screen_name_list = []
        if config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST:
            self.existing_screen_name_list = [i[0] for i in reversed(config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST)] # add conditional to remove old screens
        self.drop_down = wx.ComboBox(self,
            pos=gui_position.RankPage.drop_down,
            choices=self.existing_screen_name_list,
            style = wx.TE_READONLY
            )
        # [name, id_number] pairs used to map the accounts drop-down back to
        # the portfolio object on disk.
        self.portfolio_name_tuple_list = []
        for key, portfolio_obj in config.PORTFOLIO_OBJECTS_DICT.items():
            tuple_to_append = [portfolio_obj.name, portfolio_obj.id_number]
            self.portfolio_name_tuple_list.append(tuple_to_append)
        self.accounts_drop_down = wx.ComboBox(self,
            pos=gui_position.RankPage.accounts_drop_down,
            choices = [obj.name for obj in sorted(config.PORTFOLIO_OBJECTS_DICT.values(), key=lambda x: x.id_number)],
            style = wx.TE_READONLY
            )
        self.currently_viewed_screen = None
        self.clear_button = wx.Button(self, label="clear", pos=gui_position.RankPage.clear_button, size=(-1,-1))
        self.clear_button.Bind(wx.EVT_BUTTON, self.clearGrid, self.clear_button)
        self.clear_button.Hide()
        self.sort_button = wx.Button(self, label="Sort by:", pos=gui_position.RankPage.sort_button, size=(-1,-1))
        self.sort_button.Bind(wx.EVT_BUTTON, self.sortStocks, self.sort_button)
        # Widen the sort drop-down when any attribute name is very long.
        sort_drop_down_width = -1
        if [attribute for attribute in config.GLOBAL_ATTRIBUTE_SET if (len(str(attribute)) > 50)]:
            sort_drop_down_width = 480
        self.sort_drop_down = wx.ComboBox(self,
            pos=gui_position.RankPage.sort_drop_down,
            choices=self.full_attribute_list,
            style = wx.TE_READONLY,
            size = (sort_drop_down_width, -1)
            )
        self.sort_button.Hide()
        self.sort_drop_down.Hide()
        self.rank_triple_list = meta.return_rank_function_triple()
        self.rank_name_list = meta.return_rank_function_short_names()
        self.rank_button = wx.Button(self, label="Rank by:", pos=gui_position.RankPage.rank_button, size=(-1,-1))
        self.rank_button.Bind(wx.EVT_BUTTON, self.rankStocks, self.rank_button)
        self.rank_drop_down = wx.ComboBox(self,
            pos=gui_position.RankPage.rank_drop_down,
            choices=self.rank_name_list,
            style = wx.TE_READONLY
            )
        self.rank_button.Hide()
        self.rank_drop_down.Hide()
        self.fade_opacity = 255
        self.spreadsheet = None
        self.rank_name = None
        # Grid column holding ticker symbols; double-click adds to research page.
        self.ticker_col = 0
        logging.info("RankPage loaded")

    def rankStocks(self, event):
        """Rank all loaded stocks with the function selected in the drop-down."""
        self.rank_name = self.rank_drop_down.GetValue()
        # Identify the function mapped to screen name.
        # Bug fix: rank_function was previously unbound when nothing matched,
        # raising NameError; initialize it and bail out early instead.
        rank_function = None
        for triple in self.rank_triple_list:
            if self.rank_name == triple.doc:
                rank_function = triple.function
            # in case doc string is too many characters...
            elif self.rank_name == triple.name:
                rank_function = triple.function
        if not rank_function:
            logging.error("Error, somthing went wrong locating the correct screen to use.")
            return  # bug fix: don't fall through and rank with a missing function
        # run ranking funtion on all stocks
        ranked_tuple_list = process_user_function.return_ranked_list_from_rank_function(config.RANK_PAGE_ALL_RELEVANT_STOCKS, rank_function)
        self.createRankedSpreadsheet(ranked_tuple_list, self.rank_name)

    def createUnrankedSpreadsheet(self, stock_list=None):
        """Display stock_list (default: all loaded stocks) sorted by symbol."""
        self.full_attribute_list = list(config.GLOBAL_ATTRIBUTE_SET)
        self.sort_drop_down.Set(self.full_attribute_list)
        if stock_list is None:
            stock_list = config.RANK_PAGE_ALL_RELEVANT_STOCKS
        if self.spreadsheet:
            try:
                self.spreadsheet.Destroy()
            except Exception as exception:
                logging.error(exception)
        stock_list.sort(key = lambda x: x.symbol)
        #You need this code to resize
        size = gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            size = (width-20, height-128) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        self.sizer = None
        self.inner_sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer.AddSpacer(gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[1][1])
        new_grid = create_megagrid_from_stock_list(stock_list, self, size = size, pos=gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[1])
        self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
        self.SetSizer(self.inner_sizer)
        self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
        ##
        self.spreadsheet = new_grid
        self.spreadsheet.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, lambda event: self.addStockToResearchPage(event), self.spreadsheet)
        self.clear_button.Show()
        self.sort_button.Show()
        self.sort_drop_down.Show()
        self.rank_button.Show()
        self.rank_drop_down.Show()
        self.spreadsheet.Show()

    def createRankedSpreadsheet(self, ranked_tuple_list, rank_name):
        """Display (value, stock) tuples with a leading rank_name column."""
        self.full_attribute_list = list(config.GLOBAL_ATTRIBUTE_SET)
        self.sort_drop_down.Set(self.full_attribute_list)
        #You need this code to resize
        size = gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[0]
        try:
            width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
            #logging.info("{}, {}".format(width, height))
            size = (width-20, height-128) # find the difference between the Frame and the grid size
        except Exception as e:
            logging.error(e)
        self.sizer = None
        self.inner_sizer = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
        self.inner_sizer.AddSpacer(gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[1][1])
        new_grid = create_ranked_megagrid_from_tuple_list(ranked_tuple_list, self, rank_name, size = size)
        self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
        self.SetSizer(self.inner_sizer)
        self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
        ##
        self.spreadsheet = new_grid
        self.spreadsheet.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, lambda event: self.addStockToResearchPage(event), self.spreadsheet)
        ######################################
        self.clear_button.Show()
        self.sort_button.Show()
        self.sort_drop_down.Show()
        self.rank_button.Show()
        self.rank_drop_down.Show()
        logging.info("rankStocks done!")
        self.spreadsheet.Show()

    def updateAdditionalData(self, event):
        """Re-scrape additional data for all loaded stocks, then rebuild the grid."""
        ticker_list = [stock.symbol for stock in config.RANK_PAGE_ALL_RELEVANT_STOCKS]
        scrape.scrape_all_additional_data_prep(ticker_list)
        # Bug fix: the original called self.createSpreadSheet(...), a method
        # this class does not define; the stock-list grid builder is
        # createUnrankedSpreadsheet.
        self.createUnrankedSpreadsheet(stock_list = config.RANK_PAGE_ALL_RELEVANT_STOCKS)

    def clearGrid(self, event):
        """Confirm, then empty the loaded-stock state and rebuild an empty grid."""
        confirm = wx.MessageDialog(None,
            "You are about to clear this grid.",
            'Are you sure?',
            wx.YES_NO
            )
        confirm.SetYesNoLabels(("&Clear"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer != wx.ID_YES:
            return
        logging.info("Clearing the Grid")
        config.RANK_PAGE_ALL_RELEVANT_STOCKS = []
        self.full_attribute_list = []
        self.relevant_attribute_list = []
        self.full_ticker_list = []
        self.held_ticker_list = []
        self.createUnrankedSpreadsheet()
        self.clear_button.Hide()
        self.sort_button.Hide()
        self.sort_drop_down.Hide()

    def refreshScreens(self, event):
        """Rebuild both drop-downs from the current screens and portfolios."""
        self.drop_down.Hide()
        self.drop_down.Destroy()
        self.accounts_drop_down.Hide()
        self.accounts_drop_down.Destroy()
        # NOTE(review): purpose of this sleep is unknown — possibly waiting for
        # the ComboBox Destroy to settle; confirm before removing.
        time.sleep(2)
        db.load_SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST()
        self.existing_screen_name_list = []
        if config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST:
            self.existing_screen_name_list = [i[0] for i in reversed(config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST)]
        # NOTE(review): hardcoded positions here, unlike __init__ which uses
        # gui_position.RankPage.* — confirm they should match.
        self.drop_down = wx.ComboBox(self,
            pos=(305, 6),
            choices=self.existing_screen_name_list,
            style = wx.TE_READONLY
            )
        self.portfolio_name_tuple_list = []
        for key, portfolio_obj in config.PORTFOLIO_OBJECTS_DICT.items():
            tuple_to_append = [portfolio_obj.name, portfolio_obj.id_number]
            self.portfolio_name_tuple_list.append(tuple_to_append)
        logging.info(self.portfolio_name_tuple_list)
        self.accounts_drop_down = wx.ComboBox(self,
            pos=(305, 31),
            choices = [obj.name for obj in sorted(config.PORTFOLIO_OBJECTS_DICT.values(), key=lambda x: x.id_number)],
            style = wx.TE_READONLY
            )

    def loadScreen(self, event):
        """Merge the selected saved screen's stocks into the loaded set and redraw."""
        selected_screen_name = self.drop_down.GetValue()
        try:
            saved_screen = db.load_named_screen(selected_screen_name)
        except Exception as exception:
            logging.error(exception)
            error = wx.MessageDialog(self,
                "Something went wrong. This file doesn't seem to exist.",
                'Error: File Does Not Exist',
                style = wx.ICON_ERROR
                )
            error.ShowModal()
            error.Destroy()
            return
        self.currently_viewed_screen = selected_screen_name
        possibly_remove_dead_stocks = []
        for stock in saved_screen:
            if not stock:
                # Stock saved in the screen no longer resolves to an object.
                possibly_remove_dead_stocks.append(stock)
                logging.info("One of the stocks in this screen does not appear to exist.")
            elif stock not in config.RANK_PAGE_ALL_RELEVANT_STOCKS:
                config.RANK_PAGE_ALL_RELEVANT_STOCKS.append(stock)
            else:
                logging.info("{} skipped".format(stock.symbol))
        self.createUnrankedSpreadsheet()

    def loadAccount(self, event):
        """Merge the selected portfolio account's holdings into the loaded set."""
        selected_account_name = self.accounts_drop_down.GetValue()
        tuple_not_found = True
        for this_tuple in self.portfolio_name_tuple_list:
            if selected_account_name == this_tuple[0]:
                tuple_not_found = False
                try:
                    saved_account = db.load_portfolio_object(id_number = this_tuple[1])
                except Exception as exception:
                    logging.error(exception)
                    error = wx.MessageDialog(self,
                        "Something went wrong. This file doesn't seem to exist.",
                        'Error: File Does Not Exist',
                        style = wx.ICON_ERROR
                        )
                    error.ShowModal()
                    error.Destroy()
                    return
        if tuple_not_found:
            error = wx.MessageDialog(self,
                "Something went wrong. This data doesn't seem to exist.",
                'Error: Data Does Not Exist',
                style = wx.ICON_ERROR
                )
            error.ShowModal()
            error.Destroy()
            return
        for ticker in saved_account.stock_shares_dict:
            stock = utils.return_stock_by_symbol(ticker)
            if stock not in config.RANK_PAGE_ALL_RELEVANT_STOCKS:
                config.RANK_PAGE_ALL_RELEVANT_STOCKS.append(stock)
                if str(stock.symbol) not in self.held_ticker_list:
                    self.held_ticker_list.append(str(stock.symbol))
                if str(stock.symbol) not in self.full_ticker_list:
                    self.full_ticker_list.append(str(stock.symbol))
        self.createUnrankedSpreadsheet()

    def sortStocks(self, event):
        """Sort loaded stocks by the chosen attribute: numeric values first
        (descending unless the attribute opts out), then strings, then stocks
        lacking the attribute."""
        sort_field = self.sort_drop_down.GetValue()
        do_not_sort_reversed = config.RANK_PAGE_ATTRIBUTES_THAT_DO_NOT_SORT_REVERSED
        if sort_field in do_not_sort_reversed:
            reverse_var = False
        else:
            reverse_var = True
        num_stock_value_list = []
        str_stock_value_list = []
        incompatible_stock_list = []
        for stock in config.RANK_PAGE_ALL_RELEVANT_STOCKS:
            try:
                val = getattr(stock, sort_field)
                try:
                    # Percent strings like "5.2%" sort numerically.
                    float_val = float(val.replace("%",""))
                    rank_tuple = Ranked_Tuple_Reference(float_val, stock)
                    num_stock_value_list.append(rank_tuple)
                except Exception:  # narrowed from a bare except:
                    rank_tuple = Ranked_Tuple_Reference(val, stock)
                    str_stock_value_list.append(rank_tuple)
            except Exception as exception:
                # Attribute missing on this stock: sort it to the end.
                rank_tuple = Ranked_Tuple_Reference(None, stock)
                incompatible_stock_list.append(rank_tuple)
        num_stock_value_list.sort(key = lambda x: x.value, reverse=reverse_var)
        str_stock_value_list.sort(key = lambda x: x.value)
        incompatible_stock_list.sort(key = lambda x: x.stock.symbol)
        sorted_tuple_list = num_stock_value_list + str_stock_value_list + incompatible_stock_list
        self.createRankedSpreadsheet(sorted_tuple_list, sort_field)
        self.sort_drop_down.SetStringSelection(sort_field)

    def addStockToResearchPage(self, event):
        """Double-click handler: forward the clicked ticker to the research page."""
        row = event.GetRow()
        col = event.GetCol()
        if int(col) == self.ticker_col:
            ticker = self.spreadsheet.GetCellValue(row, col)
            utils.add_ticker_to_research_page(str(ticker))
class CustomAnalysisMetaPage(Tab):
    """Container tab that builds one CustomAnalysisPage per user-defined
    analysis function and registers each in a notebook."""

    def __init__(self, parent):
        self.title = "Custom Analysis"
        self.uid = config.CUSTOM_ANALYSE_META_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        ####
        personal_analyse_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
        meta_analyse_notebook = wx.Notebook(personal_analyse_panel)
        self.user_created_function_page_triples = meta.return_custom_analysis_function_triple()
        self.user_created_function_page_triples.sort(key = lambda x: x.doc)
        # logging.warning(self.user_created_function_page_triples)
        # enumerate() replaces the original repeated list.index(triple) calls,
        # which were O(n^2) and returned the wrong position whenever two
        # triples compared equal.
        for page_number, triple in enumerate(self.user_created_function_page_triples, start=1):
            self.this_page = CustomAnalysisPage(meta_analyse_notebook, triple, page_number)
            doc_string = triple.doc
            function_name = triple.name
            # name custom analysis page: prefer the doc string, then a short
            # function name, then a generic numbered title.
            if doc_string:
                self.this_page.title = doc_string
            elif len(function_name) < 30:
                self.this_page.title = function_name
            else:
                self.this_page.title = "Custom Analysis " + str(page_number)
            meta_analyse_notebook.AddPage(self.this_page, self.this_page.title)
        sizer2 = wx.BoxSizer()
        sizer2.Add(meta_analyse_notebook, 1, wx.EXPAND)
        self.SetSizer(sizer2)
        logging.info("{} loaded".format(self.title))
class CustomAnalysisPage(Tab):
    """One tab of the Custom Analysis notebook.

    The user assembles a working set of stocks (typed one at a time, loaded
    all at once, pulled from a saved screen, or pulled from an account) and
    then runs a user-supplied spreadsheet-builder function over that set to
    render an analysis grid.
    """

    def __init__(self, parent, function_triple, page_index):
        self.title = None
        self.parent = parent
        wx.Panel.__init__(self, parent)
        # function_triple carries the user's function plus its metadata.
        self.doc_string = function_triple.doc
        self.function_name = function_triple.name
        self.custom_spreadsheet_builder = function_triple.function
        self.page_index = page_index
        self.panel_name = None
        # Layout: a 1x2 flex grid -- ticker list (left), analysis grid (right).
        self.sizer = wx.FlexGridSizer(rows=1, cols=2, hgap=1, vgap=0)
        self.ticker_sizer = wx.BoxSizer(wx.VERTICAL)
        self.grid_sizer = wx.BoxSizer(wx.VERTICAL)
        self.ticker_sizer.AddSpacer(gui_position.CustomAnalysisPage.ticker_sizer_AddSpacer)
        self.grid_sizer.AddSpacer(gui_position.CustomAnalysisPage.grid_sizer_AddSpacer)
        self.sizer.Add(self.ticker_sizer, 0, wx.BOTTOM | wx.EXPAND)
        self.sizer.Add(self.grid_sizer, 1, wx.ALL | wx.EXPAND)
        self.sizer.AddGrowableRow(0, 1)
        self.sizer.AddGrowableCol(1, 1)
        # Fix: the original set wx.VERTICAL and then immediately wx.BOTH;
        # only the final direction takes effect at layout time.
        self.sizer.SetFlexibleDirection(wx.BOTH)
        self.SetSizer(self.sizer)
        # Panel caption preference: docstring, then a short function name,
        # then a numbered fallback.
        if self.doc_string:
            self.panel_name = self.doc_string
        elif len(self.function_name) < 30:
            self.panel_name = self.function_name
        else:
            self.panel_name = "Custom Analysis " + str(self.page_index)
        self.refresh_screen_button = wx.Button(self, label="refresh", pos=gui_position.CustomAnalysisPage.refresh_screen_button, size=(-1, -1))
        self.refresh_screen_button.Bind(wx.EVT_BUTTON, self.refreshScreens, self.refresh_screen_button)
        self.load_screen_button = wx.Button(self, label="add screen", pos=gui_position.CustomAnalysisPage.load_screen_button, size=(-1, -1))
        self.load_screen_button.Bind(wx.EVT_BUTTON, self.loadScreen, self.load_screen_button)
        self.load_portfolio_button = wx.Button(self, label="add account", pos=gui_position.CustomAnalysisPage.load_portfolio_button, size=(-1, -1))
        self.load_portfolio_button.Bind(wx.EVT_BUTTON, self.loadAccount, self.load_portfolio_button)
        self.existing_screen_name_list = []
        if config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST:
            # Newest screens first. # add conditional to remove old screens
            self.existing_screen_name_list = [i[0] for i in reversed(config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST)]
        self.screen_drop_down = wx.ComboBox(self,
            pos=gui_position.CustomAnalysisPage.screen_drop_down,
            choices=self.existing_screen_name_list,
            style=wx.TE_READONLY
        )
        # [name, id] pairs for every known portfolio (kept for parity with the
        # original; only the names feed the combo box below).
        self.portfolio_name_tuple_list = [
            [portfolio_obj.name, portfolio_obj.id_number]
            for portfolio_obj in config.PORTFOLIO_OBJECTS_DICT.values()
        ]
        self.accounts_drop_down = wx.ComboBox(self,
            pos=gui_position.CustomAnalysisPage.accounts_drop_down,
            choices=[obj.name for obj in sorted(config.PORTFOLIO_OBJECTS_DICT.values(), key=lambda x: x.id_number)],
            style=wx.TE_READONLY
        )
        self.clear_button = wx.Button(self, label="clear", pos=gui_position.CustomAnalysisPage.clear_button, size=(-1, -1))
        self.clear_button.Bind(wx.EVT_BUTTON, self.clearSpreadsheet, self.clear_button)
        self.clear_button.Hide()
        self.save_button = wx.Button(self, label="save", pos=gui_position.CustomAnalysisPage.save_button, size=(-1, -1))
        self.save_button.Bind(wx.EVT_BUTTON, self.saveGridAs, self.save_button)
        self.save_button.Hide()
        self.fade_opacity = 255
        self.custom_spreadsheet = None
        self.ticker_input = wx.TextCtrl(self, -1,
            "",
            gui_position.CustomAnalysisPage.ticker_input,
            style=wx.TE_PROCESS_ENTER
        )
        self.ticker_input.SetHint("ticker")
        self.ticker_input.Bind(wx.EVT_TEXT_ENTER, self.addOneStock)
        self.add_one_stock_button = wx.Button(self,
            label="Add stock:",
            pos=gui_position.CustomAnalysisPage.add_one_stock_button,
            size=(-1, -1)
        )
        self.add_one_stock_button.Bind(wx.EVT_BUTTON, self.addOneStock, self.add_one_stock_button)
        self.add_all_stocks_button = wx.Button(self,
            label="Add all stocks",
            pos=gui_position.CustomAnalysisPage.add_all_stocks_button,
            size=(-1, -1)
        )
        self.add_all_stocks_button.Bind(wx.EVT_BUTTON, self.loadAllStocks, self.add_all_stocks_button)
        self.analyse = wx.Button(self,
            label="Analyse",
            pos=gui_position.CustomAnalysisPage.analyse,
            size=(-1, -1)
        )
        self.analyse.Bind(wx.EVT_BUTTON, self.loadCustomSpreadsheet, self.analyse)
        self.analyse.Hide()
        self.all_stocks_currently_included = []
        self.ticker_display = None
        self.screen_grid = None
        logging.info("{} loaded".format(self.panel_name))

    def addOneStock(self, event):
        """Add the stock typed into the ticker box to the working set."""
        ticker = self.ticker_input.GetValue()
        if str(ticker) == "ticker" or not ticker:
            return
        stock = utils.return_stock_by_symbol(ticker)
        if stock is None:
            logging.error("Error: Stock %s doesn't appear to exist" % ticker)
            return
        if stock in self.all_stocks_currently_included:
            # it's already included
            return
        self.all_stocks_currently_included.append(stock)
        self.showStocksCurrentlyUsed()
        self.ticker_input.SetValue("")

    def clearSpreadsheet(self, event):
        """Empty the working set and tear down the ticker list and grid."""
        self.all_stocks_currently_included = []
        self.ticker_input.SetValue("")
        # Either widget may still be None (never shown); the failed Destroy()
        # is logged and ignored, matching the original behavior.
        try:
            self.ticker_display.Destroy()
        except Exception as e:
            logging.error(e)
        try:
            self.screen_grid.Destroy()
        except Exception as e:
            logging.error(e)
        self.clear_button.Hide()
        self.save_button.Hide()
        self.analyse.Hide()

    def loadAllStocks(self, event):
        """Replace the working set with every stock in the database."""
        self.all_stocks_currently_included = utils.return_all_stocks()
        self.showStocksCurrentlyUsed(self.all_stocks_currently_included)

    def showStocksCurrentlyUsed(self, stock_list=None):
        """Render the working set (or *stock_list* if given) in the left-hand
        read-only ticker column."""
        if not stock_list and not self.all_stocks_currently_included:
            return
        try:
            self.ticker_display.Destroy()
        except Exception as e:
            logging.info(e)
        if not stock_list:
            stock_list = self.all_stocks_currently_included
        stock_list.sort(key=lambda x: x.symbol)
        # One symbol per line, trailing newline included; join() replaces the
        # original quadratic "+=" loop.
        ticker_list_massive_str = "".join(stock.symbol + "\n" for stock in stock_list)
        vertical_offset = gui_position.CustomAnalysisPage.vertical_offset
        height_offset = gui_position.CustomAnalysisPage.height_offset
        # Fix: `height` was unbound (NameError) if the frame-size lookup
        # failed; -1 lets wx pick a default height in that case.
        height = -1
        try:
            width, height = gui_position.main_frame_size()
            height = height - height_offset
        except Exception as e:
            logging.error(e)
        self.ticker_display_horizontal_offset = gui_position.CustomAnalysisPage.ticker_display_horizontal_offset
        self.ticker_display_horizontal_size = gui_position.CustomAnalysisPage.ticker_display_horizontal_size
        self.ticker_display = wx.TextCtrl(self, -1,
            ticker_list_massive_str,
            (self.ticker_display_horizontal_offset, vertical_offset),
            size=(self.ticker_display_horizontal_size, height),
            style=wx.TE_READONLY | wx.TE_MULTILINE,
        )
        self.ticker_sizer.Add(self.ticker_display, 1, wx.BOTTOM | wx.EXPAND)
        self.ticker_display.Show()
        self.analyse.Show()
        self.clear_button.Show()

    def refreshScreens(self, event):
        """Rebuild the saved-screen drop-down from the current screen list."""
        self.screen_drop_down.Hide()
        self.screen_drop_down.Destroy()
        # NOTE(review): this sleep was unexplained by the original author
        # ("Why did i put this here?"); presumably it waits for the screen
        # list to finish updating elsewhere -- confirm before removing.
        time.sleep(2)
        self.existing_screen_name_list = [i[0] for i in reversed(config.SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST)]
        self.screen_drop_down = wx.ComboBox(self,
            pos=gui_position.CustomAnalysisPage.screen_drop_down,
            choices=self.existing_screen_name_list,
            style=wx.TE_READONLY
        )

    def loadScreen(self, event):
        """Merge the stocks of the selected saved screen into the working set."""
        selected_screen_name = self.screen_drop_down.GetValue()
        try:
            screen_stock_list = db.load_named_screen(selected_screen_name)
        except Exception as exception:
            logging.error(exception)
            error = wx.MessageDialog(self,
                "Something went wrong. This file doesn't seem to exist.",
                'Error: File Does Not Exist',
                style=wx.ICON_ERROR
            )
            error.ShowModal()
            error.Destroy()
            return
        for stock in screen_stock_list:
            if stock and stock not in self.all_stocks_currently_included:
                self.all_stocks_currently_included.append(stock)
        self.showStocksCurrentlyUsed()

    def loadCustomSpreadsheet(self, event):
        """Run the user's spreadsheet-builder over the working set and show
        the resulting grid."""
        if not self.all_stocks_currently_included:
            return
        # First run: custom_spreadsheet is None, so Destroy() raises; the
        # failure is logged and ignored, matching the original behavior.
        try:
            self.custom_spreadsheet.Destroy()
        except Exception as e:
            logging.error(e)
        list_of_spreadsheet_cells = process_user_function.process_custom_analysis_spreadsheet_data(self.all_stocks_currently_included, self.custom_spreadsheet_builder)
        # You need this code to resize
        size = gui_position.CustomAnalysisPage.spreadsheet_size
        try:
            width, height = size
            size = (width - gui_position.CustomAnalysisPage.spreadsheet_width_height_offset[0], height - gui_position.CustomAnalysisPage.spreadsheet_width_height_offset[1])
        except Exception as e:
            logging.error(e)
        self.inner_inner_sizer = None
        new_grid = self.create_custom_analysis_spread_sheet(list_of_spreadsheet_cells, size=size)
        self.grid_sizer.Add(new_grid, 1, wx.EXPAND | wx.ALL)
        ##
        self.custom_spreadsheet = new_grid
        self.custom_spreadsheet.Show()

    def create_custom_analysis_spread_sheet(self,
        cell_list,
        held_ticker_list=None,  # fix: was a mutable default ([]); not used currently
        size=gui_position.CustomAnalysisPage.spreadsheet_size,
        position=gui_position.CustomAnalysisPage.spreadsheet_position,
        enable_editing=False
    ):
        """Build a wx.grid.Grid sized to fit *cell_list* and populate it.

        Each cell object supplies its own text, colors, row/column label
        titles, and alignment.  The grid is stored on self.screen_grid and
        also returned.
        """
        if held_ticker_list is None:
            held_ticker_list = []
        if len(cell_list) > 10000:
            logging.info("Creating extremely large custom analysis spreadsheet, this may take a few minutes... seriously.")
        # Grid dimensions: one past the largest 0-based row/col index seen.
        num_rows = 0
        num_columns = 0
        for cell in cell_list:
            num_rows = max(num_rows, cell.row)
            num_columns = max(num_columns, cell.col)
        num_columns += 1
        num_rows += 1
        self.screen_grid = wx.grid.Grid(self, -1, size=size, pos=position)
        self.screen_grid.CreateGrid(num_rows, num_columns)
        self.screen_grid.EnableEditing(enable_editing)
        # fill in grid
        for cell in cell_list:
            self.screen_grid.SetCellValue(cell.row, cell.col, str(cell.text))
            # Add color if relevant
            if cell.background_color is not None:
                self.screen_grid.SetCellBackgroundColour(cell.row, cell.col, cell.background_color)
            if cell.text_color is not None:
                self.screen_grid.SetCellTextColour(cell.row, cell.col, cell.text_color)
            if cell.col_title is not None:
                self.screen_grid.SetColLabelValue(cell.col, cell.col_title)
            if cell.row_title is not None:
                self.screen_grid.SetRowLabelValue(cell.row, cell.row_title)
            if cell.align_right:
                self.screen_grid.SetCellAlignment(cell.row, cell.col, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
        # Double-click on any cell sends its value to the Research page.
        self.screen_grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, self.addStockToResearchPage, self.screen_grid)
        self.screen_grid.AutoSizeColumns()
        # deal with colors and shit later, also held stocklist
        self.save_button.Show()
        return self.screen_grid

    def addStockToResearchPage(self, event):
        """Send the double-clicked cell's value to the Research page.

        NOTE(review): any cell can be double-clicked; there is no check that
        the cell actually holds a ticker (the original author noted the same
        uncertainty).
        """
        row = event.GetRow()
        col = event.GetCol()
        ticker = self.screen_grid.GetCellValue(row, col)
        utils.add_ticker_to_research_page(str(ticker))

    def loadAccount(self, event):
        """Merge all stocks held in the selected account into the working set."""
        account_name = self.accounts_drop_down.GetValue()
        portfolio_obj = utils.return_account_by_name(account_name)
        for ticker in portfolio_obj.stock_shares_dict:
            stock = utils.return_stock_by_symbol(ticker)
            if stock and stock not in self.all_stocks_currently_included:
                self.all_stocks_currently_included.append(stock)
        self.showStocksCurrentlyUsed()

    def saveGridAs(self, event):
        """Export the current analysis grid under the user function's name."""
        utils.save_grid_as(wx_window=self, wx_grid=self.screen_grid, title=self.function_name)
#### Research
class ResearchPage(Tab):
    """Research tab: maintains a list of stocks, each rendered as a row of
    link buttons (configured in config.RESEARCH_PAGE_DICT_LIST) that open
    external research pages in the default browser."""

    def __init__(self, parent):
        self.title = "Research"
        self.panel_name = "Research"
        self.uid = config.RESEARCH_PAGE_UNIQUE_ID
        wx.Panel.__init__(self, parent)
        text = wx.StaticText(self, -1,
            self.title,
            gui_position.ResearchPage.text
        )
        self.ticker_input = wx.TextCtrl(self, -1,
            "",
            gui_position.ResearchPage.ticker_input,
            style=wx.TE_PROCESS_ENTER
        )
        self.ticker_input.SetHint("ticker")
        self.ticker_input.Bind(wx.EVT_TEXT_ENTER, self.addStock, self.ticker_input)
        self.add_stock_button = wx.Button(self,
            label="add",
            pos=gui_position.ResearchPage.add_stock_button,
            size=(-1, -1)
        )
        self.add_stock_button.Bind(wx.EVT_BUTTON, self.addStock, self.add_stock_button)
        self.remove_stock_button = wx.Button(self,
            label="remove",
            pos=gui_position.ResearchPage.remove_stock_button,
            size=(-1, -1)
        )
        self.remove_stock_button.Bind(wx.EVT_BUTTON, self.confirmRemove, self.remove_stock_button)
        self.stock_list = []
        # rows_dict maps str(row index) -> ResearchPageRowDataList of widgets.
        self.rows_dict = {}
        self.development_sample_stocks = ['googl', 'aapl', 'msft', 'gs', 'att', 'luv']
        self.bingbong = wx.Button(self,
            label="Load examples for development",
            pos=gui_position.ResearchPage.bingbong,
            size=(-1, -1)
        )
        self.bingbong.Bind(wx.EVT_BUTTON, self.development_examples_load_into_page, self.bingbong)
        logging.info("ResearchPage loaded")

    def development_examples_load_into_page(self, event):
        """Populate the page with a fixed sample of tickers (dev helper)."""
        for ticker in self.development_sample_stocks:
            utils.add_ticker_to_research_page(ticker)

    def addStock(self, event, ticker=None):
        """Add *ticker* (or the typed-in ticker) to the research list."""
        if not ticker:
            ticker = self.ticker_input.GetValue()
        if str(ticker) == "ticker" or not ticker:
            return
        stock = utils.return_stock_by_symbol(ticker)
        if not stock:
            logging.info("Stock with symbol %s does not appear to exist" % ticker.upper())
            return
        self.stock_list.append(stock)
        self.stock_list = utils.remove_list_duplicates(self.stock_list)
        # NOTE(review): other pages sort stocks by .symbol; .ticker is used
        # here -- presumably equivalent attributes, confirm.
        self.stock_list.sort(key=lambda x: x.ticker)
        self.generateShownStockList("event")
        self.ticker_input.SetValue("")

    def confirmRemove(self, event):
        """Ask for confirmation, then remove the typed-in ticker's stock."""
        ticker = self.ticker_input.GetValue()
        if str(ticker) == "ticker" or not ticker:
            return
        stock = utils.return_stock_by_symbol(ticker)
        if not stock:
            logging.info("Stock with symbol %s does not appear to exist" % ticker.upper())
            return
        confirm = wx.MessageDialog(None,
            "You are about to remove %s from your research list. Are you sure you want to remove %s?" % (stock.ticker, stock.ticker),
            'Remove %s' % stock.ticker,
            style=wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Remove"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.removeStock(stock)

    def removeStock(self, stock):
        """Remove *stock* from the list and redraw the rows."""
        self.stock_list.remove(stock)
        self.generateShownStockList("event")
        self.ticker_input.SetValue("")

    def openButtonURL(self, event, row_obj, index):
        """Open the URL configured for button *index* of *row_obj*,
        substituting ticker/exchange/firm-name placeholders when the row is
        tied to a stock."""
        button_dict = getattr(row_obj, "button_dict" + str(index))
        url = button_dict.get("url")
        lambda_function = button_dict.get("lambda_function")
        # General rows carry no .stock attribute; getattr with a default
        # replaces the original bare except (which also caught
        # KeyboardInterrupt/SystemExit).
        stock = getattr(row_obj, "stock", None)
        if stock:
            ticker = stock.ticker
            firm_name = stock.firm_name.replace(" ", "%20")
            exchange = utils.return_stocks_exchange_if_possible(stock)
            url = url % {"ticker": ticker, "exchange": exchange, "firm_name": firm_name}
        if lambda_function:
            url = lambda_function(url)
        # Replace exchange if possible
        webbrowser.open(url, new=1, autoraise=True)

    def openWebsiteOnButtonClick(self, event, row_obj):
        """Open the row's stock's own website in the browser."""
        url = utils.return_stocks_website_if_possible(row_obj.stock)
        webbrowser.open(url, new=1, autoraise=True)

    def generateGeneralResearchRow(self):
        """Build the row of stock-independent link buttons.

        NOTE(review): the original comment doubted this is still used.
        """
        row_object = ResearchPageRowDataList()
        research_initial_button_vertical_offset = gui_position.ResearchPage.research_initial_button_vertical_offset
        research_initial_button_horizontal_offset = gui_position.ResearchPage.research_initial_button_horizontal_offset
        research_text_additional_vertical_offset = gui_position.ResearchPage.research_text_additional_vertical_offset
        research_added_width = gui_position.ResearchPage.research_added_width
        research_default_horizontal_offset = gui_position.ResearchPage.research_default_horizontal_offset
        row_object.ticker_textctrl = wx.StaticText(self, -1,
            "General:",
            (research_default_horizontal_offset, research_initial_button_vertical_offset + research_text_additional_vertical_offset)
        )
        # Fix: enumerate() replaces per-iteration list.index() lookups (O(n)
        # each, wrong for duplicate dicts).
        for button_index, button_dict in enumerate(config.GENERAL_RESEARCH_PAGE_DICT_LIST):
            button_width = button_dict.get("width") or -1
            button = wx.Button(self,
                label=str(button_dict.get("button_text")),
                pos=(research_initial_button_horizontal_offset + research_added_width, (button_index) + research_initial_button_vertical_offset),
                size=(button_width, -1)
            )
            research_added_width += button.GetSize()[0]
            lambda_function = button_dict.get("lambda_function")
            if lambda_function:
                # NOTE(review): overwritten on each iteration; only the last
                # button's function survives on the row object. openButtonURL
                # reads the per-button dicts instead, so this may be unused.
                setattr(row_object, "lambda_function", lambda_function)
            # Bind index/row via defaults so each closure keeps its own values.
            button.Bind(wx.EVT_BUTTON, lambda event, row_obj=row_object, index=button_index: self.openButtonURL(event, row_obj, index), button)
            button_name_for_row_object = "button" + str(button_index) + ''.join(char for char in button_dict.get("button_text") if char.isalnum())
            setattr(row_object, button_name_for_row_object, button)
            setattr(row_object, "button_dict" + str(button_index), button_dict)
        return row_object

    def generateStockRow(self, index, stock=None):
        """Build widgets for one stock at vertical slot *index* and register
        them in self.rows_dict.  Returns None when no stock is given."""
        if not stock:
            return None
        row_object = ResearchPageRowDataList()
        website_button_horizontal_offset = gui_position.ResearchPage.website_button_horizontal_offset
        stock_initial_button_horizontal_offset = gui_position.ResearchPage.stock_initial_button_horizontal_offset
        stock_initial_button_vertical_offset = gui_position.ResearchPage.stock_initial_button_vertical_offset
        stock_text_additional_vertical_offset = gui_position.ResearchPage.stock_text_additional_vertical_offset
        second_line_text_additional_offset = gui_position.ResearchPage.second_line_text_additional_offset
        vertical_offset_per_stock = gui_position.ResearchPage.vertical_offset_per_stock
        stock_added_width = gui_position.ResearchPage.stock_added_width
        stock_default_vertical_offset = gui_position.ResearchPage.stock_default_vertical_offset
        row_object.stock = stock
        row_object.ticker_textctrl = wx.StaticText(self, -1,
            stock.ticker,
            (stock_default_vertical_offset, (index*vertical_offset_per_stock) + stock_initial_button_vertical_offset + stock_text_additional_vertical_offset)
        )
        if utils.return_stocks_website_if_possible(stock):
            row_object.website_button = wx.Button(self,
                label="website",
                pos=(website_button_horizontal_offset, (index*vertical_offset_per_stock) + stock_initial_button_vertical_offset),
                size=(-1, -1)
            )
            row_object.website_button.Bind(wx.EVT_BUTTON, lambda event, row_obj=row_object: self.openWebsiteOnButtonClick(event, row_obj), row_object.website_button)
        row_object.firm_name_textctrl = wx.StaticText(self, -1,
            stock.firm_name,
            (stock_default_vertical_offset, (index*vertical_offset_per_stock) + stock_initial_button_vertical_offset + stock_text_additional_vertical_offset + second_line_text_additional_offset),
            size=(100, -1)
        )
        # Fix: enumerate() replaces per-iteration list.index() lookups.
        for button_index, button_dict in enumerate(config.RESEARCH_PAGE_DICT_LIST):
            button_width = button_dict.get("width") or -1
            button = wx.Button(self,
                label=str(button_dict.get("button_text")),
                pos=(stock_initial_button_horizontal_offset + stock_added_width, (index*vertical_offset_per_stock) + stock_initial_button_vertical_offset),
                size=(button_width, -1)
            )
            stock_added_width += button.GetSize()[0]
            lambda_function = button_dict.get("lambda_function")
            if lambda_function:
                setattr(row_object, "lambda_function", lambda_function)
            button.Bind(wx.EVT_BUTTON, lambda event, row_obj=row_object, index=button_index: self.openButtonURL(event, row_obj, index), button)
            button_name_for_row_object = "button" + str(button_index) + ''.join(char for char in button_dict.get("button_text") if char.isalnum())
            setattr(row_object, button_name_for_row_object, button)
            setattr(row_object, "button_dict" + str(button_index), button_dict)
        self.rows_dict[str(index)] = row_object
        return row_object

    def generateShownStockList(self, event):
        """Destroy every existing row widget, then rebuild one row per stock."""
        for i in range(len(self.rows_dict)):
            row_obj = self.rows_dict.get(str(i))
            for attribute in dir(row_obj):
                if attribute.startswith("__"):
                    continue
                try:
                    getattr(row_obj, attribute).Destroy()
                except Exception:
                    # Non-widget attributes have no Destroy(); skip them
                    # (narrowed from a bare except).
                    pass
        self.rows_dict = {}
        for index, stock in enumerate(self.stock_list):
            self.generateStockRow(index, stock=stock)
####
class SalePrepPage(Tab):
def __init__(self, parent):
    """Build the Sale Prep tab: one checkbox per portfolio account, the
    control buttons, and an initial fill of the sale-prep spreadsheet."""
    self.title = "Sale Prep"
    self.uid = config.SALE_PREP_PAGE_UNIQUE_ID
    self.parent = parent
    wx.Panel.__init__(self, parent)
    text = wx.StaticText(self, -1,
        "Sale Prep",
        gui_position.SalePrepPage.text
    )
    self.ticker_list = []
    self.checkbox_list = []
    self.rows_dict = {}
    # One checkbox per portfolio; PORTFOLIO_OBJECTS_DICT keys are 1-based
    # strings ("1", "2", ...).
    # NOTE(review): exportSaleCandidates/spreadSheetFill later map checkbox
    # index i back to portfolio key str(i+1); because missing portfolios are
    # skipped here (continue), the two indexings can fall out of alignment --
    # confirm.
    for i in range(len(config.PORTFOLIO_OBJECTS_DICT)):
        portfolio_exists = config.PORTFOLIO_OBJECTS_DICT.get(str(i+1))
        if not portfolio_exists:
            continue
        horizontal_offset = gui_position.SalePrepPage.horizontal_offset
        # Start a second column of checkboxes after the fifth account.
        if i>=5:
            horizontal_offset = gui_position.SalePrepPage.horizontal_offset_i_greater_than_n
        checkbox_to_add = wx.CheckBox(self, -1,
            config.PORTFOLIO_OBJECTS_DICT.get(str(i+1)).name,
            pos=((gui_position.SalePrepPage.checkbox_initial_offset + horizontal_offset), (gui_position.SalePrepPage.checkbox_vertical_offset_factor*i)),
            size=(-1,-1)
        )
        # All accounts are included by default.
        checkbox_to_add.SetValue(True)
        self.checkbox_list.append(checkbox_to_add)
    line = wx.StaticLine(self, -1, pos=gui_position.SalePrepPage.line, size=gui_position.SalePrepPage.line_size)
    refresh_button = wx.Button(self, label="Clear and Refresh Spreadsheet", pos=gui_position.SalePrepPage.refresh_button, size=(-1,-1))
    refresh_button.Bind(wx.EVT_BUTTON, self.spreadSheetFill, refresh_button)
    self.load_new_account_data_button = wx.Button(self, label="Refresh Accounts Data and Spreadsheet", pos=gui_position.SalePrepPage.load_new_account_data_button, size=(-1,-1))
    self.load_new_account_data_button.Bind(wx.EVT_BUTTON, self.refreshAccountData, self.load_new_account_data_button)
    self.save_button = wx.Button(self, label="Export for Trade Window", pos=gui_position.SalePrepPage.save_button, size=(-1,-1))
    self.save_button.Bind(wx.EVT_BUTTON, self.exportSaleCandidates, self.save_button)
    # Hidden until a grid exists to export.
    self.save_button.Hide()
    self.saved_text = wx.StaticText(self, -1,
        "Data is now in memory.",
        gui_position.SalePrepPage.saved_text
    )
    self.saved_text.Hide()
    self.commission = config.DEFAULT_COMMISSION
    # Defines colors and the fixed header/label cells used by spreadSheetFill.
    self.set_spreadsheet_values()
    self.grid = None
    # Fill the spreadsheet once if at least one portfolio checkbox is ticked.
    for i in range(len(self.checkbox_list)):
        box = self.checkbox_list[i]
        if box:
            is_checked = box.GetValue()
            if is_checked:
                self.spreadSheetFill('event')
                break
    logging.info("SalePrepPage loaded")
def resetPage(self):
    """Forget all cached row state and regenerate the spreadsheet."""
    self.rows_dict = dict()
    self.spreadSheetFill("event")
def cell_is_writable(self, row_num, col_num):
    """Return True when the cell at (row_num, col_num) accepts user input.

    Writable cells are the share-count / share-percent columns below the
    fixed header rows, plus the single carryover-loss input cell on row 3.
    """
    editable_columns = (self.num_of_shares_cell.col, self.percent_of_shares_cell.col)
    if row_num >= config.DEFAULT_ROWS_ON_SALE_PREP_PAGE and col_num in editable_columns:
        return True
    return (row_num, col_num) == (3, self.carryover_input_cell.col)
def set_spreadsheet_values(self):
    """Define the colors and the fixed header/label SpreadsheetCell objects
    that spreadSheetFill renders into the grid.

    Rows 4 and 5 hold two-line column headers; rows 2/3 hold the carryover
    input; row 7 holds the totals label.
    """
    # colors (hex strings for the writable-input and divider cells)
    self.carryover_input_color_hex = "#C5DBCA"
    self.num_of_shares_input_color_hex = "#CFE8FC"
    self.percentage_of_shares_input_color_hex = "#CFFCEF"
    self.dark_cell_color_hex = "#333333"
    # row 5 (with row-4 partners for two-line headers)
    self.first_cell = SpreadsheetCell(row = 5, col = 0, text = "")
    self.num_of_shares_cell = SpreadsheetCell(row = 4, col = 1, text = "# of shares", align_center = True)
    self.num_of_shares_cell_2 = SpreadsheetCell(row = 5, col = 1, text = "to sell", align_center = True)
    self.percent_of_shares_cell = SpreadsheetCell(row = 4, col = 2, text = "%" + " of shares", align_center = True)
    self.percent_of_shares_cell_2 = SpreadsheetCell(row = 5, col = 2, text = "to sell", align_center = True)
    self.ticker_cell = SpreadsheetCell(row = 5, col = 3, text = "Ticker")
    self.syntax_check_cell = SpreadsheetCell(row = 5, col = 4, text = "")#Syntax Check")
    self.name_cell = SpreadsheetCell(row = 5, col = 5, text = "Name")
    self.sale_check_cell = SpreadsheetCell(row = 4, col = 6, text = "Sale", align_center = True)
    self.sale_check_cell_2 = SpreadsheetCell(row = 5, col = 6, text = "Check", align_center = True)
    self.number_of_shares_copy_cell = SpreadsheetCell(row = 4, col = 7, text = "# of shares", align_center = True)
    self.number_of_shares_copy_cell_2 = SpreadsheetCell(row = 5, col = 7, text = "to sell", align_center = True)
    self.percent_of_shares_copy_cell = SpreadsheetCell(row = 4, col = 8, text = "%" + " of shares", align_center = True)
    self.percent_of_shares_copy_cell_2 = SpreadsheetCell(row = 5, col = 8, text = "to sell", align_center = True)
    self.total_shares_cell = SpreadsheetCell(row = 4, col = 9, text = "Total # of", align_center = True)
    self.total_shares_cell_2 = SpreadsheetCell(row = 5, col = 9, text = "shares", align_center = True)
    self.price_cell = SpreadsheetCell(row = 5, col = 10, text = "Price")
    self.sale_value_cell = SpreadsheetCell(row = 5, col = 11, text = "Sale Value")
    self.commission_cell = SpreadsheetCell(row = 4, col = 12, text = "Commission loss", align_center = True)
    self.commission_cell_2 = SpreadsheetCell(row = 5, col = 12, text = "($%.2f/trade)" % float(self.commission), align_center = True)
    self.cost_basis_cell = SpreadsheetCell(row = 4, col = 13, text = "Cost basis", align_center = True)
    self.cost_basis_cell_2 = SpreadsheetCell(row = 5, col = 13, text = "per share", align_center = True)
    self.capital_gains_cell = SpreadsheetCell(row = 4, col = 14, text = "Capital", align_center = True)
    self.capital_gains_cell_2 = SpreadsheetCell(row = 5, col = 14, text = "Gains", align_center = True)
    self.adjusted_cap_gains_cell = SpreadsheetCell(row = 4, col = 15, text = "Adjusted Capital Gains", align_center = True)
    self.adjusted_cap_gains_cell_2 = SpreadsheetCell(row = 5, col = 15, text = "(including carryovers)", align_center = True)
    # Header cells grouped by row for spreadSheetFill's bulk rendering loop.
    self.row_four_cell_list = [
        self.num_of_shares_cell,
        self.percent_of_shares_cell,
        self.sale_check_cell,
        self.number_of_shares_copy_cell,
        self.percent_of_shares_copy_cell,
        self.total_shares_cell,
        self.commission_cell,
        self.cost_basis_cell,
        self.capital_gains_cell,
        self.adjusted_cap_gains_cell,
        ]
    self.row_five_cell_list = [
        self.first_cell,
        self.num_of_shares_cell_2,
        self.percent_of_shares_cell_2,
        self.ticker_cell,
        self.syntax_check_cell,
        self.name_cell,
        self.sale_check_cell_2,
        self.number_of_shares_copy_cell_2,
        self.percent_of_shares_copy_cell_2,
        self.total_shares_cell_2,
        self.price_cell,
        self.sale_value_cell,
        self.commission_cell_2,
        self.cost_basis_cell_2,
        self.capital_gains_cell_2,
        self.adjusted_cap_gains_cell_2,
        ]
    # Row 5 covers every column, so its length is the grid's column count.
    self.total_number_of_columns = len(self.row_five_cell_list)
    # row 2
    self.carryover_text_cell = SpreadsheetCell(row = 2, col = self.adjusted_cap_gains_cell.col, text = "Input carryover loss (if any)")
    # row 3 (the only writable cell outside the share-input columns)
    self.carryover_input_cell = SpreadsheetCell(row = 3, col = self.carryover_text_cell.col, text = 0., value = 0., align_right = True)
    # row 7
    self.totals_cell = SpreadsheetCell(row = 7, col = 0, text = "Totals:")
def exportSaleCandidates(self, event):
    """Collect (ticker, shares_to_sell, portfolio) tuples from the grid and
    hand them, with the checked portfolios, to the Trade page.

    Aborts without exporting if any quantity cell is flagged "Error" by the
    syntax check column.
    """
    self.save_button.Hide()
    num_columns = self.grid.GetNumberCols()
    num_rows = self.grid.GetNumberRows()
    default_rows = config.DEFAULT_ROWS_ON_SALE_PREP_PAGE
    sell_tuple_list = []  # one (ticker, num_shares, portfolio) per stock to sell
    # Only data rows below the fixed headers are scanned.
    for row_num in range(default_rows, num_rows):
        for column_num in range(num_columns):
            if column_num != self.number_of_shares_copy_cell.col:
                continue
            not_empty = self.grid.GetCellValue(row_num, column_num)
            # The syntax-check flag lives one column to the left.
            error = self.grid.GetCellValue(row_num, column_num - 1)
            if error != "Error":
                error = None
            logging.info(not_empty)
            if not_empty and not error:
                # Skip explicit zero quantities.
                if int(not_empty):
                    # NOTE(review): portfolio id is read from the hidden
                    # first column of the row -- confirm spreadSheetFill
                    # writes it there.
                    portfolio_id_number = str(self.grid.GetCellValue(row_num, self.first_cell.col))
                    relevant_portfolio = config.PORTFOLIO_OBJECTS_DICT.get(portfolio_id_number)
                    ticker = str(self.grid.GetCellValue(row_num, self.ticker_cell.col))
                    number_of_shares_to_sell = int(self.grid.GetCellValue(row_num, self.number_of_shares_copy_cell.col))
                    sell_tuple_list.append((ticker, number_of_shares_to_sell, relevant_portfolio))
            elif error:
                logging.info("ERROR: Could not save sell list. There are errors in quantity syntax.")
                return
    for sell_tuple in sell_tuple_list:
        logging.info(sell_tuple)
    # Here, i'm not sure whether to save to file or not (currently not saving to file, obviously)
    relevant_portfolios_list = []
    # NOTE(review): checkbox index i is assumed to map to portfolio key
    # str(i+1); __init__ skips missing portfolios when building the list, so
    # this mapping can drift -- confirm.
    for i, box in enumerate(self.checkbox_list):
        if box.GetValue():
            relevant_portfolios_list.append(config.PORTFOLIO_OBJECTS_DICT[str(i+1)])
    config.SALE_PREP_PORTFOLIOS_AND_SALE_CANDIDATES_TUPLE = [
        relevant_portfolios_list,
        sell_tuple_list
    ]
    logging.info(config.SALE_PREP_PORTFOLIOS_AND_SALE_CANDIDATES_TUPLE)
    self.saved_text.Show()
    trade_page = config.GLOBAL_PAGES_DICT.get(config.TRADE_PAGE_UNIQUE_ID).obj
    trade_page.importSaleCandidates("event")
def refreshAccountData(self, event):
    """Rebuild the portfolio checkboxes from config.PORTFOLIO_OBJECTS_DICT
    (picking up newly added accounts) and refill the spreadsheet."""
    ######## Rebuild Checkbox List in case of new accounts
    for checkbox in self.checkbox_list:
        try:
            checkbox.Destroy()
        except Exception as exception:
            logging.error(exception)
    self.checkbox_list = []
    for key, portfolio_obj in config.PORTFOLIO_OBJECTS_DICT.items():
        try:
            index = int(key)-1
        except Exception as e:
            logging.error(e)
            logging.info("Note: Something has gone wrong with the keys to the config.PORTFOLIO_OBJECTS_DICT, which is throwing this error, they should be indexed starting with 1")
            # Fix: previously execution fell through with `index` stale or
            # unbound; without a valid index the checkbox cannot be placed.
            continue
        if not portfolio_obj:
            continue
        for attribute in dir(portfolio_obj):
            if not attribute.startswith("_"):
                # Fix: the original used '": ()".format(value)', a format
                # string with no placeholder, so the value was dropped from
                # the log; lazy %-args log it properly.
                logging.info("%s: %s", attribute, getattr(portfolio_obj, attribute))
        # Start a second column of checkboxes after the fifth account.
        horizontal_offset = 0
        if index >= 5:
            horizontal_offset = 200
        checkbox_to_add = wx.CheckBox(self, -1,
            portfolio_obj.name,
            pos=((gui_position.SalePrepPage.checkbox_initial_offset + horizontal_offset), (gui_position.SalePrepPage.checkbox_vertical_offset_factor*index)),
            size=(-1,-1)
        )
        checkbox_to_add.SetValue(True)
        self.checkbox_list.append(checkbox_to_add)
    self.spreadSheetFill("event")
def hideSaveButtonWhileEnteringData(self, event):
    """Deactivated handler (no-op).

    It was meant to hide the save button whenever a colored (writable) cell
    was clicked, but the background-color test produced too many false
    positives, so the body is intentionally commented out.
    """
    # This function has been deactivated, unfortunately it causes too many false positives...
    # color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
    # logging.info(color)
    # logging.info(type(color))
    # logging.info("---------")
    # if color != (255, 255, 255, 255):
    # logging.info('it works')
    # self.save_button.Hide()
    # event.Skip()
    pass
def spreadSheetFill(self, event):
    """Rebuild the sale-prep spreadsheet grid from scratch.

    Hides the previous grid (destroying it segfaults), creates a new grid
    sized from the checked portfolio checkboxes, paints the static header
    rows, replays every cached SpreadsheetRow from self.rows_dict into the
    new grid, and finally recomputes the totals row (sale value,
    commission, capital gains and carryover adjustments).

    Fixes vs. previous revision:
    - ``is not ""`` literal-identity comparisons replaced with ``!= ""``
      (identity with a literal is unreliable and a SyntaxWarning).
    - the startup sale-value lookup read ``row_obj`` (always falsy on that
      branch) instead of the freshly fetched ``this_row``.
    - bare ``except:`` clauses narrowed to ``except Exception:``.
    """
    try:
        # destroying the grid throws a segmentation fault for some reason,
        # so the stale grid is only hidden and replaced
        self.grid.Hide()
    except Exception as exception:
        pass
        #logging.error(exception)
    # collect the portfolios whose checkbox is currently ticked
    relevant_portfolios_list = []
    for i in range(len(self.checkbox_list)):
        box = self.checkbox_list[i]
        is_checked = box.GetValue()
        if is_checked:
            relevant_portfolios_list.append(config.PORTFOLIO_OBJECTS_DICT[str(i+1)])
    # size the new grid: the fixed header rows plus, per selected account,
    # one title row and one row per holding
    default_columns = self.total_number_of_columns
    default_rows = config.DEFAULT_ROWS_ON_SALE_PREP_PAGE
    num_columns = default_columns
    num_rows = default_rows
    for account in relevant_portfolios_list:
        try:
            num_rows += 1 # for account name
            num_stocks = len(account.stock_shares_dict)
            num_rows += num_stocks
        except Exception as exception:
            logging.error(exception)
    # set size for grid
    size = gui_position.SalePrepPage.size
    try:
        width, height = gui_position.MainFrame_size
        # find the difference between the Frame and the grid size
        size = (width-gui_position.SalePrepPage.width_adjust, height-gui_position.SalePrepPage.height_adjust)
    except Exception as e:
        logging.error(e)
    self.sizer = None
    self.inner_sizer = None
    self.sizer = wx.BoxSizer(wx.VERTICAL)
    self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
    self.inner_sizer.AddSpacer(gui_position.SalePrepPage.AddSpacer)
    new_grid = SalePrepGrid(self, -1, size=size, pos=gui_position.SalePrepPage.new_grid_position)
    new_grid.CreateGrid(num_rows, num_columns)
    new_grid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, self.updateGrid, new_grid)
    # You need this code to resize
    self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
    self.SetSizer(self.inner_sizer)
    self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
    ##
    self.grid = new_grid
    # I deactivated this binding because it caused too much confusion if you
    # don't click on a white square after entering data
    # self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK ,self.hideSaveButtonWhileEnteringData, self.grid)
    # mark read-only cells; colour the three user-editable columns
    for column_num in range(num_columns):
        for row_num in range(num_rows):
            if not self.cell_is_writable(row_num, column_num):
                self.grid.SetReadOnly(row_num, column_num, True)
            else: # fill in writable column number spaces
                if column_num == self.carryover_input_cell.col:
                    self.grid.SetCellBackgroundColour(row_num, column_num, self.carryover_input_color_hex)
                elif column_num == self.num_of_shares_cell.col:
                    self.grid.SetCellBackgroundColour(row_num, column_num, self.num_of_shares_input_color_hex)
                elif column_num == self.percent_of_shares_cell.col:
                    self.grid.SetCellBackgroundColour(row_num, column_num, self.percentage_of_shares_input_color_hex)
    # set row 2
    self.grid.SetCellValue(self.carryover_text_cell.row, self.carryover_text_cell.col, self.carryover_text_cell.text)
    # set row 3
    self.grid.SetCellValue(self.carryover_input_cell.row, self.carryover_input_cell.col, config.locale.currency(self.carryover_input_cell.text, grouping = True))
    if self.carryover_input_cell.align_right:
        self.grid.SetCellAlignment(self.carryover_input_cell.row, self.carryover_input_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
    # set row 7
    self.grid.SetCellValue(self.totals_cell.row, self.totals_cell.col, self.totals_cell.text)
    # set row 4 & 5
    for cell in self.row_five_cell_list + self.row_four_cell_list:
        self.grid.SetCellValue(cell.row, cell.col, cell.text)
        if cell.align_center:
            self.grid.SetCellAlignment(cell.row, cell.col, horiz = wx.ALIGN_CENTRE, vert = wx.ALIGN_BOTTOM)
    # rows 6 and 8: dark separator rows
    for i in range(num_columns):
        self.grid.SetCellBackgroundColour(6, i, self.dark_cell_color_hex)
        self.grid.SetCellBackgroundColour(8, i, self.dark_cell_color_hex)
    # load account data
    portfolio_num = 0
    row_count = default_rows
    col_count = 0
    for account in relevant_portfolios_list:
        try:
            throw_error = account.stock_shares_dict
            # intentionally throws an error if account hasn't been imported
        except Exception as e:
            logging.error(e)
            logging.info(": An account appears to not be loaded, but this isn't a problem.")
            continue
        # set portfolio name title row (its input columns stay read-only)
        portfolio_name = account.name
        self.grid.SetCellValue(row_count, self.first_cell.col, account.name)
        self.grid.SetCellBackgroundColour(row_count, self.num_of_shares_cell.col, "white")
        self.grid.SetReadOnly(row_count, self.num_of_shares_cell.col, True)
        self.grid.SetCellBackgroundColour(row_count, self.percent_of_shares_cell.col, "white")
        self.grid.SetReadOnly(row_count, self.percent_of_shares_cell.col, True)
        portfolio_num += 1
        row_count += 1
        for ticker in sorted(account.stock_shares_dict):
            row_obj_already_exists = False
            # rows are cached by ticker + account id; a cached row only needs
            # its row index refreshed
            row_obj = self.rows_dict.get(str(ticker)+str(account.id_number))
            if row_obj:
                row_obj.row = row_count
                row_obj_already_exists = True
            else:
                quantity = account.stock_shares_dict.get(ticker)
                stocks_last_price = None
                sale_value = None
                stocks_capital_gains = None
                # set all cell values for stock
                stock = utils.return_stock_by_symbol(ticker)
                if not stock:
                    logging.info("Stock %s does not appear to exist" % ticker)
                    continue
                # set account index cell
                account_index_cell = SpreadsheetCell(row = row_count, col = self.first_cell.col, text = str(account.id_number), text_color = "white")
                # set ticker cell
                stocks_ticker_cell = SpreadsheetCell(row = row_count, col = self.ticker_cell.col, text = stock.symbol, stock = stock)
                # return and set cost basis per share
                cost_basis_per_share = utils.return_cost_basis_per_share(account, stock.symbol)
                if cost_basis_per_share:
                    try:
                        cost_basis_per_share = str(cost_basis_per_share).replace("$", "").replace(",","")
                        if cost_basis_per_share:
                            cost_basis_per_share = float(cost_basis_per_share)
                            stocks_cost_basis_cell = SpreadsheetCell(row = row_count, col = self.cost_basis_cell.col, text = config.locale.currency(cost_basis_per_share, grouping = True), value = (cost_basis_per_share), align_right = True)
                    except Exception as e:
                        logging.error(e)
                        cost_basis_per_share = None
                # set firm name cell
                stocks_firm_name_cell = SpreadsheetCell(row = row_count, col = self.name_cell.col, text = stock.firm_name, value = stock.firm_name)
                # set quantity cell (whole-number quantities shown without ".0")
                quantity_text = str(quantity)
                if float(quantity).is_integer():
                    quantity_text = str(int(quantity))
                stocks_quantity_cell = SpreadsheetCell(row = row_count, col = self.total_shares_cell.col, text = quantity_text, value = int(quantity), align_right = True)
                # set last price
                try:
                    stocks_last_price = utils.return_last_price_if_possible(stock)
                    if stocks_last_price is not None:
                        stocks_last_price = float(stocks_last_price)
                        stocks_last_price_cell = SpreadsheetCell(row = row_count, col = self.price_cell.col, text = config.locale.currency(stocks_last_price, grouping = True), value = stocks_last_price, align_right = True)
                except Exception as exception:
                    logging.error(exception)
                # set capital gains
                if cost_basis_per_share:
                    # this needs to be set via a row dict, which may not exist on startup
                    # NOTE(review): this lookup key is str(row_count), but rows_dict is
                    # keyed by ticker+account id everywhere else, so it likely always
                    # misses — confirm intent before changing the key.
                    try:
                        this_row = self.rows_dict.get(str(row_count))
                    except Exception:
                        this_row = None
                    if this_row:
                        try:
                            # bug fix: previous code read row_obj here, which is always
                            # falsy on this branch; the fetched row is this_row
                            sale_value = this_row.cell_dict.get(str(self.sale_value_cell.col)).value
                        except Exception:
                            pass
                    if sale_value is not None:
                        stocks_capital_gains = float(sale_value - cost_basis_per_share)
                        if stocks_capital_gains < 0.:
                            stocks_capital_gains = 0.
                        stocks_capital_gains_cell = SpreadsheetCell(row = row_count, col = self.capital_gains_cell.col, text = config.locale.currency(stocks_capital_gains, grouping = True), value = stocks_capital_gains, align_right = True)
                # set carryover reduction: consume the remaining carryover
                # loss against this row's capital gains
                if self.carryover_input_cell.value:
                    if stocks_capital_gains:
                        if self.carryover_input_cell.value > stocks_capital_gains:
                            new_carryover_value = self.carryover_input_cell.value - stocks_capital_gains
                            self.carryover_input_cell.value = new_carryover_value
                            reduction = -(stocks_capital_gains)
                        else:
                            reduction = stocks_capital_gains - self.carryover_input_cell.value
                            self.carryover_input_cell.value = 0.
                        stocks_capital_gains_adjustment_cell = SpreadsheetCell(row = row_count, col = self.adjusted_cap_gains_cell.col, text = config.locale.currency(reduction, grouping = True), value = reduction)
                    else:
                        stocks_capital_gains_adjustment_cell = SpreadsheetCell(row = row_count, col = self.adjusted_cap_gains_cell.col, text = "", value = None)
                else:
                    stocks_capital_gains_adjustment_cell = SpreadsheetCell(row = row_count, col = self.adjusted_cap_gains_cell.col, text = "", value = None)
                # set row: build the SpreadsheetRow with whichever cells could
                # actually be computed above
                if cost_basis_per_share and stocks_last_price and stocks_capital_gains:
                    try:
                        this_row = self.rows_dict.get(str(stock.symbol) + str(account.id_number))
                    except Exception as e:
                        logging.error(e)
                    if not this_row:
                        this_row = SpreadsheetRow(row_count, name = stock.symbol, row_title = str(ticker)+str(account.id_number), account = account, cell_dict = {})
                    this_row.cell_dict[account_index_cell.col] = account_index_cell
                    this_row.cell_dict[stocks_ticker_cell.col] = stocks_ticker_cell
                    this_row.cell_dict[stocks_firm_name_cell.col] = stocks_firm_name_cell
                    this_row.cell_dict[stocks_quantity_cell.col] = stocks_quantity_cell
                    this_row.cell_dict[stocks_cost_basis_cell.col] = stocks_cost_basis_cell
                    this_row.cell_dict[stocks_last_price_cell.col] = stocks_last_price_cell
                    this_row.cell_dict[stocks_capital_gains_cell.col] = stocks_capital_gains_cell
                    this_row.cell_dict[stocks_capital_gains_adjustment_cell.col] = stocks_capital_gains_adjustment_cell
                elif cost_basis_per_share and stocks_last_price:
                    try:
                        this_row = self.rows_dict.get(str(stock.symbol) + str(account.id_number))
                    except Exception as e:
                        logging.error(e)
                    if not this_row:
                        this_row = SpreadsheetRow(row_count, name = stock.symbol, row_title = str(ticker)+str(account.id_number), account = account, cell_dict = {})
                    this_row.cell_dict[account_index_cell.col] = account_index_cell
                    this_row.cell_dict[stocks_ticker_cell.col] = stocks_ticker_cell
                    this_row.cell_dict[stocks_firm_name_cell.col] = stocks_firm_name_cell
                    this_row.cell_dict[stocks_quantity_cell.col] = stocks_quantity_cell
                    this_row.cell_dict[stocks_cost_basis_cell.col] = stocks_cost_basis_cell
                    this_row.cell_dict[stocks_last_price_cell.col] = stocks_last_price_cell
                elif cost_basis_per_share:
                    try:
                        this_row = self.rows_dict.get(str(stock.symbol) + str(account.id_number))
                    except Exception as e:
                        logging.error(e)
                    if not this_row:
                        this_row = SpreadsheetRow(row_count, name = stock.symbol, row_title = str(ticker)+str(account.id_number), account = account, cell_dict = {})
                    this_row.cell_dict[account_index_cell.col] = account_index_cell
                    this_row.cell_dict[stocks_ticker_cell.col] = stocks_ticker_cell
                    this_row.cell_dict[stocks_firm_name_cell.col] = stocks_firm_name_cell
                    this_row.cell_dict[stocks_quantity_cell.col] = stocks_quantity_cell
                    this_row.cell_dict[stocks_cost_basis_cell.col] = stocks_cost_basis_cell
                elif stocks_last_price:
                    try:
                        this_row = self.rows_dict.get(str(stock.symbol) + str(account.id_number))
                    except Exception as e:
                        logging.error(e)
                    if not this_row:
                        this_row = SpreadsheetRow(row_count, name = stock.symbol, row_title = str(ticker)+str(account.id_number), account = account, cell_dict = {})
                    this_row.cell_dict[account_index_cell.col] = account_index_cell
                    this_row.cell_dict[stocks_ticker_cell.col] = stocks_ticker_cell
                    this_row.cell_dict[stocks_firm_name_cell.col] = stocks_firm_name_cell
                    this_row.cell_dict[stocks_quantity_cell.col] = stocks_quantity_cell
                    this_row.cell_dict[stocks_last_price_cell.col] = stocks_last_price_cell
                else:
                    try:
                        this_row = self.rows_dict.get(str(stock.symbol) + str(account.id_number))
                    except Exception as e:
                        logging.error(e)
                    if not this_row:
                        this_row = SpreadsheetRow(row_count, name = stock.symbol, row_title = str(ticker)+str(account.id_number), account = account, cell_dict = {})
                    this_row.cell_dict[account_index_cell.col] = account_index_cell
                    this_row.cell_dict[stocks_ticker_cell.col] = stocks_ticker_cell
                    this_row.cell_dict[stocks_firm_name_cell.col] = stocks_firm_name_cell
                    this_row.cell_dict[stocks_quantity_cell.col] = stocks_quantity_cell
                self.rows_dict[str(stock.symbol) + str(account.id_number)] = this_row
                this_row = None
                stock = None
            row_count += 1
    # iterate over cells to fill in grid
    for ticker, row_obj in self.rows_dict.items():
        for col_num, cell_obj in row_obj.cell_dict.items():
            # check if row moved:
            if cell_obj.row != row_obj.row:
                cell_obj.row = row_obj.row
            self.grid.SetCellValue(cell_obj.row, cell_obj.col, cell_obj.text)
            if cell_obj.align_right:
                self.grid.SetCellAlignment(cell_obj.row, cell_obj.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
            if cell_obj.text_color:
                self.grid.SetCellTextColour(cell_obj.row, cell_obj.col, cell_obj.text_color)
    total_sale_value = None
    total_sale_value_relevant = True
    total_commission_loss = None
    total_capital_gains = None
    total_capital_gains_relevant = True
    ### Set Totals ###
    # set total sale: sum sale values over rows with a share count entered;
    # a row with shares but no sale value invalidates the total
    for ticker, row_obj in self.rows_dict.items():
        cell_obj = row_obj.cell_dict.get(self.number_of_shares_copy_cell.col)
        if cell_obj:
            if cell_obj.text is not None and cell_obj.text != "":
                # set total sale
                if total_sale_value_relevant:
                    row_sale_value_cell = row_obj.cell_dict.get(self.sale_value_cell.col)
                    if row_sale_value_cell:
                        if row_sale_value_cell.value is not None and row_sale_value_cell.value != "":
                            if total_sale_value is None:
                                total_sale_value = 0.
                            total_sale_value += float(row_sale_value_cell.value)
                        else:
                            total_sale_value = None
                            total_sale_value_relevant = False
    if total_sale_value is not None:
        self.grid.SetCellValue(self.totals_cell.row, self.sale_value_cell.col, config.locale.currency(total_sale_value, grouping = True))
        self.grid.SetCellAlignment(self.totals_cell.row, self.sale_value_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
    else:
        self.grid.SetCellValue(self.totals_cell.row, self.sale_value_cell.col, "")
    # set total commission: flat commission per row that has shares entered
    if config.DEFAULT_COMMISSION:
        num_trades = 0.
        for row, row_obj in self.rows_dict.items():
            cell_obj = row_obj.cell_dict.get(self.number_of_shares_copy_cell.col)
            if cell_obj:
                if cell_obj.text is not None and cell_obj.text != "":
                    # set stocks with commission
                    num_trades += 1
    if config.DEFAULT_COMMISSION and num_trades:
        total_commission_loss = num_trades * config.DEFAULT_COMMISSION
        self.grid.SetCellValue(self.totals_cell.row, self.commission_cell.col, config.locale.currency(total_commission_loss, grouping = True))
        self.grid.SetCellAlignment(self.totals_cell.row, self.commission_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
    else:
        self.grid.SetCellValue(self.totals_cell.row, self.commission_cell.col, "")
    # set capital gains total, same invalidation rule as total sale
    for ticker, row_obj in self.rows_dict.items():
        cell_obj = row_obj.cell_dict.get(self.number_of_shares_copy_cell.col)
        if cell_obj:
            if cell_obj.text is not None and cell_obj.text != "":
                if total_capital_gains_relevant:
                    capital_gains_cell = row_obj.cell_dict.get(self.capital_gains_cell.col)
                    if capital_gains_cell:
                        if capital_gains_cell.value is not None and capital_gains_cell.value != "":
                            if total_capital_gains is None:
                                total_capital_gains = 0.
                            total_capital_gains += float(capital_gains_cell.value)
                        else:
                            total_capital_gains = None
                            total_capital_gains_relevant = False
    if total_capital_gains is not None:
        self.grid.SetCellValue(self.totals_cell.row, self.capital_gains_cell.col, config.locale.currency(total_capital_gains, grouping = True))
        self.grid.SetCellAlignment(self.totals_cell.row, self.capital_gains_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
    else:
        self.grid.SetCellValue(self.totals_cell.row, self.capital_gains_cell.col, "")
    # carryover adjustments: total gains reduced by the carryover loss
    if self.carryover_input_cell.value and total_capital_gains:
        adjusted_capital_gains = max(total_capital_gains - self.carryover_input_cell.value, 0.)
        self.grid.SetCellValue(self.totals_cell.row, self.carryover_input_cell.col, config.locale.currency(adjusted_capital_gains, grouping = True))
        self.grid.SetCellAlignment(self.totals_cell.row, self.carryover_input_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
    else:
        self.grid.SetCellValue(self.totals_cell.row, self.carryover_input_cell.col, "")
    # per-row carryover adjustment: consume the carryover against each
    # row's gains in sorted row order
    if self.carryover_input_cell.value and total_capital_gains:
        adjustment_left = self.carryover_input_cell.value
        for row, row_obj in sorted(self.rows_dict.items()):
            cell_obj = row_obj.cell_dict.get(self.number_of_shares_copy_cell.col)
            if cell_obj:
                if cell_obj.text is not None and cell_obj.text != "":
                    # set capital gains adjustment
                    capital_gains_cell = row_obj.cell_dict.get(self.capital_gains_cell.col)
                    if capital_gains_cell:
                        if capital_gains_cell.value and adjustment_left:
                            adjusted_capital_gains = max(capital_gains_cell.value - adjustment_left, 0.)
                            adjustment_left = max(adjustment_left - capital_gains_cell.value, 0.)
                            self.grid.SetCellValue(cell_obj.row, self.carryover_input_cell.col, config.locale.currency(adjusted_capital_gains, grouping = True))
                            self.grid.SetCellAlignment(cell_obj.row, self.carryover_input_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
                        else:
                            self.grid.SetCellValue(cell_obj.row, self.carryover_input_cell.col, config.locale.currency(capital_gains_cell.value, grouping = True))
                            self.grid.SetCellAlignment(cell_obj.row, self.carryover_input_cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
    self.grid.AutoSizeColumns()
def updateGrid(self, event, refresh = None):
    """React to a user edit in one of the writable grid cells.

    Handles three editable columns: the carryover-loss input cell, the
    "number of shares to sell" column and the "percent of shares to sell"
    column. Validates the entry, mirrors it into the row's cached
    SpreadsheetCell objects, derives sale value / commission / capital
    gains, then rebuilds the sheet and re-exports the sale candidates.

    Fixes vs. previous revision:
    - clearing the percent cell no longer crashes (``float("")`` was
      called unguarded when the entry was empty);
    - the empty/zero percent branch now clears the percent *copy* cell's
      text/value instead of rebinding the local variable to ""/None.
    """
    row = event.GetRow()
    column = event.GetCol()
    value = self.grid.GetCellValue(row, column)
    value = utils.strip_string_whitespace(value)
    # --- carryover-loss input cell ---
    if int(row) == self.carryover_input_cell.row and int(column) == self.carryover_input_cell.col:
        # updating carryover losses
        value = value.replace("$", "")
        try:
            value = float(value)
        except Exception:
            logging.error("Error: invalid carryover input.")
            return
        self.carryover_input_cell.text = value
        self.carryover_input_cell.value = value
        self.saved_text.Hide()
        self.save_button.Show()
        self.exportSaleCandidates("event")
        self.spreadSheetFill("event")
        return
    # --- per-stock row edit ---
    ticker = str(self.grid.GetCellValue(row, self.ticker_cell.col))
    account_id_number = str(self.grid.GetCellValue(row, self.first_cell.col))
    num_shares = str(self.grid.GetCellValue(row, self.total_shares_cell.col))
    num_shares = num_shares.replace(",","")
    sale_value = None
    percent_to_commission = None
    row_obj = self.rows_dict.get(str(ticker)+str(account_id_number))
    stocks_ticker_cell = row_obj.cell_dict.get(str(self.ticker_cell.col))
    if stocks_ticker_cell:
        ticker = stocks_ticker_cell.text
    else:
        ticker = str(self.grid.GetCellValue(row, self.ticker_cell.col))
        if not ticker:
            logging.error("Error: something went wrong here")
    logging.info(ticker)
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.error("Error, stock %s doesn't appear to exist" % ticker)
        return
    stocks_price_cell = row_obj.cell_dict.get(str(self.price_cell.col))
    if stocks_price_cell:
        price = stocks_price_cell.value
    else:
        try:
            price = float(utils.return_last_price_if_possible(stock))
        except Exception:
            price = None
    # fetch (or lazily create) the cached cells this handler writes to;
    # NOTE(review): reads use str(col) keys while writes use int col keys,
    # matching the conventions used elsewhere in this class
    stocks_num_of_shares_cell = row_obj.cell_dict.get(str(self.num_of_shares_cell.col))
    stocks_percent_of_shares_cell = row_obj.cell_dict.get(str(self.percent_of_shares_cell.col))
    stocks_num_of_shares_copy_cell = row_obj.cell_dict.get(str(self.number_of_shares_copy_cell.col))
    stocks_percent_of_shares_copy_cell = row_obj.cell_dict.get(str(self.percent_of_shares_copy_cell.col))
    stocks_sale_check_cell = row_obj.cell_dict.get(str(self.sale_check_cell.col))
    stocks_sale_value_cell = row_obj.cell_dict.get(str(self.sale_value_cell.col))
    stocks_percent_to_commission_cell = row_obj.cell_dict.get(str(self.commission_cell.col))
    stocks_cost_basis_per_share_cell = row_obj.cell_dict.get(str(self.cost_basis_cell.col))
    stocks_capital_gains_cell = row_obj.cell_dict.get(str(self.capital_gains_cell.col))
    if not stocks_num_of_shares_cell:
        stocks_num_of_shares_cell = SpreadsheetCell(row = row, col = self.num_of_shares_cell.col, align_right = True)
        row_obj.cell_dict[self.num_of_shares_cell.col] = stocks_num_of_shares_cell
    if not stocks_percent_of_shares_cell:
        stocks_percent_of_shares_cell = SpreadsheetCell(row = row, col = self.percent_of_shares_cell.col, align_right = True)
        row_obj.cell_dict[self.percent_of_shares_cell.col] = stocks_percent_of_shares_cell
    if not stocks_num_of_shares_copy_cell:
        stocks_num_of_shares_copy_cell = SpreadsheetCell(row = row, col = self.number_of_shares_copy_cell.col, align_right = True)
        row_obj.cell_dict[self.number_of_shares_copy_cell.col] = stocks_num_of_shares_copy_cell
    if not stocks_percent_of_shares_copy_cell:
        stocks_percent_of_shares_copy_cell = SpreadsheetCell(row = row, col = self.percent_of_shares_copy_cell.col, align_right = True)
        row_obj.cell_dict[self.percent_of_shares_copy_cell.col] = stocks_percent_of_shares_copy_cell
    if not stocks_sale_check_cell:
        stocks_sale_check_cell = SpreadsheetCell(row = row, col = self.sale_check_cell.col)
        row_obj.cell_dict[self.sale_check_cell.col] = stocks_sale_check_cell
    if not stocks_sale_value_cell:
        stocks_sale_value_cell = SpreadsheetCell(row = row, col = self.sale_value_cell.col, align_right = True)
        row_obj.cell_dict[self.sale_value_cell.col] = stocks_sale_value_cell
    if not stocks_percent_to_commission_cell:
        stocks_percent_to_commission_cell = SpreadsheetCell(row = row, col = self.commission_cell.col, align_right = True)
        row_obj.cell_dict[self.commission_cell.col] = stocks_percent_to_commission_cell
    if not stocks_cost_basis_per_share_cell:
        stocks_cost_basis_per_share_cell = SpreadsheetCell(row = row, col = self.cost_basis_cell.col, align_right = True)
        row_obj.cell_dict[self.cost_basis_cell.col] = stocks_cost_basis_per_share_cell
    if not stocks_capital_gains_cell:
        stocks_capital_gains_cell = SpreadsheetCell(row = row, col = self.capital_gains_cell.col, align_right = True)
        row_obj.cell_dict[self.capital_gains_cell.col] = stocks_capital_gains_cell
    if column == self.num_of_shares_cell.col: # sell by number
        try:
            number_of_shares_to_sell = int(value)
        except Exception as exception:
            logging.error(exception)
            number_of_shares_to_sell = None
        # blank percentage of shares col
        stocks_percent_of_shares_cell.text = ""
        stocks_percent_of_shares_cell.value = None
        if str(number_of_shares_to_sell).isdigit() and float(num_shares) >= float(number_of_shares_to_sell) and float(number_of_shares_to_sell) != 0.:
            # No input errors
            stocks_num_of_shares_cell.text = str(number_of_shares_to_sell)
            stocks_num_of_shares_cell.value = number_of_shares_to_sell
            stocks_num_of_shares_copy_cell.text = str(number_of_shares_to_sell)
            stocks_num_of_shares_copy_cell.value = number_of_shares_to_sell
            percent_of_total_holdings = float(number_of_shares_to_sell)/float(num_shares)
            percent_of_total_holdings_text = str(int(round(100 * percent_of_total_holdings))) + "%"
            stocks_percent_of_shares_copy_cell.text = percent_of_total_holdings_text
            stocks_percent_of_shares_copy_cell.value = percent_of_total_holdings
            if float(num_shares) == float(number_of_shares_to_sell):
                stocks_sale_check_cell.text = "All"
                stocks_sale_check_cell.text_color = "black"
            else:
                stocks_sale_check_cell.text = "Some"
                stocks_sale_check_cell.text_color = "black"
        elif value == "" or number_of_shares_to_sell == 0:
            # entry cleared or zero: blank everything for this row
            stocks_num_of_shares_cell.text = ""
            stocks_num_of_shares_cell.value = 0.
            stocks_percent_of_shares_cell.text = ""
            stocks_percent_of_shares_cell.value = None
            stocks_sale_check_cell.text = ""
            stocks_sale_check_cell.text_color = "black"
        else:
            # Bad input
            self.setGridError(row, row_obj.row_title, number = number_of_shares_to_sell)
            return
    if column == self.percent_of_shares_cell.col: # by % of stock held
        if "%" in value:
            value = value.strip("%")
            try:
                value = float(value)/100
            except Exception as exception:
                logging.error(exception)
                self.setGridError(row, row_obj.row_title, percentage = value)
                return
        else:
            try:
                value = float(value)
                if value >= 1:
                    # "50" is treated as 50%, not 5000%
                    value = value / 100
            except Exception as exception:
                logging.error(exception)
                if value != "":
                    self.setGridError(row, row_obj.row_title, percentage = value)
                    return
        percent_of_holdings_to_sell = value
        # bug fix: guard the conversion — value may still be "" when the
        # user cleared the cell, and float("") raises ValueError
        if value != "":
            if float(value).is_integer():
                value = int(value)
            stocks_percent_of_shares_cell.text = str(value*100) + "%"
        else:
            stocks_percent_of_shares_cell.text = ""
        stocks_percent_of_shares_cell.value = value
        stocks_num_of_shares_cell.text = ""
        stocks_num_of_shares_cell.value = None
        # if empty
        if percent_of_holdings_to_sell == "" or percent_of_holdings_to_sell == 0:
            number_of_shares_to_sell = 0
            stocks_num_of_shares_copy_cell.text = ""
            stocks_num_of_shares_copy_cell.value = 0
            # bug fix: previously rebound the local variable to ""/None
            # instead of clearing the cell's text/value
            stocks_percent_of_shares_copy_cell.text = ""
            stocks_percent_of_shares_copy_cell.value = None
            stocks_sale_check_cell.text = ""
            stocks_sale_check_cell.text_color = "black"
        elif percent_of_holdings_to_sell <= 1:
            # floor so we never sell a fractional share
            number_of_shares_to_sell = int(math.floor( int(num_shares) * percent_of_holdings_to_sell ) )
            stocks_num_of_shares_copy_cell.text = str(number_of_shares_to_sell)
            stocks_num_of_shares_copy_cell.value = number_of_shares_to_sell
            if number_of_shares_to_sell:
                stocks_percent_of_shares_copy_cell.text = str(percent_of_holdings_to_sell * 100) + "%"
                stocks_percent_of_shares_copy_cell.value = percent_of_holdings_to_sell
                if int(num_shares) == int(number_of_shares_to_sell):
                    stocks_sale_check_cell.text = "All"
                    stocks_sale_check_cell.text_color = "black"
                else:
                    stocks_sale_check_cell.text = "Some"
                    stocks_sale_check_cell.text_color = "black"
            else: # percentage is too small, because no shares can be sold at that percentage
                stocks_percent_of_shares_copy_cell.text = str(0.) + "%"
                stocks_percent_of_shares_copy_cell.value = 0.
        else:
            self.setGridError(row, row_obj.row_title, percentage = percent_of_holdings_to_sell)
            return
    # derive sale value / commission from the validated share count
    if price is not None and number_of_shares_to_sell:
        sale_value = float(number_of_shares_to_sell) * float(price)
        stocks_sale_value_cell.text = config.locale.currency(sale_value, grouping = True)
        stocks_sale_value_cell.value = sale_value
        if sale_value:
            percent_to_commission = self.commission/sale_value
        else:
            percent_to_commission = 0
    if percent_to_commission:
        stocks_percent_to_commission_cell.text = ("%.2f" % float(percent_to_commission * 100)) + "%"
        stocks_percent_to_commission_cell.value = percent_to_commission
    cost_basis_per_share = utils.return_cost_basis_per_share(row_obj.account, stock.symbol)
    if cost_basis_per_share is not None:
        stocks_cost_basis_per_share_cell.text = config.locale.currency(cost_basis_per_share, grouping = True)
        stocks_cost_basis_per_share_cell.value = cost_basis_per_share
    if cost_basis_per_share is not None and price is not None and number_of_shares_to_sell:
        capital_gain_per_share = price - cost_basis_per_share
        capital_gains = (capital_gain_per_share * number_of_shares_to_sell) - float(config.DEFAULT_COMMISSION)
        stocks_capital_gains_cell.text = config.locale.currency(capital_gains, grouping = True)
        stocks_capital_gains_cell.value = capital_gains
        if stocks_capital_gains_cell.value < 0:
            # losses are highlighted in the configured "negative" colour
            stocks_capital_gains_cell.text_color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX
    self.saved_text.Hide()
    self.save_button.Show()
    self.spreadSheetFill("event")
    self.exportSaleCandidates("event")
def setGridError(self, row, row_obj_row_title, number = None, percentage = None):
    """Flag a bad share-count or percentage entry on a spreadsheet row.

    Clears all of the row's derived cells, plants a red "Error" marker in
    the sale-check column, echoes the offending number/percentage back in
    red, and rebuilds the sheet.
    """
    row_obj = self.rows_dict.get(str(row_obj_row_title))
    logging.info(self.rows_dict)
    logging.info(row_obj)
    # blank every derived cell so stale results don't sit next to the flag
    # (reads use str(col) keys, matching the rest of this class)
    derived_columns = (
        self.number_of_shares_copy_cell.col,
        self.percent_of_shares_copy_cell.col,
        self.sale_value_cell.col,
        self.commission_cell.col,
        self.capital_gains_cell.col,
    )
    for derived_col in derived_columns:
        derived_cell = row_obj.cell_dict.get(str(derived_col))
        if derived_cell:
            derived_cell.text = ""
            derived_cell.value = None
    # always replace the sale-check cell with a red "Error" marker
    error_flag_cell = SpreadsheetCell(row = row, col = self.sale_check_cell.col, text = "Error", text_color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
    row_obj.cell_dict[self.sale_check_cell.col] = error_flag_cell
    # echo the rejected entry back to the user, in red
    if number:
        rejected_number_cell = SpreadsheetCell(row = row, col = self.num_of_shares_cell.col, text = str(number), text_color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX, align_right = True)
        row_obj.cell_dict[self.num_of_shares_cell.col] = rejected_number_cell
    if percentage:
        rejected_percentage_cell = SpreadsheetCell(row = row, col = self.percent_of_shares_cell.col, text = str(percentage), text_color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX, align_right = True)
        row_obj.cell_dict[self.percent_of_shares_cell.col] = rejected_percentage_cell
    self.spreadSheetFill("event")
def setNoGridError(self, row):
    """Remove any error marker from the sale-check column of *row*."""
    sale_check_col = self.sale_check_cell.col
    self.grid.SetCellValue(row, sale_check_col, "")
    self.grid.SetCellTextColour(row, sale_check_col, "black")
class TradePage(Tab):
def __init__(self, parent):
    """Build the Trade tab.

    Creates the buttons that manage the trade grid (create/clear/print),
    the stock-update controls, and the execute-trades button, then builds
    the initial grid via makeGridOnButtonPush.
    """
    self.title = "Trade"
    self.uid = config.TRADE_PAGE_UNIQUE_ID
    self.parent = parent
    self.default_average_daily_volume_attribute_name = config.DEFAULT_AVERAGE_DAILY_VOLUME_ATTRIBUTE_NAME
    wx.Panel.__init__(self, parent)
    trade_page_text = wx.StaticText(self, -1,
        "Set up trades",
        gui_position.TradePage.trade_page_text
    )
    # working state populated while the user sets up a batch of trades
    self.ticker_list = []
    self.relevant_portfolios_list = []
    self.relevant_portfolio_name_list = []
    self.sale_tuple_list = []
    # grid layout constants: row/column positions and the input-cell colours
    # ("not entered" variants are the paler shade of the same colour)
    self.default_rows_above_buy_candidates = 5
    self.default_buy_candidate_column = 7
    self.default_buy_candidate_color = "#FAEFCF"
    self.default_not_entered_buy_candidate_color = "#FBF7EC"
    self.default_buy_candidate_quantity_column = 14
    self.default_buy_candidate_quantity_color = "#CFE8FC"
    self.default_not_entered_buy_candidate_quantity_color = "#E9F4FD"
    self.default_account_dropdown_column = 17
    self.buy_candidates = [] # this will be tickers to buy, but no quantities
    self.buy_candidate_tuples = [] # this will be tickers ROWS with quantities, if they don't appear in previous list, there will be disregarded, because data has been changed.
    self.default_columns = 19
    self.default_min_rows = 17
    # no longer used
    #import_sale_candidates_button = wx.Button(self, label="Import sale candidates and refresh spreadsheet", pos=(0,30), size=(-1,-1))
    #import_sale_candidates_button.Bind(wx.EVT_BUTTON, self.importSaleCandidates, import_sale_candidates_button)
    # grid-management buttons; "Create grid" stays hidden because the grid
    # is built automatically below
    self.create_grid_button = wx.Button(self, label="Create grid", pos=gui_position.TradePage.create_grid_button, size=(-1,-1))
    self.create_grid_button.Bind(wx.EVT_BUTTON, self.makeGridOnButtonPush, self.create_grid_button)
    self.create_grid_button.Hide()
    self.clear_grid_button = wx.Button(self, label="Clear", pos=gui_position.TradePage.clear_grid_button, size=(-1,-1))
    self.clear_grid_button.Bind(wx.EVT_BUTTON, self.clearGrid, self.clear_grid_button)
    self.save_grid_button = wx.Button(self, label="Print", pos=gui_position.TradePage.save_grid_button, size=(-1,-1))
    self.save_grid_button.Bind(wx.EVT_BUTTON, self.saveGridAs, self.save_grid_button)
    self.grid_list = []
    # stock-update controls: hidden until there are stocks with errors
    self.update_stocks_button = wx.Button(self, label="Update stocks with errors.", pos=gui_position.TradePage.update_stocks_button, size=(-1,-1))
    self.update_stocks_button.Bind(wx.EVT_BUTTON, self.updateStocksWithErrors, self.update_stocks_button)
    self.update_stocks_button.Hide()
    self.stocks_to_update = []
    self.number_of_tickers_to_scrape = 0
    self.stock_update_pending_text = wx.StaticText(self, -1,
        "Currently Updating Stocks",
        gui_position.TradePage.stock_update_pending_text
    )
    self.stock_update_pending_text.Hide()
    self.execute_trades_button = wx.Button(self,
        label="Execute trades",
        pos = gui_position.TradePage.execute_trades_button,
        size = (-1,-1),
    )
    self.execute_trades_button.Bind(wx.EVT_BUTTON, self.confirmExecuteTrades, self.execute_trades_button)
    # build the initial (empty) trade grid immediately
    self.grid = None
    self.makeGridOnButtonPush("event")
    logging.info("TradePage loaded")
def confirmExecuteTrades(self, event):
    """Ask the user to confirm, then run the currently scheduled trades."""
    dialog = wx.MessageDialog(None,
        "Do you want to execute your currently set trades?",
        'Confirm Trades',
        style = wx.YES_NO
    )
    dialog.SetYesNoLabels(("&Execute"), ("&Cancel"))
    answer = dialog.ShowModal()
    dialog.Destroy()
    if answer == wx.ID_YES:
        self.executeCurrentlyScheduledTrades()
def executeCurrentlyScheduledTrades(self):
logging.info("sale_tuple_list: {}".format(self.sale_tuple_list))
logging.info("SALE_PREP_PORTFOLIOS_AND_SALE_CANDIDATES_TUPLE: {}".format(config.SALE_PREP_PORTFOLIOS_AND_SALE_CANDIDATES_TUPLE))
logging.info("buy_candidate_tuples: {}".format(self.buy_candidate_tuples))
logging.info("buy_candidates: {}".format(self.buy_candidates))
full_execute_buy_list = []
for this_tuple in self.buy_candidate_tuples:
quantity = this_tuple[1]
ticker = this_tuple[2]
execute_buy_tuple = self.executeTradeDialog(ticker=ticker, number_of_shares=quantity)
if not execute_buy_tuple:
return # cancel entire event if one window is cancelled
full_execute_buy_list.append(execute_buy_tuple)
logging.info(full_execute_buy_list)
self.executeTradeFinal(self.sale_tuple_list, full_execute_buy_list)
def executeTradeFinal(self, sell_tuple_list, buy_tuple_list):
accounts_to_be_saved_list = []
for sell_candidate in sell_tuple_list:
ticker = sell_candidate[0]
quantity = sell_candidate[1]
account_obj = sell_candidate[2]
previous_shares = account_obj.stock_shares_dict.get(ticker)
if previous_shares is not None:
left_over_shares = float(previous_shares) - float(quantity)
else:
logging.error("ERROR: something went quite wrong here.")
return
if left_over_shares < 0:
logging.error("ERROR: you cannot sell more shares than you own.")
return
elif not left_over_shares: # could be 0, 0., or None
account_obj.stock_shares_dict.pop(ticker, None)
account_obj.cost_basis_dict.pop(ticker, None)
else: # if there are left over stocks
account_obj.stock_shares_dict[ticker] = float(left_over_shares)
accounts_to_be_saved_list.append(account_obj)
for buy_candidate in buy_tuple_list:
ticker = buy_candidate[0]
quantity = buy_candidate[1]
account_obj = buy_candidate[2]
cost_basis = buy_candidate[3]
previous_shares = account_obj.stock_shares_dict.get(ticker)
if not previous_shares:
previous_shares = 0.
total_shares = float(previous_shares) + float(quantity)
account_obj.stock_shares_dict[ticker] = total_shares
if cost_basis:
old_cost_basis = account_obj.cost_basis_dict.get(ticker)
if old_cost_basis or previous_shares:
logging.info("I'm not comfortable calculating your new cost basis for tax purposes. Please re-enter it on your account page.")
account_obj.cost_basis_dict[ticker] = None
else:
account_obj.cost_basis_dict[ticker] = float(cost_basis)
accounts_to_be_saved_list.append(account_obj)
save_list = utils.remove_list_duplicates(accounts_to_be_saved_list)
for account in save_list:
db.save_portfolio_object(account)
logging.info("TRADE EXECUTED")
self.clearGrid("event")
sale_prep_page = config.GLOBAL_PAGES_DICT.get(config.SALE_PREP_PAGE_UNIQUE_ID).obj
sale_prep_page.resetPage()
utils.update_all_dynamic_grids()
def executeTradeDialog(self, ticker, number_of_shares, preset_account_choice=None, error_account=None, preset_cost_basis=None, error_cost_basis=None):
trade_dialog = StockBuyDialog(ticker, number_of_shares, preset_account_choice, error_account, preset_cost_basis, error_cost_basis)
confirm = trade_dialog.ShowModal()
portfolio = trade_dialog.portfolio_dropdown.GetValue()
cost_basis = trade_dialog.cost_basis.GetValue()
trade_dialog.Destroy()
if confirm != wx.ID_OK:
return
if not portfolio:
logging.warning("invalid portfolio choice")
self.executeTradeDialog(ticker=ticker, number_of_shares=number_of_shares, error_account = "You much choose a portfolio for this stock purchase", preset_cost_basis=cost_basis)
float_cost_basis = utils.money_text_to_float(cost_basis)
if cost_basis and (float_cost_basis is None): # it may be 0, but it shouldn't have a valid cost basis entry and then return None
logging.warning("invalid cost basis")
self.executeTradeDialog(ticker=ticker, number_of_shares=number_of_shares, preset_account_choice=portfolio, error_cost_basis = "You entered an invalid cost basis")
portfolio_obj = utils.return_account_by_name(portfolio)
if not portfolio_obj:
logging.warning("Something went wrong with grabbing the portfolio {portfolio} here.".format(portfolio=portfolio))
logging.info("{} {} {} {}".format(ticker, number_of_shares, portfolio_obj, float_cost_basis))
return (ticker, number_of_shares, portfolio_obj, float_cost_basis)
def updateStocksWithErrors(self, event):
# start by getting errors from grid
total_grid_rows = self.grid.GetNumberRows()
default_row_gap_before_stocks = 6
ticker_column = 0
volume_column = 2
for row in range(default_row_gap_before_stocks, total_grid_rows):
ticker_cell_text = self.grid.GetCellValue(row, ticker_column)
if not ticker_cell_text:
continue
logging.warning(ticker_cell_text)
volume_cell_text = self.grid.GetCellValue(row, volume_column)
logging.warning(volume_cell_text)
if volume_cell_text == "Update Volume":
stock = utils.return_stock_by_symbol(ticker_cell_text)
if not stock.ticker in self.stocks_to_update:
self.stocks_to_update.append(stock.ticker)
utils.remove_list_duplicates(self.stocks_to_update)
logging.warning(self.stocks_to_update)
if not self.stocks_to_update:
self.update_stocks_button.Hide()
return
if len(self.stocks_to_update) > config.SCRAPE_CHUNK_LENGTH:
logging.info("You should be able to remove this prompt by using newer scraping functions here.")
error_message = wx.MessageDialog(None,
"The number of stocks to scrape is too large. Please use the Scrape tab to perform a full scrape.",
'Scrape Too Large',
style = wx.ICON_EXCLAMATION
)
error_message.ShowModal()
confirm.Destroy()
return
self.update_stocks_button.Hide()
self.stock_update_pending_text.Show()
for stock.ticker in self.stocks_to_update:
time.sleep(5)
update = scrape.bloomberg_us_stock_quote_scrape(stock.ticker)
utils.update_all_dynamic_grids()
def clearGrid(self, event):
self.relevant_portfolios_list = []
self.sale_tuple_list = []
self.ticker_list = []
self.buy_candidates = []
self.buy_candidate_tuples = []
self.stocks_to_update = []
self.newGridFill()
sale_prep_page = config.GLOBAL_PAGES_DICT.get(config.SALE_PREP_PAGE_UNIQUE_ID).obj
sale_prep_page.saved_text.Hide()
sale_prep_page.save_button.Show()
	def importSaleCandidates(self, event):
		"""Pull the portfolios and sale candidates saved by the sale-prep page into this page, then rebuild the grid."""
		logging.info("Boom goes the boom!!!!!!!!") # My favorite logging text, please don't remove :(
		# the config tuple is (relevant_portfolio_list, sale_tuple_list), written by the sale-prep page
		self.relevant_portfolios_list = config.SALE_PREP_PORTFOLIOS_AND_SALE_CANDIDATES_TUPLE[0]
		self.sale_tuple_list = config.SALE_PREP_PORTFOLIOS_AND_SALE_CANDIDATES_TUPLE[1]
		# NOTE(review): this loop only binds id_number and never uses it — looks like dead code; confirm before removing
		for portfolio in self.relevant_portfolios_list:
			id_number = portfolio.id_number
		self.makeGridOnButtonPush("event")
		# Now, how to refresh only parts of the list... hmmmm
	def makeGridOnButtonPush(self, event):
		"""Rebuild the trade grid from current state; `event` is unused (callers also pass a placeholder string)."""
		self.newGridFill()
	def updateGrid(self, event, grid = None, cursor_positon = None):
		"""Re-read the buy-candidate columns after a cell edit and rebuild the whole grid.

		Rescans the editable buy-candidate rows of `grid` (defaults to the current
		grid), rebuilds self.buy_candidates / self.buy_candidate_tuples from the
		cell contents, and then constructs a fresh grid via newGridFill.  Unknown
		tickers are flagged in-place with an "Error" cell and abort the rebuild.
		`cursor_positon` [sic] is (row, col); when absent it is taken from `event`.
		"""
		# this function currently creates new grids on top of each other.
		# why?
		# when i tried to update the previous grid using the data (run self.spreadSheetFill on the last line).
		# this caused a Segmentation fault: 11
		# thus, this hack... create a new grid on execution each time.
		# it will tell you how many grids have loaded, but it shouldn't affect performance (i think).
		if not grid:
			logging.info("no grid")
			grid = self.grid
		else:
			logging.info(grid)
		if not cursor_positon:
			row = int(event.GetRow())
			column = int(event.GetCol())
			cursor_positon = (row, column)
		else:
			row = int(cursor_positon[0])
			column = int(cursor_positon[1])
		if int(column) == self.default_account_dropdown_column:
			# ignore this input
			return
		buy_candidates_len = len(self.buy_candidates)
		logging.info(buy_candidates_len)
		logging.info(buy_candidates_len + 1 + self.default_rows_above_buy_candidates + 1)
		# rebuild candidate state from scratch by rescanning the editable rows
		self.buy_candidates = []
		self.buy_candidate_tuples = []
		# NOTE(review): the loop variable `row` shadows the edited-cell row read above;
		# the error branch below writes at (row, column) using the LOOP row — confirm intended.
		for row in (range(buy_candidates_len + 1 + self.default_rows_above_buy_candidates + 1)):
			# editable buy-candidate cells are identified by their background colour
			if row > self.default_rows_above_buy_candidates and grid.GetCellBackgroundColour(row, self.default_buy_candidate_column) in [self.default_buy_candidate_color, self.default_not_entered_buy_candidate_color]:
				ticker = grid.GetCellValue(row, self.default_buy_candidate_column)
				if ticker:
					stock = utils.return_stock_by_symbol(ticker)
					if stock:
						self.buy_candidates.append(str(stock.symbol))
						# only digit-only quantities are accepted into the tuple list
						quantity = grid.GetCellValue(row, self.default_buy_candidate_quantity_column)
						if quantity:
							if str(quantity).isdigit():
								quantity = int(quantity)
								ticker_row = row
								self.buy_candidate_tuples.append((ticker_row, quantity, str(ticker), stock))
					else:
						# unknown ticker: mark it with an "Error" cell and abort the rebuild
						# NOTE(review): writes to self.grid while the scan used `grid` — confirm these are the same object here
						logging.info("{} doesn't seem to exist".format(ticker))
						self.grid.SetCellValue(row, column, ticker)
						self.grid.SetCellAlignment(row, column, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
						self.grid.SetCellValue(row, column + 1, "Error")
						self.grid.SetCellAlignment(row, column + 1, horiz = wx.ALIGN_CENTER, vert = wx.ALIGN_BOTTOM)
						self.grid.SetCellTextColour(row, column + 1, config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
						return
		logging.info(self.buy_candidates)
		# build new grid
		self.newGridFill(cursor_positon = cursor_positon)
	def newGridFill(self, cursor_positon = (0,0)):
		"""Create a brand-new trade-prep grid, populate every cell from current state, and swap it in.

		A fresh wx grid is built on every call (see updateGrid's note about the
		segfault when updating in place); old grids are hidden, not destroyed.
		`cursor_positon` [sic] is the (row, col) to restore the cursor near after
		the rebuild.  All cell content is staged in a list of SpreadsheetCell
		objects and written to the grid in one pass at the end.
		"""
		# --- size the grid to the main frame, falling back to the configured default ---
		size = gui_position.TradePage.newGridFill_size
		width_adjust = gui_position.TradePage.width_adjust
		height_adjust = gui_position.TradePage.height_adjust
		try:
			width, height = wx.Window.GetClientSize(config.GLOBAL_PAGES_DICT.get(config.MAIN_FRAME_UNIQUE_ID))
			size = (width-gui_position.TradePage.width_adjust, height-gui_position.TradePage.height_adjust) # find the difference between the Frame and the grid size
		except:
			pass
		# CREATE A GRID HERE
		new_grid = TradeGrid(self, -1, size=size, pos=gui_position.TradePage.new_grid_position)
		# calc rows
		self.relevant_portfolio_name_list = []
		try:
			# set initial rows, buy candidate rows checked below
			for account in self.relevant_portfolios_list:
				id_number = account.id_number
				self.relevant_portfolio_name_list.append(account.name)
			logging.info("relevant_portfolio_name_list: {}".format(self.relevant_portfolio_name_list))
			num_rows = len(self.sale_tuple_list)
			num_rows += config.DEFAULT_ROWS_ON_TRADE_PREP_PAGE_FOR_TICKERS
		except Exception as exception:
			logging.error(exception)
			logging.info("Error in loading trade grid, num_rows will be reset to zero.")
			num_rows = 0
		# ensure at least the minimum rows plus room for all buy candidates and one blank entry row
		num_rows = max(num_rows, self.default_min_rows, (self.default_rows_above_buy_candidates + len(self.buy_candidates) + 2))
		new_grid.CreateGrid(num_rows, self.default_columns)
		new_grid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, lambda event: self.updateGrid(event, grid = new_grid), new_grid)
		#You need this code to resize
		self.sizer = None
		self.inner_sizer = None
		self.sizer = wx.BoxSizer(wx.VERTICAL)
		self.inner_sizer = wx.BoxSizer(wx.VERTICAL)
		self.inner_sizer.AddSpacer(gui_position.TradePage.newGridFill_AddSpacer)
		self.inner_sizer.Add(new_grid, 1, wx.ALL|wx.EXPAND)
		self.SetSizer(self.inner_sizer)
		# NOTE(review): adding `self` to its own sizer looks unusual — confirm this is required for the resize hack
		self.sizer.Add(self, 1, wx.EXPAND|wx.ALL)
		##
		# --- mark read-only vs editable cells and colour the buy-candidate entry columns ---
		for column_num in range(self.default_columns):
			for row_num in range(num_rows):
				if row_num <= self.default_rows_above_buy_candidates or row_num > (self.default_rows_above_buy_candidates + len(self.buy_candidates) + 1) or column_num not in [self.default_buy_candidate_column,self.default_buy_candidate_quantity_column]:
					new_grid.SetReadOnly(row_num, column_num, True)
				elif column_num == self.default_buy_candidate_column:
					# the row just past the last candidate is the "not yet entered" blank entry row
					if int(row_num) == int(self.default_rows_above_buy_candidates + len(self.buy_candidates) + 1):
						new_grid.SetCellBackgroundColour(row_num, column_num, self.default_not_entered_buy_candidate_color)
					else:
						new_grid.SetCellBackgroundColour(row_num, column_num, self.default_buy_candidate_color)
				elif column_num == self.default_buy_candidate_quantity_column:
					if int(row_num) == int(self.default_rows_above_buy_candidates + len(self.buy_candidates) + 1):
						new_grid.SetCellBackgroundColour(row_num, column_num, self.default_not_entered_buy_candidate_quantity_color)
					else:
						new_grid.SetCellBackgroundColour(row_num, column_num, self.default_buy_candidate_quantity_color)
		# Defining cells separately from forming them so they are easy to edit
		# e.g. dummy_cell_var = [Row, Column, "Name/Value"]
		# cell_list = [dummy_cell_var, dummy_cell_2, etc...]
		# for cell in cell_list:
		# 	new_grid.SetCellValue(cell[0],cell[1],cell[2])
		spreadsheet_cell_list = []
		# Start with static values (large if statement for code folding purposes only)
		if "This section sets the static cell values by column" == "This section sets the static cell values by column":
			# Column 0 (using zero-based numbering for simplicity):
			this_column_number = 0
			title_with_relevant_portfolios_string = "Trade Prep (" + ",\n".join(self.relevant_portfolio_name_list) + ")"
			name_of_spreadsheet_cell = SpreadsheetCell(row = 0, col = this_column_number, text=title_with_relevant_portfolios_string)
			share_to_sell_cell = SpreadsheetCell(row = 2, col = this_column_number, text = "Sell:")
			ticker_title_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "Ticker")
			spreadsheet_cell_list.append(name_of_spreadsheet_cell)
			spreadsheet_cell_list.append(share_to_sell_cell)
			spreadsheet_cell_list.append(ticker_title_cell)
			# Column 1:
			this_column_number = 1
			num_shares_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "# shares")
			spreadsheet_cell_list.append(num_shares_cell)
			# Column 2:
			this_column_number = 2
			volume_title_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "Volume")
			spreadsheet_cell_list.append(volume_title_cell)
			# Column 3:
			# empty
			# Column 4:
			this_column_number = 4
			total_asset_cell = SpreadsheetCell( row = 0, col = this_column_number, text = "Total asset value =")
			approximate_surplus_cash_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "Approximate surplus cash from sale =")
			percent_total_cash_cell = SpreadsheetCell( row = 6, col = this_column_number, text = "%" + " Total Cash After Sale")
			portfolio_cash_available_cell = SpreadsheetCell(row = 9, col = this_column_number, text = "Portfolio Cash Available =")
			num_stocks_to_look_cell = SpreadsheetCell( row = 12, col = this_column_number, text = "# of stocks to look at for 3%" + " of portfolio each.")
			approximate_to_spend_cell = SpreadsheetCell( row = 15, col = this_column_number, text = "Approximate to spend on each (3%) stock.")
			spreadsheet_cell_list.append(total_asset_cell)
			spreadsheet_cell_list.append(approximate_surplus_cash_cell)
			spreadsheet_cell_list.append(percent_total_cash_cell)
			spreadsheet_cell_list.append(portfolio_cash_available_cell)
			spreadsheet_cell_list.append(num_stocks_to_look_cell)
			spreadsheet_cell_list.append(approximate_to_spend_cell)
			# Column 5:
			# empty
			# Column 6:
			this_column_number = 6
			num_symbol_cell = SpreadsheetCell(row = 5, col = this_column_number, text = "#")
			spreadsheet_cell_list.append(num_symbol_cell)
			# Column 7:
			this_column_number = 7
			shares_to_buy_cell = SpreadsheetCell(row = 2, col = this_column_number, text = "To buy:")
			input_ticker_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "Input ticker")
			spreadsheet_cell_list.append(shares_to_buy_cell)
			spreadsheet_cell_list.append(input_ticker_cell)
			# Column 8:
			# empty
			# Column 9:
			# empty
			# Column 10:
			this_column_number = 10
			num_shares_to_buy_cell = SpreadsheetCell(row = 2, col = this_column_number, text = "# shares for")
			three_percent_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "3%")
			spreadsheet_cell_list.append(num_shares_to_buy_cell)
			spreadsheet_cell_list.append(three_percent_cell)
			# Column 11:
			this_column_number = 11
			for_cell = SpreadsheetCell(row = 2, col = this_column_number, text = "for")
			five_percent_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "5%")
			spreadsheet_cell_list.append(for_cell)
			spreadsheet_cell_list.append(five_percent_cell)
			# Column 12:
			this_column_number = 12
			for_cell_2 = SpreadsheetCell(row = 2, col = this_column_number, text = "for")
			ten_percent_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "10%")
			spreadsheet_cell_list.append(for_cell_2)
			spreadsheet_cell_list.append(ten_percent_cell)
			# Column 13:
			# empty
			# Column 14:
			this_column_number = 14
			input_num_shares_cell = SpreadsheetCell(row = 2, col = this_column_number, text = "Input # Shares")
			input_num_shares_cell_2 = SpreadsheetCell(row = 3, col = this_column_number, text = "to Purchase")
			spreadsheet_cell_list.append(input_num_shares_cell)
			spreadsheet_cell_list.append(input_num_shares_cell_2)
			# Column 15:
			this_column_number = 15
			cost_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "Cost")
			spreadsheet_cell_list.append(cost_cell)
			# Column 16:
			# empty
			# Column 17:
			this_column_number = 17
			total_cost_cell = SpreadsheetCell(row = 1, col = this_column_number, text = "Total Cost =")
			adjusted_cash_cell = SpreadsheetCell(row = 2, col = this_column_number, text = "Adjusted Cash Available =")
			num_stocks_to_purchase_cell = SpreadsheetCell(row = 3, col = this_column_number, text = "# Stocks left to purchase at 3%")
			new_cash_percentage_cell = SpreadsheetCell(row = 4, col = this_column_number, text = "New cash %" + " of portfolio =")
			# this cell may be irrelevant
			# new_cash_total_cell = [5, this_column_number, "New cash %% of total ="]
			spreadsheet_cell_list.append(total_cost_cell)
			spreadsheet_cell_list.append(adjusted_cash_cell)
			spreadsheet_cell_list.append(num_stocks_to_purchase_cell)
			spreadsheet_cell_list.append(new_cash_percentage_cell)
			# spreadsheet_cell_list.append(new_cash_total_cell)
			# Column 18:
			# computed values only
		if "This section sets the variable cell values with relevant data" == "This section sets the variable cell values with relevant data":
			# Column 0-2 data:
			this_column_number = 0
			default_rows = config.DEFAULT_ROWS_ON_TRADE_PREP_PAGE_FOR_TICKERS
			counter = 0
			# one row per stock scheduled for sale: ticker, share count, and average daily volume
			for stock_tuple in self.sale_tuple_list:
				ticker = stock_tuple[0]
				number_of_shares_to_sell = stock_tuple[1]
				stock = utils.return_stock_by_symbol(ticker)
				correct_row = counter + default_rows
				ticker_cell = SpreadsheetCell(row = correct_row, col = this_column_number, text = ticker)
				number_of_shares_to_sell_cell = SpreadsheetCell(row = correct_row, col = this_column_number+1, text = str(number_of_shares_to_sell), value = number_of_shares_to_sell, align_right = True)
				spreadsheet_cell_list.append(ticker_cell)
				spreadsheet_cell_list.append(number_of_shares_to_sell_cell)
				try:
					avg_daily_volume = utils.return_daily_volume_if_possible(stock)
					if not avg_daily_volume:
						# missing volume data: flag the cell and queue the ticker for a re-scrape
						avg_daily_volume = "Update Volume"
						color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX
						self.stocks_to_update.append(stock.ticker)
						self.update_stocks_button.Show()
					else:
						color = None
					volume_cell = SpreadsheetCell(row = correct_row, col = this_column_number + 2, text = str(avg_daily_volume), value = avg_daily_volume, align_right = True, text_color = color)
					spreadsheet_cell_list.append(volume_cell)
				except Exception as exception:
					logging.error(exception)
				counter += 1
			# Column 4 data:
			this_column_number = 4
			## total asset value
			total_asset_value_row = 1
			total_asset_value = 0.00
			# total assets = cash plus market value of every held position across relevant portfolios
			for account in self.relevant_portfolios_list:
				total_asset_value += float(str(account.available_cash).replace("$",""))
				for ticker, quantity in account.stock_shares_dict.items():
					stock = utils.return_stock_by_symbol(ticker)
					quantity = float(str(quantity).replace(",",""))
					last_price = utils.return_last_price_if_possible(stock)
					try:
						last_price = float(last_price)
						value_of_held_stock = last_price * quantity
						total_asset_value += value_of_held_stock
					except Exception as e:
						logging.error(e)
						logging.info("No last price: {} {}".format(last_price, type(last_price)))
						try:
							logging.info("quantity {} {}".format(quantity, type(quantity)))
						except:
							logging.info("no quantity!")
						try:
							logging.info("value_of_held_stock: {} {}".format(value_of_held_stock, type(value_of_held_stock)))
						except:
							logging.info("No value_of_held_stock available")
			logging.info("total_asset_value: {} {}".format(total_asset_value, type(total_asset_value)))
			total_asset_value_cell = SpreadsheetCell(row = total_asset_value_row, col = this_column_number, text = config.locale.currency(total_asset_value, grouping = True), value = total_asset_value, align_right = True)
			spreadsheet_cell_list.append(total_asset_value_cell)
			## approximate surplus cash
			approximate_surplus_cash_row = 4
			value_of_all_stock_to_sell = 0.00
			for stock_tuple in self.sale_tuple_list:
				ticker = stock_tuple[0]
				number_of_shares_to_sell = int(stock_tuple[1])
				stock = utils.return_stock_by_symbol(ticker)
				last_price = utils.return_last_price_if_possible(stock)
				try:
					value_of_single_stock_to_sell = float(last_price) * float(number_of_shares_to_sell)
					value_of_all_stock_to_sell += value_of_single_stock_to_sell
				except Exception as e:
					logging.error(e)
					logging.info("No last price: {} {}".format(last_price, type(last_price)))
					logging.info("number_of_shares_to_sell: {} {}".format(number_of_shares_to_sell, type(number_of_shares_to_sell)))
			logging.info("value_of_all_stock_to_sell: {} {}".format(value_of_all_stock_to_sell, type(value_of_all_stock_to_sell)))
			surplus_cash_cell = SpreadsheetCell(row = approximate_surplus_cash_row, col = this_column_number, text = config.locale.currency(value_of_all_stock_to_sell, grouping = True), value = value_of_all_stock_to_sell, align_right = True)
			spreadsheet_cell_list.append(surplus_cash_cell)
			## percent of portfolio that is cash after sale
			percent_cash_row = 7
			total_cash = 0.00
			for account in self.relevant_portfolios_list:
				total_cash += float(str(account.available_cash).replace("$",""))
			total_cash += value_of_all_stock_to_sell
			if total_cash != 0 and total_asset_value != 0:
				percent_cash = total_cash / total_asset_value
				percent_cash = round(percent_cash * 100)
				percent_cash = str(percent_cash) + "%"
			else:
				percent_cash = "Null"
			percent_cash_cell = SpreadsheetCell(row = percent_cash_row, col = this_column_number, text = percent_cash, align_right = True)
			spreadsheet_cell_list.append(percent_cash_cell)
			## portfolio cash available after sale
			cash_after_sale_row = 10
			total_cash_after_sale = total_cash # from above
			total_cash_after_sale_cell = SpreadsheetCell(row = cash_after_sale_row, col = this_column_number, text = config.locale.currency(total_cash_after_sale, grouping = True), value = total_cash_after_sale, align_right = True)
			spreadsheet_cell_list.append(total_cash_after_sale_cell)
			## Number of stocks to purchase at 3% each
			stocks_at_three_percent_row = 13
			three_percent_of_all_assets = 0.03 * total_asset_value # from above
			if three_percent_of_all_assets == 0: # don't divide by zero
				number_of_stocks_at_3_percent = 0
			else:
				number_of_stocks_at_3_percent = total_cash / three_percent_of_all_assets # total_cash defined above
				number_of_stocks_at_3_percent = int(math.floor(number_of_stocks_at_3_percent)) # always round down
			stocks_at_three_percent_cell = SpreadsheetCell(row = stocks_at_three_percent_row, col = this_column_number, text = str(number_of_stocks_at_3_percent), value = number_of_stocks_at_3_percent, align_right = True)
			spreadsheet_cell_list.append(stocks_at_three_percent_cell)
			## Approximate to spend on each stock at 3%
			three_percent_of_all_assets_row = 16
			three_percent_of_all_assets_cell = SpreadsheetCell(row = three_percent_of_all_assets_row, col = this_column_number, text = config.locale.currency(three_percent_of_all_assets, grouping = True), value = three_percent_of_all_assets, align_right = True)
			spreadsheet_cell_list.append(three_percent_of_all_assets_cell)
		# Now, add buy candidate tickers:
		count = 0
		# This is the a very similar iteration as above
		for column_num in range(self.default_columns):
			for row_num in range(num_rows):
				# iterate over all cells
				if row_num <= self.default_rows_above_buy_candidates or row_num > (self.default_rows_above_buy_candidates + len(self.buy_candidates) + 1) or column_num not in [self.default_buy_candidate_column, self.default_buy_candidate_quantity_column]:
					# basically skip all irrelevant rows for these purposes
					pass
				elif column_num == self.default_buy_candidate_column:
					if count in range(len(self.buy_candidates)):
						this_ticker = self.buy_candidates[count]
						company_to_buy_number = SpreadsheetCell(row = row_num, col = column_num - 1, text = str(count + 1))
						company_to_buy_ticker = SpreadsheetCell(row = row_num, col = column_num, text = this_ticker)
						spreadsheet_cell_list.append(company_to_buy_number)
						spreadsheet_cell_list.append(company_to_buy_ticker)
						stock = utils.return_stock_by_symbol(this_ticker)
						if stock:
							company_to_buy_firm_name = SpreadsheetCell(row = row_num, col = column_num + 1, text = str(stock.firm_name))
							spreadsheet_cell_list.append(company_to_buy_firm_name)
							## This was a good idea... but the gridcellchoiceeditor was really unweildy, so a made a pop up class on execute click instead.
							# portfolio_dropdown = wx.grid.GridCellChoiceEditor(["test1", "test2"], allowOthers=True)
							# new_grid.SetCellEditor(row_num, self.default_account_dropdown_column, portfolio_dropdown)
							# new_grid.SetCellBackgroundColour(row_num, self.default_account_dropdown_column, self.default_buy_candidate_quantity_color)
							# logging.info('HERE!')
							try:
								last_price = float(utils.return_last_price_if_possible(stock))
								company_to_buy_price = SpreadsheetCell(row = row_num, col = column_num + 2, text = config.locale.currency(last_price, grouping = True), value = last_price, align_right = True)
								spreadsheet_cell_list.append(company_to_buy_price)
								update_error = False
							except Exception as e:
								logging.error(e)
								company_to_buy_price_error = SpreadsheetCell( row = row_num, col = column_num + 2, text = "(Error: Update %s)" % stock.symbol, text_color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
								spreadsheet_cell_list.append(company_to_buy_price_error)
								update_error = True
								self.stocks_to_update.append(stock.symbol)
								utils.remove_list_duplicates(self.stocks_to_update)
								# NOTE(review): this format string has four placeholders but five arguments — the last argument is silently dropped
								logging.info("{} {} {} {}".format(self.stocks_to_update, "Row:", row_num, "Column:", column_num))
								self.update_stocks_button.Show()
							# share counts for 3% / 5% / 10% of total assets, written into consecutive columns
							column_shift = 3
							for percent in [0.03, 0.05, 0.10]:
								color = None
								if not update_error:
									# total_asset_value calculated above
									max_cost = total_asset_value * percent
									number_of_shares_to_buy = int(math.floor(max_cost / last_price))
								else:
									number_of_shares_to_buy = "-"
									color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX
								number_of_shares_to_buy_cell = SpreadsheetCell(row = row_num, col = column_num + column_shift, text = str(number_of_shares_to_buy), value = number_of_shares_to_buy, align_right = True, text_color = color)
								spreadsheet_cell_list.append(number_of_shares_to_buy_cell)
								column_shift += 1
							# restore any previously-entered purchase quantity for this row
							for this_tuple in self.buy_candidate_tuples:
								if this_tuple[0] == row_num:
									quantity = this_tuple[1]
									buy_candidate_quantity_cell = SpreadsheetCell(row = row_num, col = self.default_buy_candidate_quantity_column, text = str(quantity), value = quantity, align_right = False) # align right looks bad here, though it is an int
									spreadsheet_cell_list.append(buy_candidate_quantity_cell)
					count += 1
		# now calculate final values
		buy_cost_column = 15
		total_buy_cost = 0.00
		# per-row purchase cost = last price * entered quantity; summed into total_buy_cost
		for row_num in range(num_rows):
			if row_num > self.default_rows_above_buy_candidates:
				quantity_cell = [cell for cell in spreadsheet_cell_list if (cell.row == row_num and cell.col == (buy_cost_column - 1))]
				if quantity_cell:
					if len(quantity_cell) > 1:
						logging.error("Error: too many cells returned for cell list")
						return
					quantity = quantity_cell[0].text
				else:
					quantity = None
				if quantity is not None:
					ticker_cell = [cell for cell in spreadsheet_cell_list if (cell.row == row_num and cell.col == (buy_cost_column - 8))]
					if ticker_cell:
						if len(ticker_cell) > 1:
							logging.error("Error: too many cells returned for cell list")
							return
						ticker = ticker_cell[0].text
					else:
						ticker = None
					logging.info(ticker)
					stock = utils.return_stock_by_symbol(ticker)
					if stock:
						quantity = int(quantity)
						cost = float(utils.return_last_price_if_possible(stock)) * quantity
						total_buy_cost += cost
						cost_cell = SpreadsheetCell(row = row_num, col = buy_cost_column, text = config.locale.currency(cost, grouping = True), value = cost, align_right = True)
						spreadsheet_cell_list.append(cost_cell)
		# column 18 (final column)
		this_column_number = 18
		total_cost_row = 1
		total_cost_sum_cell = SpreadsheetCell(row = total_cost_row, col = this_column_number, text = config.locale.currency(total_buy_cost, grouping = True), value = total_buy_cost, align_right = True)
		spreadsheet_cell_list.append(total_cost_sum_cell)
		adjusted_cash_row = 2
		adjusted_cash_result = total_cash - total_buy_cost
		# negative remaining cash is rendered in the warning colour
		if adjusted_cash_result >= 0:
			color = "black"
		else:
			color = config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX
		adjusted_cash_result_cell = SpreadsheetCell(row = adjusted_cash_row, col = this_column_number, text = config.locale.currency(adjusted_cash_result, grouping = True), value = adjusted_cash_result, align_right = True, text_color = color)
		spreadsheet_cell_list.append(adjusted_cash_result_cell)
		number_of_stocks_still_yet_to_buy_row = 3
		if total_asset_value > 0:
			number_of_stocks_still_yet_to_buy = int(math.floor(adjusted_cash_result / (total_asset_value * 0.03)))
		else:
			number_of_stocks_still_yet_to_buy = 0
		number_of_stocks_still_yet_to_buy_cell = SpreadsheetCell(row = number_of_stocks_still_yet_to_buy_row, col = this_column_number, text = str(number_of_stocks_still_yet_to_buy), value = number_of_stocks_still_yet_to_buy, align_right = True, text_color = color)
		spreadsheet_cell_list.append(number_of_stocks_still_yet_to_buy_cell)
		new_percent_cash_row = 4
		if adjusted_cash_result != 0 and total_asset_value != 0:
			new_percent_cash = round(100 * adjusted_cash_result / total_asset_value)
		else:
			new_percent_cash = 0
		new_percent_cash_cell = SpreadsheetCell(row = new_percent_cash_row, col = this_column_number, text = "%d" % int(new_percent_cash) + "%", align_right = True, text_color = color)
		spreadsheet_cell_list.append(new_percent_cash_cell)
		# Finally, set cell values in list:
		for cell in spreadsheet_cell_list:
			new_grid.SetCellValue(cell.row, cell.col, cell.text)
			if cell.align_right:
				new_grid.SetCellAlignment(cell.row, cell.col, horiz = wx.ALIGN_RIGHT, vert = wx.ALIGN_BOTTOM)
			if cell.text_color:
				new_grid.SetCellTextColour(cell.row, cell.col, cell.text_color)
		new_grid.AutoSizeColumns()
		new_grid.SetGridCursor(cursor_positon[0] + 1, cursor_positon[1])
		# hide (not destroy) every previous grid — see the segfault note in updateGrid
		for grid in self.grid_list:
			grid.Hide()
		self.grid_list.append(new_grid)
		if len(self.grid_list) > 1:
			logging.info("number of grids created = {}".format(len(self.grid_list)))
		new_grid.SetFocus()
		self.grid = new_grid
	def saveGridAs(self, event, title = "Trade_Prep"):
		"""Export the current trade grid via the shared save-grid helper; `title` seeds the output name."""
		utils.save_grid_as(wx_window = self, wx_grid = self.grid, title=title)
class UserFunctionsMetaPage(Tab):
	"""Notebook tab that hosts one UserFunctionsPage editor per user-editable function file."""
	def __init__(self, parent):
		self.title = "Edit Functions"
		self.uid = config.USER_FUNCTIONS_PAGE_UNIQUE_ID
		wx.Panel.__init__(self, parent)
		####
		# NOTE(review): wx.EXPAND is a sizer flag, not a dimension — using it as a size here looks accidental; confirm intent
		user_function_page_panel = wx.Panel(self, -1, pos=(0,5), size=( wx.EXPAND, wx.EXPAND))
		user_function_notebook = wx.Notebook(user_function_page_panel)
		# build one editor sub-page per entry in the user-created-function config list
		for function_page_dict in functions_config.user_created_function_ref_dict_list:
			function_page_obj = FunctionPage(
				title = function_page_dict.get("title"),
				uid_config_reference = function_page_dict.get("uid_config_reference"),
				general_text = function_page_dict.get("general_text"),
				additional_text = function_page_dict.get("additional_text"),
				save_button_text = function_page_dict.get("save_button_text"),
				reset_button_text = function_page_dict.get("reset_button_text"),
				function_that_loads_text_of_user_created_functions = function_page_dict.get("function_that_loads_text_of_user_created_functions"),
				save_function = function_page_dict.get("save_function"),
				function_to_load_defaults = function_page_dict.get("function_to_load_defaults"),
				)
			new_functions_page = UserFunctionsPage(user_function_notebook, function_page_obj)
			user_function_notebook.AddPage(new_functions_page, function_page_obj.title)
		sizer2 = wx.BoxSizer()
		sizer2.Add(user_function_notebook, 1, wx.EXPAND)
		self.SetSizer(sizer2)
		####
class UserFunctionsPage(Tab):
    """A single editable function-file page: a multiline editor plus
    Save / Reset-to-default buttons, configured by a FunctionPage object."""
    def __init__(self, parent, function_page_obj):
        # function_page_obj bundles the page's labels and its load/save callbacks.
        self.function_page_obj = function_page_obj
        self.title = self.function_page_obj.title
        self.uid = self.function_page_obj.uid
        self.parent = parent
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddSpacer(gui_position.UserFunctionsPage.AddSpacer)
        wx.Panel.__init__(self, parent)
        self.general_text = wx.StaticText(self, -1, self.function_page_obj.general_text, gui_position.UserFunctionsPage.general_text)
        self.additional_text = wx.StaticText(self, -1, self.function_page_obj.additional_text, (gui_position.UserFunctionsPage.additional_text))
        self.save_button = wx.Button(self, label=self.function_page_obj.save_button_text, pos=gui_position.UserFunctionsPage.save_button, size=(-1, -1))
        self.save_button.Bind(wx.EVT_BUTTON, self.confirmSave, self.save_button)
        self.reset_button = wx.Button(self, label=self.function_page_obj.reset_button_text, pos=gui_position.UserFunctionsPage.reset_button, size=(-1, -1))
        self.reset_button.Bind(wx.EVT_BUTTON, self.confirmResetToDefault, self.reset_button)
        # Load the current on-disk text of the user-editable function file.
        self.function_file_text = self.function_page_obj.function_that_loads_text_of_user_created_functions()
        self.file_display = wx.TextCtrl(self, -1,
            self.function_file_text,
            gui_position.UserFunctionsPage.file_display_position,
            size=gui_position.UserFunctionsPage.file_display_size,
            style=wx.TE_MULTILINE,
        )
        self.sizer.Add(self.file_display, 1, wx.ALL | wx.EXPAND)
        self.file_display.Show()
        self.SetSizer(self.sizer)
        logging.info("%s loaded" % self.function_page_obj.title)

    def confirmSave(self, event):
        """Ask for confirmation, then persist the editor contents."""
        confirm = wx.MessageDialog(None,
            "Are you sure you want to save your work? This action cannot be undone.",
            'Confirm Save',
            style=wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Save"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.saveFunctionsFile()

    def saveFunctionsFile(self, text=None):
        """Write *text* (or the current editor contents) via the save callback."""
        if text:
            text_to_save = text
        else:
            text_to_save = self.file_display.GetValue()
        self.function_page_obj.save_function(text_to_save)

    def confirmResetToDefault(self, event):
        """Ask for confirmation, then restore the file's default contents."""
        confirm = wx.MessageDialog(None,
            "Are you sure you reset to file default? This action cannot be undone.",
            'Confirm Reset',
            style=wx.YES_NO
        )
        confirm.SetYesNoLabels(("&Reset"), ("&Cancel"))
        yesNoAnswer = confirm.ShowModal()
        confirm.Destroy()
        if yesNoAnswer == wx.ID_YES:
            self.resetToDefault()

    def resetToDefault(self):
        """Replace the editor with the default file text, save it, and rebuild
        the TextCtrl sized relative to the main frame when possible."""
        self.file_display.Destroy()
        self.function_file_text = self.function_page_obj.function_to_load_defaults()
        self.saveFunctionsFile(text=self.function_file_text)
        size = gui_position.UserFunctionsPage.resetToDefault_size
        try:
            width, height = gui_position.main_frame_size()
            # Bug fix: the vertical offset was referenced as a bare name
            # (NameError), so this branch always fell through to the default
            # size via the except below. Qualify it like the horizontal offset.
            # Find the difference between the Frame and the grid size.
            size = (width - gui_position.UserFunctionsPage.resetToDefault_horizontal_offset,
                    height - gui_position.UserFunctionsPage.resetToDefault_vertical_offset)
        except Exception:
            pass
        self.file_display = wx.TextCtrl(self, -1,
            self.function_file_text,
            gui_position.UserFunctionsPage.file_display_position,
            size=size,  # bug fix: the computed size was previously never used
            style=wx.TE_MULTILINE,
        )
        self.file_display.Show()
# ###########################################################################################
# ###################### wx grids #######################################################
## NOTE: the grid classes below are believed to be unused (superseded by the
## MegaGrid classes); scheduled for removal once confirmed.
class GridAllStocks(wx.grid.Grid):
    """Grid sized to hold every known stock; column count equals the widest
    public-attribute set found among them."""
    def __init__(self, *args, **kwargs):
        wx.grid.Grid.__init__(self, *args, **kwargs)
        self.num_rows = len(config.GLOBAL_STOCK_DICT)
        self.num_columns = 0
        for stock in utils.return_all_stocks():
            # Count attributes that are not private/dunder.
            visible = sum(1 for name in dir(stock) if not name.startswith('_'))
            self.num_columns = max(self.num_columns, visible)
class StockScreenGrid(wx.grid.Grid):
    """Grid sized for the stocks in the currently active screen."""
    def __init__(self, *args, **kwargs):
        wx.grid.Grid.__init__(self, *args, **kwargs)
        #############################################
        stock_list = config.CURRENT_SCREEN_LIST
        #############################################
        self.num_rows = len(stock_list)
        # Column count equals the widest public-attribute set among the stocks.
        self.num_columns = 0
        for stock in stock_list:
            visible = sum(1 for name in dir(stock) if not name.startswith('_'))
            self.num_columns = max(self.num_columns, visible)
class SavedScreenGrid(wx.grid.Grid):
    """Grid sized for the currently loaded *saved* screen.

    Identical to StockScreenGrid except for the config list consulted."""
    def __init__(self, *args, **kwargs):
        wx.grid.Grid.__init__(self, *args, **kwargs)
        #############################################
        stock_list = config.CURRENT_SAVED_SCREEN_LIST
        #############################################
        self.num_rows = len(stock_list)
        # Column count equals the widest public-attribute set among the stocks.
        self.num_columns = 0
        for stock in stock_list:
            visible = sum(1 for name in dir(stock) if not name.startswith('_'))
            self.num_columns = max(self.num_columns, visible)
class RankPageGrid(wx.grid.Grid):
    """Grid used by the rank page.

    NOTE(review): reads CURRENT_SAVED_SCREEN_LIST, making it identical to
    SavedScreenGrid -- possibly intentional reuse; confirm before changing."""
    def __init__(self, *args, **kwargs):
        wx.grid.Grid.__init__(self, *args, **kwargs)
        #############################################
        stock_list = config.CURRENT_SAVED_SCREEN_LIST
        #############################################
        self.num_rows = len(stock_list)
        # Column count equals the widest public-attribute set among the stocks.
        self.num_columns = 0
        for stock in stock_list:
            visible = sum(1 for name in dir(stock) if not name.startswith('_'))
            self.num_columns = max(self.num_columns, visible)
# end should remove, probably
class SalePrepGrid(wx.grid.Grid):
    """Plain wx grid used on the sale-prep page; adds no behavior."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class TradeGrid(wx.grid.Grid):
    """Plain wx grid used on the trade page; adds no behavior."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class AccountDataGrid(wx.grid.Grid):
    """Plain wx grid used for account data; adds no behavior."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class MegaTable(wx.grid.GridTableBase):
    """
    A custom wxGrid Table using user supplied data.

    data is a list of (rowname, dictionary) pairs, where
    dictionary.get(colname, None) returns the data for column colname.
    """
    def __init__(self, data, colnames, plugins=None):
        """data is a list of the form
        [(rowname, dictionary)];
        dictionary.get(colname, None) returns the data for column colname.
        """
        # The base class must be initialized *first*
        wx.grid.GridTableBase.__init__(self)
        self.data = data
        self.colnames = colnames
        # Bug fix: the old `plugins={}` was a shared mutable default argument;
        # use None as the sentinel (behavior unchanged for all callers).
        self.plugins = plugins or {}
        # Remember the current row/column counts so ResetView() can detect
        # whether the table has changed size.
        self._rows = self.GetNumberRows()
        self._cols = self.GetNumberCols()

    def GetNumberCols(self):
        return len(self.colnames)

    def GetNumberRows(self):
        return len(self.data)

    def GetColLabelValue(self, col):
        return self.colnames[col]

    def GetRowLabelValue(self, row):
        # Bug fix: wx calls GetRowLabelValue (singular); the old name
        # GetRowLabelValues was never invoked by the grid.
        return self.data[row][0]

    def GetValue(self, row, col):
        return str(self.data[row][1].get(self.GetColLabelValue(col), ""))

    def GetRawValue(self, row, col):
        return self.data[row][1].get(self.GetColLabelValue(col), "")

    def SetValue(self, row, col, value):
        self.data[row][1][self.GetColLabelValue(col)] = value

    def ResetView(self, grid):
        """
        (wx.grid.Grid) -> Reset the grid view. Call this to
        update the grid if rows and columns have been added or deleted.
        """
        grid.BeginBatch()
        # Bug fix: the old wxPython 2.x global names (wxGRIDTABLE_*,
        # wxGridTableMessage) no longer exist; use the wx.grid namespace.
        for current, new, delmsg, addmsg in [
            (self._rows, self.GetNumberRows(),
             wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED),
            (self._cols, self.GetNumberCols(),
             wx.grid.GRIDTABLE_NOTIFY_COLS_DELETED, wx.grid.GRIDTABLE_NOTIFY_COLS_APPENDED),
        ]:
            if new < current:
                msg = wx.grid.GridTableMessage(self, delmsg, new, current - new)
                grid.ProcessTableMessage(msg)
            elif new > current:
                msg = wx.grid.GridTableMessage(self, addmsg, new - current)
                grid.ProcessTableMessage(msg)
        self.UpdateValues(grid)
        grid.EndBatch()
        self._rows = self.GetNumberRows()
        self._cols = self.GetNumberCols()
        # update the column rendering plugins
        self._updateColAttrs(grid)
        # "Jiggle" the size to get the scrollbars to recalibrate when the
        # underlying grid changes.
        h, w = grid.GetSize()
        grid.SetSize((h + 1, w))
        grid.SetSize((h, w))
        grid.ForceRefresh()

    def UpdateValues(self, grid):
        """Update all displayed values."""
        # This sends an event to the grid table to update all of the values.
        msg = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
        grid.ProcessTableMessage(msg)

    def _updateColAttrs(self, grid):
        """
        wx.grid.Grid -> update the column attributes to add the
        appropriate renderer given the column name (renderers are
        stored in the self.plugins dictionary); otherwise default
        to the default renderer.
        """
        col = 0
        for colname in self.colnames:
            attr = wx.grid.GridCellAttr()
            if colname in self.plugins:
                renderer = self.plugins[colname](self)
                if renderer.colSize:
                    grid.SetColSize(col, renderer.colSize)
                if renderer.rowSize:
                    grid.SetDefaultRowSize(renderer.rowSize)
                attr.SetReadOnly(True)  # bug fix: `true` is not a Python name
                attr.SetRenderer(renderer)
            grid.SetColAttr(col, attr)
            col += 1

    # ------------------------------------------------------
    # begin the added code to manipulate the table (non wx related)
    def AppendRow(self, row):
        """Insert a placeholder row at index *row* (demo-style helper)."""
        entry = {}
        for name in self.colnames:
            entry[name] = "Appended_%i" % row
        # XXX Hack: entry["A"] can only be between 1..4
        entry["A"] = random.choice(range(4))
        self.data.insert(row, ["Append_%i" % row, entry])

    def DeleteCols(self, cols):
        """
        cols -> delete the columns from the dataset;
        cols holds the column indices.
        """
        # We cheat here and just remove the name from the list of column
        # names; the data remains but is no longer shown.
        deleteCount = 0
        cols = cols[:]
        cols.sort()
        for i in cols:
            self.colnames.pop(i - deleteCount)
            # Advance the delete count so later indices stay valid.
            deleteCount += 1
        if not len(self.colnames):
            self.data = []

    def DeleteRows(self, rows):
        """
        rows -> delete the rows from the dataset;
        rows holds the row indices.
        """
        deleteCount = 0
        rows = rows[:]
        rows.sort()
        for i in rows:
            self.data.pop(i - deleteCount)
            # Advance the delete count so later indices stay valid.
            deleteCount += 1

    def SortColumn(self, col):
        """
        col -> sort the data based on the column indexed by col.
        """
        name = self.colnames[col]
        _data = []
        for row in self.data:
            rowname, entry = row
            _data.append((entry.get(name, None), row))
        # NOTE(review): may raise TypeError on Python 3 if the column mixes
        # incomparable types (e.g. None with str) -- behavior preserved as-is.
        _data.sort()
        self.data = []
        for sortvalue, row in _data:
            self.data.append(row)
    # end table manipulation code
    # ----------------------------------------------------------
class MegaGrid(wx.grid.Grid):
    """Grid backed by a MegaTable, with right-click popup menus on row and
    column labels for append/delete/sort operations."""
    def __init__(self, parent, data, colnames, plugins=None, size=(1000, 680), pos=(0, 50), enable_editing=False
                 ):
        """parent, data, colnames, plugins=None
        Initialize a grid using the data defined in data and colnames
        (see MegaTable for a description of the data format).
        plugins is a dictionary of columnName -> column renderers.
        """
        # The base class must be initialized *first*
        wx.grid.Grid.__init__(self, parent, -1, size=size, pos=pos)
        self._table = MegaTable(data, colnames, plugins)
        self.SetTable(self._table)
        self._plugins = plugins
        wx.EvtHandler.Bind(self, wx.grid.EVT_GRID_LABEL_RIGHT_CLICK, self.OnLabelRightClicked)
        self.EnableEditing(enable_editing)

    def Reset(self):
        """Reset the view based on the data in the table. Call
        this when rows are added or destroyed."""
        self._table.ResetView(self)

    def OnLabelRightClicked(self, evt):
        # Did we click on a row label or a column label?
        row, col = evt.GetRow(), evt.GetCol()
        if row == -1:
            self.colPopup(col, evt)
        elif col == -1:
            self.rowPopup(row, evt)

    def rowPopup(self, row, evt):
        """(row, evt) -> display a popup menu when a row label is right clicked."""
        # Bug fix: the old wxPython 2.x globals (wxNewId, wxMenu, wxPoint,
        # EVT_MENU) are NameErrors in modern wx; use wx.NewIdRef/wx.Menu/
        # wx.Point and Bind(). Also integer-divide: wx.Point needs ints.
        appendID = wx.NewIdRef()
        deleteID = wx.NewIdRef()
        x = self.GetRowSize(row) // 2
        if not self.GetSelectedRows():
            self.SelectRow(row)
        menu = wx.Menu()
        xo, yo = evt.GetPosition()
        menu.Append(appendID, "Append Row")
        menu.Append(deleteID, "Delete Row(s)")
        def append(event, self=self, row=row):
            self._table.AppendRow(row)
            self.Reset()
        def delete(event, self=self, row=row):
            rows = self.GetSelectedRows()
            self._table.DeleteRows(rows)
            self.Reset()
        self.Bind(wx.EVT_MENU, append, id=appendID)
        self.Bind(wx.EVT_MENU, delete, id=deleteID)
        self.PopupMenu(menu, wx.Point(x, yo))
        menu.Destroy()

    def colPopup(self, col, evt):
        """(col, evt) -> display a popup menu when a column label is
        right clicked."""
        x = self.GetColSize(col) // 2  # computed but unused by the original; kept
        menu = wx.Menu()
        id1 = wx.NewIdRef()
        sortID = wx.NewIdRef()
        xo, yo = evt.GetPosition()
        self.SelectCol(col)
        cols = self.GetSelectedCols()
        self.Refresh()
        menu.Append(id1, "Delete Col(s)")
        menu.Append(sortID, "Sort Column")
        def delete(event, self=self, col=col):
            cols = self.GetSelectedCols()
            self._table.DeleteCols(cols)
            self.Reset()
        def sort(event, self=self, col=col):
            self._table.SortColumn(col)
            self.Reset()
        self.Bind(wx.EVT_MENU, delete, id=id1)
        if len(cols) == 1:
            # Sorting only makes sense with a single selected column.
            self.Bind(wx.EVT_MENU, sort, id=sortID)
        self.PopupMenu(menu, wx.Point(xo, 0))
        menu.Destroy()
# --------------------------------------------------------------------
# Sample wxGrid renderers
# ###########################################################################################
# ############ Spreadsheet Functions ########################################################
# # Had to switch to mega grids because data got too large!
def create_megagrid_from_stock_list(stock_list, parent, size=gui_position.full_spreadsheet_size_position_tuple[0], pos=gui_position.full_spreadsheet_size_position_tuple[1]):
    """Build a MegaGrid spreadsheet with one row per stock and one column per
    known attribute; returns the MegaGrid (stocks that failed to load are
    skipped)."""
    # Find all attribute names.
    attribute_list = list(config.GLOBAL_ATTRIBUTE_SET)
    attribute_list.sort(key=lambda x: x.lower())
    # Promote the identifying columns to the front (list.insert cannot fail,
    # so the old try/except wrappers were dead code and have been removed).
    attribute_list.insert(0, 'symbol')
    attribute_list.insert(1, 'firm_name')
    # data is a list of (rowname, dictionary) pairs where
    # dictionary.get(colname, None) returns the data for column colname.
    data = [("", {})]
    # Remove stocks that failed to load.
    stock_list = [stock for stock in stock_list if stock is not None]
    if stock_list:
        try:
            # Bug fix: enumerate() replaces the O(n^2) list.index() call per
            # stock (which also mis-numbered duplicate entries).
            data = [(i, stock.__dict__) for i, stock in enumerate(stock_list)]
        except Exception as e:
            logging.error(e)
            pp.pprint(stock_list)
    spreadsheet = MegaGrid(parent=parent, data=data, colnames=attribute_list, size=size, pos=pos)
    spreadsheet.AutoSizeColumn(1)
    return spreadsheet
# Lightweight (value, stock) pair used by the rank-page spreadsheet builder.
Ranked_Tuple_Reference = namedtuple("Ranked_Tuple_Reference", "value stock")
def create_ranked_megagrid_from_tuple_list(ranked_tuple_list, parent, rank_name, size=gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[0], pos=gui_position.RankPage.rank_page_spreadsheet_size_position_tuple[1]):
    'ranked named_tuple reference: ["value", "stock"], rank_name should be obtained from a dropdown'
    # Find all attribute names.
    attribute_list = list(config.GLOBAL_ATTRIBUTE_SET)
    # Promote the identifying columns, then the rank column (list.insert
    # cannot fail, so the old try/except wrappers were dead code).
    attribute_list.insert(0, 'symbol')
    attribute_list.insert(1, 'firm_name')
    attribute_list.insert(2, rank_name)
    # data is a list of (rowname, dictionary) pairs where
    # dictionary.get(colname, None) returns the data for column colname.
    data = []
    # Bug fix: enumerate() replaces the O(n^2) list.index() call per tuple.
    for index, this_tuple in enumerate(ranked_tuple_list):
        # NOTE: this intentionally writes the rank value into the stock's own
        # __dict__ so the grid can show it as a regular column.
        stock_dict = this_tuple.stock.__dict__
        stock_dict[rank_name] = this_tuple.value
        data.append((index, stock_dict))
    spreadsheet = MegaGrid(parent=parent, data=data, colnames=attribute_list, size=size, pos=pos)
    spreadsheet.AutoSizeColumn(1)
    return spreadsheet
def create_account_spread_sheet(
        wxWindow,
        account_obj,
        held_ticker_list=None  # not currently used; kept for interface compatibility
        , size=gui_position.PortfolioAccountTab.portfolio_page_spreadsheet_size_position_tuple[0]
        , position=gui_position.PortfolioAccountTab.portfolio_page_spreadsheet_size_position_tuple[1]
        , enable_editing=False
        ):
    """Build the portfolio grid for *account_obj*: one row per held stock
    (shares, last close, value, cost basis, % change) plus cash and totals
    rows. Returns the populated wx.grid.Grid."""
    # Bug fix: the default was a shared mutable list (held_ticker_list=[]);
    # use the None sentinel instead.
    if held_ticker_list is None:
        held_ticker_list = []
    stock_list = []
    for ticker in account_obj.stock_shares_dict:
        stock = config.GLOBAL_STOCK_DICT.get(ticker)
        if stock:
            if stock not in stock_list:
                stock_list.append(stock)
        else:
            logging.info("Ticker: {} not found...".format(ticker))
    stock_list.sort(key=lambda x: x.ticker)
    attribute_list = ['symbol', 'firm_name', "Shares Held", "Last Close", "Value", "Cost Basis", "Change"]
    num_columns = len(attribute_list)
    num_rows = len(stock_list)
    num_rows += 5  # one for cash, two for totals, two for portfolio totals
    if num_columns < 2:
        # if there are no stocks in the portfolio
        # NOTE(review): num_columns is fixed at 7 above, so this guard can
        # never fire; it may have meant num_rows -- TODO confirm intent.
        num_columns = 2
    screen_grid = wx.grid.Grid(wxWindow, -1, size=size, pos=position)
    screen_grid.CreateGrid(num_rows, num_columns)
    screen_grid.EnableEditing(enable_editing)
    # fill in column headers
    for attribute in attribute_list:
        screen_grid.SetColLabelValue(attribute_list.index(attribute), str(attribute))
    row_count = 0
    total_equity_value = 0.
    total_cost_basis = 0.
    total_equity_value_change = 0.
    total_equity_value_fail = False
    cost_basis_fail = False
    current_time = time.time()
    for stock in stock_list:
        col_count = 0
        ticker = stock.symbol
        if ticker:
            screen_grid.SetCellValue(row_count, 0, ticker)
        firm_name = stock.firm_name
        if firm_name:
            screen_grid.SetCellValue(row_count, 1, firm_name)
        shares_held = account_obj.stock_shares_dict.get(stock.symbol)
        if shares_held:
            # Show whole-share positions without a trailing ".0".
            if float(shares_held).is_integer():
                shares_held = int(shares_held)
            screen_grid.SetCellValue(row_count, 2, str(shares_held))
            screen_grid.SetCellAlignment(row_count, 2, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
        last_close_and_update_tuple = utils.return_last_close_and_last_update_tuple(stock)
        last_close = last_close_and_update_tuple[0]
        last_update_for_last_close = last_close_and_update_tuple[1]
        if last_close:
            if (current_time - last_update_for_last_close) < config.PORTFOLIO_PRICE_REFRESH_TIME:
                screen_grid.SetCellValue(row_count, 3, config.locale.currency(last_close, grouping=True))
                screen_grid.SetCellAlignment(row_count, 3, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
            else:
                # Price is stale: flag it and drop the value from the totals.
                screen_grid.SetCellValue(row_count, 3, "update prices")
                screen_grid.SetCellAlignment(row_count, 3, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
                screen_grid.SetCellTextColour(row_count, 3, config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
                last_close = None
        calc_equity_value = False
        try:
            shares_held = float(shares_held)
            last_close = float(last_close)
            calc_equity_value = True
        except (TypeError, ValueError):
            # Narrowed from a bare except: float() raises only these here.
            pass
        equity_value = None
        if calc_equity_value:
            equity_value = shares_held * last_close
        if equity_value:
            screen_grid.SetCellValue(row_count, 4, config.locale.currency(equity_value, grouping=True))
            screen_grid.SetCellAlignment(row_count, 4, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
            total_equity_value += equity_value
        else:
            total_equity_value_fail = True
        cost_basis = account_obj.cost_basis_dict.get(stock.symbol)
        if cost_basis:
            screen_grid.SetCellValue(row_count, 5, config.locale.currency(cost_basis, grouping=True))
            screen_grid.SetCellAlignment(row_count, 5, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
            total_cost_basis += cost_basis
        else:
            cost_basis_fail = True
        calc_equity_profit = False
        try:
            cost_basis = float(cost_basis)
            equity_value = float(equity_value)
            calc_equity_profit = True
        except (TypeError, ValueError):
            pass
        equity_value_change = None
        if calc_equity_profit:
            # Percent change of current value over cost basis.
            equity_value_change = ((equity_value / cost_basis) - 1) * 100
        if equity_value_change:
            screen_grid.SetCellValue(row_count, 6, "%.2f" % (equity_value_change) + "%")
            if equity_value_change < 0:
                screen_grid.SetCellTextColour(row_count, 6, config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
            screen_grid.SetCellAlignment(row_count, 6, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
        row_count += 1
    row_count += 1  # empty separator row
    if total_equity_value or total_cost_basis:
        screen_grid.SetCellValue(row_count, 1, "Equity Totals:")
        if (not total_equity_value_fail) and total_equity_value:
            screen_grid.SetCellValue(row_count, 4, config.locale.currency(total_equity_value, grouping=True))
            screen_grid.SetCellAlignment(row_count, 4, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
        if (not cost_basis_fail) and total_cost_basis:
            screen_grid.SetCellValue(row_count, 5, config.locale.currency(total_cost_basis, grouping=True))
            screen_grid.SetCellAlignment(row_count, 5, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
        if (not (total_equity_value_fail or cost_basis_fail)) and total_cost_basis:
            total_equity_value_change = ((total_equity_value / total_cost_basis) - 1) * 100
            screen_grid.SetCellValue(row_count, 6, "%.2f" % (total_equity_value_change) + "%")
            if total_equity_value_change < 0:
                screen_grid.SetCellTextColour(row_count, 6, config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
            screen_grid.SetCellAlignment(row_count, 6, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
    total_portfolio_value = None
    if not total_equity_value_fail:
        total_portfolio_value = total_equity_value + account_obj.available_cash
    row_count += 1
    screen_grid.SetCellValue(row_count, 1, "Cash:")
    if total_portfolio_value:
        percentage_cash = (float(account_obj.available_cash) / total_portfolio_value) * 100
        screen_grid.SetCellValue(row_count, 2, "%.2f" % (percentage_cash) + "%")
        screen_grid.SetCellAlignment(row_count, 2, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
    screen_grid.SetCellValue(row_count, 4, config.locale.currency(account_obj.available_cash, grouping=True))
    screen_grid.SetCellAlignment(row_count, 4, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
    row_count += 2
    screen_grid.SetCellValue(row_count, 1, "Total Portfolio Value:")
    if total_portfolio_value:
        screen_grid.SetCellValue(row_count, 4, config.locale.currency(total_portfolio_value, grouping=True))
        screen_grid.SetCellAlignment(row_count, 4, horiz=wx.ALIGN_RIGHT, vert=wx.ALIGN_BOTTOM)
    screen_grid.AutoSizeColumns()
    return screen_grid
def create_spread_sheet_for_one_stock(
        wxWindow,
        ticker,
        size=gui_position.StockDataPage.create_spread_sheet_for_one_stock_size_position_tuple[0],
        position=gui_position.StockDataPage.create_spread_sheet_for_one_stock_size_position_tuple[1],
        enable_editing=False,
        search_term=None
        ):
    """Build a two-column (attribute, value) grid for a single stock.

    When *search_term* is given, only attributes whose name or value contains
    the term are shown ('symbol' and 'firm_name' are always kept). Returns
    the grid, or None when the ticker has no data or no attributes match."""
    stock = utils.return_stock_by_symbol(ticker)
    if not stock:
        logging.info('Ticker "%s" does not appear to have basic data' % ticker)
        return
    attribute_list = []
    num_columns = 2  # attribute name + value
    num_rows = 0
    # Make one row for each attribute to be included.
    num_attributes = 0
    if stock:
        for attribute in dir(stock):
            if not attribute.startswith('_'):
                if attribute not in config.CLASS_ATTRIBUTES:
                    if search_term:  # searching within stock data; None on normal load
                        if str(search_term).lower() in str(attribute).lower() or str(search_term).lower() in str(getattr(stock, attribute)).lower():
                            pass
                        else:
                            if attribute in ['symbol', 'firm_name']:
                                pass
                            else:
                                continue
                    if attribute not in attribute_list:
                        num_attributes += 1
                        attribute_list.append(str(attribute))
                    else:
                        # Typo fix in log message ("dublicate").
                        logging.info("%s.%s is a duplicate" % (ticker, attribute))
        if num_rows < num_attributes:
            num_rows = num_attributes
    screen_grid = wx.grid.Grid(wxWindow, -1, size=size, pos=position)
    screen_grid.CreateGrid(num_rows, num_columns)
    screen_grid.EnableEditing(enable_editing)
    if not attribute_list:
        logging.warning('attribute list empty')
        return
    attribute_list.sort(key=lambda x: x.lower())
    # Promote the identifying attributes to the top.
    attribute_list.insert(0, attribute_list.pop(attribute_list.index('symbol')))
    attribute_list.insert(1, attribute_list.pop(attribute_list.index('firm_name')))
    # fill in grid
    col_count = 1  # data goes in the second column
    row_count = 0
    for attribute in attribute_list:
        # set the column labels on the first pass only
        if row_count == 0:
            screen_grid.SetColLabelValue(0, "attribute")
            screen_grid.SetColLabelValue(1, "data")
        try:
            # Hoist the value string: the original recomputed getattr() four times.
            value_text = str(getattr(stock, attribute))
            screen_grid.SetCellValue(row_count, col_count - 1, str(attribute))
            screen_grid.SetCellValue(row_count, col_count, value_text)
            # Color negative values red. Bug fix: the original condition
            # `a or b and c` parsed as `a or (b and c)`, so a lone "(" was
            # wrongly treated as negative; parentheses restore the intended
            # grouping (negative marker AND more than one character).
            if value_text.startswith(("(", "-")) and len(value_text) > 1:
                screen_grid.SetCellTextColour(row_count, col_count, config.NEGATIVE_SPREADSHEET_VALUE_COLOR_HEX)
        except Exception as e:
            logging.error(e)
        row_count += 1
    screen_grid.AutoSizeColumns()
    return screen_grid
def create_spread_sheet_for_data_fields(
    wxWindow,
    size = gui_position.DataFieldPage.create_spread_sheet_for_one_stock_size_position_tuple[0],
    position = gui_position.DataFieldPage.create_spread_sheet_for_one_stock_size_position_tuple[1],
    enable_editing = False,
    ):
    """Build a one-column grid listing every known stock attribute name.

    Returns the populated wx.grid.Grid, or None if no attributes are found."""
    attribute_list = []
    num_columns = 1  # a single column holding the attribute name
    num_rows = 0
    num_attributes = 0
    # Refresh the global attribute set from the database before listing it.
    db.load_GLOBAL_ATTRIBUTE_SET()
    attribute_list = list(config.GLOBAL_ATTRIBUTE_SET)
    if not 'symbol' in attribute_list:
        attribute_list.append('symbol')
    if not 'firm_name' in attribute_list:
        attribute_list.append('firm_name')
    # NOTE(review): the substring "__dict_" also matches "__dict__"; presumably
    # this drops serialization artifacts -- confirm whether "__dict__" was meant.
    attribute_list = [x for x in attribute_list if not "__dict_" in x]
    num_attributes = len(attribute_list)
    if num_rows < num_attributes:
        num_rows = num_attributes
    grid = wx.grid.Grid(wxWindow, -1, size=size, pos=position)
    logging.info("{},{}".format(num_rows, num_columns))
    grid.CreateGrid(num_rows, num_columns)
    grid.EnableEditing(enable_editing)
    if not attribute_list:
        logging.warning('attribute list empty')
        return
    # Sort primarily by the final two characters (groups suffixed variants), then by name.
    attribute_list.sort(key = lambda x: (x[-2:], x))
    # adjust list order for important terms
    # NOTE(review): popping/inserting while iterating reversed(attribute_list)
    # mutates the list under the iterator and can skip elements; it also
    # assumes every name has >= 3 characters (x[-3]) -- TODO confirm intent.
    for attribute in reversed(attribute_list):
        if not attribute[-3] == "_":
            attribute_list.insert(0, attribute_list.pop(attribute_list.index(attribute)))
    attribute_list.insert(0, attribute_list.pop(attribute_list.index('symbol')))
    attribute_list.insert(1, attribute_list.pop(attribute_list.index('firm_name')))
    # fill in grid
    row_count = 0
    for attribute in attribute_list:
        # set the column label on the first pass only
        if row_count == 0:
            grid.SetColLabelValue(0, "attribute")
        try:
            # Add attribute name
            grid.SetCellValue(row_count, 0, str(attribute))
        except Exception as e:
            logging.error(e)
        row_count += 1
    grid.AutoSizeColumns()
    return grid
# ###########################################################################################
# ###################### Screening functions ###############################################
def screen_pe_less_than_10():
    """Return the stocks from GLOBAL_STOCK_LIST whose P/E ratio is below 10.

    Stocks with a missing/unparseable PERatio are skipped (errors are logged)."""
    global GLOBAL_STOCK_LIST
    screen = []
    for stock in GLOBAL_STOCK_LIST:
        try:
            pe = stock.PERatio
            if pe and float(pe) < 10:
                screen.append(stock)
        except Exception as e:
            logging.error(e)
    return screen
# ###########################################################################################
# end of line
|
app.py | """Provides a class for creating custom HTTP servers."""
import functools
import queue
import socket
import threading
import collect
from . import server
from . import http
from .logger import Logger
__all__ = ['ActiveRequest', 'App']
sender_q = queue.Queue()

def sender_d():
    """Daemon loop: send responses in the order their requests were queued."""
    while True:
        request = sender_q.get()
        # Block until this request's response is ready, preserving FIFO order
        # across concurrently-handled requests on the same connection.
        response, conn, addr = request._queue.get()
        response.send(conn, addr)
        sender_q.task_done()
        request._queue.task_done()

threading.Thread(target=sender_d, daemon=True).start()
def _instantiate(cls):
return cls()
@_instantiate
class ActiveRequest(http.Request, threading.local):
    """A thread local representing the current HTTP request object.

    The @_instantiate decorator replaces the class with a single instance at
    import time; because the class also derives from threading.local, each
    thread sees its own independent request state on that one object."""
    def __init__(self):
        # Seed each thread's view with a placeholder GET request so attribute
        # access is safe before any real request has been assigned.
        name = threading.current_thread().name
        super().__init__(raw=(
            'GET / HTTP/1.1\r\n'
            '\r\n'
            f'thread_name={name}'))
    def _set_active(self, req):
        # Re-parse this thread's state from the incoming request's raw bytes,
        # making *req* the "current" request for the calling thread.
        super().__init__(raw=req.raw_request)
class App(server.Server):
    """Class with which to define the endpoints of an HTTP server."""
    # Seconds a keep-alive connection may stay idle before being dropped.
    timeout = 60

    def __init__(self, address, port):
        super().__init__(address, port)
        # HTTP method -> {url path -> handler}, filled by @app.register(...).
        self.http_handlers = {}
        # exception type -> handler, filled by @app.register_exception(...).
        self.error_handlers = {}
        self.resolver = server.URLResolver()

    def register(self, url, *methods):
        """Register an endpoint at the server. The handler will only be called
        when the request matches the given HTTP method here. Handlers for the
        same url but for different methods are allowed. The handler shall
        return the string of the page contents or a tuple
        (status_code, headers, content) used to create the HTTP response."""
        if not methods:
            methods = ('GET', )
        url_path = collect.Path(url)
        def decorator(func):
            for method in methods:
                method = method.upper()
                method_handlers = self.http_handlers.get(method)
                if not method_handlers:
                    self.http_handlers[method] = {url_path: func}
                else:
                    method_handlers[url_path] = func
            return func
        return decorator

    def _return_file(self, file):
        """Serve a static *file*; raises HTTPException(403) when unreadable."""
        try:
            file_p = file.open('rb')
        except PermissionError:
            raise http.HTTPException(file, 403)
        with file_p:
            content = file_p.read()
        return 200, {'Content-Type': file.type}, content

    def register_exception(self, type):
        """Register an exception handler. The handler is called with the active
        exception as the first argument. The handler shall return a tuple
        (status_code, content)."""
        def decorator(func):
            self.error_handlers[type] = func
            return func
        return decorator

    def _handle_request(self, req: http.Request):
        """Dispatch *req* to a registered handler, falling back to static
        files; returns (status_code, headers, body_bytes).

        Raises http.HTTPException(404) when neither a handler nor a file
        matches the request path."""
        method_handlers = self.http_handlers.get(req.method, {})
        if req.path in method_handlers:
            handler = method_handlers[req.path]
        else:
            try:
                file = self.resolver[req.path]
                if not file.is_file():
                    raise KeyError
            except KeyError:
                raise http.HTTPException(req.path, 404)
            else:
                handler = functools.partial(self._return_file, file)
        header = {'Content-Type': 'text/html'}
        response = handler()
        if isinstance(response, tuple):
            status_code, user_header, text = response
        else:
            status_code, user_header, text = 200, {}, response
        header.update(user_header)
        # Encode str bodies; bytes (no .encode attribute) pass through as-is.
        return status_code, header, getattr(text, 'encode', lambda: text)()

    def _handle_exception(self, error):
        """Map an unhandled *error* to a (status, headers, body) response
        using the first matching registered exception handler."""
        Logger.exception('An exception was raised:')
        try:
            handler = next(
                func
                for type, func in self.error_handlers.items()
                if isinstance(error, type))
        except StopIteration:
            new_exc = http.HTTPException(
                f'An exception ({type(error).__name__}: {error}) went'
                ' unhandled')
            text = new_exc.format()
            status_code = new_exc.status_code
        else:
            status_code, text = handler(error)
        return status_code, {'Content-Type': 'text/html'}, text.encode()

    def _set_up_exception_handling(self, connection, address, req):
        """Thread target: run the request, converting any exception into an
        error response, then queue the response for the FIFO sender daemon."""
        ActiveRequest._set_active(req)
        try:
            args = self._handle_request(req)
        except Exception as error:
            args = self._handle_exception(error)
        res = http.Response(*args, req)
        req._queue.put((res, connection, address))

    def handle_connection(self, connection, address):
        """Serve one client connection: read requests until timeout/close and
        answer each on its own thread (responses are sent in FIFO order)."""
        connection.settimeout(self.timeout)
        threads = []
        while True:
            try:
                req = http.Request(connection, address, self.max_recv_size)
                # Bug fix: a leftover debug print() wrote the raw request to
                # stdout; route it through the logger instead.
                Logger.log('Received request:\n%s', req.raw_request)
            except socket.timeout:
                Logger.log('Timed out: %s:%s', *address)
                break
            except IOError:
                Logger.log('Client closed: %s:%d', *address)
                break
            else:
                sender_q.put(req)
                new_thread = threading.Thread(
                    target=self._set_up_exception_handling,
                    args=(connection, address, req),
                )
                threads.append(new_thread)
                new_thread.start()
                if req.headers.get('Connection') == 'close':
                    Logger.log('Deliberately closing %s:%d', *address)
                    break
        sender_q.join()
        for thread in threads:
            thread.join()
|
egg_pair.py | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import gc
import logging
import os
import shutil
import signal
import time
import threading
import platform
from collections import defaultdict
import grpc
import numpy as np
from collections.abc import Iterable
from eggroll.core.client import ClusterManagerClient
from eggroll.core.command.command_router import CommandRouter
from eggroll.core.command.command_service import CommandServicer
from eggroll.core.conf_keys import SessionConfKeys, \
ClusterManagerConfKeys, RollPairConfKeys, CoreConfKeys
from eggroll.core.constants import ProcessorTypes, ProcessorStatus, SerdesTypes
from eggroll.core.datastructure import create_executor_pool
from eggroll.core.datastructure.broker import FifoBroker
from eggroll.core.grpc.factory import GrpcChannelFactory
from eggroll.core.meta_model import ErPair
from eggroll.core.meta_model import ErTask, ErProcessor, ErEndpoint
from eggroll.core.pair_store.format import ArrayByteBuffer, PairBinReader
from eggroll.core.proto import command_pb2_grpc, transfer_pb2_grpc
from eggroll.core.transfer.transfer_service import GrpcTransferServicer, \
TransferService
from eggroll.core.utils import _exception_logger
from eggroll.core.utils import hash_code
from eggroll.core.utils import set_static_er_conf, get_static_er_conf
from eggroll.roll_pair import create_adapter, create_serdes, create_functor
from eggroll.roll_pair.transfer_pair import TransferPair, BatchBroker
from eggroll.roll_pair.task.storage import PutBatchTask
from eggroll.roll_pair.transfer_pair import TransferPair
from eggroll.roll_pair.utils.pair_utils import generator, partitioner, \
set_data_dir
from eggroll.utils.log_utils import get_logger
from eggroll.utils.profile import get_system_metric
L = get_logger()
class EggPair(object):
    """Egg-side task executor.

    Receives ``ErTask`` commands routed by the command server and runs them
    against the local pair stores (get/put, map/reduce, joins, etc.).
    """

    def __init__(self):
        # Serdes used for functor payloads and command results; cloudpickle
        # so arbitrary user callables can be shipped from the roll site.
        self.functor_serdes = create_serdes(SerdesTypes.CLOUD_PICKLE)
def __partitioner(self, hash_func, total_partitions):
return lambda k: hash_func(k) % total_partitions
    def _run_unary(self, func, task, shuffle=False, reduce_op=None):
        """Run a single-input task.

        *func* is a wrapper taking ``(iterator, key_serdes, value_serdes, sink)``;
        the sink is a write batch (no shuffle) or a batch broker (shuffle).
        With ``shuffle=True`` the output is scattered to the owning partitions
        and simultaneously received from peer eggs; *reduce_op*, when given,
        merges colliding keys on the receiving side.
        """
        input_store_head = task._job._inputs[0]
        output_store_head = task._job._outputs[0]
        # NOTE(review): key and value serdes are both created from the store's
        # single `_serdes` field — presumably they are always identical per
        # store; confirm before diverging them.
        input_key_serdes = create_serdes(input_store_head._store_locator._serdes)
        input_value_serdes = create_serdes(input_store_head._store_locator._serdes)
        output_key_serdes = create_serdes(output_store_head._store_locator._serdes)
        output_value_serdes = create_serdes(output_store_head._store_locator._serdes)
        # In/out serdes must agree: data is passed through without re-encoding.
        if input_key_serdes != output_key_serdes or \
                input_value_serdes != output_value_serdes:
            raise ValueError(f"input key-value serdes:{(input_key_serdes, input_value_serdes)}"
                             f"differ from output key-value serdes:{(output_key_serdes, output_value_serdes)}")
        ##### [MAP] Step 3: shuffle is True, enter if branch.
        if shuffle:
            from eggroll.roll_pair.transfer_pair import TransferPair
            input_total_partitions = input_store_head._store_locator._total_partitions
            output_total_partitions = output_store_head._store_locator._total_partitions
            output_store = output_store_head
            my_server_node_id = get_static_er_conf().get('server_node_id', None)
            shuffler = TransferPair(transfer_id=task._job._id)
            # Only receive shuffled data if this node owns the output partition.
            if not task._outputs or \
                    (my_server_node_id is not None
                     and my_server_node_id != task._outputs[0]._processor._server_node_id):
                store_future = None
            else:
                #### [MAP] Step 4: start a thread to receive k,v from
                #### grpc server, and store them to local db. grpc server
                #### will receive k,v from other egg.
                store_future = shuffler.store_broker(
                    store_partition=task._outputs[0],
                    is_shuffle=True,
                    total_writers=input_total_partitions,
                    reduce_op=reduce_op)
            # Only scatter if this node owns the input partition.
            if not task._inputs or \
                    (my_server_node_id is not None
                     and my_server_node_id != task._inputs[0]._processor._server_node_id):
                scatter_future = None
            else:
                shuffle_broker = FifoBroker()
                write_bb = BatchBroker(shuffle_broker)
                try:
                    #### [MAP] Step 5: start a thread to scatter processed k,v to
                    #### other eggs. Why called processed k,v ? see step 6.
                    scatter_future = shuffler.scatter(
                        input_broker=shuffle_broker,
                        partition_function=partitioner(hash_func=hash_code, total_partitions=output_total_partitions),
                        output_store=output_store)
                    #### [MAP] Step 6: iterate k,v from input, then process
                    #### it, finally put them into broker. 'func' is 'map_wrapper'
                    #### function. Notice 'map_wrapper' put all processed k,v into
                    #### a broker called 'write_bb', and the 'write_bb' is the same
                    #### as 'shuffle_broker' in Step 5.
                    with create_adapter(task._inputs[0]) as input_db, \
                            input_db.iteritems() as rb:
                        func(rb, input_key_serdes, input_value_serdes, write_bb)
                finally:
                    # Always unblock the scatter thread, even if func raised.
                    write_bb.signal_write_finish()
            # Block until both sides of the shuffle complete.
            if scatter_future:
                scatter_results = scatter_future.result()
            else:
                scatter_results = 'no scatter for this partition'
            if store_future:
                store_results = store_future.result()
            else:
                store_results = 'no store for this partition'
        else:  # no shuffle: stream input straight into the output write batch
            with create_adapter(task._inputs[0]) as input_db, \
                    input_db.iteritems() as rb, \
                    create_adapter(task._outputs[0], options=task._job._options) as db, \
                    db.new_batch() as wb:
                func(rb, input_key_serdes, input_value_serdes, wb)
            L.trace(f"close_store_adatper:{task._inputs[0]}")
def _run_binary(self, func, task):
left_key_serdes = create_serdes(task._inputs[0]._store_locator._serdes)
left_value_serdes = create_serdes(task._inputs[0]._store_locator._serdes)
right_key_serdes = create_serdes(task._inputs[1]._store_locator._serdes)
right_value_serdes = create_serdes(task._inputs[1]._store_locator._serdes)
output_key_serdes = create_serdes(task._outputs[0]._store_locator._serdes)
output_value_serdes = create_serdes(task._outputs[0]._store_locator._serdes)
if left_key_serdes != output_key_serdes or \
left_value_serdes != output_value_serdes:
raise ValueError(f"input key-value serdes:{(left_key_serdes, left_value_serdes)}"
f"differ from output key-value serdes:{(output_key_serdes, output_value_serdes)}")
with create_adapter(task._inputs[0]) as left_adapter, \
create_adapter(task._inputs[1]) as right_adapter, \
create_adapter(task._outputs[0]) as output_adapter, \
left_adapter.iteritems() as left_iterator, \
right_adapter.iteritems() as right_iterator, \
output_adapter.new_batch() as output_writebatch:
try:
func(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch)
except Exception as e:
raise EnvironmentError("exec task:{} error".format(task), e)
    @_exception_logger
    def run_task(self, task: ErTask):
        """Execute one ErTask, dispatching on ``task._name``.

        Value-producing tasks (get, count, reduce, aggregate, withStores)
        return an ``ErPair``; all other tasks return *task* itself as an ack.
        User functions arrive serialized in ``task._job._functors``.
        """
        if L.isEnabledFor(logging.TRACE):
            L.trace(f'[RUNTASK] start. task_name={task._name}, inputs={task._inputs}, outputs={task._outputs}, task_id={task._id}')
        else:
            L.debug(f'[RUNTASK] start. task_name={task._name}, task_id={task._id}')
        functors = task._job._functors
        result = task
        if task._name == 'get':
            # Single-key lookup in the local store.
            # TODO:1: move to create_serdes
            f = create_functor(functors[0]._body)
            with create_adapter(task._inputs[0]) as input_adapter:
                L.trace(f"get: key: {self.functor_serdes.deserialize(f._key)}, path: {input_adapter.path}")
                value = input_adapter.get(f._key)
                result = ErPair(key=f._key, value=value)
        elif task._name == 'getAll':
            # Stream the whole partition out through the transfer service.
            tag = f'{task._id}'
            er_pair = create_functor(functors[0]._body)
            input_store_head = task._job._inputs[0]
            key_serdes = create_serdes(input_store_head._store_locator._serdes)

            def generate_broker():
                # er_pair._key, when present, carries a deserialized limit on
                # how many pairs to emit; broker is cleaned up when exhausted.
                with create_adapter(task._inputs[0]) as db, db.iteritems() as rb:
                    limit = None if er_pair._key is None else key_serdes.deserialize(er_pair._key)
                    try:
                        yield from TransferPair.pair_to_bin_batch(rb, limit=limit)
                    finally:
                        TransferService.remove_broker(tag)
            TransferService.set_broker(tag, generate_broker())
        elif task._name == 'count':
            with create_adapter(task._inputs[0]) as input_adapter:
                result = ErPair(key=self.functor_serdes.serialize('result'),
                                value=self.functor_serdes.serialize(input_adapter.count()))
        elif task._name == 'putBatch':
            # Receive pre-batched binary pairs for one output partition.
            partition = task._outputs[0]
            tag = f'{task._id}'
            PutBatchTask(tag, partition).run()
        elif task._name == 'putAll':
            #### [EGG] Step 1: start save to db thread.
            output_partition = task._outputs[0]
            tag = f'{task._id}'
            L.trace(f'egg_pair putAll: transfer service tag={tag}')
            tf = TransferPair(tag)
            # Blocks until the store-side future completes.
            store_broker_result = tf.store_broker(output_partition, False).result()
            # TODO:2: should wait complete?, command timeout?
        elif task._name == 'put':
            f = create_functor(functors[0]._body)
            with create_adapter(task._inputs[0]) as input_adapter:
                value = input_adapter.put(f._key, f._value)
            #result = ErPair(key=f._key, value=bytes(value))
        elif task._name == 'destroy':
            input_store_locator = task._inputs[0]._store_locator
            namespace = input_store_locator._namespace
            name = input_store_locator._name
            store_type = input_store_locator._store_type
            L.debug(f'destroying store_type={store_type}, namespace={namespace}, name={name}')
            if name == '*':
                # Wildcard destroy: remove whole namespace dirs on disk.
                from eggroll.roll_pair.utils.pair_utils import get_db_path, get_data_dir
                target_paths = list()
                if store_type == '*':
                    data_dir = get_data_dir()
                    store_types = os.listdir(data_dir)
                    for store_type in store_types:
                        target_paths.append('/'.join([data_dir, store_type, namespace]))
                else:
                    db_path = get_db_path(task._inputs[0])
                    target_paths.append(db_path[:db_path.rfind('*')])
                # Safety check: never delete outside the data dir, the data
                # dir itself, or the filesystem root.
                real_data_dir = os.path.realpath(get_data_dir())
                for path in target_paths:
                    realpath = os.path.realpath(path)
                    if os.path.exists(path):
                        if realpath == "/" \
                                or realpath == real_data_dir \
                                or not realpath.startswith(real_data_dir):
                            raise ValueError(f'trying to delete a dangerous path: {realpath}')
                        else:
                            shutil.rmtree(path)
            else:
                # Single-store destroy is delegated to the adapter.
                options = task._job._options
                with create_adapter(task._inputs[0], options=options) as input_adapter:
                    input_adapter.destroy(options=options)
        elif task._name == 'delete':
            f = create_functor(functors[0]._body)
            with create_adapter(task._inputs[0]) as input_adapter:
                if input_adapter.delete(f._key):
                    L.trace("delete k success")
        elif task._name == 'mapValues':
            f = create_functor(functors[0]._body)

            def map_values_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
                # Keys pass through as raw bytes; only values are transformed.
                for k_bytes, v_bytes in input_iterator:
                    v = value_serdes.deserialize(v_bytes)
                    output_writebatch.put(k_bytes, value_serdes.serialize(f(v)))
            self._run_unary(map_values_wrapper, task)
        elif task._name == 'map':
            f = create_functor(functors[0]._body)

            #### [MAP] Step 1: define a function, running actual compute.
            def map_wrapper(input_iterator, key_serdes, value_serdes, shuffle_broker):
                # Keys may change, so output must be shuffled to new partitions.
                for k_bytes, v_bytes in input_iterator:
                    k1, v1 = f(key_serdes.deserialize(k_bytes), value_serdes.deserialize(v_bytes))
                    shuffle_broker.put((key_serdes.serialize(k1), value_serdes.serialize(v1)))
            #### [MAP] Step 2: actual run 'map' process logical.
            self._run_unary(map_wrapper, task, shuffle=True)
        elif task._name == 'reduce':
            # Partition-local fold; cross-partition merge happens roll-side.
            seq_op_result = self.aggregate_seq(task=task)
            result = ErPair(key=self.functor_serdes.serialize(task._inputs[0]._id),
                            value=self.functor_serdes.serialize(seq_op_result))
        elif task._name == 'aggregate':
            seq_op_result = self.aggregate_seq(task=task)
            result = ErPair(key=self.functor_serdes.serialize(task._inputs[0]._id),
                            value=self.functor_serdes.serialize(seq_op_result))
        elif task._name == 'mapPartitions':
            reduce_op = create_functor(functors[1]._body)
            shuffle = create_functor(functors[2]._body)

            def map_partitions_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
                f = create_functor(functors[0]._body)
                value = f(generator(key_serdes, value_serdes, input_iterator))
                if isinstance(value, Iterable):
                    for k1, v1 in value:
                        # Shuffling writes to a broker (single tuple arg);
                        # otherwise directly to the write batch (two args).
                        if shuffle:
                            output_writebatch.put((key_serdes.serialize(k1), value_serdes.serialize(v1)))
                        else:
                            output_writebatch.put(key_serdes.serialize(k1), value_serdes.serialize(v1))
                else:
                    # Scalar result: keyed by the iterator's current key.
                    key = input_iterator.key()
                    output_writebatch.put((key, value_serdes.serialize(value)))
            self._run_unary(map_partitions_wrapper, task, shuffle=shuffle, reduce_op=reduce_op)
        elif task._name == 'collapsePartitions':
            def collapse_partitions_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
                # Reduce the whole partition to one value, keyed by its last key.
                f = create_functor(functors[0]._body)
                value = f(generator(key_serdes, value_serdes, input_iterator))
                if input_iterator.last():
                    key = input_iterator.key()
                    output_writebatch.put(key, value_serdes.serialize(value))
            self._run_unary(collapse_partitions_wrapper, task)
        elif task._name == 'flatMap':
            shuffle = create_functor(functors[1]._body)

            def flat_map_wraaper(input_iterator, key_serdes, value_serdes, output_writebatch):
                f = create_functor(functors[0]._body)
                for k1, v1 in input_iterator:
                    for k2, v2 in f(key_serdes.deserialize(k1), value_serdes.deserialize(v1)):
                        if shuffle:
                            output_writebatch.put((key_serdes.serialize(k2), value_serdes.serialize(v2)))
                        else:
                            output_writebatch.put(key_serdes.serialize(k2), value_serdes.serialize(v2))
            self._run_unary(flat_map_wraaper, task, shuffle=shuffle)
        elif task._name == 'glom':
            def glom_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
                # Collect all pairs of the partition into one list, keyed by
                # the last raw key seen.
                k_tmp = None
                v_list = []
                for k, v in input_iterator:
                    v_list.append((key_serdes.deserialize(k), value_serdes.deserialize(v)))
                    k_tmp = k
                if k_tmp is not None:
                    output_writebatch.put(k_tmp, value_serdes.serialize(v_list))
            self._run_unary(glom_wrapper, task)
        elif task._name == 'sample':
            def sample_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
                # Bernoulli sampling with a seeded RNG for reproducibility.
                fraction = create_functor(functors[0]._body)
                seed = create_functor(functors[1]._body)
                input_iterator.first()
                random_state = np.random.RandomState(seed)
                for k, v in input_iterator:
                    if random_state.rand() < fraction:
                        output_writebatch.put(k, v)
            self._run_unary(sample_wrapper, task)
        elif task._name == 'filter':
            def filter_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
                f = create_functor(functors[0]._body)
                for k, v in input_iterator:
                    if f(key_serdes.deserialize(k), value_serdes.deserialize(v)):
                        output_writebatch.put(k, v)
            self._run_unary(filter_wrapper, task)
        elif task._name == 'join':
            def merge_join_wrapper(left_iterator, left_key_serdes, left_value_serdes,
                                   right_iterator, right_key_serdes, right_value_serdes,
                                   output_writebatch):
                # Sort-merge join: both stores must iterate in key order.
                if not left_iterator.adapter.is_sorted() or not right_iterator.adapter.is_sorted():
                    raise RuntimeError(f"merge join cannot be applied: not both store types support sorting. "
                                       f"left type: {type(left_iterator.adapter)}, is_sorted: {left_iterator.adapter.is_sorted()}; "
                                       f"right type: {type(right_iterator.adapter)}, is_sorted: {right_iterator.adapter.is_sorted()}")
                f = create_functor(functors[0]._body)
                is_same_serdes = left_key_serdes == right_key_serdes
                l_iter = iter(left_iterator)
                r_iter = iter(right_iterator)
                try:
                    k_left, v_left_bytes = next(l_iter)
                    k_right_raw, v_right_bytes = next(r_iter)
                    # Right keys are normalized to the left serdes so raw
                    # byte comparison is meaningful.
                    if is_same_serdes:
                        k_right = k_right_raw
                    else:
                        k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                    while True:
                        while k_right < k_left:
                            k_right_raw, v_right_bytes = next(r_iter)
                            if is_same_serdes:
                                k_right = k_right_raw
                            else:
                                k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                        while k_left < k_right:
                            k_left, v_left_bytes = next(l_iter)
                        if k_left == k_right:
                            output_writebatch.put(k_left,
                                                  left_value_serdes.serialize(
                                                      f(left_value_serdes.deserialize(v_left_bytes),
                                                        right_value_serdes.deserialize(v_right_bytes))))
                            k_left, v_left_bytes = next(l_iter)
                        # skips next(r_iter) to avoid duplicate codes for the 3rd time
                except StopIteration as e:
                    # Either side exhausted: inner join is complete.
                    return

            def hash_join_wrapper(left_iterator, left_key_serdes, left_value_serdes,
                                  right_iterator, right_key_serdes, right_value_serdes,
                                  output_writebatch):
                # Point lookups against the right store for each left key.
                f = create_functor(functors[0]._body)
                is_diff_serdes = type(left_key_serdes) != type(right_key_serdes)
                for k_left, l_v_bytes in left_iterator:
                    if is_diff_serdes:
                        k_left = right_key_serdes.serialize(left_key_serdes.deserialize(k_left))
                    r_v_bytes = right_iterator.adapter.get(k_left)
                    if r_v_bytes:
                        output_writebatch.put(k_left,
                                              left_value_serdes.serialize(
                                                  f(left_value_serdes.deserialize(l_v_bytes),
                                                    right_value_serdes.deserialize(r_v_bytes))))
            join_type = task._job._options.get('join_type', 'merge')
            if join_type == 'merge':
                self._run_binary(merge_join_wrapper, task)
            else:
                self._run_binary(hash_join_wrapper, task)
        elif task._name == 'subtractByKey':
            def merge_subtract_by_key_wrapper(left_iterator, left_key_serdes, left_value_serdes,
                                              right_iterator, right_key_serdes, right_value_serdes,
                                              output_writebatch):
                # Sort-merge difference: emit left pairs whose key is absent
                # on the right. Both stores must iterate in key order.
                if not left_iterator.adapter.is_sorted() or not right_iterator.adapter.is_sorted():
                    raise RuntimeError(f"merge subtract_by_key cannot be applied: not both store types support sorting. "
                                       f"left type: {type(left_iterator.adapter)}, is_sorted: {left_iterator.adapter.is_sorted()}; "
                                       f"right type: {type(right_iterator.adapter)}, is_sorted: {right_iterator.adapter.is_sorted()}")
                is_same_serdes = left_key_serdes == right_key_serdes
                l_iter = iter(left_iterator)
                r_iter = iter(right_iterator)
                # is_left_stopped / is_equal track which iterator raised
                # StopIteration so the drain phase below knows what remains.
                is_left_stopped = False
                is_equal = False
                try:
                    k_left, v_left = next(l_iter)
                except StopIteration:
                    is_left_stopped = True
                    k_left = None
                    v_left = None
                try:
                    k_right_raw, v_right = next(r_iter)
                except StopIteration:
                    is_left_stopped = False
                    k_right_raw = None
                    v_right = None
                # left is None, output must be None
                if k_left is None:
                    return
                try:
                    if k_left is None:
                        raise StopIteration()
                    if k_right_raw is None:
                        raise StopIteration()
                    if is_same_serdes:
                        k_right = k_right_raw
                    else:
                        k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                    while True:
                        is_left_stopped = False
                        if k_left < k_right:
                            # Key only on the left: keep it.
                            output_writebatch.put(k_left, v_left)
                            k_left, v_left = next(l_iter)
                            is_left_stopped = True
                        elif k_left == k_right:
                            # Key on both sides: drop it, advance both.
                            is_equal = True
                            is_left_stopped = True
                            k_left, v_left = next(l_iter)
                            is_left_stopped = False
                            is_equal = False
                            k_right_raw, v_right = next(r_iter)
                            k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                        else:
                            # Right key smaller: advance right only.
                            k_right_raw, v_right = next(r_iter)
                            k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                            is_left_stopped = True
                except StopIteration as e:
                    pass
                # Drain whatever the left side still holds.
                if not is_left_stopped and not is_equal:
                    try:
                        if k_left is not None and v_left is not None:
                            output_writebatch.put(k_left, v_left)
                        while True:
                            k_left, v_left = next(l_iter)
                            output_writebatch.put(k_left, v_left)
                    except StopIteration as e:
                        pass
                elif is_left_stopped and not is_equal and k_left is not None:
                    output_writebatch.put(k_left, v_left)
                return

            def hash_subtract_by_key_wrapper(left_iterator, left_key_serdes, left_value_serdes,
                                             right_iterator, right_key_serdes, right_value_serdes,
                                             output_writebatch):
                # Point lookups: keep left pairs missing from the right store.
                is_diff_serdes = type(left_key_serdes) != type(right_key_serdes)
                for k_left, v_left in left_iterator:
                    if is_diff_serdes:
                        k_left = right_key_serdes.serialize(left_key_serdes.deserialize(k_left))
                    v_right = right_iterator.adapter.get(k_left)
                    if v_right is None:
                        output_writebatch.put(k_left, v_left)
            subtract_by_key_type = task._job._options.get('subtract_by_key_type', 'merge')
            if subtract_by_key_type == 'merge':
                self._run_binary(merge_subtract_by_key_wrapper, task)
            else:
                self._run_binary(hash_subtract_by_key_wrapper, task)
        elif task._name == 'union':
            def merge_union_wrapper(left_iterator, left_key_serdes, left_value_serdes,
                                    right_iterator, right_key_serdes, right_value_serdes,
                                    output_writebatch):
                # Sort-merge union: keys on one side pass through; keys on
                # both sides are combined with functor f.
                if not left_iterator.adapter.is_sorted() or not right_iterator.adapter.is_sorted():
                    raise RuntimeError(f"merge union cannot be applied: not both store types support sorting. "
                                       f"left type: {type(left_iterator.adapter)}, is_sorted: {left_iterator.adapter.is_sorted()}; "
                                       f"right type: {type(right_iterator.adapter)}, is_sorted: {right_iterator.adapter.is_sorted()}")
                f = create_functor(functors[0]._body)
                is_same_serdes = left_key_serdes == right_key_serdes
                l_iter = iter(left_iterator)
                r_iter = iter(right_iterator)
                k_left = None
                v_left_bytes = None
                k_right = None
                v_right_bytes = None
                none_none = (None, None)
                # is_left_stopped records which iterator raised StopIteration;
                # the tail-drain below depends on it.
                is_left_stopped = False
                is_equal = False
                try:
                    k_left, v_left_bytes = next(l_iter, none_none)
                    k_right_raw, v_right_bytes = next(r_iter, none_none)
                    if k_left is None and k_right_raw is None:
                        return
                    elif k_left is None:
                        is_left_stopped = True
                        raise StopIteration()
                    elif k_right_raw is None:
                        is_left_stopped = False
                        raise StopIteration()
                    if is_same_serdes:
                        k_right = k_right_raw
                    else:
                        k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                    while True:
                        is_left_stopped = False
                        while k_right < k_left:
                            # Right-only key: copy through (re-encoding if needed).
                            if is_same_serdes:
                                output_writebatch.put(k_right, v_right_bytes)
                            else:
                                output_writebatch.put(k_right, left_value_serdes.serialize(right_value_serdes.deserialize(v_right_bytes)))
                            k_right_raw, v_right_bytes = next(r_iter)
                            if is_same_serdes:
                                k_right = k_right_raw
                            else:
                                k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                        is_left_stopped = True
                        while k_left < k_right:
                            # Left-only key: copy through.
                            output_writebatch.put(k_left, v_left_bytes)
                            k_left, v_left_bytes = next(l_iter)
                        if k_left == k_right:
                            # Key on both sides: combine values with f.
                            is_equal = True
                            output_writebatch.put(k_left,
                                                  left_value_serdes.serialize(
                                                      f(left_value_serdes.deserialize(v_left_bytes),
                                                        right_value_serdes.deserialize(v_right_bytes))))
                            is_left_stopped = True
                            k_left, v_left_bytes = next(l_iter)
                            is_left_stopped = False
                            k_right_raw, v_right_bytes = next(r_iter)
                            if is_same_serdes:
                                k_right = k_right_raw
                            else:
                                k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
                            is_equal = False
                except StopIteration as e:
                    pass
                # Drain the side that has not been exhausted yet.
                if not is_left_stopped:
                    try:
                        output_writebatch.put(k_left, v_left_bytes)
                        while True:
                            k_left, v_left_bytes = next(l_iter)
                            output_writebatch.put(k_left, v_left_bytes)
                    except StopIteration as e:
                        pass
                else:
                    try:
                        if not is_equal:
                            if is_same_serdes:
                                output_writebatch.put(k_right_raw, v_right_bytes)
                            else:
                                output_writebatch.put(left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw)),
                                                      left_value_serdes.serialize(right_value_serdes.deserialize(v_right_bytes)))
                        while True:
                            k_right_raw, v_right_bytes = next(r_iter)
                            if is_same_serdes:
                                output_writebatch.put(k_right_raw, v_right_bytes)
                            else:
                                output_writebatch.put(left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw)),
                                                      left_value_serdes.serialize(right_value_serdes.deserialize(v_right_bytes)))
                    except StopIteration as e:
                        pass
                # end of merge union wrapper
                return

            def hash_union_wrapper(left_iterator, left_key_serdes, left_value_serdes,
                                   right_iterator, right_key_serdes, right_value_serdes,
                                   output_writebatch):
                f = create_functor(functors[0]._body)
                is_diff_serdes = type(left_key_serdes) != type(right_key_serdes)
                # Pass 1: every left pair, combined with right when present.
                for k_left, v_left in left_iterator:
                    if is_diff_serdes:
                        k_left = right_key_serdes.serialize(left_key_serdes.deserialize(k_left))
                    v_right = right_iterator.adapter.get(k_left)
                    if v_right is None:
                        output_writebatch.put(k_left, v_left)
                    else:
                        v_final = f(left_value_serdes.deserialize(v_left),
                                    right_value_serdes.deserialize(v_right))
                        output_writebatch.put(k_left, left_value_serdes.serialize(v_final))
                # Pass 2: right-only pairs not already written.
                right_iterator.first()
                for k_right, v_right in right_iterator:
                    if is_diff_serdes:
                        final_v_bytes = output_writebatch.get(left_key_serdes.serialize(
                            right_key_serdes.deserialize(k_right)))
                    else:
                        final_v_bytes = output_writebatch.get(k_right)
                    if final_v_bytes is None:
                        output_writebatch.put(k_right, v_right)
            union_type = task._job._options.get('union_type', 'merge')
            if union_type == 'merge':
                self._run_binary(merge_union_wrapper, task)
            else:
                self._run_binary(hash_union_wrapper, task)
        elif task._name == 'withStores':
            # Run an arbitrary functor against the raw task (store handles).
            f = create_functor(functors[0]._body)
            result = ErPair(key=self.functor_serdes.serialize(task._inputs[0]._id),
                            value=self.functor_serdes.serialize(f(task)))
        if L.isEnabledFor(logging.TRACE):
            L.trace(f'[RUNTASK] end. task_name={task._name}, inputs={task._inputs}, outputs={task._outputs}, task_id={task._id}')
        else:
            L.debug(f'[RUNTASK] end. task_name={task._name}, task_id={task._id}')
        return result
    # run_task ends here
    def aggregate_seq(self, task: ErTask):
        """Sequentially fold one input partition into a single value.

        For 'reduce', functors[0] is the binary op and the first value seeds
        the accumulator; for 'aggregate', functors[0] carries the zero value
        and functors[1] the seq op. Returns the accumulated result (or the
        zero value / None for an empty partition).
        """
        functors = task._job._functors
        is_reduce = functors[0]._name == 'reduce'
        zero_value = None if is_reduce or functors[0] is None else create_functor(functors[0]._body)
        if is_reduce:
            seq_op = create_functor(functors[0]._body)
        else:
            seq_op = create_functor(functors[1]._body)
        first = True
        seq_op_result = zero_value
        input_partition = task._inputs[0]
        input_key_serdes = create_serdes(input_partition._store_locator._serdes)
        # NOTE(review): values reuse the key serdes — presumably key and value
        # serdes are always identical for a store; confirm.
        input_value_serdes = input_key_serdes
        with create_adapter(input_partition) as input_adapter, \
                input_adapter.iteritems() as input_iter:
            for k_bytes, v_bytes in input_iter:
                v = input_value_serdes.deserialize(v_bytes)
                # reduce has no zero value: first element seeds the fold.
                if is_reduce and first:
                    seq_op_result = v
                    first = False
                else:
                    seq_op_result = seq_op(seq_op_result, v)
        return seq_op_result
def stop_processor(cluster_manager_client: ClusterManagerClient, myself: ErProcessor):
    """Windows-only watchdog loop.

    Listens on a per-pid named pipe for a textual 'stop <pid>' command and,
    when received, marks this processor STOPPED and heartbeats the cluster
    manager. Runs forever in a daemon-style thread (see serve()).
    """
    # Local imports: pywin32 is only available/needed on Windows.
    import win32file
    import win32pipe
    L.info(f"stop_processor pid:{os.getpid()}, ppid:{os.getppid()}")
    pipe_name = r'\\.\pipe\pid_pipe' + str(os.getpid())
    pipe_buffer_size = 1024
    while True:
        named_pipe = win32pipe.CreateNamedPipe(pipe_name,
                                               win32pipe.PIPE_ACCESS_DUPLEX,
                                               win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT | win32pipe.PIPE_READMODE_MESSAGE,
                                               win32pipe.PIPE_UNLIMITED_INSTANCES,
                                               pipe_buffer_size,
                                               pipe_buffer_size, 500, None)
        try:
            while True:
                try:
                    win32pipe.ConnectNamedPipe(named_pipe, None)
                    # ReadFile returns an (error_code, data) tuple; data[1]
                    # below is the message payload.
                    data = win32file.ReadFile(named_pipe, pipe_buffer_size, None)
                    if data is None or len(data) < 2:
                        continue
                    print('receive msg:', data)
                    cmd_str = data[1].decode('utf-8')
                    # Only honor a stop command addressed to this pid.
                    if 'stop' in cmd_str and str(os.getpid()) in cmd_str:
                        myself._status = ProcessorStatus.STOPPED
                        cluster_manager_client.heartbeat(myself)
                except BaseException as e:
                    # Client disconnects land here; recreate the pipe.
                    print("exception:", e)
                    break
        finally:
            try:
                win32pipe.DisconnectNamedPipe(named_pipe)
            except:
                pass
def serve(args):
    """Start the egg_pair command (and optionally transfer) gRPC servers,
    register with the cluster manager, then block until SIGTERM/SIGINT.

    Args:
        args: parsed CLI namespace (data_dir, port, transfer_port,
            cluster_manager, session_id, server_node_id, processor_id).
    """
    prefix = 'v1/egg-pair'
    set_data_dir(args.data_dir)
    # Route incoming 'runTask' commands to EggPair.run_task.
    CommandRouter.get_instance().register(
        service_name=f"{prefix}/runTask",
        route_to_module_name="eggroll.roll_pair.egg_pair",
        route_to_class_name="EggPair",
        route_to_method_name="run_task")
    max_workers = int(RollPairConfKeys.EGGROLL_ROLLPAIR_EGGPAIR_SERVER_EXECUTOR_POOL_MAX_SIZE.get())
    executor_pool_type = CoreConfKeys.EGGROLL_CORE_DEFAULT_EXECUTOR_POOL.get()
    command_server = grpc.server(create_executor_pool(
        canonical_name=executor_pool_type,
        max_workers=max_workers,
        thread_name_prefix="eggpair-command-server"),
        options=[
            ("grpc.max_metadata_size",
             int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_METADATA_SIZE.get())),
            ('grpc.max_send_message_length',
             int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
            ('grpc.max_receive_message_length',
             int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
            ('grpc.keepalive_time_ms', int(CoreConfKeys.CONFKEY_CORE_GRPC_CHANNEL_KEEPALIVE_TIME_SEC.get()) * 1000),
            ('grpc.keepalive_timeout_ms', int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_TIMEOUT_SEC.get()) * 1000),
            ('grpc.keepalive_permit_without_calls', int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_WITHOUT_CALLS_ENABLED.get())),
            ('grpc.per_rpc_retry_buffer_size', int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_RETRY_BUFFER_SIZE.get())),
            ('grpc.so_reuseport', False)])
    command_servicer = CommandServicer()
    command_pb2_grpc.add_CommandServiceServicer_to_server(command_servicer,
                                                          command_server)
    transfer_servicer = GrpcTransferServicer()
    port = args.port
    transfer_port = args.transfer_port
    port = command_server.add_insecure_port(f'[::]:{port}')
    if transfer_port == "-1":
        # Share a single server (and port) for command and transfer traffic.
        transfer_server = command_server
        transfer_port = port
        transfer_pb2_grpc.add_TransferServiceServicer_to_server(transfer_servicer,
                                                                transfer_server)
    else:
        # Dedicated transfer server on its own port / executor pool.
        transfer_server_max_workers = int(RollPairConfKeys.EGGROLL_ROLLPAIR_EGGPAIR_DATA_SERVER_EXECUTOR_POOL_MAX_SIZE.get())
        transfer_server = grpc.server(create_executor_pool(
            canonical_name=executor_pool_type,
            max_workers=transfer_server_max_workers,
            thread_name_prefix="transfer_server"),
            options=[
                ('grpc.max_metadata_size',
                 int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_METADATA_SIZE.get())),
                ('grpc.max_send_message_length',
                 int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
                ('grpc.max_receive_message_length',
                 int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
                # BUGFIX: keepalive_time_ms was previously derived from
                # KEEPALIVE_WITHOUT_CALLS_ENABLED (a 0/1 flag), yielding a
                # 0/1000 ms keepalive interval; use the same keepalive-time
                # key as the command server above.
                ('grpc.keepalive_time_ms', int(CoreConfKeys.CONFKEY_CORE_GRPC_CHANNEL_KEEPALIVE_TIME_SEC.get()) * 1000),
                ('grpc.keepalive_timeout_ms', int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_TIMEOUT_SEC.get()) * 1000),
                ('grpc.keepalive_permit_without_calls', int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_WITHOUT_CALLS_ENABLED.get())),
                ('grpc.per_rpc_retry_buffer_size', int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_RETRY_BUFFER_SIZE.get())),
                ('grpc.so_reuseport', False)])
        transfer_port = transfer_server.add_insecure_port(f'[::]:{transfer_port}')
        transfer_pb2_grpc.add_TransferServiceServicer_to_server(transfer_servicer,
                                                                transfer_server)
        transfer_server.start()
    pid = os.getpid()
    L.info(f"starting egg_pair service, port: {port}, transfer port: {transfer_port}, pid: {pid}")
    command_server.start()
    cluster_manager = args.cluster_manager
    myself = None
    cluster_manager_client = None
    if cluster_manager:
        # Register this processor with the cluster manager and heartbeat once.
        session_id = args.session_id
        server_node_id = int(args.server_node_id)
        static_er_conf = get_static_er_conf()
        static_er_conf['server_node_id'] = server_node_id
        if not session_id:
            raise ValueError('session id is missing')
        options = {
            SessionConfKeys.CONFKEY_SESSION_ID: args.session_id
        }
        myself = ErProcessor(id=int(args.processor_id),
                             server_node_id=server_node_id,
                             processor_type=ProcessorTypes.EGG_PAIR,
                             command_endpoint=ErEndpoint(host='localhost', port=port),
                             transfer_endpoint=ErEndpoint(host='localhost', port=transfer_port),
                             pid=pid,
                             options=options,
                             status=ProcessorStatus.RUNNING)
        cluster_manager_host, cluster_manager_port = cluster_manager.strip().split(':')
        L.info(f'egg_pair cluster_manager: {cluster_manager}')
        cluster_manager_client = ClusterManagerClient(options={
            ClusterManagerConfKeys.CONFKEY_CLUSTER_MANAGER_HOST: cluster_manager_host,
            ClusterManagerConfKeys.CONFKEY_CLUSTER_MANAGER_PORT: cluster_manager_port
        })
        cluster_manager_client.heartbeat(myself)
        if platform.system() == "Windows":
            # No POSIX signals on Windows: use the named-pipe watchdog instead.
            t1 = threading.Thread(target=stop_processor, args=[cluster_manager_client, myself])
            t1.start()
    L.info(f'egg_pair started at port={port}, transfer_port={transfer_port}')
    run = True

    def exit_gracefully(signum, frame):
        nonlocal run
        run = False
        # BUGFIX: log the received signal number itself; signal.getsignal()
        # returns the installed handler (this very function), which was
        # misleading in the logs.
        L.info(f'egg_pair {args.processor_id} at port={port}, transfer_port={transfer_port}, pid={pid} receives signum={signum}, stopping gracefully.')
    signal.signal(signal.SIGTERM, exit_gracefully)
    signal.signal(signal.SIGINT, exit_gracefully)
    # Main thread idles until a termination signal flips `run`.
    while run:
        time.sleep(1)
    L.info(f'sending exit heartbeat to cm')
    if cluster_manager:
        myself._status = ProcessorStatus.STOPPED
        cluster_manager_client.heartbeat(myself)
    GrpcChannelFactory.shutdown_all_now()
    L.info(f'closing RocksDB open dbs')
    # todo:1: move to RocksdbAdapter and provide a cleanup method
    from eggroll.core.pair_store.rocksdb import RocksdbAdapter
    # NOTE(review): `del db` only unbinds the loop variable — it does not
    # close the adapters or remove them from db_dict; an explicit close()
    # on the adapter API would be needed for a real cleanup. Kept as-is.
    for path, db in RocksdbAdapter.db_dict.items():
        del db
    gc.collect()
    L.info(f'system metric at exit: {get_system_metric(1)}')
    L.info(f'egg_pair {args.processor_id} at port={port}, transfer_port={transfer_port}, pid={pid} stopped gracefully')
if __name__ == '__main__':
    L.info(f'system metric at start: {get_system_metric(0.1)}')
    # CLI for launching one egg_pair processor.
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument('-d', '--data-dir')
    args_parser.add_argument('-cm', '--cluster-manager')
    args_parser.add_argument('-nm', '--node-manager')
    args_parser.add_argument('-s', '--session-id')
    args_parser.add_argument('-p', '--port', default='0')
    args_parser.add_argument('-t', '--transfer-port', default='0')
    args_parser.add_argument('-sn', '--server-node-id')
    args_parser.add_argument('-prid', '--processor-id', default='0')
    args_parser.add_argument('-c', '--config')
    args = args_parser.parse_args()
    # EGGROLL_HOME must be set; used to locate the default properties file.
    EGGROLL_HOME = os.environ['EGGROLL_HOME']
    configs = configparser.ConfigParser()
    if args.config:
        conf_file = args.config
        L.info(f'reading config path: {conf_file}')
    else:
        conf_file = f'{EGGROLL_HOME}/conf/eggroll.properties'
        L.info(f'reading default config: {conf_file}')
    configs.read(conf_file)
    # Publish the [eggroll] section as the process-wide static config.
    set_static_er_conf(configs['eggroll'])
    if configs:
        # CLI takes precedence; fall back to the configured data dir.
        if not args.data_dir:
            args.data_dir = configs['eggroll']['eggroll.data.dir']
    L.info(args)
    serve(args)
|
tasks.py | import json
import time
import requests
from flask import g, request, current_app, url_for
from ..models import User, Watch, Check
from .. import db
from . import main
from .authentication import auth_user
from .errors import bad_request, unauthorized, forbidden, not_found
from flask.ext.mail import Message
from threading import Thread
from .. import mail
# from celery import Celery
def queue_to_celery_broker(watch):
    """Placeholder for queuing a watch onto a Celery broker.

    Not implemented — checks currently run synchronously via run_check().
    """
    pass
@main.route('/tasks/check/<watch_id>/', methods=['GET'])
def run_check(watch_id):
    """Run one availability check for a watch: probe its URL, email the
    report, persist a Check row, and return the check as JSON.

    Returns a 404 JSON error when the watch does not exist.
    """
    watch = Watch.query.get(watch_id)
    if not watch:
        # clear_celery_broker(watch_id)
        return not_found("Resource not found!")
    # check if site is available:
    # BUGFIX: a timeout prevents the worker from hanging forever on an
    # unresponsive host, and connection errors / timeouts — the very thing a
    # monitor must detect — now produce a NOT-AVAILABLE report instead of
    # crashing the request with a 500.
    try:
        r = requests.get(watch.url, timeout=10)
        available = r.status_code == 200
    except requests.RequestException:
        available = False
    timestamp = time.time()
    if available:  # the site is available:
        report = "[AVAILABLE] The site at the url: %s is available as at this time: %s" % (watch.url, time.asctime( time.localtime(timestamp) ))
    else:
        report = "[NOT-AVAILABLE] The site at the url: %s is not available as at this time: %s" % (watch.url, time.asctime( time.localtime(timestamp) ))
    # send the mail:
    send_email(watch.email, report)
    # record the check in the db:
    check = Check( watch_id=watch.id, report=report, timestamp=timestamp, mail_sent=True )
    db.session.add(check)
    db.session.commit()
    # create and send response
    response = {}
    response["check"] = check.to_json()
    response["status"] = "success"
    return json.dumps(response)
def send_async_email(app, msg):
    """Deliver *msg* via Flask-Mail from a worker thread.

    Flask-Mail reads the application's config, so an application context
    is pushed for the duration of the send (equivalent to the usual
    ``with app.app_context():`` form).
    """
    ctx = app.app_context()
    ctx.push()
    try:
        mail.send(msg)
    finally:
        ctx.pop()
def send_email(to, body):
    """Compose the monitor report message and send it on a background thread.

    Returns the started Thread so callers can join() it if they need the
    mail delivered before proceeding.
    """
    app = current_app._get_current_object()
    msg = Message(app.config['MONITOR_BOT_REPORT_SUBJECT'],
                  sender=app.config['MONITOR_BOT_MAIL_SENDER'],
                  recipients=[to])
    msg.body = body
    worker = Thread(target=send_async_email, args=[app, msg])
    worker.start()
    return worker
|
Misc.py | ## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
from struct import pack
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from .DataType import *
from .BuildToolError import *
from CommonDataClass.DataClass import *
from .Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import uuid
from CommonDataClass.Exceptions import BadExpression
import subprocess
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile("^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile('^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
    """ Parse map file to get variable offset in current EFI file
    @param mapfilepath    Map file absolution path
    @param efifilepath:   EFI binary file full path
    @param varnames       iteratable container whose elements are variable names to be searched
    @return List whos elements are tuple with variable name and raw offset;
            None if the map file cannot be read or is empty
    """
    # Narrowed from a bare "except:" (which also swallowed KeyboardInterrupt
    # and hid real bugs) and converted to a context manager so the file
    # handle is closed even if readlines() fails.
    try:
        with open(mapfilepath, 'r') as f:
            lines = f.readlines()
    except (IOError, OSError):
        return None

    if not lines:
        return None

    # Dispatch on the first line, which identifies the producing linker.
    firstline = lines[0].strip()
    if (firstline.startswith("Archive member included ") and
        firstline.endswith(" file (symbol)")):
        return _parseForGCC(lines, efifilepath, varnames)
    if firstline.startswith("# Path:"):
        return _parseForXcode(lines, efifilepath, varnames)
    return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
    """ Parse map file generated by GCC linker """
    # State machine over the map file layout:
    #   0 = before "Memory Configuration", 1 = before the memory map,
    #   2 = before "START GROUP", 3 = inside the section/symbol listing.
    status = 0
    sections = []
    varoffset = []
    for index, line in enumerate(lines):
        line = line.strip()
        # status machine transection
        if status == 0 and line == "Memory Configuration":
            status = 1
            continue
        elif status == 1 and line == 'Linker script and memory map':
            status = 2
            continue
        elif status ==2 and line == 'START GROUP':
            status = 3
            continue

        # status handler
        if status == 3:
            # Remember each "<name> <addr> <size>" section line; the most
            # recent one is the section a following symbol belongs to.
            m = valuePatternGcc.match(line)
            if m is not None:
                sections.append(m.groups(0))
            for varname in varnames:
                Str = ''
                m = re.match("^.data.(%s)" % varname, line)
                if m is not None:
                    m = re.match(".data.(%s)$" % varname, line)
                    if m is not None:
                        # Symbol name fills the whole line: the address/size
                        # pair was wrapped onto the next line by the linker.
                        Str = lines[index + 1]
                    else:
                        Str = line[len(".data.%s" % varname):]
                    if Str:
                        m = pcdPatternGcc.match(Str.strip())
                        if m is not None:
                            # (name, symbol offset, owning-section base, owning-section name)
                            varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))

    if not varoffset:
        return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs is None or len(efisecs) == 0:
        return []
    #redirection
    # Difference between the map file's .text base and the EFI image's,
    # used to translate map addresses into file offsets.
    redirection = 0
    for efisec in efisecs:
        for section in sections:
            if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
                redirection = int(section[1], 16) - efisec[1]
    ret = []
    for var in varoffset:
        for efisec in efisecs:
            if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
                ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
    return ret
def _parseGeneral(lines, efifilepath, varnames):
    """Parse a Microsoft-style linker map file and return a list of
    (variable name, hex file-offset) tuples for the names in varnames."""
    status = 0    #0 - beginning of file; 1 - PE section definition; 2 - symbol table
    secs = []    # key = section name
    varoffset = []
    symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
    for line in lines:
        line = line.strip()
        if startPatternGeneral.match(line):
            status = 1
            continue
        if addressPatternGeneral.match(line):
            status = 2
            continue
        if line.startswith("entry point at"):
            # Anything after "entry point at" is ignored.
            status = 3
            continue
        if status == 1 and len(line) != 0:
            m = secReGeneral.match(line)
            assert m is not None, "Fail to parse the section in map file , line is %s" % line
            sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
            secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
        if status == 2 and len(line) != 0:
            for varname in varnames:
                m = symRe.match(line)
                assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
                sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
                sec_no = int(sec_no, 16)
                sym_offset = int(sym_offset, 16)
                vir_addr = int(vir_addr, 16)
                # cannot pregenerate this RegEx since it uses varname from varnames.
                m2 = re.match('^[_]*(%s)' % varname, sym_name)
                if m2 is not None:
                    # fond a binary pcd entry in map file
                    for sec in secs:
                        # Only accept the symbol if its offset lies inside
                        # the section it claims to belong to.
                        if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
                            varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
    if not varoffset: return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs is None or len(efisecs) == 0:
        return []
    ret = []
    for var in varoffset:
        index = 0
        for efisec in efisecs:
            index = index + 1
            # Match either by section name or by 1-based section number.
            if var[1].strip() == efisec[0].strip():
                ret.append((var[0], hex(efisec[2] + var[2])))
            elif var[4] == index:
                ret.append((var[0], hex(efisec[2] + var[2])))
    return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
    """Copy an INF built multiple times (with a FILE_GUID override) to a
    uniquely named temporary INF under the database directory and return a
    PathClass whose .Path points at the copy.

    @param Path      Original PathClass object
    @param BaseName  New file base name (the overriding FILE_GUID)
    @param Workspace Workspace the relative path is resolved against

    @retval PathClass for the temporary INF
    """
    Filename = os.path.split(Path.File)[1]
    if '.' in Filename:
        Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
    else:
        Filename = BaseName + Path.BaseName

    #
    # If -N is specified on command line, cache is disabled
    # The directory has to be created
    #
    DbDir = os.path.split(GlobalData.gDatabasePath)[0]
    if not os.path.exists(DbDir):
        os.makedirs(DbDir)
    #
    # A temporary INF is copied to database path which must have write permission
    # The temporary will be removed at the end of build
    # In case of name conflict, the file name is
    # FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
    #
    TempFullPath = os.path.join(DbDir,
                                Filename)
    RtPath = PathClass(Path.File, Workspace)
    #
    # Modify the full path to temporary path, keep other unchanged
    #
    # To build same module more than once, the module path with FILE_GUID overridden has
    # the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
    # in DSC which is used as relative path by C files and other files in INF.
    # A trick was used: all module paths are PathClass instances, after the initialization
    # of PathClass, the PathClass.Path is overridden by the temporary INF path.
    #
    # The reason for creating a temporary INF is:
    # Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
    # the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
    # A different key for the same module is needed to create different output directory,
    # retrieve overridden PCDs, library instances.
    #
    # The BaseName is the FILE_GUID which is also the output directory name.
    #
    #
    RtPath.Path = TempFullPath
    RtPath.BaseName = BaseName
    #
    # If file exists, compare contents
    #
    if os.path.exists(TempFullPath):
        # Identical content means the copy is already in place; reuse it
        # without registering another cleanup entry.
        with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
            if f1.read() == f2.read():
                return RtPath
    # Record the copy so ClearDuplicatedInf() can delete it at build exit.
    _TempInfs.append(TempFullPath)
    shutil.copy2(str(Path), TempFullPath)
    return RtPath
## Remove temporary created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
    """Delete every temporary INF recorded in _TempInfs.

    Drains the module-level list so repeated calls are harmless; entries
    whose file has already disappeared are simply skipped.
    """
    while True:
        try:
            TempInf = _TempInfs.pop()
        except IndexError:
            break
        if os.path.exists(TempInf):
            os.remove(TempInf)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
    """Convert a registry-format GUID string
    (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) to C structure style
    ({0x..., 0x..., 0x..., {0x.., 0x.., ...}}).

    @param  Guid    The GUID string

    @retval string  The GUID string in C structure style
    """
    Fields = Guid.split('-')
    # First three fields become whole hex literals.
    Parts = ['{', '0x' + Fields[0] + ', ', '0x' + Fields[1] + ', ', '0x' + Fields[2] + ', ']
    # Field 4 and the node field are emitted as an inner array of bytes.
    Parts.append('{0x' + Fields[3][0:2] + ', 0x' + Fields[3][2:4])
    for Offset in range(0, 12, 2):
        Parts.append(', 0x' + Fields[4][Offset:Offset + 2])
    Parts.append('}}')
    return ''.join(Parts)
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
    """Convert a GUID given as a 16-byte C array string
    ("{0x.., 0x.., ...}") to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form.

    The first three GUID fields are stored little-endian in the byte
    array, hence the reversed index order below.

    @param  GuidValue   The GUID value in byte array

    @retval string      Registry-format GUID, or '' if the input is malformed
    """
    guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
    guidValueList = guidValueString.split(",")
    if len(guidValueList) != 16:
        return ''
    try:
        return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            int(guidValueList[3], 16),
            int(guidValueList[2], 16),
            int(guidValueList[1], 16),
            int(guidValueList[0], 16),
            int(guidValueList[5], 16),
            int(guidValueList[4], 16),
            int(guidValueList[7], 16),
            int(guidValueList[6], 16),
            int(guidValueList[8], 16),
            int(guidValueList[9], 16),
            int(guidValueList[10], 16),
            int(guidValueList[11], 16),
            int(guidValueList[12], 16),
            int(guidValueList[13], 16),
            int(guidValueList[14], 16),
            int(guidValueList[15], 16)
            )
    except ValueError:
        # Narrowed from a bare "except:"; only a non-hex field is an
        # expected failure here (length is already validated above).
        return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
    """Convert a GUID in C structure style
    ("{0x..., 0x..., 0x..., {0x.., ...}}") to
    xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form.

    @param  GuidValue   The GUID value in C structure format

    @retval string      Registry-format GUID, or '' if the input is malformed
    """
    if not GlobalData.gGuidCFormatPattern.match(GuidValue):
        return ''
    guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
    guidValueList = guidValueString.split(",")
    if len(guidValueList) != 11:
        return ''
    try:
        return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            int(guidValueList[0], 16),
            int(guidValueList[1], 16),
            int(guidValueList[2], 16),
            int(guidValueList[3], 16),
            int(guidValueList[4], 16),
            int(guidValueList[5], 16),
            int(guidValueList[6], 16),
            int(guidValueList[7], 16),
            int(guidValueList[8], 16),
            int(guidValueList[9], 16),
            int(guidValueList[10], 16)
            )
    except ValueError:
        # Narrowed from a bare "except:"; only a non-hex field is an
        # expected failure here (the length check already passed).
        return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
    """Convert a GUID in C structure style to the
    xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx identifier form.

    @param  GuidValue   The GUID value in C structure format

    @retval string      The GUID value with '_' separators

    Raises a build error (via EdkLogger) when the input does not contain
    exactly 11 comma-separated fields.
    """
    normalized = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
    fields = normalized.split(",")
    if len(fields) != 11:
        EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
    return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % tuple(
        int(fields[Index], 16) for Index in range(11))
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
    """Create Directory (including intermediate directories) if it does
    not already exist.

    @param  Directory   The directory name; None/blank is treated as
                        "nothing to do" and succeeds

    @retval True        Directory exists or was created
    @retval False       Creation failed
    """
    if Directory is None or Directory.strip() == "":
        return True
    try:
        if not os.access(Directory, os.F_OK):
            os.makedirs(Directory)
    except OSError:
        # Narrowed from a bare "except:" which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return False
    return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
    """Remove a directory.

    @param  Directory    The directory name; None/blank/nonexistent is a no-op
    @param  Recursively  Delete contained files and sub-directories first
    """
    if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
        return
    if Recursively:
        # Walk by chdir'ing into the directory so listdir/remove can use
        # bare names; the original working directory is restored after.
        CurrentDirectory = os.getcwd()
        os.chdir(Directory)
        for File in os.listdir("."):
            if os.path.isdir(File):
                RemoveDirectory(File, Recursively)
            else:
                os.remove(File)
        os.chdir(CurrentDirectory)
    # Directory must be empty at this point (or the caller asked for a
    # non-recursive removal of an already-empty directory).
    os.rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
    """Write Content to File only when the content differs from what is
    already on disk, so "make"-style systems see unchanged timestamps.

    @param  File          The path of file
    @param  Content       The new content of the file
    @param  IsBinaryFile  If False, normalize "\n" to the platform line separator

    @retval True          The file content changed and the file was rewritten
    @retval False         The file content is the same; nothing written
    """
    if not IsBinaryFile:
        Content = Content.replace("\n", os.linesep)

    if os.path.exists(File):
        try:
            # Compare raw bytes against the existing file.
            if Content == open(File, "rb").read():
                return False
        except:
            EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)

    DirName = os.path.dirname(File)
    if not CreateDirectory(DirName):
        EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
    else:
        if DirName == '':
            DirName = os.getcwd()
        if not os.access(DirName, os.W_OK):
            EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)

    try:
        if GlobalData.gIsWindows:
            # On Windows, prefer the native helper (long-path support);
            # fall back to a plain write if it is unavailable or fails.
            try:
                from .PyUtility import SaveFileToDisk
                if not SaveFileToDisk(File, Content):
                    EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData=File)
            except:
                Fd = open(File, "wb")
                Fd.write(Content)
                Fd.close()
        else:
            Fd = open(File, "wb")
            Fd.write(Content)
            Fd.close()
    except IOError as X:
        EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)

    return True
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump(Data, File):
    """Make a Python object persistent on the file system with pickle.

    @param  Data   The object to be stored in file
    @param  File   The path of file to store the object

    Failures are logged (non-fatally) rather than raised, matching the
    original best-effort contract.
    """
    try:
        # "with" replaces the manual try/finally close; the handle is
        # released even when pickling fails mid-write.
        with open(File, 'wb') as Fd:
            pickle.dump(Data, Fd, pickle.HIGHEST_PROTOCOL)
    except Exception:
        # Narrowed from a bare "except:" so Ctrl-C still aborts the build.
        EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore(File):
    """Restore a Python object previously saved with DataDump.

    @param  File    The path of file stored the object

    @retval object  The unpickled python object
    @retval None    On any failure opening or loading the file
    """
    restored = None
    stream = None
    try:
        stream = open(File, 'rb')
        restored = pickle.load(stream)
    except Exception as e:
        EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
        restored = None
    finally:
        if stream is not None:
            stream.close()
    return restored
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
    """Retrieve and cache the real (case-correct) path name in the file
    system under a given root.

    NOTE: the caches are class attributes, so all DirCache instances share
    one cache regardless of their Root.
    """
    _CACHE_ = set()        # known real paths (relative to the root)
    _UPPER_CACHE_ = {}     # upper-cased path -> real path, for case-insensitive hits

    def __init__(self, Root):
        self._Root = Root
        # Pre-seed the caches with the root's direct children.
        for F in os.listdir(Root):
            self._CACHE_.add(F)
            self._UPPER_CACHE_[F.upper()] = F

    # =[] operator
    def __getitem__(self, Path):
        """Return the real path for Path (str), or None if it doesn't exist."""
        # Strip the root prefix so lookups are root-relative.
        Path = Path[len(os.path.commonprefix([Path, self._Root])):]
        if not Path:
            return self._Root
        if Path and Path[0] == os.path.sep:
            Path = Path[1:]
        # Fast path: exact or case-insensitive hit in the caches.
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        UpperPath = Path.upper()
        if UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])

        # Walk down the path components to find the deepest cached parent.
        IndexList = []
        LastSepIndex = -1
        SepIndex = Path.find(os.path.sep)
        while SepIndex > -1:
            Parent = UpperPath[:SepIndex]
            if Parent not in self._UPPER_CACHE_:
                break
            LastSepIndex = SepIndex
            SepIndex = Path.find(os.path.sep, LastSepIndex + 1)

        if LastSepIndex == -1:
            return None

        # From the deepest cached parent, list each descendant directory,
        # filling the caches as we go. Uses chdir so listdir sees relative
        # names; the original cwd is restored before returning.
        Cwd = os.getcwd()
        os.chdir(self._Root)
        SepIndex = LastSepIndex
        while SepIndex > -1:
            Parent = Path[:SepIndex]
            ParentKey = UpperPath[:SepIndex]
            if ParentKey not in self._UPPER_CACHE_:
                os.chdir(Cwd)
                return None

            if Parent in self._CACHE_:
                ParentDir = Parent
            else:
                ParentDir = self._UPPER_CACHE_[ParentKey]
            for F in os.listdir(ParentDir):
                Dir = os.path.join(ParentDir, F)
                self._CACHE_.add(Dir)
                self._UPPER_CACHE_[Dir.upper()] = Dir
            SepIndex = Path.find(os.path.sep, SepIndex + 1)

        os.chdir(Cwd)
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        elif UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        return None
def RealPath(File, Dir='', OverrideDir=''):
    """Resolve File against Dir via the global file cache; if that misses
    and an OverrideDir is given, try resolving against OverrideDir.

    Returns whatever the cache yields (falsy when the path is unknown).
    """
    Candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not Candidate and OverrideDir:
        Candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
    return Candidate
def RealPath2(File, Dir='', OverrideDir=''):
    """Resolve File and split the result into (relative part, base directory).

    Tries OverrideDir first, then the global file cache with Dir, then the
    plain file system.

    @retval (str, str)    (path relative to the base, the base directory)
    @retval (None, None)  when the file cannot be found anywhere
    """
    NewFile = None
    if OverrideDir:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if NewFile:
            # Split off the override directory prefix (with or without a
            # trailing separator on OverrideDir).
            if OverrideDir[-1] == os.path.sep:
                return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
            else:
                return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
    if GlobalData.gAllFiles:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not NewFile:
        # Cache miss (or no cache): fall back to the real file system.
        NewFile = os.path.normpath(os.path.join(Dir, File))
        if not os.path.exists(NewFile):
            return None, None
    if NewFile:
        if Dir:
            if Dir[-1] == os.path.sep:
                return NewFile[len(Dir):], NewFile[0:len(Dir)]
            else:
                return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
        else:
            return NewFile, ''
    return None, None
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
    """Look up the GUID value for CName in the given packages.

    @param  CName        The CName of the GUID
    @param  PackageList  List of packages looking-up in
    @param  Inffile      The driver file; private GUIDs are only visible
                         to INFs located inside the declaring package

    @retval GUID value if CName is found in any given package, else None
    """
    for Package in PackageList:
        VisibleKeys = Package.Guids.keys()
        # Hide private GUIDs from INFs outside the package directory.
        if Inffile and Package._PrivateGuids and not Inffile.startswith(Package.MetaFile.Dir):
            VisibleKeys = [Key for Key in Package.Guids if Key not in Package._PrivateGuids]
        if CName in VisibleKeys:
            return Package.Guids[CName]
    return None
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def ProtocolValue(CName, PackageList, Inffile = None):
    """Look up the Protocol GUID value for CName in the given packages.

    @param  CName        The CName of the Protocol
    @param  PackageList  List of packages looking-up in
    @param  Inffile      The driver file; private Protocols are only
                         visible to INFs inside the declaring package

    @retval Protocol value if CName is found in any given package, else None
    """
    for Package in PackageList:
        VisibleKeys = Package.Protocols.keys()
        # Hide private Protocols from INFs outside the package directory.
        if Inffile and Package._PrivateProtocols and not Inffile.startswith(Package.MetaFile.Dir):
            VisibleKeys = [Key for Key in Package.Protocols if Key not in Package._PrivateProtocols]
        if CName in VisibleKeys:
            return Package.Protocols[CName]
    return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def PpiValue(CName, PackageList, Inffile = None):
    """Look up the PPI GUID value for CName in the given packages.

    @param  CName        The CName of the PPI
    @param  PackageList  List of packages looking-up in
    @param  Inffile      The driver file; private PPIs are only visible
                         to INFs inside the declaring package

    @retval PPI value if CName is found in any given package, else None
    """
    for Package in PackageList:
        VisibleKeys = Package.Ppis.keys()
        # Hide private PPIs from INFs outside the package directory.
        if Inffile and Package._PrivatePpis and not Inffile.startswith(Package.MetaFile.Dir):
            VisibleKeys = [Key for Key in Package.Ppis if Key not in Package._PrivatePpis]
        if CName in VisibleKeys:
            return Package.Ppis[CName]
    return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
    """String template with ${placeholder} substitution and ${BEGIN}/${END}
    repeated sections (see the module comment above for the format)."""
    _REPEAT_START_FLAG = "BEGIN"
    _REPEAT_END_FLAG = "END"

    class Section(object):
        """One fragment of a template: either a repeatable ${BEGIN}..${END}
        body or a plain stretch of text, pre-split at its placeholders."""
        _LIST_TYPES = [type([]), type(set()), type((0,))]

        def __init__(self, TemplateSection, PlaceHolderList):
            self._Template = TemplateSection
            self._PlaceHolderList = []

            # Split the section into sub-sections according to the position of placeholders
            if PlaceHolderList:
                self._SubSectionList = []
                SubSectionStart = 0
                #
                # The placeholders passed in must be in the format of
                #
                #   PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
                #
                for PlaceHolder, Start, End in PlaceHolderList:
                    self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
                    self._SubSectionList.append(TemplateSection[Start:End])
                    self._PlaceHolderList.append(PlaceHolder)
                    SubSectionStart = End
                if SubSectionStart < len(TemplateSection):
                    self._SubSectionList.append(TemplateSection[SubSectionStart:])
            else:
                self._SubSectionList = [TemplateSection]

        def __str__(self):
            return self._Template + " : " + str(self._PlaceHolderList)

        def Instantiate(self, PlaceHolderValues):
            """Fill in this section's placeholders from PlaceHolderValues.

            List/tuple/set values mark the section as repeated: all such
            values must have the same length, and the section is emitted
            once per element. Scalar values are substituted once.
            """
            RepeatTime = -1
            RepeatPlaceHolders = {}
            NonRepeatPlaceHolders = {}

            for PlaceHolder in self._PlaceHolderList:
                if PlaceHolder not in PlaceHolderValues:
                    continue
                Value = PlaceHolderValues[PlaceHolder]
                if type(Value) in self._LIST_TYPES:
                    if RepeatTime < 0:
                        RepeatTime = len(Value)
                    elif RepeatTime != len(Value):
                        EdkLogger.error(
                            "TemplateString",
                            PARAMETER_INVALID,
                            "${%s} has different repeat time from others!" % PlaceHolder,
                            ExtraData=str(self._Template)
                            )
                    RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
                else:
                    NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value

            # First pass: substitute the scalar (non-repeated) placeholders.
            if NonRepeatPlaceHolders:
                StringList = []
                for S in self._SubSectionList:
                    if S not in NonRepeatPlaceHolders:
                        StringList.append(S)
                    else:
                        StringList.append(str(NonRepeatPlaceHolders[S]))
            else:
                StringList = self._SubSectionList

            # Second pass: expand the repeated placeholders element-wise.
            if RepeatPlaceHolders:
                TempStringList = []
                for Index in range(RepeatTime):
                    for S in StringList:
                        if S not in RepeatPlaceHolders:
                            TempStringList.append(S)
                        else:
                            TempStringList.append(str(RepeatPlaceHolders[S][Index]))
                StringList = TempStringList

            return "".join(StringList)

    ## Constructor
    def __init__(self, Template=None):
        self.String = ''
        self.IsBinary = False
        self._Template = Template
        self._TemplateSectionList = self._Parse(Template)

    ## str() operator
    #
    #   @retval     string  The string replaced
    #
    def __str__(self):
        return self.String

    ## Split the template string into fragments per the ${BEGIN} and ${END} flags
    #
    #   @retval     list    A list of TemplateString.Section objects
    #
    def _Parse(self, Template):
        SectionStart = 0         # start of the current section in Template
        SearchFrom = 0           # where the next placeholder search begins
        MatchEnd = 0             # end offset of the last placeholder match
        PlaceHolderList = []     # placeholders collected for the current section
        TemplateSectionList = []
        while Template:
            MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
            if not MatchObj:
                # No more placeholders: emit the trailing section and stop.
                if MatchEnd <= len(Template):
                    TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                break
            MatchString = MatchObj.group(1)
            MatchStart = MatchObj.start()
            MatchEnd = MatchObj.end()
            if MatchString == self._REPEAT_START_FLAG:
                # ${BEGIN}: close the preceding plain section (if non-empty).
                if MatchStart > SectionStart:
                    TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            elif MatchString == self._REPEAT_END_FLAG:
                # ${END}: close the repeated section body.
                TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            else:
                # Ordinary placeholder: record its section-relative offsets.
                PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
            SearchFrom = MatchEnd
        return TemplateSectionList

    ## Replace the string template with dictionary of placeholders and append it to previous one
    #
    #   @param      AppendString    The string template to append
    #   @param      Dictionary      The placeholder dictionaries
    #
    def Append(self, AppendString, Dictionary=None):
        if Dictionary:
            SectionList = self._Parse(AppendString)
            self.String += "".join(S.Instantiate(Dictionary) for S in SectionList)
        else:
            self.String += AppendString

    ## Replace the string template with dictionary of placeholders
    #
    #   @param      Dictionary      The placeholder dictionaries
    #
    #   @retval     str             The string replaced with placeholder values
    #
    def Replace(self, Dictionary=None):
        return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    """Prints a progress character to stdout at a fixed interval from a
    background thread until stopped.

    NOTE: the stop flag and worker thread are class attributes, so at most
    one progress display runs per process, shared by all instances.
    """
    # for avoiding deadloop
    _StopFlag = None
    _ProgressThread = None
    _CheckInterval = 0.25    # seconds between stop-flag polls

    ## Constructor
    #
    #   @param      OpenMessage     The string printed before progress charaters
    #   @param      CloseMessage    The string printed after progress charaters
    #   @param      ProgressChar    The charater used to indicate the progress
    #   @param      Interval        The interval in seconds between two progress charaters
    #
    def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
        self.PromptMessage = OpenMessage
        self.CodaMessage = CloseMessage
        self.ProgressChar = ProgressChar
        self.Interval = Interval
        if Progressor._StopFlag is None:
            Progressor._StopFlag = threading.Event()

    ## Start to print progress charater
    #
    #   @param      OpenMessage     The string printed before progress charaters
    #
    def Start(self, OpenMessage=None):
        if OpenMessage is not None:
            self.PromptMessage = OpenMessage
        Progressor._StopFlag.clear()
        if Progressor._ProgressThread is None:
            Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            Progressor._ProgressThread.setDaemon(False)
            Progressor._ProgressThread.start()

    ## Stop printing progress charater
    #
    #   @param      CloseMessage    The string printed after progress charaters
    #
    def Stop(self, CloseMessage=None):
        # Temporarily override the coda message for this stop only.
        OriginalCodaMessage = self.CodaMessage
        if CloseMessage is not None:
            self.CodaMessage = CloseMessage
        self.Abort()
        self.CodaMessage = OriginalCodaMessage

    ## Thread entry method
    def _ProgressThreadEntry(self):
        sys.stdout.write(self.PromptMessage + " ")
        sys.stdout.flush()
        TimeUp = 0.0
        # Poll the stop flag every _CheckInterval seconds so Abort() is
        # honored promptly even with a long progress Interval.
        while not Progressor._StopFlag.isSet():
            if TimeUp <= 0.0:
                sys.stdout.write(self.ProgressChar)
                sys.stdout.flush()
                TimeUp = self.Interval
            time.sleep(self._CheckInterval)
            TimeUp -= self._CheckInterval
        sys.stdout.write(" " + self.CodaMessage + "\n")
        sys.stdout.flush()

    ## Abort the progress display
    @staticmethod
    def Abort():
        if Progressor._StopFlag is not None:
            Progressor._StopFlag.set()
        if Progressor._ProgressThread is not None:
            # Wait for the worker to notice the flag and print the coda.
            Progressor._ProgressThread.join()
            Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict which its keys or values can be
# accessed in the order they are added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of keys.
#
class sdict(IterableUserDict):
    """Dictionary whose keys and values are accessed in insertion order.

    The order is guaranteed by keeping a parallel list of keys
    (_key_list); every mutating operation maintains that list.
    """
    ## Constructor
    def __init__(self):
        IterableUserDict.__init__(self)
        self._key_list = []    # keys in insertion order; source of truth for ordering

    ## [] operator
    def __setitem__(self, key, value):
        if key not in self._key_list:
            self._key_list.append(key)
        IterableUserDict.__setitem__(self, key, value)

    ## del operator
    def __delitem__(self, key):
        self._key_list.remove(key)
        IterableUserDict.__delitem__(self, key)

    ## used in "for k in dict" loop to ensure the correct order
    def __iter__(self):
        return self.iterkeys()

    ## len() support
    def __len__(self):
        return len(self._key_list)

    ## "in" test support
    def __contains__(self, key):
        return key in self._key_list

    ## indexof support
    def index(self, key):
        return self._key_list.index(key)

    ## insert support: place newkey/newvalue BEFORE or AFTER an existing key
    def insert(self, key, newkey, newvalue, order):
        index = self._key_list.index(key)
        if order == 'BEFORE':
            self._key_list.insert(index, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
        elif order == 'AFTER':
            self._key_list.insert(index + 1, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)

    ## append support: merge another dict, preserving first-seen key order
    def append(self, sdict):
        for key in sdict:
            if key not in self._key_list:
                self._key_list.append(key)
            IterableUserDict.__setitem__(self, key, sdict[key])

    def has_key(self, key):
        return key in self._key_list

    ## Empty the dict
    def clear(self):
        self._key_list = []
        IterableUserDict.clear(self)

    ## Return a copy of keys
    def keys(self):
        keys = []
        for key in self._key_list:
            keys.append(key)
        return keys

    ## Return a copy of values
    def values(self):
        values = []
        for key in self._key_list:
            values.append(self[key])
        return values

    ## Return a copy of (key, value) list
    def items(self):
        items = []
        for key in self._key_list:
            items.append((key, self[key]))
        return items

    ## Iteration support
    def iteritems(self):
        return iter(self.items())

    ## Keys interation support
    def iterkeys(self):
        return iter(self.keys())

    ## Values interation support
    def itervalues(self):
        return iter(self.values())

    ## Return value related to a key, and remove the (key, value) from the dict
    def pop(self, key, *dv):
        value = None
        if key in self._key_list:
            value = self[key]
            self.__delitem__(key)
        elif len(dv) != 0:
            # BUGFIX: this line previously read "value = kv[0]" — "kv" is
            # an undefined name, so pop() raised NameError whenever a
            # default was supplied for a missing key. Return the
            # caller-supplied default instead.
            value = dv[0]
        return value

    ## Return (key, value) pair, and remove the (key, value) from the dict
    def popitem(self):
        key = self._key_list[-1]
        value = self[key]
        self.__delitem__(key)
        return key, value

    def update(self, dict=None, **kwargs):
        if dict is not None:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            for k, v in kwargs.items():
                self[k] = v
## Dictionary with restricted keys
#
class rdict(dict):
    """Dictionary with a key set fixed at construction time.

    Writing an unknown key, or any del/pop/popitem, is reported as a
    build error via EdkLogger; reading an unknown key yields "".
    """
    ## Constructor
    def __init__(self, KeyList):
        # Every allowed key starts out mapped to the empty string.
        for Key in KeyList:
            dict.__setitem__(self, Key, "")

    ## []= operator
    def __setitem__(self, key, value):
        if key not in self:
            EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
                            ExtraData=", ".join(dict.keys(self)))
        dict.__setitem__(self, key, value)

    ## =[] operator
    def __getitem__(self, key):
        return dict.__getitem__(self, key) if key in self else ""

    ## del operator
    def __delitem__(self, key):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")

    ## Empty the dict
    def clear(self):
        # Keys are never removed; their values are reset to "".
        for Key in self:
            self.__setitem__(Key, "")

    ## Return value related to a key, and remove the (key, value) from the dict
    def pop(self, key, *dv):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")

    ## Return (key, value) pair, and remove the (key, value) from the dict
    def popitem(self):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
    def __init__(self, _Single_=False, _Level_=2):
        """A prioritized-key dictionary with _Level_ nested key levels.

        @param _Single_  True: lookups return one value; False: a list of
                         all matching values
        @param _Level_   Number of key levels in this (nested) dict
        """
        self._Level_ = _Level_
        self.data = {}    # first-level key -> value (level 1) or nested tdict
        self._Single_ = _Single_
    # =[] operator
    def __getitem__(self, key):
        """Look up by a scalar key or a list/tuple of per-level keys.

        Missing trailing levels and wildcard spellings (COMMON, DEFAULT,
        ALL, *, PLATFORM, None) are normalized to the wildcard key before
        dispatching to the single- or all-values lookup.
        """
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            elif self._Level_ > 1:
                # Pad missing levels with the wildcard key.
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
            FirstKey = self._Wildcard
        if self._Single_:
            return self._GetSingleValue(FirstKey, RestKeys)
        else:
            return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
def IsFieldValueAnArray (Value):
    """Return True if *Value* is written in one of the array-valued PCD forms.

    Array forms: GUID(...), a wide/narrow string or character literal with
    more than one character of content, or a {...} byte-array initializer.
    """
    Value = Value.strip()
    if not Value:
        # Bug fix: an empty/whitespace value previously crashed on Value[0].
        return False
    if Value.startswith(TAB_GUID) and Value.endswith(')'):
        return True
    if Value.startswith('L"') and Value.endswith('"') and len(Value[2:-1]) > 1:
        return True
    if Value[0] == '"' and Value[-1] == '"' and len(Value[1:-1]) > 1:
        return True
    if Value[0] == '{' and Value[-1] == '}':
        return True
    if Value.startswith("L'") and Value.endswith("'") and len(Value[2:-1]) > 1:
        return True
    if Value[0] == "'" and Value[-1] == "'" and len(Value[1:-1]) > 1:
        return True
    return False
def AnalyzePcdExpression(Setting):
    """Split a PCD setting string on '|' while respecting quotes and parens.

    '|' characters inside single/double-quoted strings or parenthesized
    sub-expressions are not treated as field separators.  Returns the list
    of stripped fields taken from the original string.
    """
    Setting = Setting.strip()
    # There might be escaped quote in a string: \", \\\" , \', \\\'
    Data = Setting
    # There might be '|' in string and in ( ... | ... ), replace it with '-'
    NewStr = ''
    InSingleQuoteStr = False
    InDoubleQuoteStr = False
    Pair = 0
    for Index, ch in enumerate(Data):
        if ch == '"' and not InSingleQuoteStr:
            # Bug fix: at Index 0 the original consulted Data[-1] (the LAST
            # character) and could mis-classify a leading quote as escaped.
            if Index == 0 or Data[Index - 1] != '\\':
                InDoubleQuoteStr = not InDoubleQuoteStr
        elif ch == "'" and not InDoubleQuoteStr:
            if Index == 0 or Data[Index - 1] != '\\':
                InSingleQuoteStr = not InSingleQuoteStr
        elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
            Pair += 1
        elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
            Pair -= 1
        # Mask protected separators so the split below ignores them.
        if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
            NewStr += '-'
        else:
            NewStr += ch
    # Split on the masked string but return slices of the ORIGINAL string,
    # so protected '|' characters are preserved in the output fields.
    FieldList = []
    StartPos = 0
    while True:
        Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
        if Pos < 0:
            FieldList.append(Setting[StartPos:].strip())
            break
        FieldList.append(Setting[StartPos:Pos].strip())
        StartPos = Pos + 1
    return FieldList
def ParseDevPathValue (Value):
    """Run the external 'DevicePath' tool on *Value*; return ('{b0,b1,...}', size).

    Backslashes are normalized to forward slashes (and spaces removed) before
    invoking the tool.  Raises BadExpression if the tool cannot be launched
    or reports an error on stderr.
    """
    if '\\' in Value:
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so the normalization never took effect.
        Value = Value.replace('\\', '/').replace(' ', '')
    Cmd = 'DevicePath ' + '"' + Value + '"'
    p = None
    try:
        p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out, err = p.communicate()
    except Exception as X:
        raise BadExpression("DevicePath: %s" % (str(X)) )
    finally:
        # Guard: p stays None when Popen itself fails.  (The original also
        # called the private subprocess._cleanup(), which is unnecessary
        # after communicate() and absent from modern Pythons.)
        if p is not None:
            p.stdout.close()
            p.stderr.close()
    if err:
        raise BadExpression("DevicePath: %s" % str(err))
    Size = len(out.split())
    out = ','.join(out.split())
    return '{' + out + '}', Size
def ParseFieldValue (Value):
    """Parse a PCD field-value string and return (value, size-in-bytes).

    Handles UINTxx(...) casts, GUID(...), quoted ASCII/Unicode strings and
    character constants, {..} byte arrays, DEVICE_PATH(...), hex/decimal
    integers and TRUE/FALSE.  Unrecognized input is returned unchanged with
    size 1.  Raises BadExpression on malformed input.
    """
    if isinstance(Value, type(0)):
        # Already an integer: size is the minimal byte count holding it.
        # NOTE(review): '/' is integer division only on Python 2; under
        # Python 3 this yields a float — confirm the target interpreter.
        return Value, (Value.bit_length() + 7) / 8
    if not isinstance(Value, type('')):
        raise BadExpression('Type %s is %s' %(Value, type(Value)))
    Value = Value.strip()
    # UINTxx(...) casts: parse the inner value, then enforce the width.
    if Value.startswith(TAB_UINT8) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 1:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 1
    if Value.startswith(TAB_UINT16) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 2:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 2
    if Value.startswith(TAB_UINT32) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 4:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 4
    if Value.startswith(TAB_UINT64) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 8:
            raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
        return Value, 8
    if Value.startswith(TAB_GUID) and Value.endswith(')'):
        # GUID(...): accept a {..} C structure or a registry-format string,
        # then re-parse the little-endian byte form as a character constant.
        Value = Value.split('(', 1)[1][:-1].strip()
        if Value[0] == '{' and Value[-1] == '}':
            TmpValue = GuidStructureStringToGuidString(Value)
            if not TmpValue:
                raise BadExpression("Invalid GUID value string %s" % Value)
            Value = TmpValue
        if Value[0] == '"' and Value[-1] == '"':
            Value = Value[1:-1]
        try:
            # NOTE(review): uuid.UUID(...).get_bytes_le() exists on Python 2
            # only (Python 3 uses .bytes_le) — confirm the interpreter.
            Value = "'" + uuid.UUID(Value).get_bytes_le() + "'"
        except ValueError as Message:
            raise BadExpression(Message)
        Value, Size = ParseFieldValue(Value)
        return Value, 16
    if Value.startswith('L"') and Value.endswith('"'):
        # Unicode String
        # translate escape character
        Value = Value[1:]
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        # Fold characters into one integer, last character most significant;
        # size includes the terminating NUL (2 bytes per char).
        List = list(Value)
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, (len(List) + 1) * 2
    if Value.startswith('"') and Value.endswith('"'):
        # ASCII String
        # translate escape character
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        # Size includes the terminating NUL (1 byte per char).
        List = list(Value)
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List) + 1
    if Value.startswith("L'") and Value.endswith("'"):
        # Unicode Character Constant
        # translate escape character
        Value = Value[1:]
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        if len(List) == 0:
            raise BadExpression('Length %s is %s' % (Value, len(List)))
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        # Character constants have no terminating NUL.
        return Value, len(List) * 2
    if Value.startswith("'") and Value.endswith("'"):
        # Character constant
        # translate escape character
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        if len(List) == 0:
            raise BadExpression('Length %s is %s' % (Value, len(List)))
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List)
    if Value.startswith('{') and Value.endswith('}'):
        # Byte array
        Value = Value[1:-1]
        # Elements are listed low-address first; accumulate little-endian.
        List = [Item.strip() for Item in Value.split(',')]
        List.reverse()
        Value = 0
        RetSize = 0
        for Item in List:
            ItemValue, Size = ParseFieldValue(Item)
            RetSize += Size
            for I in range(Size):
                Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
        return Value, RetSize
    if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
        # Delegate to the external DevicePath tool.
        Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
        Value = Value.strip().strip('"')
        return ParseDevPathValue(Value)
    if Value.lower().startswith('0x'):
        try:
            Value = int(Value, 16)
        except:
            raise BadExpression("invalid hex value: %s" % Value)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) / 8
    if Value[0].isdigit():
        Value = int(Value, 10)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) / 8
    if Value.lower() == 'true':
        return 1, 1
    if Value.lower() == 'false':
        return 0, 1
    return Value, 1
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
# there might have "|" operator, also in string value.
#
# @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default VPD HII
# @param DataType: The datum type of PCD: VOID*, UNIT, BOOL
# @retval:
# ValueList: A List contain fields described above
# IsValid: True if conforming EBNF, otherwise False
# Index: The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
    """Split a DSC PCD setting into its fields per the PCD type's EBNF.

    Returns (ValueList, IsValid, Index) where Index is the position of the
    PCD value within ValueList.  See the comment block above for the field
    layouts accepted for each PCD type.
    """
    FieldList = AnalyzePcdExpression(Setting)
    IsValid = True
    if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
        # Layout: Value[|VOID*[|MaxSize]]
        Value = FieldList[0]
        Size = ''
        if len(FieldList) > 1 and FieldList[1]:
            DataType = FieldList[1]
            if FieldList[1] != TAB_VOID:
                # Only VOID* may carry an explicit datum-type field here.
                IsValid = False
        if len(FieldList) > 2:
            Size = FieldList[2]
        if IsValid:
            if DataType == "":
                IsValid = (len(FieldList) <= 1)
            else:
                IsValid = (len(FieldList) <= 3)
        if Size:
            # MaxSize must parse as a (hex or decimal) integer.
            try:
                int(Size, 16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        return [str(Value), DataType, str(Size)], IsValid, 0
    elif PcdType == MODEL_PCD_FEATURE_FLAG:
        # Layout: Value (single field only)
        Value = FieldList[0]
        Size = ''
        IsValid = (len(FieldList) <= 1)
        return [Value, DataType, str(Size)], IsValid, 0
    elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
        # Layout: VpdOffset[|VpdValue] or, for VOID*, VpdOffset[|MaxSize[|VpdValue]]
        VpdOffset = FieldList[0]
        Value = Size = ''
        if not DataType == TAB_VOID:
            if len(FieldList) > 1:
                Value = FieldList[1]
        else:
            if len(FieldList) > 1:
                Size = FieldList[1]
            if len(FieldList) > 2:
                Value = FieldList[2]
        if DataType == "":
            IsValid = (len(FieldList) <= 1)
        else:
            IsValid = (len(FieldList) <= 3)
        if Size:
            try:
                int(Size, 16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        return [VpdOffset, str(Size), Value], IsValid, 2
    elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
        # Layout: HiiString|VariableGuid|VariableOffset[|HiiValue[|Attribute]]
        IsValid = (3 <= len(FieldList) <= 5)
        HiiString = FieldList[0]
        Guid = Offset = Value = Attribute = ''
        if len(FieldList) > 1:
            Guid = FieldList[1]
        if len(FieldList) > 2:
            Offset = FieldList[2]
        if len(FieldList) > 3:
            Value = FieldList[3]
            if not Value:
                IsValid = False
        if len(FieldList) > 4:
            Attribute = FieldList[4]
        return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
    # Unknown PCD type.
    return [], False, 0
## AnalyzePcdData
#
# Analyze the pcd Value, Datum type and TokenNumber.
# Used to avoid split issue while the value string contain "|" character
#
# @param[in] Setting: A String contain value/datum type/token number information;
#
# @retval ValueList: A List contain value, datum type and toke number.
#
def AnalyzePcdData(Setting):
    """Split *Setting* into [value, datum type, token number].

    A quoted value may itself contain '|'; such a value is extracted first
    so the subsequent split cannot break it apart.
    """
    Result = ['', '', '']
    QuotedValuePattern = re.compile(r'^\s*L?\".*\|.*\"')
    QuotedMatches = QuotedValuePattern.findall(Setting)
    HasQuotedValue = len(QuotedMatches) >= 1
    if HasQuotedValue:
        # Strip the quoted value out before splitting on '|'.
        Setting = re.sub(QuotedValuePattern, '', Setting)
    Fields = Setting.split(TAB_VALUE_SPLIT)
    Result[0:len(Fields)] = Fields
    if HasQuotedValue:
        # Restore the protected quoted value as the first field.
        Result[0] = QuotedMatches[0]
    return Result
## check format of PCD value against its the datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
    """Validate string *Value* against PCD datum type *Type*.

    Returns (True, '') on success or (False, reason) on failure; unknown
    types are assumed to be structure PCDs and accepted.
    """
    if Type == TAB_VOID:
        ValueRe = re.compile(r'\s*L?\".*\"\s*$')
        # Bug fix: the original mixed 'or' with 'and' without parentheses, so
        # a value beginning with L' or ' was accepted even without a closing
        # quote.  Each literal form now requires its matching terminator.
        if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
                or (Value.startswith('{') and Value.endswith('}'))
                or ((Value.startswith("L'") or Value.startswith("'")) and Value.endswith("'"))):
            return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
                          ", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
        elif ValueRe.match(Value):
            # Check the chars in UnicodeString or CString is printable
            if Value.startswith("L"):
                Value = Value[2:-1]
            else:
                Value = Value[1:-1]
            Printset = set(string.printable)
            Printset.remove(TAB_PRINTCHAR_VT)
            Printset.add(TAB_PRINTCHAR_BS)
            Printset.add(TAB_PRINTCHAR_NUL)
            if not set(Value).issubset(Printset):
                PrintList = sorted(Printset)
                return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
    elif Type == 'BOOLEAN':
        if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
            return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
                          ", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
    elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
        try:
            # NOTE(review): 'long' is Python-2-only — confirm the interpreter.
            Value = long(Value, 0)
        except:
            return False, "Invalid value [%s] of type [%s];"\
                          " must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
    else:
        return True, "StructurePcd"
    return True, ""
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there's problem
# in non-windows platform to launch command
#
def SplitOption(OptionString):
    """Split a command-line option string into a list of option chunks.

    A new chunk starts at a '/' or '-' that follows whitespace; text inside
    single or double quotes is never split.
    """
    Chunks = []
    PrevChar = " "
    ChunkStart = 0
    ActiveQuote = ""
    for Pos, Char in enumerate(OptionString):
        if Char in ('"', "'"):
            # Track quote nesting; quote characters never update PrevChar.
            if ActiveQuote == Char:
                ActiveQuote = ""
            elif not ActiveQuote:
                ActiveQuote = Char
            continue
        if ActiveQuote:
            # Inside a quoted region: ignore option markers entirely.
            continue
        if Char in ("/", "-") and PrevChar in (" ", "\t", "\r", "\n"):
            if Pos > ChunkStart:
                # Emit the previous chunk, dropping the single separator
                # character immediately before this option marker.
                Chunks.append(OptionString[ChunkStart:Pos - 1])
            ChunkStart = Pos
        PrevChar = Char
    Chunks.append(OptionString[ChunkStart:])
    return Chunks
def CommonPath(PathList):
    """Return the longest common leading path shared by all paths in *PathList*.

    Comparing only the lexicographic min and max is sufficient: any path
    component shared by those two is shared by every path in between.
    Whole components are compared, never partial names.
    """
    P1 = min(PathList).split(os.path.sep)
    P2 = max(PathList).split(os.path.sep)
    # Bug fix for portability: 'xrange' exists only on Python 2; range()
    # behaves identically here on both interpreter lines.
    for Index in range(min(len(P1), len(P2))):
        if P1[Index] != P2[Index]:
            return os.path.sep.join(P1[:Index])
    return os.path.sep.join(P1)
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
    """Convert a string/array literal into a C-style '{n,n,...}' byte array.

    Accepts '{...}' numeric initializers, "..." ASCII strings and L"..."
    Unicode strings; returns None when the input matches none of these.
    """
    Value = Value.strip()
    if not Value:
        return None
    if Value[0] == '{':
        # Numeric initializer list: normalize each element to decimal.
        if not Value.endswith('}'):
            return None
        Inner = Value.replace(' ', '').replace('{', '').replace('}', '')
        Fields = Inner.split(',')
        try:
            Fields = [str(int(Field, 0)) for Field in Fields]
        except ValueError:
            return None
        return '{' + ','.join(Fields) + '}'
    IsUnicode = False
    if Value.startswith('L"'):
        if not Value.endswith('"'):
            return None
        Value = Value[1:]
        IsUnicode = True
    elif not Value.startswith('"') or not Value.endswith('"'):
        return None
    Value = eval(Value)  # translate escape character
    # Emit one element per character (16-bit for Unicode, 8-bit for ASCII),
    # followed by a terminating 0.
    Width = 0x10000 if IsUnicode else 0x100
    Elements = [str(ord(Char) % Width) for Char in Value]
    return '{' + ','.join(Elements + ['0']) + '}'
class PathClass(object):
    """A file path decomposed against a workspace root.

    Splits *File* relative to *Root*/*AlterRoot* into Path, Root, File,
    SubDir, Name, BaseName, Ext and Dir parts, and carries build-related
    attributes (Arch, Target, TagName, ToolCode, ToolChainFamily).
    """
    def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
                 Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
        self.Arch = Arch
        self.File = str(File)
        # Absolute files ignore any supplied roots.
        if os.path.isabs(self.File):
            self.Root = ''
            self.AlterRoot = ''
        else:
            self.Root = str(Root)
            self.AlterRoot = str(AlterRoot)
        # Remove any '.' and '..' in path
        if self.Root:
            self.Root = mws.getWs(self.Root, self.File)
            self.Path = os.path.normpath(os.path.join(self.Root, self.File))
            self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
            # eliminate the side-effect of 'C:'
            if self.Root[-1] == ':':
                self.Root += os.path.sep
            # file path should not start with path separator
            if self.Root[-1] == os.path.sep:
                self.File = self.Path[len(self.Root):]
            else:
                self.File = self.Path[len(self.Root) + 1:]
        else:
            self.Path = os.path.normpath(self.File)
        self.SubDir, self.Name = os.path.split(self.File)
        self.BaseName, self.Ext = os.path.splitext(self.Name)
        if self.Root:
            if self.SubDir:
                self.Dir = os.path.join(self.Root, self.SubDir)
            else:
                self.Dir = self.Root
        else:
            self.Dir = self.SubDir
        # For binary files the caller supplies the type; otherwise it is
        # derived from the (lower-cased) file extension.
        if IsBinary:
            self.Type = Type
        else:
            self.Type = self.Ext.lower()
        self.IsBinary = IsBinary
        self.Target = Target
        self.TagName = TagName
        self.ToolCode = ToolCode
        self.ToolChainFamily = ToolChainFamily
        # Lazily-computed cache for the Key property.
        self._Key = None
    ## Convert the object of this class to a string
    #
    #  Convert member Path of the class to a string
    #
    #  @retval string Formatted String
    #
    def __str__(self):
        return self.Path
    ## Override __eq__ function
    #
    # Check whether PathClass are the same
    #
    # @retval False The two PathClass are different
    # @retval True  The two PathClass are the same
    #
    def __eq__(self, Other):
        if isinstance(Other, type(self)):
            return self.Path == Other.Path
        else:
            return self.Path == str(Other)
    ## Override __cmp__ function
    #
    # Customize the comparsion operation of two PathClass
    #
    # @retval 0     The two PathClass are different
    # @retval -1    The first PathClass is less than the second PathClass
    # @retval 1     The first PathClass is Bigger than the second PathClass
    def __cmp__(self, Other):
        if isinstance(Other, type(self)):
            OtherKey = Other.Path
        else:
            OtherKey = str(Other)
        SelfKey = self.Path
        if SelfKey == OtherKey:
            return 0
        elif SelfKey > OtherKey:
            return 1
        else:
            return -1
    ## Override __hash__ function
    #
    # Use Path as key in hash table
    #
    # @retval string Key for hash table
    #
    def __hash__(self):
        return hash(self.Path)
    def _GetFileKey(self):
        # Case-insensitive cache key based on the full path.
        if self._Key is None:
            self._Key = self.Path.upper()   # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
        return self._Key
    def _GetTimeStamp(self):
        # st_mtime of the file (index 8 of the os.stat result).
        return os.stat(self.Path)[8]
    def Validate(self, Type='', CaseSensitive=True):
        """Check the file exists, matches *Type*, and (optionally) matches case.

        On success re-normalizes File/Root/Path to the on-disk spelling and
        returns (0, '').  On failure returns (error-code, message).
        """
        if GlobalData.gCaseInsensitive:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            # File not found under any root; report the most likely location.
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            if len (mws.getPkgPath()) == 0:
                return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
            else:
                return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            # On-disk spelling differs: flag a case mismatch when requested,
            # then adopt the real spelling for all derived members.
            if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
                ErrorCode = FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo
    Key = property(_GetFileKey)
    TimeStamp = property(_GetTimeStamp)
## Parse PE image to get the required PE information.
#
class PeImageClass():
    """Minimal PE/COFF parser: entry point, section alignment, image size
    and section headers.  On any parse failure IsValid stays False and
    ErrorInfo describes the problem."""
    ## Constructor
    #
    # @param  File FilePath of PeImage
    #
    def __init__(self, PeFile):
        self.FileName   = PeFile
        self.IsValid    = False
        self.Size       = 0
        self.EntryPoint = 0
        self.SectionAlignment  = 0
        self.SectionHeaderList = []
        self.ErrorInfo = ''
        try:
            PeObject = open(PeFile, 'rb')
        except:
            self.ErrorInfo = self.FileName + ' can not be found\n'
            return
        # Read DOS header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x3E)
        ByteList = ByteArray.tolist()
        # DOS signature should be 'MZ'
        if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
            self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
            return
        # Read 4 byte PE Signature (offset taken from e_lfanew at 0x3C)
        PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
        PeObject.seek(PeOffset)
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 4)
        # PE signature should be 'PE\0\0'
        if ByteArray.tostring() != 'PE\0\0':
            self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
            return
        # Read PE file header (20 bytes, immediately after the signature)
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x14)
        ByteList = ByteArray.tolist()
        SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
        if SecNumber == 0:
            self.ErrorInfo = self.FileName + ' has no section header'
            return
        # Read PE optional header (size taken from the file header)
        OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, OptionalHeaderSize)
        ByteList = ByteArray.tolist()
        self.EntryPoint       = self._ByteListToInt(ByteList[0x10:0x14])
        self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
        self.Size             = self._ByteListToInt(ByteList[0x38:0x3C])
        # Read each Section Header (0x28 bytes apiece)
        for Index in range(SecNumber):
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x28)
            ByteList = ByteArray.tolist()
            SecName  = self._ByteListToStr(ByteList[0:8])
            SecVirtualSize = self._ByteListToInt(ByteList[8:12])
            SecRawAddress  = self._ByteListToInt(ByteList[20:24])
            SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
            self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
        self.IsValid = True
        PeObject.close()
    def _ByteListToStr(self, ByteList):
        # Decode a NUL-terminated byte list into an ASCII string.
        String = ''
        for index in range(len(ByteList)):
            if ByteList[index] == 0:
                break
            String += chr(ByteList[index])
        return String
    def _ByteListToInt(self, ByteList):
        # Interpret a byte list as a little-endian unsigned integer.
        Value = 0
        for index in range(len(ByteList) - 1, -1, -1):
            Value = (Value << 8) | int(ByteList[index])
        return Value
class DefaultStore():
    """Helper around the [DefaultStores] section data.

    DefaultStores maps store id (string) -> store name (string), as
    established by the lookups in DefaultStoreID/GetDefaultDefault.
    """
    def __init__(self, DefaultStores ):
        self.DefaultStores = DefaultStores
    def DefaultStoreID(self, DefaultStoreName):
        """Return the id whose name equals *DefaultStoreName*, or None."""
        for key, value in self.DefaultStores.items():
            if value == DefaultStoreName:
                return key
        return None
    def GetDefaultDefault(self):
        """Return (id, name) of the default store: '0' if present, else the
        numerically lowest id."""
        if not self.DefaultStores or "0" in self.DefaultStores:
            return "0", TAB_DEFAULT_STORES_DEFAULT
        else:
            minvalue = min(int(value_str) for value_str in self.DefaultStores)
            return (str(minvalue), self.DefaultStores[str(minvalue)])
    def GetMin(self, DefaultSIdList):
        """Return the name of the lowest-id store whose name is in *DefaultSIdList*."""
        if not DefaultSIdList:
            return TAB_DEFAULT_STORES_DEFAULT
        # Bug fix: DefaultStores maps id -> name, so (id, name) pairs come
        # from items(); the original unpacked values() (the name strings),
        # which cannot yield (storeid, storename) pairs.
        storeidset = {storeid for storeid, storename in self.DefaultStores.items() if storename in DefaultSIdList}
        if not storeidset:
            return ""
        # NOTE(review): min() on string ids is lexicographic; ids appear to
        # be numeric strings (see GetDefaultDefault) — confirm multi-digit ids.
        minid = min(storeidset)
        for sid, name in self.DefaultStores.items():
            if sid == minid:
                return name
class SkuClass():
    """Holds the set of SKUs selected for a build and their inheritance.

    SkuIds maps SKU name -> (id string, name, parent-name?); SkuIdentifier
    is the build's -x/--sku selection ('', 'ALL', or 'A|B|...').
    """
    # Usage-type classification returned by SkuUsageType.
    DEFAULT = 0
    SINGLE = 1
    MULTIPLE =2
    def __init__(self,SkuIdentifier='', SkuIds=None):
        if SkuIds is None:
            SkuIds = {}
        # Reject SKU ids that do not fit in a UINT64.
        for SkuName in SkuIds:
            SkuId = SkuIds[SkuName][0]
            skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
            if skuid_num > 0xFFFFFFFFFFFFFFFF:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
                                      % (SkuName, SkuId))
        self.AvailableSkuIds = sdict()
        self.SkuIdSet = []
        self.SkuIdNumberSet = []
        self.SkuData = SkuIds
        self.__SkuInherit = {}
        self.__SkuIdentifier = SkuIdentifier
        # Resolve the identifier into the concrete SKU name/number sets.
        if SkuIdentifier == '' or SkuIdentifier is None:
            self.SkuIdSet = ['DEFAULT']
            self.SkuIdNumberSet = ['0U']
        elif SkuIdentifier == 'ALL':
            self.SkuIdSet = SkuIds.keys()
            self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
        else:
            r = SkuIdentifier.split('|')
            self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
            k = None
            try:
                self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
            except Exception:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (k, " | ".join(SkuIds.keys())))
        for each in self.SkuIdSet:
            if each in SkuIds:
                self.AvailableSkuIds[each] = SkuIds[each][0]
            else:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (each, " | ".join(SkuIds.keys())))
        if self.SkuUsageType != self.SINGLE:
            self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
        # Publish the selection globally, pruning pseudo-SKUs.
        if self.SkuIdSet:
            GlobalData.gSkuids = (self.SkuIdSet)
            if 'COMMON' in GlobalData.gSkuids:
                GlobalData.gSkuids.remove('COMMON')
            if self.SkuUsageType == self.SINGLE:
                if len(GlobalData.gSkuids) != 1:
                    if 'DEFAULT' in GlobalData.gSkuids:
                        GlobalData.gSkuids.remove('DEFAULT')
            if GlobalData.gSkuids:
                GlobalData.gSkuids.sort()
    def GetNextSkuId(self, skuname):
        """Return the SKU that *skuname* inherits from ('DEFAULT' when unknown)."""
        if not self.__SkuInherit:
            self.__SkuInherit = {}
            for item in self.SkuData.values():
                self.__SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
        return self.__SkuInherit.get(skuname, "DEFAULT")
    def GetSkuChain(self, sku):
        """Return the inheritance chain for *sku*, base ('DEFAULT') first."""
        if sku == "DEFAULT":
            return ["DEFAULT"]
        skulist = [sku]
        nextsku = sku
        while True:
            nextsku = self.GetNextSkuId(nextsku)
            skulist.append(nextsku)
            if nextsku == "DEFAULT":
                break
        skulist.reverse()
        return skulist
    def SkuOverrideOrder(self):
        """Interleave all SKU chains into one override order without duplicates."""
        skuorderset = []
        for skuname in self.SkuIdSet:
            skuorderset.append(self.GetSkuChain(skuname))
        skuorder = []
        for index in range(max(len(item) for item in skuorderset)):
            for subset in skuorderset:
                if index > len(subset)-1:
                    continue
                if subset[index] in skuorder:
                    continue
                skuorder.append(subset[index])
        return skuorder
    def __SkuUsageType(self):
        # Classify the selection as DEFAULT / SINGLE / MULTIPLE.
        if self.__SkuIdentifier.upper() == "ALL":
            return SkuClass.MULTIPLE
        if len(self.SkuIdSet) == 1:
            if self.SkuIdSet[0] == 'DEFAULT':
                return SkuClass.DEFAULT
            else:
                return SkuClass.SINGLE
        elif len(self.SkuIdSet) == 2:
            if 'DEFAULT' in self.SkuIdSet:
                return SkuClass.SINGLE
            else:
                return SkuClass.MULTIPLE
        else:
            return SkuClass.MULTIPLE
    def DumpSkuIdArrary(self):
        """Render the SKU ids (following each inheritance chain) as a C array string."""
        ArrayStrList = []
        if self.SkuUsageType == SkuClass.SINGLE:
            ArrayStr = "{0x0}"
        else:
            for skuname in self.AvailableSkuIds:
                if skuname == "COMMON":
                    continue
                while skuname != "DEFAULT":
                    ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
                    skuname = self.GetNextSkuId(skuname)
                ArrayStrList.append("0x0")
            ArrayStr = "{" + ",".join(ArrayStrList) +  "}"
        return ArrayStr
    def __GetAvailableSkuIds(self):
        return self.AvailableSkuIds
    def __GetSystemSkuID(self):
        # The single "real" SKU when usage is SINGLE; otherwise 'DEFAULT'.
        if self.__SkuUsageType() == SkuClass.SINGLE:
            if len(self.SkuIdSet) == 1:
                return self.SkuIdSet[0]
            else:
                return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
        else:
            return 'DEFAULT'
    def __GetAvailableSkuIdNumber(self):
        return self.SkuIdNumberSet
    SystemSkuId = property(__GetSystemSkuID)
    AvailableSkuIdSet = property(__GetAvailableSkuIds)
    SkuUsageType = property(__SkuUsageType)
    AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
    """Pack a registry-format GUID string ('xxxxxxxx-xxxx-...') into binary."""
    GuidParts = Guid.split('-')
    return PackGUID(GuidParts)
## Get the integer value from string like "14U" or integer like 2
#
# @param Input   The object that may be either a integer value or a string
#
# @retval Value    The integer value that the input represents
#
def GetIntegerValue(Input):
    """Convert *Input* (a number, or a C-style integer-literal string) to int.

    Strings may carry C suffixes (U, LL, ULL) and be decimal or 0x/0X hex;
    an empty string converts to 0.  Non-string inputs pass through unchanged.
    """
    # Bug fix / generalization: the original tested 'type(Input) in
    # (int, long)', which depends on the Python-2-only name 'long'.
    # Any non-string (int included) is returned as-is.
    if not isinstance(Input, str):
        return Input
    String = Input
    # Strip C integer-literal suffixes before conversion.
    if String.endswith("U"):
        String = String[:-1]
    if String.endswith("ULL"):
        String = String[:-3]
    if String.endswith("LL"):
        String = String[:-2]
    if String.startswith("0x") or String.startswith("0X"):
        return int(String, 16)
    elif String == '':
        return 0
    else:
        return int(String)
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
    """Pack the five dash-separated fields of a registry-format GUID into binary.

    The first three fields are whole hex integers; the 2-byte fourth field
    and 6-byte fifth field are packed byte-by-byte, most significant first.
    """
    ByteValues = [
        int(Guid[3][-4:-2], 16),
        int(Guid[3][-2:], 16),
        int(Guid[4][-12:-10], 16),
        int(Guid[4][-10:-8], 16),
        int(Guid[4][-8:-6], 16),
        int(Guid[4][-6:-4], 16),
        int(Guid[4][-4:-2], 16),
        int(Guid[4][-2:], 16),
    ]
    return pack(PACK_PATTERN_GUID,
                int(Guid[0], 16), int(Guid[1], 16), int(Guid[2], 16),
                *ByteValues)
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
    """Pack the first 11 elements of a byte-wise GUID sequence into binary."""
    # PACK_PATTERN_GUID consumes exactly eleven values (3 ints + 8 bytes).
    return pack(PACK_PATTERN_GUID, *Guid[:11])
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    # Library module: nothing to do when executed directly.
    pass
|
cs.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : cs.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/22/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import collections
import contextlib
import queue
import threading
import zmq
from jacinle.concurrency.packing import dumpb, loadb
from jacinle.concurrency.zmq_utils import get_addr, bind_to_random_ipc, graceful_close
from jacinle.logging import get_logger
from jacinle.utils.meta import notnone_property
from jacinle.utils.registry import CallbackRegistry
# Module-level logger named after this file.
logger = get_logger(__file__)
# Public API of this module.
__all__ = ['ServerPipe', 'ClientPipe', 'make_cs_pair']
# Envelope for a reply queued on the server's send side: the ZMQ identity of
# the destination client plus the (not-yet-serialized) payload to deliver.
_QueryMessage = collections.namedtuple('QueryMessage', ['identifier', 'payload'])
class ServerPipe(object):
    """Server side of a client/server message pipe over ZMQ.

    Receives queries on a PULL socket and routes replies to specific
    clients through a ROUTER socket.  Incoming messages are dispatched by
    type through a CallbackRegistry; outgoing replies are queued and sent
    from a dedicated thread.
    """
    def __init__(self, name, send_qsize=0, mode='tcp'):
        self._name = name
        self._conn_info = None
        self._context_lock = threading.Lock()
        self._context = zmq.Context()
        # ROUTER for addressed replies; PULL for incoming queries.
        self._tosock = self._context.socket(zmq.ROUTER)
        self._frsock = self._context.socket(zmq.PULL)
        # Bound high-water marks keep slow peers from buffering unboundedly.
        self._tosock.set_hwm(10)
        self._frsock.set_hwm(10)
        self._dispatcher = CallbackRegistry()
        # send_qsize == 0 means an unbounded send queue.
        self._send_queue = queue.Queue(maxsize=send_qsize)
        self._rcv_thread = None
        self._snd_thread = None
        self._mode = mode
        assert mode in ('ipc', 'tcp')
    @property
    def dispatcher(self):
        # Registry mapping message type -> handler(pipe, identifier, payload).
        return self._dispatcher
    @notnone_property
    def conn_info(self):
        # [client->server address, server->client address]; set by initialize().
        return self._conn_info
    def initialize(self, tcp_port=None):
        """Bind both sockets (fixed or random ports) and start the I/O threads.

        tcp_port, when given in 'tcp' mode, is a pair (recv_port, send_port).
        """
        self._conn_info = []
        if self._mode == 'tcp':
            if tcp_port is not None:
                port = tcp_port[0]
                self._frsock.bind('tcp://*:{}'.format(port))
            else:
                port = self._frsock.bind_to_random_port('tcp://*')
            self._conn_info.append('tcp://{}:{}'.format(get_addr(), port))
            if tcp_port is not None:
                port = tcp_port[1]
                self._tosock.bind('tcp://*:{}'.format(port))
            else:
                port = self._tosock.bind_to_random_port('tcp://*')
            self._conn_info.append('tcp://{}:{}'.format(get_addr(), port))
        elif self._mode == 'ipc':
            self._conn_info.append(bind_to_random_ipc(self._frsock, self._name + '-c2s-'))
            self._conn_info.append(bind_to_random_ipc(self._tosock, self._name + '-s2c-'))
        # Daemon threads: they must not keep the process alive on exit.
        self._rcv_thread = threading.Thread(target=self.mainloop_recv, daemon=True)
        self._rcv_thread.start()
        self._snd_thread = threading.Thread(target=self.mainloop_send, daemon=True)
        self._snd_thread.start()
    def finalize(self):
        """Close both sockets and terminate the ZMQ context."""
        graceful_close(self._tosock)
        graceful_close(self._frsock)
        self._context.term()
    @contextlib.contextmanager
    def activate(self, tcp_port=None):
        """Context manager: initialize on entry, always finalize on exit."""
        self.initialize(tcp_port=tcp_port)
        try:
            yield
        finally:
            self.finalize()
    def mainloop_recv(self):
        """Receive-loop thread body: deserialize and dispatch each query."""
        try:
            while True:
                if self._frsock.closed:
                    break
                msg = loadb(self._frsock.recv(copy=False).bytes)
                # Note: 'type' here shadows the builtin; it is the message type.
                identifier, type, payload = msg
                self._dispatcher.dispatch(type, self, identifier, payload)
        except zmq.ContextTerminated:
            # Normal shutdown path: finalize() terminated the context.
            pass
        except zmq.ZMQError as e:
            if self._tosock.closed:
                logger.warning('Recv socket closed unexpectedly.')
            else:
                raise e
    def mainloop_send(self):
        """Send-loop thread body: drain the queue and route replies to clients."""
        try:
            while True:
                if self._tosock.closed:
                    break
                job = self._send_queue.get()
                self._tosock.send_multipart([job.identifier, dumpb(job.payload)], copy=False)
        except zmq.ContextTerminated:
            pass
        except zmq.ZMQError as e:
            if self._tosock.closed:
                logger.warning('Send socket closed unexpectedly.')
            else:
                raise e
    def send(self, identifier, msg):
        """Queue *msg* for delivery to the client with ZMQ identity *identifier*."""
        self._send_queue.put(_QueryMessage(identifier, msg))
class ClientPipe(object):
    """Client side of the pipe: pushes queries to a ServerPipe, reads replies.

    Outgoing queries go through a PUSH socket; replies come back on a DEALER
    socket whose ZMQ identity is this client's name, so the server's ROUTER
    can address it directly.
    """
    def __init__(self, name, conn_info):
        self._name = name
        self._conn_info = conn_info
        self._context = None
        self._tosock = None
        self._frsock = None
    @property
    def identity(self):
        # ZMQ identities are byte strings.
        return self._name.encode('utf-8')
    def initialize(self):
        """Create the context and connect both sockets to the server."""
        self._context = zmq.Context()
        self._tosock = self._context.socket(zmq.PUSH)
        self._frsock = self._context.socket(zmq.DEALER)
        me = self.identity
        self._tosock.setsockopt(zmq.IDENTITY, me)
        self._frsock.setsockopt(zmq.IDENTITY, me)
        # Small high-water mark keeps outgoing queries from piling up.
        self._tosock.set_hwm(2)
        self._tosock.connect(self._conn_info[0])
        self._frsock.connect(self._conn_info[1])
    def finalize(self):
        """Close both sockets and terminate the context."""
        graceful_close(self._frsock)
        graceful_close(self._tosock)
        self._context.term()
    @contextlib.contextmanager
    def activate(self):
        """Context manager: initialize on entry, always finalize on exit."""
        self.initialize()
        try:
            yield
        finally:
            self.finalize()
    def query(self, type, inp=None, do_recv=True):
        """Send a (identity, type, payload) query; optionally block for the reply."""
        message = dumpb((self.identity, type, inp))
        self._tosock.send(message, copy=False)
        return self.recv() if do_recv else None
    def recv(self):
        """Block until a reply arrives and return the deserialized payload."""
        return loadb(self._frsock.recv(copy=False).bytes)
def make_cs_pair(name, nr_clients=None, mode='tcp', send_qsize=10):
    """Create an initialized server pipe plus connected client pipe(s).

    When ``nr_clients`` is None a single client is returned; otherwise a
    list of ``nr_clients`` clients is returned.
    """
    server = ServerPipe(name + '-rep', mode=mode, send_qsize=send_qsize)
    server.initialize()
    clients = [
        ClientPipe(name + '-req-' + str(index), server.conn_info)
        for index in range(nr_clients or 1)
    ]
    if nr_clients is None:
        return server, clients[0]
    return server, clients
|
watcher.py | import logging
import os.path
import threading
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
can_watch = True
except ImportError:
Observer = None
FileSystemEventHandler = object
PollingObserver = None
can_watch = False
from galaxy.util.hash_util import md5_hash_file
from galaxy.util.watcher import (
BaseWatcher,
get_observer_class,
NullWatcher,
)
log = logging.getLogger(__name__)
def get_tool_conf_watcher(reload_callback, tool_cache=None):
    """Build a polling watcher for tool config files."""
    watcher = ToolConfWatcher(reload_callback=reload_callback, tool_cache=tool_cache)
    return watcher
def get_tool_watcher(toolbox, config):
    """Return a ToolWatcher when tool watching is enabled, else a NullWatcher."""
    config_name = "watch_tools"
    observer_class = get_observer_class(
        config_name,
        getattr(config, config_name, None),
        default="False",
        monitor_what_str="tools",
    )
    if observer_class is None:
        return NullWatcher()
    return ToolWatcher(observer_class=observer_class, event_handler_class=ToolFileEventHandler, toolbox=toolbox)
class ToolFileEventHandler(FileSystemEventHandler):
    """watchdog event handler that reloads a tool when its file changes."""

    def __init__(self, tool_watcher):
        self.tool_watcher = tool_watcher

    def on_any_event(self, event):
        """Route every filesystem event through _handle."""
        self._handle(event)

    def _handle(self, event):
        # modified events will only have src path, move events will
        # have dest_path and src_path but we only care about dest. So
        # look at dest if it exists else use src.
        path = getattr(event, 'dest_path', None) or event.src_path
        path = os.path.abspath(path)
        tool_id = self.tool_watcher.tool_file_ids.get(path, None)
        if tool_id:
            try:
                self.tool_watcher.toolbox.reload_tool_by_id(tool_id)
            except Exception:
                # NOTE(review): reload failures are swallowed silently —
                # presumably to keep the observer thread alive; confirm.
                pass
        elif path.endswith(".xml"):
            # Unknown .xml file: ask the directory's registered callback
            # whether it belongs to a tool, and start tracking it if so.
            directory = os.path.dirname(path)
            dir_callback = self.tool_watcher.tool_dir_callbacks.get(directory, None)
            if dir_callback:
                tool_file = event.src_path
                tool_id = dir_callback(tool_file)
                if tool_id:
                    self.tool_watcher.tool_file_ids[tool_file] = tool_id
class ToolConfWatcher:
    """Poll watched tool config files for changes and fire a reload callback.

    Unlike the watchdog-based watchers, this uses a plain polling thread, so
    it works even when watchdog is not installed.
    """

    def __init__(self, reload_callback, tool_cache=None):
        # path -> last seen modification time (None if the file didn't exist yet)
        self.paths = {}
        self.cache = tool_cache
        self._active = False
        self._lock = threading.Lock()
        self.thread = None
        self.reload_callback = reload_callback

    def start(self):
        """Start the polling thread if it is not already running."""
        if not self._active:
            self._active = True
            if self.thread is None:
                self.exit = threading.Event()
                self.thread = threading.Thread(target=self.check)
                self.thread.daemon = True
                self.thread.start()

    def shutdown(self):
        """Signal the polling thread to stop and wait for it to exit."""
        if self._active:
            self._active = False
            if self.thread.is_alive():
                self.exit.set()
                self.thread.join()
            self.thread = None
            self.exit = None

    def check(self):
        """Check for changes in self.paths or self.cache and call the event handler."""
        hashes = {}
        if self.cache:
            self.cache.assert_hashes_initialized()
        while self._active and not self.exit.is_set():
            do_reload = False
            drop_on_next_loop = set()
            drop_now = set()
            with self._lock:
                paths = list(self.paths.keys())
            for path in paths:
                try:
                    if not os.path.exists(path):
                        continue
                    mod_time = self.paths[path]
                    if not hashes.get(path, None):
                        # Cache the digest; previously md5_hash_file() was
                        # called a second time to store the same value.
                        digest = md5_hash_file(path)
                        if digest:
                            hashes[path] = digest
                        else:
                            continue
                    new_mod_time = os.path.getmtime(path)
                    # mod_time can be None if a non-required config was just created
                    if not mod_time:
                        self.paths[path] = new_mod_time
                        log.debug("The file '%s' has been created.", path)
                        do_reload = True
                    elif new_mod_time > mod_time:
                        # mtime changed; only reload if the content hash did too.
                        new_hash = md5_hash_file(path)
                        if hashes[path] != new_hash:
                            self.paths[path] = new_mod_time
                            hashes[path] = new_hash
                            log.debug("The file '%s' has changes.", path)
                            do_reload = True
                except OSError:
                    # in rare cases `path` may be deleted between `os.path.exists` calls
                    # and reading the file from the filesystem. We do not want the watcher
                    # thread to die in these cases.
                    if path in drop_now:
                        log.warning("'%s' could not be read, removing from watched files", path)
                        # BUG FIX: delete from the watched dict itself; the old
                        # code did `del paths[path]` on the local *list* copy,
                        # which raises TypeError for a string index.
                        del self.paths[path]
                        if path in hashes:
                            del hashes[path]
                    else:
                        # First failure: retry once before dropping the path.
                        log.debug("'%s' could not be read", path)
                        drop_on_next_loop.add(path)
                    if self.cache:
                        self.cache.cleanup()
                        do_reload = True
            if not do_reload and self.cache:
                removed_ids = self.cache.cleanup()
                if removed_ids:
                    do_reload = True
            if do_reload:
                self.reload_callback()
            # Paths that failed this round get one more chance next round.
            drop_now = drop_on_next_loop
            drop_on_next_loop = set()
            self.exit.wait(1)

    def monitor(self, path):
        """Begin watching ``path``, recording its current mtime (None if absent)."""
        mod_time = None
        if os.path.exists(path):
            mod_time = os.path.getmtime(path)
        with self._lock:
            self.paths[path] = mod_time

    def watch_file(self, tool_conf_file):
        """Alias for monitor(), matching the shared watcher interface."""
        self.monitor(tool_conf_file)
class ToolWatcher(BaseWatcher):
    """watchdog-backed watcher mapping tool files/directories to reload hooks."""

    def __init__(self, observer_class, event_handler_class, toolbox):
        super().__init__(observer_class, event_handler_class)
        self.toolbox = toolbox
        self.tool_file_ids = {}
        self.tool_dir_callbacks = {}

    def watch_file(self, tool_file, tool_id):
        """Track a single tool file and ensure its directory is monitored."""
        path = os.path.abspath(tool_file)
        self.tool_file_ids[path] = tool_id
        directory = os.path.dirname(path)
        if directory not in self.monitored_dirs:
            self.monitor(directory)

    def watch_directory(self, tool_dir, callback):
        """Track a directory, invoking ``callback`` for new candidate files."""
        directory = os.path.abspath(tool_dir)
        self.tool_dir_callbacks[directory] = callback
        if directory not in self.monitored_dirs:
            self.monitor(directory)
|
upnp.py | import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
    """Opens/closes TCP port mappings on the LAN gateway via UPnP.

    All miniupnpc calls run on a single background worker thread; the public
    methods only enqueue commands, so they never block on network I/O.
    """
    # NOTE(review): both attributes are class-level, so every instance shares
    # one queue and thread slot — presumably the class is used as a
    # singleton; verify against callers.
    thread: Optional[threading.Thread] = None
    queue: Queue = Queue()

    def __init__(self):
        def run():
            try:
                self.upnp = miniupnpc.UPnP()
                self.upnp.discoverdelay = 30
                self.upnp.discover()
                self.upnp.selectigd()
                keep_going = True
                # Command loop: blocks on the queue until "shutdown".
                while keep_going:
                    msg = self.queue.get()
                    if msg[0] == "remap":
                        port = msg[1]
                        log.info(f"Attempting to enable UPnP (open up port {port})")
                        try:
                            # Drop any stale mapping first so addportmapping
                            # does not fail with a conflict.
                            self.upnp.deleteportmapping(port, "TCP")
                        except Exception as e:
                            log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
                        self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "chives", "")
                        log.info(
                            f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
                            f"external: {self.upnp.externalipaddress()}"
                        )
                    elif msg[0] == "release":
                        port = msg[1]
                        log.info(f"UPnP, releasing port {port}")
                        self.upnp.deleteportmapping(port, "TCP")
                        log.info(f"UPnP, Port {port} closed")
                    elif msg[0] == "shutdown":
                        keep_going = False
            except Exception as e:
                # UPnP is best-effort: log and let the thread exit quietly.
                log.info(
                    "UPnP failed. This is not required to run chives,"
                    " it allows incoming connections from other peers."
                )
                log.info(e)
        self.thread = threading.Thread(target=run)
        self.thread.start()

    def remap(self, port):
        """Ask the worker thread to (re)create a TCP mapping for ``port``."""
        self.queue.put(("remap", port))

    def release(self, port):
        """Ask the worker thread to delete the TCP mapping for ``port``."""
        self.queue.put(("release", port))

    def shutdown(self):
        """Stop the worker thread; safe to call more than once."""
        if not self.thread:
            return
        self.queue.put(("shutdown",))
        log.info("UPnP, shutting down thread")
        self.thread.join(5)
        self.thread = None

    # this is here just in case the UPnP object is destroyed non-gracefully,
    # e.g. via an exception before the main thread can call shutdown()
    def __del__(self):
        self.shutdown()
|
mmfunction.py | import itertools
import functools
import time
import sys
import threading
def benchmark(func):
    '''
    Decorator that measures and prints the wall-clock run time of the
    decorated function in milliseconds, then returns its result unchanged.
    '''
    @functools.wraps(func)
    def wrapper(*arg, **kw):
        # BUG FIX: time.clock() was removed in Python 3.8; perf_counter()
        # is the recommended high-resolution monotonic replacement.
        stime = time.perf_counter()
        ret = func(*arg, **kw)
        etime = time.perf_counter()
        print('{0}: {1:,f}ms'.format(func.__name__, (etime - stime) * 1000))
        return ret
    return wrapper
def loading_animation(process_name, animation_type='circle'):
    '''
    Decorator factory that shows a console spinner while the decorated
    function runs.

    Arguments:
        process_name: label printed in front of the animation.
        animation_type: 'circle' or 'dot'; unknown values fall back to 'circle'.
    '''
    def _loading_animation(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: a per-call Event replaces the shared nonlocal `done`
            # flag, which stayed True after the first call so the animation
            # never ran again for the same decorated function.
            done = threading.Event()
            # Thread.daemon assignment replaces the deprecated setDaemon().
            t = threading.Thread(target=animation, args=(process_name, done), daemon=True)
            t.start()
            try:
                return func(*args, **kwargs)
            finally:
                # Stop the spinner even if func raised, and wait for its
                # final "Done!" line instead of a fixed 0.7 s sleep.
                done.set()
                t.join()
        def animation(s, done):
            load_anime = {
                'circle': (['|', '/', '-', '\\'], 0.1),
                'dot': (['. ', '.. ', '...', ' '], 0.5)
            }
            at = animation_type if animation_type in load_anime else 'circle'
            frames, delay = load_anime[at]
            for c in itertools.cycle(frames):
                if done.is_set():
                    break
                sys.stdout.write('\r{0}: processing '.format(s) + c)
                sys.stdout.flush()
                time.sleep(delay)
            sys.stdout.write('\r{0}: Done! \n'.format(s))
        return wrapper
    return _loading_animation
|
kb_gtdbtkServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_gtdbtk.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the deployment config file from the environment, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the environment, or None when unset."""
    return environ.get(SERVICE)
def get_config():
    """Parse this service's section of the deployment config into a dict.

    Returns None when no deployment config file is configured.
    """
    config_file = get_config_file()
    if not config_file:
        return None
    parser = ConfigParser()
    parser.read(config_file)
    section = get_service_name() or 'kb_gtdbtk'
    return {key: value for key, value in parser.items(section)}
# Parse the deployment configuration once at import time.
config = get_config()

# The implementation is imported only after the config is parsed so its
# constructor can receive the module-level config.
from kb_gtdbtk.kb_gtdbtkImpl import kb_gtdbtk  # noqa @IgnorePep8
impl_kb_gtdbtk = kb_gtdbtk(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that serializes sets/frozensets as lists and defers to an
    object's toJSONable() method when one is provided."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that passes a context object (``ctx``) as the
    first argument to every registered method and serializes results with
    JSONObjectEncoder."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # The "- 1" accounts for ctx, which the caller never supplies.
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap arbitrary exceptions in a JSON-RPC server error that
            # carries the traceback for the client.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context dict (client IP, auth info, provenance) passed as the
    first argument to every service method, with logging helpers attached."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels treated as already-valid debug levels (no numeric mapping).
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        """Log ``message`` at error level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log ``message`` at info level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message; ``level`` 1-3 maps to syslog levels 7-9."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Forward to the shared logger with this call's identifying fields.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Fetch provenance from the SDK callback server when available,
        otherwise return the locally stored provenance.

        Raises ServerError when the callback server reports a failure.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message or ''
        # 'data' is the JSON-RPC 2.0 field name, 'error' the 1.1 one.
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-guess client IP from the WSGI environ, honoring X-Forwarded-For /
    X-Real-IP unless the config says those headers are untrusted."""
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # First entry is the originating client.
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        """Keep the server log pointed at the (possibly rotated) user log file."""
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        """Log ``message`` with the identifying fields from ``context``."""
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'kb_gtdbtk'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # method name -> 'required' / 'optional' / 'none' auth policy.
        self.method_authentication = dict()
        self.rpc_service.add(impl_kb_gtdbtk.run_kb_gtdbtk,
                             name='kb_gtdbtk.run_kb_gtdbtk',
                             types=[dict])
        self.method_authentication['kb_gtdbtk.run_kb_gtdbtk'] = 'required'  # noqa
        self.rpc_service.add(impl_kb_gtdbtk.status,
                             name='kb_gtdbtk.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        """WSGI entry point: authenticate, dispatch the JSON-RPC call, and
        return the JSON response body."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'kb_gtdbtk ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Optional auth tolerates a bad token;
                                # required auth does not.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        """Attach id/version fields and the traceback to an error response and
        return it serialized as JSON."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        # Place the trace under the field appropriate for the protocol
        # version the request used (1.1 / 2.0 / 1.0).
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """Current local time as an ISO string with numeric UTC offset."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# The singleton WSGI application served by uwsgi or the built-in server.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle of the background server process managed by start_server/stop_server.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Excecution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # With port=0 the OS assigns a free port; read back the real one.
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        httpd.serve_forever()
    else:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Terminate the background server process started via start_server."""
    global _proc
    proc = _proc
    proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC call read from ``input_file_path`` and write the
    response to ``output_file_path``.

    Returns 0 on success, 500 when the response contains an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in protocol fields the caller may have omitted.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-job CLI mode: <input.json> <output.json> [token-or-token-file].
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument may be either a token file or the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: optional --port / --host flags.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
deletionwatcher.py | # coding=utf-8
import json
import os.path
import pickle
import requests
import time
import threading
# noinspection PyPackageRequirements
import websocket
# noinspection PyPackageRequirements
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import chatcommunicate
import metasmoke
from globalvars import GlobalVars
import datahandling
from helpers import log
from parsing import fetch_post_id_and_site_from_url, to_protocol_relative
from tasks import Tasks
# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
class DeletionWatcher:
    """Watches Stack Exchange websocket events for deletions of subscribed
    posts, persisting subscriptions in deletionIDs.p across restarts."""

    # Earliest time the SE API may be hit again (honors API backoff).
    next_request_time = time.time() - 1

    def __init__(self):
        DeletionWatcher.update_site_id_list()
        # action string -> (post_id, site, type, url, [(callback, max_time)]).
        self.posts = {}
        try:
            self.socket = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
        except websocket.WebSocketException:
            log('error', 'DeletionWatcher failed to create a websocket connection')
            return
        if os.path.exists("deletionIDs.p"):
            # Re-subscribe to previously watched posts that are still recent.
            with open("deletionIDs.p", "rb") as fh:
                for post in DeletionWatcher._check_batch(pickle.load(fh)):
                    self.subscribe(post, pickle=False)
            self._save()
        threading.Thread(name="deletion watcher", target=self._start, daemon=True).start()

    def _start(self):
        """Websocket read loop: answer heartbeats and fire deletion callbacks."""
        while True:
            msg = self.socket.recv()
            if msg:
                msg = json.loads(msg)
                action = msg["action"]
                if action == "hb":
                    self.socket.send("hb")
                else:
                    data = json.loads(msg["data"])
                    if data["a"] == "post-deleted":
                        try:
                            post_id, _, _, post_url, callbacks = self.posts[action]
                            # Answer events carry aId; question events only qId.
                            if post_id == str(data["aId"] if "aId" in data else data["qId"]):
                                del self.posts[action]
                                self.socket.send("-" + action)
                                Tasks.do(metasmoke.Metasmoke.send_deletion_stats_for_post, post_url, True)
                                for callback, max_time in callbacks:
                                    if not max_time or time.time() < max_time:
                                        callback()
                        except KeyError:
                            pass

    def subscribe(self, post_url, callback=None, pickle=True, timeout=None):
        """Watch ``post_url`` for deletion, optionally invoking ``callback``
        (within ``timeout`` seconds) when it is deleted."""
        post_id, post_site, post_type = fetch_post_id_and_site_from_url(post_url)
        if post_site not in GlobalVars.site_id_dict:
            log("warning", "unknown site {} when subscribing to {}".format(post_site, post_url))
            return
        if post_type == "answer":
            # The websocket only reports question events, so map the answer
            # to its parent question.
            question_id = datahandling.get_post_site_id_link((post_id, post_site, post_type))
            if question_id is None:
                return
        else:
            question_id = post_id
        site_id = GlobalVars.site_id_dict[post_site]
        action = "{}-question-{}".format(site_id, question_id)
        max_time = (time.time() + timeout) if timeout else None
        if action not in self.posts:
            self.posts[action] = (post_id, post_site, post_type, post_url, [(callback, max_time)] if callback else [])
            try:
                self.socket.send(action)
            except websocket.WebSocketException:
                log('error', 'DeletionWatcher failed on sending {}'.format(action))
        elif callback:
            _, _, _, _, callbacks = self.posts[action]
            callbacks.append((callback, max_time))
        else:
            return
        if pickle:
            Tasks.do(self._save)

    def _save(self):
        """Persist the watched post ids per site to deletionIDs.p."""
        pickle_output = {}
        for post_id, post_site, _, _, _ in self.posts.values():
            if post_site not in pickle_output:
                pickle_output[post_site] = [post_id]
            else:
                pickle_output[post_site].append(post_id)
        with open("deletionIDs.p", "wb") as pickle_file:
            pickle.dump(pickle_output, pickle_file)

    @staticmethod
    def _check_batch(saved):
        """Yield URLs of saved posts younger than 2 hours, via the SE API."""
        if time.time() < DeletionWatcher.next_request_time:
            time.sleep(DeletionWatcher.next_request_time - time.time())
        for site, posts in saved.items():
            ids = ";".join(post_id for post_id in posts if not DeletionWatcher._ignore((post_id, site)))
            uri = "https://api.stackexchange.com/2.2/posts/{}".format(ids)
            params = {
                'site': site,
                'key': 'IAkbitmze4B8KpacUfLqkw(('
            }
            res = requests.get(uri, params=params)
            json = res.json()
            if "items" not in json:
                log('warning',
                    'DeletionWatcher API request received no items in response (code {})'.format(res.status_code))
                log('warning', res.text)
                return
            if 'backoff' in json:
                DeletionWatcher.next_request_time = time.time() + json['backoff']
            for post in json['items']:
                if time.time() - post["creation_date"] < 7200:
                    yield to_protocol_relative(post["link"]).replace("/q/", "/questions/")

    @staticmethod
    def _ignore(post_site_id):
        """True when the post is marked false-positive or (auto-)ignored."""
        return datahandling.is_false_positive(post_site_id) or datahandling.is_ignored_post(post_site_id) or \
            datahandling.is_auto_ignored_post(post_site_id)

    @staticmethod
    def update_site_id_list():
        """Scrape the SE site switcher to refresh the site -> id mapping."""
        soup = BeautifulSoup(requests.get("https://meta.stackexchange.com/topbar/site-switcher/site-list").text,
                             "html.parser")
        site_id_dict = {}
        for site in soup.findAll("a", attrs={"data-id": True}):
            site_name = urlparse(site["href"]).netloc
            site_id = site["data-id"]
            site_id_dict[site_name] = site_id
        GlobalVars.site_id_dict = site_id_dict
|
loader_base.py | """ Module dmt/data/loading/loader_base.py
Loading Concepts:
Given:
- SampleSet & custom/default preprocessing
-
Assumptions:
- # of samples per example & # of tensors per example is same for epoch
samples = [Sample(file) for file in os.listdir('dataset')]
sample_set = dmt.data.SampleSet(samples)
"""
import os, sys
import pathlib
import time
import random
import queue
import weakref
import warnings
import multiprocessing as python_mp
import threading
from threading import Thread
from numpy import BUFSIZE
import torch
if __name__ == '__main__':
    # Use 'spawn' so CUDA/torch state is not inherited by worker processes,
    # and make the package importable when running this file directly.
    python_mp.set_start_method('spawn')  # spawn
    curr_path = pathlib.Path(__file__)
    sys.path.append(str(curr_path.parent.parent.parent.parent))
from dmt.utils.parse import (
parse_nonnegative_int, parse_positive_int,
parse_bool
)
DEBUG = True
# class SharedCounter(object):
# """ A synchronized shared counter.
# The locking done by multiprocessing.Value ensures that only a single
# process or thread may read or write the in-memory ctypes object. However,
# in order to do n += 1, Python performs a read followed by a write, so a
# second process may read the old value before the new one is written by the
# first process. The solution is to use a multiprocessing.Lock to guarantee
# the atomicity of the modifications to Value.
# This class comes almost entirely from Eli Bendersky's blog:
# http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/
# """
# def __init__(self, n = 0):
# self.count = python_mp.Value('i', n)
# def increment(self, n = 1):
# """ Increment the counter by n (default = 1) """
# with self.count.get_lock():
# self.count.value += n
# @property
# def value(self):
# """ Return the value of the counter """
# return self.count.value
# class Queue(mp_queue):
# """ A portable implementation of multiprocessing.Queue.
# Because of multithreading / multiprocessing semantics, Queue.qsize() may
# raise the NotImplementedError exception on Unix platforms like Mac OS X
# where sem_getvalue() is not implemented. This subclass addresses this
# problem by using a synchronized shared counter (initialized to zero) and
# increasing / decreasing its value every time the put() and get() methods
# are called, respectively. This not only prevents NotImplementedError from
# being raised, but also allows us to implement a reliable version of both
# qsize() and empty().
# """
# def __init__(self, *args, **kwargs):
# super(Queue, self).__init__(*args, **kwargs)
# self.size = SharedCounter(0)
# def put(self, *args, **kwargs):
# self.size.increment(1)
# super(Queue, self).put(*args, **kwargs)
# def get(self, *args, **kwargs):
# self.size.increment(-1)
# return super(Queue, self).get(*args, **kwargs)
# def qsize(self):
# """ Reliable implementation of multiprocessing.Queue.qsize() """
# return self.size.value
# def empty(self):
# """ Reliable implementation of multiprocessing.Queue.empty() """
# return not self.qsize()
class WorkerProcess(python_mp.Process):
    """Process subclass carrying a cooperative shutdown flag.

    Fixes vs. the original:
      * ``super().__init__(self)`` passed ``self`` as the ``group`` argument
        of ``Process.__init__`` (which must be None), so construction raised;
        call it with no arguments instead.
      * ``Process.pid`` is a read-only property (populated only after
        ``start()``), so assigning ``self.pid`` raised AttributeError; the
        constructing process's ids are stored under distinct names.
    """

    def __init__(self, index):
        super().__init__()
        self.index = index  # worker slot assigned by the owning loader
        # PIDs of the process that constructed this object (NOT the child's
        # pid -- that is available as ``self.pid`` only after ``start()``).
        self.creator_pid = os.getpid()
        self.creator_ppid = os.getppid()
        # Event that run() implementations poll to exit gracefully.
        self.exit = python_mp.Event()

    def run(self):
        # Placeholder: subclasses override with the actual worker loop.
        pass

    def shutdown(self):
        """Request a graceful stop by setting the exit event."""
        self.exit.set()
class ExampleWorker:
    """Callable run inside a child process: turns queued samples into examples.

    Pulls lists of samples from ``input_queue``, applies
    ``example_creator_fn`` to each list and pushes the resulting example
    onto ``example_queue``. Loops until the process is terminated.
    """
    # TODO: add self saving attributes after confirm you can use class in Process

    def __call__(self, input_queue, example_queue, example_creator_fn):
        ppid = os.getppid()
        pid = os.getpid()
        if DEBUG:
            print(f'Child PID {pid} (parent {ppid}) created! ⭐⭐⭐⭐⭐')
            import psutil  # local import: only needed for debug diagnostics
            process = psutil.Process(os.getpid())
            # Original printed ``rss, + ' bytes'`` -- unary plus on a str
            # raises TypeError; format the message instead.
            print(f'{process.memory_info().rss} bytes')  # resident set size
        while True:
            try:
                inputs = input_queue.get(block=True)
            except queue.Empty as e:
                # queue.Empty is the real exception type; the original caught
                # the non-existent ``python_mp.Queue.Empty`` and then used an
                # undefined ``inputs`` after the handler.
                print(e)
                print(input_queue)
                continue
            if DEBUG:
                print(f'Child {pid}: got {len(inputs)} samples.')
            samples = list(inputs)
            example = example_creator_fn(samples)
            example_queue.put(example, block=True)
def worker(input_queue, example_queue, example_creator_fn):
    """Child-process loop: build examples from sample lists and send them.

    Args:
        input_queue: queue of lists of samples (one list per example).
        example_queue: Pipe connection end; finished examples are sent
            through it (note: ``send``, not ``put``).
        example_creator_fn: callable mapping a list of samples to an example.

    Runs forever; the parent terminates the process to stop it.
    (Removed: an unused ``psutil.Process`` local and its dead third-party
    import that only served commented-out debug prints.)
    """
    ppid = os.getppid()
    pid = os.getpid()
    if DEBUG:
        print(f'Child PID {pid} (parent {ppid}) created! 👶 👶')
    while True:
        inputs = input_queue.get(block=True)
        if DEBUG:
            print(f'Child {pid}: got {len(inputs)} samples.')
        samples = list(inputs)
        start = time.time()
        example = example_creator_fn(samples)
        print(f'👶 Child {pid} - Example created ({time.time() - start:.2f} '
              f'sec), putting in Q..')
        sys.stdout.flush()
        # Examples travel over a Pipe connection, not a Queue.
        example_queue.send(example)
class BatchLoader:
    """Base class for serialized or multi-processing data loading.

    Iterates over a SampleSet, groups samples into examples via
    ``example_creator_fn`` (executed in worker processes), and collates
    examples into batches with ``batch_collate_fn``.
    """

    def __init__(
            self,
            sample_set,
            example_creator_fn,
            samples_per_example,
            example_output_size,
            batch_collate_fn,
            examples_per_batch,
            num_workers=4,
            example_queue_size=16,
            start_loading=False,
            shuffle=False
    ):
        """
        Args:
            sample_set: collection of data samples to iterate over.
            example_creator_fn: function to create examples from samples.
                See sample & example abstractions in README.
            samples_per_example: number of samples consumed per example.
            example_output_size: number of tensors each example yields.
            batch_collate_fn: function collating a list of examples.
            examples_per_batch: number of examples per emitted batch.
            num_workers: number of example-creation worker processes.
            example_queue_size: capacity of the (legacy) example queue.
            start_loading: flag to start loading examples right after init.
                Usually, this only starts when __iter__ is called.
            shuffle: flag to shuffle samples.
        """
        self.sample_set = sample_set
        self.shuffle = parse_bool(shuffle, 'shuffle')
        self.example_creator_fn = example_creator_fn
        self.samples_per_example = parse_nonnegative_int(
            samples_per_example, 'samples_per_example')

        # Batch creation from examples
        self.batch_collate_fn = batch_collate_fn
        self.examples_per_batch = parse_nonnegative_int(
            examples_per_batch, 'examples_per_batch')
        self.example_output_size = parse_positive_int(
            example_output_size, 'example_output_size')

        # Create sample_set indices
        self._subject_indices_iterable = None
        self._reset_subject_indices_iterable()

        # Multi-processing state for example loading
        self.loading = False
        self.num_workers = parse_nonnegative_int(num_workers, 'num_workers')
        self.worker_processes = [None] * num_workers
        self.transfer_daemon, self.daemon_kill = None, None
        self.ii = None  # round-robin cycle over worker pipes
        self.queue_manager = None
        self.example_queue_size = parse_nonnegative_int(
            example_queue_size, 'example_queue_size')
        self.example_queue = None
        self.batch_loading_queue = None
        self._reset_example_queues()

        self.current_batch_index = 0
        if start_loading:
            self.start_loading()

    @property
    def num_subjects(self):
        """Total number of samples in the sample set."""
        return len(self.sample_set)

    @property
    def num_examples(self):
        """Number of whole examples one epoch can produce (drop remainder)."""
        return self.num_subjects // self.samples_per_example

    @property
    def num_batches(self):
        """Number of whole batches per epoch (drop-last semantics)."""
        return self.num_examples // self.examples_per_batch

    @property
    def batch_size(self):
        """Tensors per batch = examples/batch * tensors/example."""
        return self.examples_per_batch * self.example_output_size

    ### ------ # Sample Iteration Functionality # ----- ###

    def _get_next_subject_index(self):
        """Return the next sample index, refilling the iterator at epoch end.

        Bug fix: the original read ``self.subject_indices_iterable`` (no
        leading underscore), an attribute that was never set.
        """
        try:
            index = next(self._subject_indices_iterable)
        except StopIteration:
            if DEBUG:
                print('Indices list ran dry. Adding more..')
            self._reset_subject_indices_iterable()
            index = next(self._subject_indices_iterable)
        return index

    def _reset_subject_indices_iterable(self):
        """Rebuild the (optionally shuffled) iterator over sample indices."""
        indices = list(range(len(self.sample_set)))
        if self.shuffle:
            random.shuffle(indices)
        self._subject_indices_iterable = iter(indices)

    def _get_worker_indices_samples(self):
        """Partition the epoch's examples across workers (round-robin).

        Returns:
            (worker_indices, worker_samples): per-worker lists of
            per-example index lists, and the corresponding samples.
        """
        indices = list(range(len(self.sample_set)))
        if self.shuffle:
            random.shuffle(indices)
        spe = self.samples_per_example
        examples = [indices[spe*i:spe*i+spe] for i in range(len(indices)//spe)]
        nw = self.num_workers
        worker_indices = [examples[i::nw] for i in range(nw)]
        worker_samples = []
        for wi, worker_inds in enumerate(worker_indices):
            worker_samples.append([])
            for ex_inds in worker_inds:
                worker_samples[wi].append([self.sample_set[i] for i in ex_inds])
        return worker_indices, worker_samples

    ### ------ # Example Creation Functionality # ----- ###

    def __next__(self):
        """Grabs the next batch by collating examples from the worker pipes.

        Implementation Details
          - Handles drop-last when there's not enough examples for a batch.
          - At end of epoch, kills all worker processes.

        Bug fix: the original called ``sys.exit(1)`` at end of epoch, which
        killed the whole program and made the StopIteration unreachable.
        """
        import itertools
        if self.current_batch_index >= self.num_batches:
            print('[Loader] Stopping Iteration')
            self.stop_loading()
            raise StopIteration
        # TODO: check processes are working
        if self.ii is None:
            self.ii = itertools.cycle(range(self.num_workers))
        print('[NEW BATCH] !!')
        examples = []
        for i in range(self.examples_per_batch):
            print(f' Getting example {i}..', end='')
            start = time.time()
            # Round-robin over the worker pipes; recv() blocks until ready.
            example = self.example_queues[next(self.ii)][1].recv()
            examples.append(example)
            print(f'.done ({time.time() - start} sec)')
        start = time.time()
        batch = self.batch_collate_fn(examples)
        print(f' (__next__) Collate took {time.time() - start:.2f} sec')
        self.current_batch_index += 1
        return batch

    def start_loading(self):
        """Spawn one worker process per sample queue/pipe pair.

        Bug fix: the original built ``[python_mp.Pipe()] * num_workers``,
        replicating a SINGLE pipe across every worker; each worker now gets
        its own pipe.
        """
        if self.loading:
            return
        worker_indices, worker_samples = self._get_worker_indices_samples()
        self.sample_queues = []
        if self.queue_manager is None:
            self.queue_manager = python_mp.Manager()
        self.example_queues = [python_mp.Pipe()
                               for _ in range(self.num_workers)]
        for i, process in enumerate(self.worker_processes):
            if process is not None:
                assert False, 'Something is very wrong with worker init.'
            self.sample_queues.append(python_mp.Queue())
            for ex_samples in worker_samples[i]:
                self.sample_queues[i].put(ex_samples)
            new_process = python_mp.Process(
                target=worker,
                name=f'example_worker_{i}',
                args=(
                    self.sample_queues[i],
                    self.example_queues[i][0],
                    self.example_creator_fn
                ))
            self.worker_processes[i] = new_process
            new_process.start()
        self.loading = True

    def stop_loading(self):
        """Terminate, then join, every live worker process.

        Bug fix: the original's join loop tested the stale variable
        ``process`` left over from the terminate loop instead of ``p``.
        """
        for process in self.worker_processes:
            if process is not None:
                process.terminate()
        for process in self.worker_processes:
            if process is not None:
                process.join()
        self.worker_processes = [None] * self.num_workers
        self.loading = False

    def reset_loading(self):
        """Restart workers on a fresh partition of the sample set."""
        self.stop_loading()
        self._reset_example_queues()
        self.start_loading()

    def _reset_example_queues(self):
        """No-op guard: examples now travel over per-worker Pipes.

        The original queue-based implementation that followed an early
        ``return`` here was unreachable dead code and has been removed.
        """
        assert not self.loading, 'Can only reset when loading is halted.'

    @staticmethod
    def _start_transfer_daemon(src_q, dst_q, ref):
        """Start a daemon thread moving items from src_q to dst_q.

        The thread stops when the weakly-referenced ``ref`` dies (a
        'kill_me' sentinel is pushed through src_q) or when the sentinel
        string is received directly.
        """
        def transfer(src_q, dst_q, ref):
            print('😄 Daemon created!')
            while ref():
                start = time.time()
                obj = src_q.get(block=True)
                if isinstance(obj, str) and obj == 'kill_me':
                    break
                print('😄 Putting example into main queue! '
                      f'({time.time() - start:.2f} sec to get)', end='')
                start = time.time()
                dst_q.put(obj)
                print(f' .. ({time.time() - start:.2f} sec to put)')
            print('Daemon is done.')

        def stop_daemon(ref):
            print(f'(FastQ) Stop thread initiated')
            src_q.put('kill_me')

        wref = weakref.ref(ref, stop_daemon)
        args = (src_q, dst_q, wref,)
        transfer_thread = Thread(target=transfer, args=args)
        transfer_thread.daemon = True
        transfer_thread.start()
        return transfer_thread

    ### ------ # Core API & Batch Iteration # ----- ###

    def __iter__(self):
        """Initialize signal from user code.

        0. Checks if example loading has already started or not.
        1. If not started, load sample queues for each worker.
        2. Initialize workers to start filling the example pipes.
        """
        if not self.loading:
            self.reset_loading()
        self.current_batch_index = 0
        return self

    ### ------ # Other Functionality # ----- ###

    def __len__(self):
        """Number of batches remaining in the current epoch."""
        return self.num_batches - self.current_batch_index

    def __repr__(self):
        procs = [False if p is None else True for p in self.worker_processes]
        if len(self.worker_processes) == 0:
            procs = False
        string = (
            f'BatchLoader Object (workers={self.num_workers}) \n'
            f'  Workers Running: {str(procs)} \n'
            f'  SampleSet size = {len(self.sample_set)}, '
            f'Samples/Ex = {self.samples_per_example}, '
            f'Ex/Batch = {self.examples_per_batch} \n'
            f'  {self.num_batches} Batches w/ Size {self.batch_size}, '
            f'= Ex/Batch {self.examples_per_batch} * Tensors/Ex '
            f'{self.example_output_size}'
        )
        return string
# Tests
def example_creator_fn(samples):
    """Extract the raw image array from each sample, preserving order."""
    arrays = []
    for sample in samples:
        arrays.append(sample.image.array)
    return arrays
def collate_fn(batch):
    """Collate a list of examples into a flat list of tensors.

    For each tensor position (as counted in the first example) one tensor
    per example is emitted, so the output is grouped by position:
    [ex0[0], ex1[0], ..., ex0[1], ex1[1], ...].
    """
    tensors = []
    for position in range(len(batch[0])):
        for example in batch:
            tensors.append(torch.tensor(example[position]))
    return tensors
if __name__ == '__main__':
    import sys, random
    from pathlib import Path
    import SimpleITK as sitk

    # Make the repo root importable when running this file directly.
    curr_path = Path(__file__).absolute()
    base_path = curr_path.parent.parent.parent.parent
    sys.path.append(str(base_path))
    from dmt.data import (ScalarImage3D, ScalarMask3D, CategoricalLabel,
                          Sample, SampleSet)

    # Get dataset samples: pair up sorted .nii.gz images and labels from the
    # Task07_Pancreas test fixture.
    print('Loading samples..', end=''); sys.stdout.flush(); start = time.time()
    train_dir = Path('../../../tests/Task07_Pancreas/imagesTr')
    train_ims = sorted([f for f in os.listdir(train_dir) if f[-2:] =='gz'])
    label_dir = Path('../../../tests/Task07_Pancreas/labelsTr')
    label_ims = sorted([f for f in os.listdir(label_dir) if f[-2:] =='gz'])
    samples = []
    # Build a Sample (image + mask + random categorical label) for the first
    # 34 image/label pairs only.
    for i in range(len(train_ims))[:34]:
        img_path = train_dir / train_ims[i]
        lab_path = label_dir / label_ims[i]
        name = train_ims[i]
        cns = ['background', 'pancreas', 'tumor']  # class names
        image = ScalarImage3D(img_path, sitk.sitkInt16,
                              permanent_load=False, name=name)
        label = ScalarMask3D(lab_path, cns, container_type=sitk.sitkUInt8)
        label2 = CategoricalLabel(random.randint(0, 2), cns)
        sample = Sample(image=image, label=label, cat=label2, name=name, id=i)
        samples.append(sample)
    sampleset = SampleSet(samples)
    print(f'done ({time.time() - start:.2f} sec)')

    # Exercise the loader (34 samples, 3 samples/example, 2 examples/batch).
    loader = BatchLoader(
        sampleset,
        samples_per_example=3,
        example_output_size=3,
        examples_per_batch=2,
        example_creator_fn=example_creator_fn,
        batch_collate_fn=collate_fn,
        num_workers=4,
        start_loading=True,
        shuffle=False,
        example_queue_size=32,
    )
    print(loader)
    # print(f'\n... Simulating other loading functions..', end='')
    # time.sleep(10); print(f'done ✔')

    # Drop into an interactive shell before timing a full epoch of batches.
    import IPython; IPython.embed();
    load_start = time.time()
    batch_start = time.time()
    print(f'⭐⭐⭐ Starting loader iteration.')
    for i, batch in enumerate(loader):
        t = time.time() - batch_start
        print(f'\nBatch {i+1} RECEIVED! ({len(batch)} items, {t:.2} sec)')
        for i, t in enumerate(batch):
            print(f' Batch-Collate Tens {i}, shape: {t.shape}')
        batch_start = time.time()
    print(f'* Final Time: {time.time() - load_start:.2f} sec *')
utils.py | # -*- coding: utf-8 -*-
"""
Various useful functions
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import multiprocessing
from functools import reduce
import time
import numpy as np
from scipy.spatial.distance import cdist
import sys
import warnings
from inspect import signature
# Timestamp shared by the Matlab-style tic/toc timing helpers below.
__time_tic_toc = time.time()


def tic():
    """Start (or restart) the shared timer, Matlab-style ``tic``."""
    global __time_tic_toc
    __time_tic_toc = time.time()


def toc(message='Elapsed time : {} s'):
    """Print and return the seconds elapsed since the last ``tic()``."""
    elapsed = time.time() - __time_tic_toc
    print(message.format(elapsed))
    return elapsed


def toq():
    """Return the seconds elapsed since the last ``tic()``, Julia-style."""
    return time.time() - __time_tic_toc
def kernel(x1, x2, method='gaussian', sigma=1, **kwargs):
    """Compute a kernel matrix between the samples in x1 and x2.

    Only the Gaussian/RBF kernel ('gaussian', 'gauss', 'rbf') is
    implemented; any other method name returns None.
    """
    gaussian_aliases = ('gaussian', 'gauss', 'rbf')
    K = None
    if method.lower() in gaussian_aliases:
        K = np.exp(-dist(x1, x2) / (2 * sigma**2))
    return K
def laplacian(x):
    """Compute the graph Laplacian L = D - x of a similarity matrix x,
    where D is the diagonal matrix of column sums (degrees)."""
    degree = np.diag(np.sum(x, axis=0))
    return degree - x
def unif(n):
    """Return the uniform histogram on n bins (a point on the simplex).

    Parameters
    ----------
    n : int
        number of bins in the histogram

    Returns
    -------
    h : np.array (n,)
        histogram of length n such that h_i = 1/n for all i
    """
    h = np.ones((n,))
    h /= n
    return h
def clean_zeros(a, b, M):
    """Remove all components with zero weight in a and b.

    Returns the strictly positive entries of a and b together with the
    corresponding submatrix of M (copied, forcing a C-contiguous layout).
    """
    keep_a = a > 0
    keep_b = b > 0
    M2 = M[keep_a, :][:, keep_b].copy()  # copy forces c style matrix (froemd)
    return a[keep_a], b[keep_b], M2
def euclidean_distances(X, Y, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    Parameters
    ----------
    X : {array-like}, shape (n_samples_1, n_features)
    Y : {array-like}, shape (n_samples_2, n_features)
    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array}, shape (n_samples_1, n_samples_2)
    """
    # ||x - y||^2 = ||x||^2 - 2 <x, y> + ||y||^2, assembled in place.
    row_norms = np.einsum('ij,ij->i', X, X)
    col_norms = np.einsum('ij,ij->i', Y, Y)
    distances = np.dot(X, Y.T)
    distances *= -2
    distances += row_norms[:, np.newaxis]
    distances += col_norms[np.newaxis, :]
    # Clip tiny negatives caused by floating point cancellation.
    np.maximum(distances, 0, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    if squared:
        return distances
    return np.sqrt(distances, out=distances)
def dist(x1, x2=None, metric='sqeuclidean'):
    """Compute the pairwise distance matrix between samples in x1 and x2.

    Parameters
    ----------
    x1 : ndarray, shape (n1,d)
        matrix with n1 samples of size d
    x2 : array, shape (n2,d), optional
        matrix with n2 samples of size d (if None then x2=x1)
    metric : str | callable, optional
        Metric name; 'sqeuclidean' (the default) uses a fast dedicated
        implementation, everything else is forwarded to
        scipy.spatial.distance.cdist ('braycurtis', 'canberra', 'chebyshev',
        'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski',
        'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener',
        'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule').

    Returns
    -------
    M : np.array (n1,n2)
        distance matrix computed with given metric
    """
    x2 = x1 if x2 is None else x2
    if metric != "sqeuclidean":
        return cdist(x1, x2, metric=metric)
    return euclidean_distances(x1, x2, squared=True)
def dist0(n, method='lin_square'):
    """Compute a standard (n, n) cost matrix for OT problems.

    Parameters
    ----------
    n : int
        Size of the cost matrix.
    method : str, optional
        Type of loss matrix chosen from:

        * 'lin_square' : linear sampling between 0 and n-1, quadratic loss

    Returns
    -------
    M : ndarray, shape (n, n)
        Distance matrix computed with given metric (0 for unknown methods).
    """
    res = 0
    if method == 'lin_square':
        positions = np.arange(n, dtype=np.float64).reshape((n, 1))
        res = dist(positions, positions)
    return res
def cost_normalization(C, norm=None):
    """Apply normalization to the loss matrix.

    Parameters
    ----------
    C : ndarray, shape (n1, n2)
        The cost matrix to normalize.
    norm : str
        Type of normalization from 'median', 'max', 'log', 'loglog';
        None (or any unrecognised falsy handling) leaves C unchanged,
        other values raise ValueError.

    Returns
    -------
    C : ndarray, shape (n1, n2)
        The input cost matrix normalized according to given norm.
    """
    if norm == "median":
        C /= float(np.median(C))   # in-place: mutates the caller's array
    elif norm == "max":
        C /= float(np.max(C))      # in-place: mutates the caller's array
    elif norm == "log":
        C = np.log(1 + C)
    elif norm == "loglog":
        C = np.log1p(np.log1p(C))
    elif norm is not None:
        raise ValueError('Norm %s is not a valid option.\n'
                         'Valid options are:\n'
                         'median, max, log, loglog' % norm)
    return C
def dots(*args):
    """Chained matrix product: dots(A, B, C) == A.dot(B).dot(C)."""
    result = args[0]
    for mat in args[1:]:
        result = np.dot(result, mat)
    return result
def label_normalization(y, start=0):
    """Shift labels so the smallest one equals ``start``.

    Parameters
    ----------
    y : array-like, shape (n, )
        The vector of labels to be normalized (shifted in place for
        ndarray inputs).
    start : int
        Desired value for the smallest label in y (default=0)

    Returns
    -------
    y : array-like, shape (n, )
        The input vector of labels normalized according to given start value.
    """
    offset = np.min(np.unique(y)) - start
    if offset != 0:
        y -= offset
    return y
def fun(f, q_in, q_out):
    """Worker loop for parmap: apply f to queued items until a None index
    sentinel arrives (avoids serializing f per task)."""
    while True:
        index, item = q_in.get()
        if index is None:
            break
        q_out.put((index, f(item)))


def parmap(f, X, nprocs=multiprocessing.cpu_count()):
    """Parallel map via multiprocessing (falls back to plain map on
    Windows and macOS, where the fork-based scheme is unavailable)."""
    if sys.platform.endswith('win32') or sys.platform.endswith('darwin'):
        return list(map(f, X))
    q_in = multiprocessing.Queue(1)
    q_out = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
               for _ in range(nprocs)]
    for w in workers:
        w.daemon = True
        w.start()
    sent = [q_in.put((i, x)) for i, x in enumerate(X)]
    for _ in range(nprocs):
        q_in.put((None, None))  # one stop sentinel per worker
    results = [q_out.get() for _ in range(len(sent))]
    for w in workers:
        w.join()
    # Results arrive out of order; restore input order via the index.
    return [value for _, value in sorted(results)]
def check_params(**kwargs):
    """check_params: check whether some parameters are missing.

    Returns False (after printing a warning) if any keyword argument is
    None, True otherwise.
    """
    missing_params = [name for name, value in kwargs.items() if value is None]
    if missing_params:
        print("POT - Warning: following necessary parameters are missing")
        for p in missing_params:
            print("\n", p)
        return False
    return True
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    Parameters
    ----------
    seed : None | int | instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (int, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('{} cannot be used to seed a numpy.random.RandomState'
                     ' instance'.format(seed))
class deprecated(object):
    """Decorator marking a function or class as deprecated.

    Adapted from scikit-learn's deprecation helper
    (sklearn/utils/deprecation.py). Calling a decorated function (or
    instantiating a decorated class) issues a DeprecationWarning, and
    "DEPRECATED" is prepended to the docstring. Use with empty parentheses
    for the default message: ``@deprecated()``.

    Parameters
    ----------
    extra : str
        To be added to the deprecation messages.
    """

    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.

    def __init__(self, extra=''):
        self.extra = extra

    def __call__(self, obj):
        """Decorate ``obj``, which may be a class or a plain function."""
        if isinstance(obj, type):
            return self._decorate_class(obj)
        return self._decorate_fun(obj)

    def _decorate_class(self, cls):
        """Wrap ``cls.__init__`` so that instantiation warns."""
        msg = "Class %s is deprecated" % cls.__name__
        if self.extra:
            msg += "; %s" % self.extra

        # FIXME: we should probably reset __new__ for full generality
        original_init = cls.__init__

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return original_init(*args, **kwargs)

        wrapped.__name__ = '__init__'
        wrapped.__doc__ = self._update_doc(original_init.__doc__)
        wrapped.deprecated_original = original_init
        cls.__init__ = wrapped
        return cls

    def _decorate_fun(self, fun):
        """Wrap a plain function so that calling it warns."""
        msg = "Function %s is deprecated" % fun.__name__
        if self.extra:
            msg += "; %s" % self.extra

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return fun(*args, **kwargs)

        wrapped.__name__ = fun.__name__
        wrapped.__dict__ = fun.__dict__
        wrapped.__doc__ = self._update_doc(fun.__doc__)
        return wrapped

    def _update_doc(self, olddoc):
        """Prepend the DEPRECATED banner (plus ``extra``) to a docstring."""
        newdoc = "DEPRECATED"
        if self.extra:
            newdoc = "%s: %s" % (newdoc, self.extra)
        if olddoc:
            newdoc = "%s\n\n%s" % (newdoc, olddoc)
        return newdoc
def _is_deprecated(func):
"""Helper to check if func is wraped by our deprecated decorator"""
if sys.version_info < (3, 5):
raise NotImplementedError("This is only available for python3.5 "
"or above")
closures = getattr(func, '__closure__', [])
if closures is None:
closures = []
is_deprecated = ('deprecated' in ''.join([c.cell_contents
for c in closures
if isinstance(c.cell_contents, str)]))
return is_deprecated
class BaseEstimator(object):
    """Base class for most objects in POT

    Code adapted from sklearn BaseEstimator class

    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their ``__init__`` as explicit keyword
    arguments (no ``*args`` or ``**kwargs``).
    """

    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        init_signature = signature(init)
        # Consider the constructor parameters excluding 'self'
        parameters = [p for p in init_signature.parameters.values()
                      if p.name != 'self' and p.kind != p.VAR_KEYWORD]
        for p in parameters:
            if p.kind == p.VAR_POSITIONAL:
                # *args makes parameter introspection impossible, so it is
                # forbidden for estimators by convention.
                raise RuntimeError("POT estimators should always "
                                   "specify their parameters in the signature"
                                   " of their __init__ (no varargs)."
                                   " %s with constructor %s doesn't "
                                   " follow this convention."
                                   % (cls, init_signature))
        # Extract and sort argument names excluding 'self'
        return sorted([p.name for p in parameters])

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : bool, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                    if len(w) and w[0].category == DeprecationWarning:
                        # if the parameter is deprecated, don't show it
                        continue
            finally:
                # Pop the filter pushed by simplefilter above so it does not
                # leak into the caller's warning state (order-sensitive:
                # simplefilter inserts at position 0).
                warnings.filters.pop(0)

            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # Recurse into sub-estimators, prefixing keys with 'name__'.
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    def set_params(self, **params):
        """Set the parameters of this estimator.

        The method works on simple estimators as well as on nested objects
        (such as pipelines). The latter have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        # for key, value in iteritems(params):
        for key, value in params.items():
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s. '
                                     'Check the list of available parameters '
                                     'with `estimator.get_params().keys()`.' %
                                     (name, self))
                sub_object = valid_params[name]
                # Delegate the remainder of the key to the sub-estimator.
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s. '
                                     'Check the list of available parameters '
                                     'with `estimator.get_params().keys()`.' %
                                     (key, self.__class__.__name__))
                setattr(self, key, value)
        return self
class UndefinedParameter(Exception):
    """Raised when an undefined parameter is requested."""
    pass
|
download_manager_test.py | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.download.download_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import json
import os
import re
import tempfile
import threading
from absl.testing import absltest
import promise
import tensorflow.compat.v2 as tf
from tensorflow_datasets import testing
from tensorflow_datasets.core.download import checksums as checksums_lib
from tensorflow_datasets.core.download import download_manager as dm
from tensorflow_datasets.core.download import resource as resource_lib
ZIP = resource_lib.ExtractMethod.ZIP
TAR = resource_lib.ExtractMethod.TAR
NO_EXTRACT = resource_lib.ExtractMethod.NO_EXTRACT
def _get_promise_on_event(result=None, error=None):
    """Returns (event, Promise). Promise is fulfilled when `event.set()`.

    If ``error`` is given the promise is rejected with it once the event
    fires; otherwise it resolves with ``result``.
    """
    event = threading.Event()

    def callback(resolve, reject):
        def waiter():
            event.wait()
            if error is not None:
                reject(error)
            resolve(result)

        waiter_thread = threading.Thread(target=waiter)
        waiter_thread.daemon = True
        waiter_thread.start()

    return event, promise.Promise(callback)
def _sha256(str_):
return hashlib.sha256(str_.encode('utf8')).hexdigest()
class Artifact(object):
    # For testing only: bundles the URL, content, size and checksums of a
    # fake downloadable resource, plus its expected on-disk file names.

    def __init__(self, name, url=None):
        self.url = url or 'http://foo-bar.ch/%s' % name
        self.content = 'content of %s' % name
        self.size = len(self.content)
        self.sha = _sha256(self.content)
        # Both orderings are kept because different call sites expect
        # (size, sha) vs (sha, size).
        self.size_checksum = (self.size, self.sha)
        self.checksum_size = (self.sha, self.size)
        self.dl_fname = resource_lib.get_dl_fname(self.url, self.sha)
        self.dl_tmp_dirname = resource_lib.get_dl_dirname(self.url)
class DownloadManagerTest(testing.TestCase):
def _add_file(self, path, content='', mode='w'):
"""Returns open file handle."""
temp_f = tempfile.NamedTemporaryFile(mode=mode, delete=False)
self.files_content[path] = temp_f.name
temp_f.write(content)
temp_f.close()
self.existing_paths.append(path)
return temp_f
    def setUp(self):
        """Install fakes for the filesystem (tf.io.gfile) and checksum store."""
        self.addCleanup(absltest.mock.patch.stopall)
        self.existing_paths = []  # paths the fake filesystem reports as existing
        self.made_dirs = []  # directories "created" via the fake makedirs
        self.dl_results = {}  # url -> download result (promise)
        self.extract_results = {}  # path -> extraction result (promise)
        self.file_names = {}  # resource fname -> original file name

        def list_directory(path):
            fname = os.path.basename(path).rsplit('.', 2)[0]  # suffix is '.tmp.$uuid'
            return [self.file_names.get(fname, 'file_with_no_ext')]

        self.files_content = {}  # fake path -> real temp-file path backing it

        def open_(path, mode='r'):
            # Opening for write first registers a backing temp file.
            if 'w' in mode:
                self._add_file(path)
            return open(self.files_content[path], mode)

        def rename(from_, to, overwrite=False):
            del overwrite
            if from_ in self.files_content:
                self.existing_paths.append(to)
                self.existing_paths.remove(from_)
                self.files_content[to] = self.files_content.pop(from_)

        self.gfile_patch = absltest.mock.patch.object(
            tf.io,
            'gfile',
            exists=lambda path: path in self.existing_paths,
            makedirs=self.made_dirs.append,
            # Used to get name of file as downloaded:
            listdir=list_directory,
            GFile=open_,
            rename=absltest.mock.Mock(side_effect=rename),
        )
        self.gfile = self.gfile_patch.start()
        # Checksum persistence is irrelevant to these tests; stub it out.
        absltest.mock.patch.object(checksums_lib, 'store_checksums').start()
    def tearDown(self):
        """Remove the tf.io.gfile patch installed in setUp."""
        self.gfile_patch.stop()
def _write_info(self, path, info):
content = json.dumps(info, sort_keys=True)
self._add_file(path, content)
    def _get_manager(self, force_download=False, force_extraction=False,
                     checksums=None, dl_dir='/dl_dir',
                     extract_dir='/extract_dir'):
        """Build a DownloadManager whose downloader/extractor are mocked.

        Download and extract calls resolve to the promises registered in
        ``self.dl_results`` / ``self.extract_results`` respectively.
        """
        manager = dm.DownloadManager(
            dataset_name='mnist',
            download_dir=dl_dir,
            extract_dir=extract_dir,
            manual_dir='/manual_dir',
            force_download=force_download,
            force_extraction=force_extraction,
        )
        if checksums:
            # Pre-seed known (size, checksum) pairs keyed by url.
            manager._sizes_checksums = checksums
        download = absltest.mock.patch.object(
            manager._downloader,
            'download',
            side_effect=lambda url, tmpdir_path: self.dl_results[url])
        self.downloader_download = download.start()
        extract = absltest.mock.patch.object(
            manager._extractor,
            'extract',
            side_effect=lambda path, method, dest: self.extract_results[path])
        self.extractor_extract = extract.start()
        return manager
  def test_download(self):
    """One file in cache, one not."""
    a, b, c = [Artifact(i) for i in 'abc']
    urls = {
        'cached': a.url,
        'new': b.url,
        'info_deleted': c.url,
    }
    # Pre-populate the fake filesystem: `a` is fully cached (data + INFO),
    # `c` has data but lost its INFO file, `b` is absent entirely.
    _ = [self._add_file(path, content) for path, content in [  # pylint: disable=g-complex-comprehension
        ('/dl_dir/%s' % a.dl_fname, a.content),
        ('/dl_dir/%s.INFO' % a.dl_fname, 'content of info file a'),
        # INFO file of c has been deleted:
        ('/dl_dir/%s' % c.dl_fname, c.content),
    ]]
    dl_b, self.dl_results[b.url] = _get_promise_on_event(b.checksum_size)
    dl_c, self.dl_results[c.url] = _get_promise_on_event(c.checksum_size)
    manager = self._get_manager(checksums=dict(
        (art.url, art.size_checksum) for art in (a, b, c)))
    dl_b.set()
    dl_c.set()
    downloads = manager.download(urls)
    # All three keys must resolve to a path under dl_dir, whether the file
    # was cached, freshly downloaded, or re-downloaded after INFO loss.
    expected = {
        'cached': '/dl_dir/%s' % a.dl_fname,
        'new': '/dl_dir/%s' % b.dl_fname,
        'info_deleted': '/dl_dir/%s' % c.dl_fname,
    }
    self.assertEqual(downloads, expected)
  def test_extract(self):
    """One file already extracted, one file with NO_EXTRACT, one to extract."""
    cached = resource_lib.Resource(path='/dl_dir/cached', extract_method=ZIP)
    new_ = resource_lib.Resource(path='/dl_dir/new', extract_method=TAR)
    no_extract = resource_lib.Resource(path='/dl_dir/noextract',
                                       extract_method=NO_EXTRACT)
    files = {
        'cached': cached,
        'new': new_,
        'noextract': no_extract,
    }
    # Simulate a previous extraction of `cached`:
    self.existing_paths.append('/extract_dir/ZIP.cached')
    extracted_new, self.extract_results['/dl_dir/new'] = (
        _get_promise_on_event('/extract_dir/TAR.new'))
    manager = self._get_manager()
    extracted_new.set()
    res = manager.extract(files)
    expected = {
        'cached': '/extract_dir/ZIP.cached',
        'new': '/extract_dir/TAR.new',
        # NO_EXTRACT resources are passed through unchanged:
        'noextract': '/dl_dir/noextract',
    }
    self.assertEqual(res, expected)
  def test_extract_twice_parallel(self):
    """Extracting the same resource twice performs the extraction once."""
    extracted_new, self.extract_results['/dl_dir/foo.tar'] = (
        _get_promise_on_event('/extract_dir/TAR.foo'))
    manager = self._get_manager()
    extracted_new.set()
    # Same resource twice in one call, then again in a second call.
    out1 = manager.extract(['/dl_dir/foo.tar', '/dl_dir/foo.tar'])
    out2 = manager.extract('/dl_dir/foo.tar')
    expected = '/extract_dir/TAR.foo'
    self.assertEqual(out1[0], expected)
    self.assertEqual(out1[1], expected)
    expected = '/extract_dir/TAR.foo'
    self.assertEqual(out2, expected)
    # Result is memoize so extract has only been called once
    self.assertEqual(1, self.extractor_extract.call_count)
  def test_download_and_extract(self):
    """Chained download+extract: archives are extracted, plain files kept."""
    a, b = Artifact('a.zip'), Artifact('b')
    self.file_names[a.dl_tmp_dirname] = 'a.zip'
    dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
    dl_b, self.dl_results[b.url] = _get_promise_on_event(b.checksum_size)
    ext_a, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
        _get_promise_on_event('/extract_dir/ZIP.%s' % a.dl_fname))
    # url_b doesn't need any extraction.
    for event in [dl_a, dl_b, ext_a]:
      event.set()
    # Result is the same after caching:
    manager = self._get_manager(checksums={
        a.url: a.size_checksum,
        b.url: b.size_checksum,
    })
    res = manager.download_and_extract({'a': a.url, 'b': b.url})
    expected = {
        'a': '/extract_dir/ZIP.%s' % a.dl_fname,
        'b': '/dl_dir/%s' % b.dl_fname,
    }
    self.assertEqual(res, expected)
  def test_download_and_extract_archive_ext_in_fname(self):
    """Extraction method is deduced from the original fname, not the URL."""
    # The URL carries no extension; the downloaded file name ('a.zip') does.
    a = Artifact('a', url='http://a?key=1234')
    self.file_names[a.dl_tmp_dirname] = 'a.zip'
    dl, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
    ext, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
        _get_promise_on_event('/extract_dir/ZIP.%s' % a.dl_fname))
    dl.set()
    ext.set()
    manager = self._get_manager(checksums={
        a.url: a.size_checksum,
    })
    res = manager.download_and_extract({'a': a.url})
    # ZIP extraction was chosen despite the extension-less URL.
    expected = {
        'a': '/extract_dir/ZIP.%s' % a.dl_fname,
    }
    self.assertEqual(res, expected)
  def test_download_and_extract_already_downloaded(self):
    """A cached download is not re-fetched; only the extraction runs."""
    a = Artifact('a.zip')
    self.file_names[a.dl_tmp_dirname] = 'a.zip'
    # File was already downloaded:
    self._add_file('/dl_dir/%s' % a.dl_fname)
    self._write_info('/dl_dir/%s.INFO' % a.dl_fname,
                     {'original_fname': 'a.zip'})
    # Only an extraction promise is registered — no download promise, so a
    # download attempt would raise a KeyError and fail the test.
    ext_a, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
        _get_promise_on_event('/extract_dir/ZIP.%s' % a.dl_fname))
    ext_a.set()
    manager = self._get_manager(checksums={
        a.url: a.size_checksum,
    })
    res = manager.download_and_extract(a.url)
    expected = '/extract_dir/ZIP.%s' % a.dl_fname
    self.assertEqual(res, expected)
def test_force_download_and_extract(self):
a = Artifact('a.tar.gz')
# resource was already downloaded / extracted:
self.existing_paths = ['/dl_dir/%s' % a.dl_fname,
'/extract_dir/TAR_GZ.%s' % a.dl_fname]
self.file_names[a.dl_tmp_dirname] = 'b.tar.gz'
self._write_info('/dl_dir/%s.INFO' % a.dl_fname,
{'original_fname': 'b.tar.gz'})
dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
ext_a, self.extract_results['/dl_dir/%s' % a.dl_fname] = (
_get_promise_on_event('/extract_dir/TAR_GZ.%s' % a.dl_fname))
dl_a.set()
ext_a.set()
manager = self._get_manager(
force_download=True, force_extraction=True,
checksums={
a.url: a.size_checksum,
})
res = manager.download_and_extract(a.url)
expected = '/extract_dir/TAR_GZ.%s' % a.dl_fname
self.assertEqual(expected, res)
# Rename after download:
(from_, to), kwargs = self.gfile.rename.call_args
self.assertTrue(re.match(
r'/dl_dir/%s\.tmp\.[a-h0-9]{32}/b.tar.gz' % a.dl_tmp_dirname,
from_))
self.assertEqual('/dl_dir/%s' % a.dl_fname, to)
self.assertEqual(kwargs, {'overwrite': True})
self.assertEqual(1, self.downloader_download.call_count)
self.assertEqual(1, self.extractor_extract.call_count)
  def test_wrong_checksum(self):
    """A checksum mismatch raises, and nothing gets extracted."""
    a = Artifact('a.tar.gz')
    # Registered checksum belongs to a different file's content.
    sha_b = _sha256('content of another file')
    dl_a, self.dl_results[a.url] = _get_promise_on_event(a.checksum_size)
    dl_a.set()
    manager = self._get_manager(checksums={
        a.url: (a.size, sha_b),
    })
    with self.assertRaises(dm.NonMatchingChecksumError):
      manager.download(a.url)
    self.assertEqual(0, self.extractor_extract.call_count)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  testing.test_main()
|
manager.py | from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Iterator
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from salvia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from salvia.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
stream_plot_info_pk,
stream_plot_info_ph,
)
from salvia.util.ints import uint16
from salvia.util.path import mkdir
from salvia.util.streamable import Streamable, streamable
from salvia.types.blockchain_format.proof_of_space import ProofOfSpace
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)

# Version tag of the on-disk cache format; `Cache.load` rejects files written
# with any other version (see the version check there).
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
    """Derived key material for one plot, cached so plots need not be
    re-parsed on every refresh. Field order is part of the serialized
    format — do not reorder."""

    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
    """On-disk container for the plot cache: a format version plus all
    (plot id, CacheEntry) pairs. Field order is part of the serialized
    format — do not reorder."""

    version: uint16
    data: List[Tuple[bytes32, CacheEntry]]
class Cache:
    """In-memory plot-id -> CacheEntry map with best-effort disk persistence.

    Mutations set a dirty flag; `save()`/`load()` never raise — failures are
    logged and leave the in-memory state usable.
    """

    _changed: bool
    _data: Dict[bytes32, CacheEntry]

    def __init__(self, path: Path):
        self._path = path
        self._data = {}
        self._changed = False
        # Ensure the parent directory exists before the first save().
        if not path.parent.exists():
            mkdir(path.parent)

    def __len__(self):
        return len(self._data)

    def update(self, plot_id: bytes32, entry: CacheEntry):
        """Insert or replace the entry for `plot_id`, marking the cache dirty."""
        self._data[plot_id] = entry
        self._changed = True

    def remove(self, cache_keys: List[bytes32]):
        """Drop every listed key that is present; dirty only if something went."""
        for cache_key in cache_keys:
            if cache_key in self._data:
                self._data.pop(cache_key)
                self._changed = True

    def save(self):
        """Serialize all entries to `self._path`; errors are logged, not raised."""
        try:
            disk_cache: DiskCache = DiskCache(CURRENT_VERSION, list(self.items()))
            serialized: bytes = bytes(disk_cache)
            self._path.write_bytes(serialized)
            self._changed = False
            log.info(f"Saved {len(serialized)} bytes of cached data")
        except Exception as e:
            log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")

    def load(self):
        """Load entries from disk; a missing or bad file leaves the map empty."""
        try:
            serialized = self._path.read_bytes()
            log.info(f"Loaded {len(serialized)} bytes of cached data")
            stored_cache: DiskCache = DiskCache.from_bytes(serialized)
            if stored_cache.version != CURRENT_VERSION:
                # TODO, Migrate or drop current cache if the version changes.
                raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
            self._data = dict(stored_cache.data)
        except FileNotFoundError:
            log.debug(f"Cache {self._path} not found")
        except Exception as e:
            log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")

    def keys(self):
        return self._data.keys()

    def items(self):
        return self._data.items()

    def get(self, plot_id):
        return self._data.get(plot_id)

    def changed(self):
        return self._changed

    def path(self):
        return self._path
class PlotManager:
    """Tracks plot files on disk and refreshes them on a background thread.

    A refresh pass prunes plots that vanished from the filesystem or the
    configured directories, then loads the remaining files in batches,
    reporting progress through the registered callback. Parsed key material
    is persisted via `Cache` to avoid re-deriving it on every start.
    """

    # Loaded plots, keyed by the path they were loaded from.
    plots: Dict[Path, PlotInfo]
    # file name -> (directory loaded from, set of directories holding duplicates).
    plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
    plot_filename_paths_lock: threading.Lock
    # path -> unix time of last failed open, for retry throttling.
    failed_to_open_filenames: Dict[Path, int]
    no_key_filenames: Set[Path]
    farmer_public_keys: List[G1Element]
    pool_public_keys: List[G1Element]
    cache: Cache
    match_str: Optional[str]
    show_memo: bool
    open_no_key_filenames: bool
    last_refresh_time: float
    refresh_parameter: PlotsRefreshParameter
    log: Any
    _lock: threading.Lock
    _refresh_thread: Optional[threading.Thread]
    _refreshing_enabled: bool
    _refresh_callback: Callable

    def __init__(
        self,
        root_path: Path,
        refresh_callback: Callable,
        match_str: Optional[str] = None,
        show_memo: bool = False,
        open_no_key_filenames: bool = False,
        refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
    ):
        self.root_path = root_path
        self.plots = {}
        self.plot_filename_paths = {}
        self.plot_filename_paths_lock = threading.Lock()
        self.failed_to_open_filenames = {}
        self.no_key_filenames = set()
        self.farmer_public_keys = []
        self.pool_public_keys = []
        self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
        self.match_str = match_str
        self.show_memo = show_memo
        self.open_no_key_filenames = open_no_key_filenames
        self.last_refresh_time = 0
        self.refresh_parameter = refresh_parameter
        self.log = logging.getLogger(__name__)
        self._lock = threading.Lock()
        self._refresh_thread = None
        self._refreshing_enabled = False
        self._refresh_callback = refresh_callback  # type: ignore

    def __enter__(self):
        # `with manager:` serializes access to `self.plots` and friends.
        self._lock.acquire()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._lock.release()

    def set_refresh_callback(self, callback: Callable):
        self._refresh_callback = callback  # type: ignore

    def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
        self.farmer_public_keys = farmer_public_keys
        self.pool_public_keys = pool_public_keys

    def public_keys_available(self):
        # Truthy only when both key lists are non-empty.
        return len(self.farmer_public_keys) and len(self.pool_public_keys)

    def plot_count(self):
        with self:
            return len(self.plots)

    def get_duplicates(self):
        """Return full paths of all plots that exist in more than one directory."""
        result = []
        for plot_filename, paths_entry in self.plot_filename_paths.items():
            _, duplicated_paths = paths_entry
            for path in duplicated_paths:
                result.append(Path(path) / plot_filename)
        return result

    def needs_refresh(self) -> bool:
        return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)

    def start_refreshing(self):
        """Load the cache and start the background refresh thread (idempotent)."""
        self._refreshing_enabled = True
        if self._refresh_thread is None or not self._refresh_thread.is_alive():
            self.cache.load()
            self._refresh_thread = threading.Thread(target=self._refresh_task)
            self._refresh_thread.start()

    def stop_refreshing(self):
        """Signal the refresh thread to stop and wait for it to exit."""
        self._refreshing_enabled = False
        if self._refresh_thread is not None and self._refresh_thread.is_alive():
            self._refresh_thread.join()
            self._refresh_thread = None

    def trigger_refresh(self):
        # Forcing last_refresh_time to 0 makes needs_refresh() true immediately.
        log.debug("trigger_refresh")
        self.last_refresh_time = 0

    def _refresh_task(self):
        """Background loop: wait until a refresh is due, then rescan all plots."""
        while self._refreshing_enabled:
            while not self.needs_refresh() and self._refreshing_enabled:
                time.sleep(1)
            if not self._refreshing_enabled:
                return
            plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
            plot_directories: Set[Path] = set(plot_filenames.keys())
            plot_paths: List[Path] = []
            for paths in plot_filenames.values():
                plot_paths += paths
            total_result: PlotRefreshResult = PlotRefreshResult()
            total_size = len(plot_paths)
            self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
            # First drop all plots we have in plot_filename_paths but not longer in the filesystem or set in config
            def plot_removed(test_path: Path):
                return not test_path.exists() or test_path.parent not in plot_directories
            filenames_to_remove: List[str] = []
            for plot_filename, paths_entry in self.plot_filename_paths.items():
                loaded_path, duplicated_paths = paths_entry
                loaded_plot = Path(loaded_path) / Path(plot_filename)
                if plot_removed(loaded_plot):
                    filenames_to_remove.append(plot_filename)
                    if loaded_plot in self.plots:
                        del self.plots[loaded_plot]
                        total_result.removed += 1
                    # No need to check the duplicates here since we drop the whole entry
                    continue
                paths_to_remove: List[str] = []
                for path in duplicated_paths:
                    if plot_removed(Path(path) / Path(plot_filename)):
                        paths_to_remove.append(path)
                        total_result.removed += 1
                for path in paths_to_remove:
                    duplicated_paths.remove(path)
            for filename in filenames_to_remove:
                del self.plot_filename_paths[filename]

            def batches() -> Iterator[Tuple[int, List[Path]]]:
                # Yields (paths remaining after this batch, the batch itself);
                # an empty scan still yields once so callbacks fire.
                if total_size > 0:
                    for batch_start in range(0, total_size, self.refresh_parameter.batch_size):
                        batch_end = min(batch_start + self.refresh_parameter.batch_size, total_size)
                        yield total_size - batch_end, plot_paths[batch_start:batch_end]
                else:
                    yield 0, []

            for remaining, batch in batches():
                batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
                if not self._refreshing_enabled:
                    self.log.debug("refresh_plots: Aborted")
                    break
                # Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
                batch_result.remaining = remaining
                total_result.loaded += batch_result.loaded
                total_result.processed += batch_result.processed
                total_result.duration += batch_result.duration
                self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
                if remaining == 0:
                    break
                batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
                self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
                time.sleep(float(batch_sleep) / 1000.0)

            if self._refreshing_enabled:
                self._refresh_callback(PlotRefreshEvents.done, total_result)

            # Cleanup unused cache
            available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
            invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
            self.cache.remove(invalid_cache_keys)
            self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
            if self.cache.changed():
                self.cache.save()
            self.last_refresh_time = time.time()
            self.log.debug(
                f"_refresh_task: total_result.loaded {total_result.loaded}, "
                f"total_result.removed {total_result.removed}, "
                f"total_duration {total_result.duration:.2f} seconds"
            )

    def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
        """Load one batch of plot files in parallel and merge them into `self.plots`."""
        start_time: float = time.time()
        result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
        counter_lock = threading.Lock()
        log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
        if self.match_str is not None:
            log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')

        def process_file(file_path: Path) -> Optional[PlotInfo]:
            # Returns the PlotInfo for a loadable plot, or None to skip it
            # (filtered out, duplicate, failed, or being copied).
            if not self._refreshing_enabled:
                return None
            filename_str = str(file_path)
            if self.match_str is not None and self.match_str not in filename_str:
                return None
            if (
                file_path in self.failed_to_open_filenames
                and (time.time() - self.failed_to_open_filenames[file_path])
                < self.refresh_parameter.retry_invalid_seconds
            ):
                # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
                return None
            if file_path in self.plots:
                return self.plots[file_path]
            entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if entry is not None:
                loaded_parent, duplicates = entry
                if str(file_path.parent) in duplicates:
                    log.debug(f"Skip duplicated plot {str(file_path)}")
                    return None
            try:
                if not file_path.exists():
                    return None
                prover = DiskProver(str(file_path))
                log.debug(f"process_file {str(file_path)}")
                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = file_path.stat()
                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.
                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return None
                cache_entry = self.cache.get(prover.get_id())
                if cache_entry is None:
                    # Cache miss: derive the key material from the plot memo.
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(prover.get_memo())
                    # Only use plots that correct keys associated with them
                    if farmer_public_key not in self.farmer_public_keys:
                        log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                        self.no_key_filenames.add(file_path)
                        if not self.open_no_key_filenames:
                            return None
                    pool_public_key: Optional[G1Element] = None
                    pool_contract_puzzle_hash: Optional[bytes32] = None
                    if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                        pool_public_key = pool_public_key_or_puzzle_hash
                    else:
                        assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                        pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
                    if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                        log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                        self.no_key_filenames.add(file_path)
                        if not self.open_no_key_filenames:
                            return None
                    local_sk = master_sk_to_local_sk(local_master_sk)
                    plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
                    )
                    cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
                    self.cache.update(prover.get_id(), cache_entry)
                with self.plot_filename_paths_lock:
                    paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
                    if paths is None:
                        paths = (str(Path(prover.get_filename()).parent), set())
                        self.plot_filename_paths[file_path.name] = paths
                    else:
                        # Same file name seen before in another directory:
                        # record it as a duplicate and don't load it.
                        paths[1].add(str(Path(prover.get_filename()).parent))
                        log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                        return None
                new_plot_info: PlotInfo = PlotInfo(
                    prover,
                    cache_entry.pool_public_key,
                    cache_entry.pool_contract_puzzle_hash,
                    cache_entry.plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                with counter_lock:
                    result.loaded += 1
                if file_path in self.failed_to_open_filenames:
                    del self.failed_to_open_filenames[file_path]
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {file_path}. {e} {tb}")
                self.failed_to_open_filenames[file_path] = int(time.time())
                return None
            log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
            if self.show_memo:
                # NOTE(review): pool_contract_puzzle_hash / pool_public_key /
                # farmer_public_key / local_master_sk are only bound on the
                # cache-miss branch above; on a cache hit this block would
                # raise NameError — confirm whether show_memo is only used
                # with a cold cache.
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")
            return new_plot_info

        with self, ThreadPoolExecutor() as executor:
            plots_refreshed: Dict[Path, PlotInfo] = {}
            for new_plot in executor.map(process_file, plot_paths):
                if new_plot is not None:
                    plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
            self.plots.update(plots_refreshed)
        result.duration = time.time() - start_time
        self.log.debug(
            f"refresh_batch: loaded {result.loaded}, "
            f"removed {result.removed}, processed {result.processed}, "
            f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
            f"duration: {result.duration:.2f} seconds"
        )
        return result
|
client.py | from __future__ import print_function, division
__version__ = '0.0.1'
import os.path
from threading import Thread, RLock
import logging
logger = logging.getLogger('onvif')
logging.basicConfig(level=logging.INFO)
logging.getLogger('zeep.client').setLevel(logging.CRITICAL)
from urllib.parse import urlparse
from zeep.client import Client, CachingClient, Settings
from zeep.wsse.username import UsernameToken
import zeep.helpers
from onvif.exceptions import ONVIFError
from onvif.definition import SERVICES
import datetime as dt
# Ensure methods to raise an ONVIFError Exception
# when some thing was wrong
def safe_func(func):
    """Decorator that re-raises any failure from *func* as an ONVIFError.

    Gives every ONVIF call a single, predictable exception type. The
    original exception is attached as the cause so tracebacks stay useful,
    and an ONVIFError raised by a nested safe_func is passed through
    instead of being wrapped a second time.
    """
    import functools

    @functools.wraps(func)  # keep the wrapped callable's name/docstring
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ONVIFError:
            # Already wrapped by an inner safe_func — don't double-wrap.
            raise
        except Exception as err:
            raise ONVIFError(err) from err
    return wrapped
class UsernameDigestTokenDtDiff(UsernameToken):
    '''
    UsernameDigestToken class, with a time offset parameter that can be adjusted;
    This allows authentication on cameras without being time synchronized.
    Please note that using NTP on both end is the recommended solution,
    this should only be used in "safe" environments.
    '''
    def __init__(self, user, passw, dt_diff=None, **kwargs):
        super().__init__(user, passw, **kwargs)
        self.dt_diff = dt_diff  # Date/time difference in datetime.timedelta

    def apply(self, envelope, headers):
        """Apply the WS-Security header with `Created` shifted by `dt_diff`.

        The shifted timestamp is only used for this envelope; the previous
        `created` value is restored afterwards so the token stays reusable.
        """
        old_created = self.created
        if self.created is None:
            self.created = dt.datetime.utcnow()
        if self.dt_diff is not None:
            # Align our clock with the camera's (offset measured elsewhere).
            self.created += self.dt_diff
        result = super().apply(envelope, headers)
        self.created = old_created
        return result
class ONVIFService(object):
    '''
    Python Implemention for ONVIF Service.
    Services List:
        DeviceMgmt DeviceIO Event AnalyticsDevice Display Imaging Media
        PTZ Receiver RemoteDiscovery Recording Replay Search Extension

    >>> from onvif import ONVIFService
    >>> device_service = ONVIFService('http://192.168.0.112/onvif/device_service',
    ...                               'admin', 'foscam',
    ...                               '/etc/onvif/wsdl/devicemgmt.wsdl')
    >>> ret = device_service.GetHostname()
    >>> print ret.FromDHCP
    >>> print ret.Name
    >>> device_service.SetHostname(dict(Name='newhostname'))
    >>> ret = device_service.GetSystemDateAndTime()
    >>> print ret.DaylightSavings
    >>> print ret.TimeZone
    >>> dict_ret = device_service.to_dict(ret)
    >>> print dict_ret['TimeZone']

    There are two ways to pass parameter to services methods
    1. Dict
        params = {'Name': 'NewHostName'}
        device_service.SetHostname(params)
    2. Type Instance
        params = device_service.create_type('SetHostname')
        params.Hostname = 'NewHostName'
        device_service.SetHostname(params)
    '''
    @safe_func
    def __init__(self, xaddr, user, passwd, url,
                 encrypt=True, daemon=False, zeep_client=None, no_cache=False,
                 portType=None, dt_diff=None, binding_name='', transport=None):
        # `url` is the local WSDL file; `xaddr` is the device's endpoint.
        if not os.path.isfile(url):
            raise ONVIFError('%s doesn`t exist!' % url)
        self.url = url
        self.xaddr = xaddr
        # WS-Security token with optional clock-offset compensation.
        wsse = UsernameDigestTokenDtDiff(user, passwd, dt_diff=dt_diff, use_digest=encrypt)
        # Create soap client
        if not zeep_client:
            # CachingClient reuses parsed WSDL documents between instances.
            ClientType = Client if no_cache else CachingClient
            settings = Settings()
            settings.strict = False
            settings.xml_huge_tree = True
            self.zeep_client = ClientType(wsdl=url, wsse=wsse, transport=transport, settings=settings)
        else:
            self.zeep_client = zeep_client
        self.ws_client = self.zeep_client.create_service(binding_name, self.xaddr)
        # Set soap header for authentication
        self.user = user
        self.passwd = passwd
        # Indicate whether password digest is needed
        self.encrypt = encrypt
        self.daemon = daemon
        self.dt_diff = dt_diff
        # Factory for WSDL-declared request types, e.g. create_type('SetHostname').
        self.create_type = lambda x: self.zeep_client.get_element('ns0:' + x)()

    @classmethod
    @safe_func
    def clone(cls, service, *args, **kwargs):
        # NOTE(review): ONVIFService.__init__ accepts no `ws_client` kwarg, so
        # this call raises TypeError (surfaced as ONVIFError). Possibly meant
        # to be `zeep_client` — confirm before relying on clone().
        clone_service = service.ws_client.clone()
        kwargs['ws_client'] = clone_service
        return ONVIFService(*args, **kwargs)

    @staticmethod
    @safe_func
    def to_dict(zeepobject):
        # Convert a WSDL Type instance into a dictionary
        return {} if zeepobject is None else zeep.helpers.serialize_object(zeepobject)

    def service_wrapper(self, func):
        """Wrap a zeep operation so it accepts None/dict/typed params + callback.

        In daemon mode the call runs on a background thread and the wrapper
        returns None; otherwise the operation result is returned directly.
        """
        @safe_func
        def wrapped(params=None, callback=None):
            def call(params=None, callback=None):
                # No params
                if params is None:
                    params = {}
                else:
                    params = ONVIFService.to_dict(params)
                try:
                    ret = func(**params)
                except TypeError:
                    # Some operations take a single positional argument.
                    ret = func(params)
                if callable(callback):
                    callback(ret)
                return ret
            if self.daemon:
                th = Thread(target=call, args=(params, callback))
                th.daemon = True
                th.start()
            else:
                return call(params, callback)
        return wrapped

    def __getattr__(self, name):
        '''
        Call the real onvif Service operations,
        See the official wsdl definition for the
        APIs detail(API name, request parameters,
        response parameters, parameter types, etc...)
        '''
        builtin = name.startswith('__') and name.endswith('__')
        if builtin:
            # NOTE(review): a missing dunder raises KeyError here rather than
            # the AttributeError Python callers expect — confirm intent.
            return self.__dict__[name]
        else:
            return self.service_wrapper(getattr(self.ws_client, name))
class ONVIFCamera(object):
'''
Python Implemention ONVIF compliant device
This class integrates onvif services
adjust_time parameter allows authentication on cameras without being time synchronized.
Please note that using NTP on both end is the recommended solution,
this should only be used in "safe" environments.
Also, this cannot be used on AXIS camera, as every request is authenticated, contrary to ONVIF standard
>>> from onvif import ONVIFCamera
>>> mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
>>> mycam.devicemgmt.GetServices(False)
>>> media_service = mycam.create_media_service()
>>> ptz_service = mycam.create_ptz_service()
# Get PTZ Configuration:
>>> mycam.ptz.GetConfiguration()
# Another way:
>>> ptz_service.GetConfiguration()
'''
# Class-level variables
services_template = {'devicemgmt': None, 'ptz': None, 'media': None,
'imaging': None, 'events': None, 'analytics': None }
use_services_template = {'devicemgmt': True, 'ptz': True, 'media': True,
'imaging': True, 'events': True, 'analytics': True }
def __init__(self, host, port ,user, passwd, wsdl_dir=os.path.join(os.path.dirname(os.path.dirname(__file__)), "wsdl"),
encrypt=True, daemon=False, no_cache=False, adjust_time=False, transport=None):
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)
self.host = host
self.port = int(port)
self.user = user
self.passwd = passwd
self.wsdl_dir = wsdl_dir
self.encrypt = encrypt
self.daemon = daemon
self.no_cache = no_cache
self.adjust_time = adjust_time
self.transport = transport
# Active service client container
self.services = { }
self.services_lock = RLock()
# Set xaddrs
self.update_xaddrs()
self.to_dict = ONVIFService.to_dict
def update_xaddrs(self):
# Establish devicemgmt service first
self.dt_diff = None
self.devicemgmt = self.create_devicemgmt_service()
if self.adjust_time :
cdate = self.devicemgmt.GetSystemDateAndTime().UTCDateTime
cam_date = dt.datetime(cdate.Date.Year, cdate.Date.Month, cdate.Date.Day, cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second)
self.dt_diff = cam_date - dt.datetime.utcnow()
self.devicemgmt.dt_diff = self.dt_diff
#self.devicemgmt.set_wsse()
self.devicemgmt = self.create_devicemgmt_service()
# Get XAddr of services on the device
self.xaddrs = { }
capabilities = self.devicemgmt.GetCapabilities({'Category': 'All'})
for name in capabilities:
capability = capabilities[name]
try:
if name.lower() in SERVICES and capability is not None:
ns = SERVICES[name.lower()]['ns']
## REPLACED >>>
new_addr = "{}:{}".format(self.host, self.port)
local_addr = capability['XAddr']
parsed_addr = urlparse(local_addr).netloc
if new_addr not in parsed_addr:
local_addr = local_addr.replace(parsed_addr, new_addr)
#print("FIX", local_addr, new_addr, parsed_addr)
self.xaddrs[ns] = local_addr
except Exception:
logger.exception('Unexpected service type')
with self.services_lock:
try:
self.event = self.create_events_service()
self.xaddrs['http://www.onvif.org/ver10/events/wsdl/PullPointSubscription'] = self.event.CreatePullPointSubscription().SubscriptionReference.Address._value_1
except:
pass
def update_url(self, host=None, port=None):
changed = False
if host and self.host != host:
changed = True
self.host = host
if port and self.port != port:
changed = True
self.port = port
if not changed:
return
self.devicemgmt = self.create_devicemgmt_service()
self.capabilities = self.devicemgmt.GetCapabilities()
with self.services_lock:
for sname in self.services.keys():
print("HOST", self.host, self.port)
xaddr = getattr(self.capabilities, sname.capitalize).XAddr#.replace("192.168.1.78:80", "testapi.me:7089")
self.services[sname].ws_client.set_options(location=xaddr)
def get_service(self, name, create=True):
service = None
service = getattr(self, name.lower(), None)
if not service and create:
return getattr(self, 'create_%s_service' % name.lower())()
return service
def get_definition(self, name, portType=None):
'''Returns xaddr and wsdl of specified service'''
# Check if the service is supported
if name not in SERVICES:
raise ONVIFError('Unknown service %s' % name)
wsdl_file = SERVICES[name]['wsdl']
ns = SERVICES[name]['ns']
binding_name = '{%s}%s' % (ns, SERVICES[name]['binding'])
if portType:
ns += '/' + portType
wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
if not os.path.isfile(wsdlpath):
raise ONVIFError('No such file: %s' % wsdlpath)
# XAddr for devicemgmt is fixed:
if name == 'devicemgmt':
xaddr = '%s:%s/onvif/device_service' % \
(self.host if (self.host.startswith('http://') or self.host.startswith('https://'))
else 'http://%s' % self.host, self.port)
return xaddr, wsdlpath, binding_name
# Get other XAddr
xaddr = self.xaddrs.get(ns)
if not xaddr:
raise ONVIFError('Device doesn`t support service: %s' % name)
return xaddr, wsdlpath, binding_name
def create_onvif_service(self, name, from_template=True, portType=None):
'''Create ONVIF service client'''
name = name.lower()
xaddr, wsdl_file, binding_name = self.get_definition(name, portType)
with self.services_lock:
service = ONVIFService(xaddr, self.user, self.passwd,
wsdl_file, self.encrypt,
self.daemon, no_cache=self.no_cache,
portType=portType,
dt_diff=self.dt_diff,
binding_name=binding_name,
transport=self.transport)
self.services[name] = service
setattr(self, name, service)
if not self.services_template.get(name):
self.services_template[name] = service
return service
def create_devicemgmt_service(self, from_template=True):
# The entry point for devicemgmt service is fixed.
return self.create_onvif_service('devicemgmt', from_template)
def create_media_service(self, from_template=True):
return self.create_onvif_service('media', from_template)
def create_ptz_service(self, from_template=True):
return self.create_onvif_service('ptz', from_template)
def create_imaging_service(self, from_template=True):
return self.create_onvif_service('imaging', from_template)
def create_deviceio_service(self, from_template=True):
return self.create_onvif_service('deviceio', from_template)
def create_events_service(self, from_template=True):
return self.create_onvif_service('events', from_template)
def create_analytics_service(self, from_template=True):
return self.create_onvif_service('analytics', from_template)
def create_recording_service(self, from_template=True):
return self.create_onvif_service('recording', from_template)
def create_search_service(self, from_template=True):
return self.create_onvif_service('search', from_template)
def create_replay_service(self, from_template=True):
return self.create_onvif_service('replay', from_template)
def create_pullpoint_service(self, from_template=True):
    """Build the pull-point subscription client (dedicated port type)."""
    svc = self.create_onvif_service('pullpoint', from_template, portType='PullPointSubscription')
    return svc
def create_receiver_service(self, from_template=True):
    """Build the ONVIF receiver service client."""
    svc = self.create_onvif_service('receiver', from_template)
    return svc
|
test_s3.py | import boto3
import botocore.session
from botocore.exceptions import ClientError
from botocore.exceptions import ParamValidationError
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import isodate
import email.utils
import datetime
import threading
import re
import pytz
from collections import OrderedDict
import requests
import json
import base64
import hmac
import hashlib
import xml.etree.ElementTree as ET
import time
import operator
import nose
import os
import string
import random
import socket
import dateutil.parser
import ssl
from collections import namedtuple
from email.header import decode_header
from .utils import assert_raises
from .utils import generate_random
from .utils import _get_status_and_error_code
from .utils import _get_status
from .policy import Policy, Statement, make_json_policy
from . import (
get_client,
get_prefix,
get_unauthenticated_client,
get_bad_auth_client,
get_v2_client,
get_new_bucket,
get_new_bucket_name,
get_new_bucket_resource,
get_config_is_secure,
get_config_host,
get_config_port,
get_config_endpoint,
get_main_aws_access_key,
get_main_aws_secret_key,
get_main_display_name,
get_main_user_id,
get_main_email,
get_main_api_name,
get_alt_aws_access_key,
get_alt_aws_secret_key,
get_alt_display_name,
get_alt_user_id,
get_alt_email,
get_alt_client,
get_tenant_client,
get_tenant_iam_client,
get_tenant_user_id,
get_buckets_list,
get_objects_list,
get_main_kms_keyid,
get_secondary_kms_keyid,
get_svc_client,
nuke_prefixed_buckets,
)
def _bucket_is_empty(bucket):
is_empty = True
for obj in bucket.objects.all():
is_empty = False
break
return is_empty
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty buckets return no contents')
def test_bucket_list_empty():
    """A freshly created bucket must list as empty."""
    fresh = get_new_bucket_resource()
    eq(_bucket_is_empty(fresh), True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='distinct buckets have different contents')
def test_bucket_list_distinct():
    """Writing to one bucket must not make a different bucket non-empty."""
    bucket1 = get_new_bucket_resource()
    bucket2 = get_new_bucket_resource()
    # dropped the unused `obj =` binding; only the side effect matters here
    bucket1.put_object(Body='str', Key='asdf')
    # bucket2 was never written, so it must still list as empty
    is_empty = _bucket_is_empty(bucket2)
    eq(is_empty, True)
def _create_objects(bucket=None, bucket_name=None, keys=None):
    """
    Populate a (specified or new) bucket with objects with
    specified names (and contents identical to their names).

    :param bucket: existing bucket resource; created when None.
    :param bucket_name: name to use/create; generated when None.
    :param keys: iterable of object keys to write (default: none).
    :return: the bucket name.
    """
    if bucket_name is None:
        bucket_name = get_new_bucket_name()
    if bucket is None:
        bucket = get_new_bucket_resource(name=bucket_name)
    # default is None rather than a shared mutable [] (mutable-default pitfall)
    for key in (keys or []):
        bucket.put_object(Body=key, Key=key)
    return bucket_name
def _get_keys(response):
"""
return lists of strings that are the keys from a client.list_objects() response
"""
keys = []
if 'Contents' in response:
objects_list = response['Contents']
keys = [obj['Key'] for obj in objects_list]
return keys
def _get_prefixes(response):
"""
return lists of strings that are prefixes from a client.list_objects() response
"""
prefixes = []
if 'CommonPrefixes' in response:
prefix_list = response['CommonPrefixes']
prefixes = [prefix['Prefix'] for prefix in prefix_list]
return prefixes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
def test_bucket_list_many():
    """Paginate a three-key bucket two keys at a time, resuming via Marker."""
    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
    client = get_client()
    # first page: two keys in sorted order, listing truncated
    page1 = client.list_objects(Bucket=bucket_name, MaxKeys=2)
    page1_keys = _get_keys(page1)
    eq(len(page1_keys), 2)
    eq(page1_keys, ['bar', 'baz'])
    eq(page1['IsTruncated'], True)
    # second page: resume after 'baz', one key remains
    page2 = client.list_objects(Bucket=bucket_name, Marker='baz',MaxKeys=2)
    page2_keys = _get_keys(page2)
    eq(len(page2_keys), 1)
    eq(page2['IsTruncated'], False)
    eq(page2_keys, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
@attr('list-objects-v2')
def test_bucket_listv2_many():
    """Paginate a three-key bucket two keys at a time, resuming via StartAfter."""
    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
    client = get_client()
    # first page: two keys in sorted order, listing truncated
    page1 = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2)
    page1_keys = _get_keys(page1)
    eq(len(page1_keys), 2)
    eq(page1_keys, ['bar', 'baz'])
    eq(page1['IsTruncated'], True)
    # second page: start after 'baz', one key remains
    page2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz',MaxKeys=2)
    page2_keys = _get_keys(page2)
    eq(len(page2_keys), 1)
    eq(page2['IsTruncated'], False)
    eq(page2_keys, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='keycount in listobjectsv2')
@attr('list-objects-v2')
def test_basic_key_count():
    """KeyCount in a ListObjectsV2 response equals the number of objects."""
    client = get_client()
    # removed dead local `bucket_names = []` — it was never used
    bucket_name = get_new_bucket_name()
    client.create_bucket(Bucket=bucket_name)
    for j in range(5):
        client.put_object(Bucket=bucket_name, Key=str(j))
    response1 = client.list_objects_v2(Bucket=bucket_name)
    eq(response1['KeyCount'], 5)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_basic():
    """Keys containing '/' roll up into CommonPrefixes; others list whole."""
    bucket = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/')
    eq(resp['Delimiter'], '/')
    # only 'asdf' lacks the delimiter, so it is the lone key returned
    eq(_get_keys(resp), ['asdf'])
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    eq(grouped, ['foo/', 'quux/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_basic():
    """V2: keys with '/' roll up into CommonPrefixes; KeyCount counts both."""
    bucket = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='/')
    eq(resp['Delimiter'], '/')
    # only 'asdf' lacks the delimiter, so it is the lone key returned
    found_keys = _get_keys(resp)
    eq(found_keys, ['asdf'])
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    eq(grouped, ['foo/', 'quux/'])
    # v2 reports KeyCount as keys plus common prefixes
    eq(resp['KeyCount'], len(grouped) + len(found_keys))
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='test url encoding')
@attr('list-objects-v2')
def test_bucket_listv2_encoding_basic():
    """With EncodingType='url', '+' and ' ' come back percent-encoded (v2)."""
    bucket = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='/', EncodingType='url')
    eq(resp['Delimiter'], '/')
    # the bare key has its '+' url-encoded
    eq(_get_keys(resp), ['asdf%2Bb'])
    encoded = _get_prefixes(resp)
    eq(len(encoded), 3)
    eq(encoded, ['foo%2B1/', 'foo/', 'quux%20ab/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='test url encoding')
@attr('list-objects')
def test_bucket_list_encoding_basic():
    """With EncodingType='url', '+' and ' ' come back percent-encoded (v1)."""
    bucket = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/', EncodingType='url')
    eq(resp['Delimiter'], '/')
    # the bare key has its '+' url-encoded
    eq(_get_keys(resp), ['asdf%2Bb'])
    encoded = _get_prefixes(resp)
    eq(len(encoded), 3)
    eq(encoded, ['foo%2B1/', 'foo/', 'quux%20ab/'])
def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
                         is_truncated, check_objs, check_prefixes, next_marker):
    """Fetch one ListObjects (v1) page and assert its exact contents.

    Asserts IsTruncated, NextMarker, and the exact keys / common prefixes of
    the page. Returns the page's NextMarker (None when the listing is done)
    so callers can chain pages.
    """
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix)
    eq(response['IsTruncated'], is_truncated)
    # normalize: a complete (non-truncated) listing omits NextMarker entirely
    if 'NextMarker' not in response:
        response['NextMarker'] = None
    eq(response['NextMarker'], next_marker)
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    # length checks first give a clearer failure before the element-wise eq
    eq(len(keys), len(check_objs))
    eq(len(prefixes), len(check_prefixes))
    eq(keys, check_objs)
    eq(prefixes, check_prefixes)
    return response['NextMarker']
def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
                           is_truncated, check_objs, check_prefixes, last=False):
    """Fetch one ListObjectsV2 page and assert its exact contents.

    Pass continuation_token=None for the first page (an empty StartAfter is
    sent instead); set last=True to assert the listing is finished. Returns
    NextContinuationToken (None when absent) for chaining pages.
    """
    client = get_client()
    params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
    # ContinuationToken and StartAfter are mutually exclusive in v2
    if continuation_token is not None:
        params['ContinuationToken'] = continuation_token
    else:
        params['StartAfter'] = ''
    response = client.list_objects_v2(**params)
    eq(response['IsTruncated'], is_truncated)
    # normalize: the final page omits NextContinuationToken entirely
    if 'NextContinuationToken' not in response:
        response['NextContinuationToken'] = None
    if last:
        eq(response['NextContinuationToken'], None)
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    # length checks first give a clearer failure before the element-wise eq
    eq(len(keys), len(check_objs))
    eq(len(prefixes), len(check_prefixes))
    eq(keys, check_objs)
    eq(prefixes, check_prefixes)
    return response['NextContinuationToken']
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_prefix():
    """Walk prefix/delimiter listings page by page, following NextMarker."""
    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
    delim = '/'
    # no prefix, one entry per page
    cursor = validate_bucket_list(bucket_name, '', delim, '', 1, True, ['asdf'], [], 'asdf')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 1, True, [], ['boo/'], 'boo/')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 1, False, [], ['cquux/'], None)
    # no prefix, two entries per page
    cursor = validate_bucket_list(bucket_name, '', delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 2, False, [], ['cquux/'], None)
    # restrict to the 'boo/' subtree
    cursor = validate_bucket_list(bucket_name, 'boo/', delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
    cursor = validate_bucket_list(bucket_name, 'boo/', delim, cursor, 1, False, [], ['boo/baz/'], None)
    validate_bucket_list(bucket_name, 'boo/', delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix():
    """V2: walk prefix/delimiter listings page by page via continuation tokens."""
    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
    delim = '/'
    # no prefix, one entry per page
    token = validate_bucket_listv2(bucket_name, '', delim, None, 1, True, ['asdf'], [])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 1, True, [], ['boo/'])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 1, False, [], ['cquux/'], last=True)
    # no prefix, two entries per page
    token = validate_bucket_listv2(bucket_name, '', delim, None, 2, True, ['asdf'], ['boo/'])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 2, False, [], ['cquux/'], last=True)
    # restrict to the 'boo/' subtree
    token = validate_bucket_listv2(bucket_name, 'boo/', delim, None, 1, True, ['boo/bar'], [])
    token = validate_bucket_listv2(bucket_name, 'boo/', delim, token, 1, False, [], ['boo/baz/'], last=True)
    validate_bucket_listv2(bucket_name, 'boo/', delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
    """V2: a key equal to the prefix (ending in '/') is returned as a key, not a prefix."""
    bucket = _create_objects(keys=['asdf/'])
    validate_bucket_listv2(bucket, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
def test_bucket_list_delimiter_prefix_ends_with_delimiter():
    """A key equal to the prefix (ending in '/') is returned as a key, not a prefix."""
    bucket = _create_objects(keys=['asdf/'])
    validate_bucket_list(bucket, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-slash delimiter characters')
def test_bucket_list_delimiter_alt():
    """Listing with 'a' as the delimiter groups keys on 'a'."""
    bucket = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='a')
    eq(resp['Delimiter'], 'a')
    # 'foo' contains no 'a' and so is returned as a complete key
    eq(_get_keys(resp), ['foo'])
    # bar, baz and cab collapse to the prefixes 'ba' and 'ca'
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    eq(grouped, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-slash delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_alt():
    """V2: listing with 'a' as the delimiter groups keys on 'a'."""
    bucket = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='a')
    eq(resp['Delimiter'], 'a')
    # 'foo' contains no 'a' and so is returned as a complete key
    eq(_get_keys(resp), ['foo'])
    # bar, baz and cab collapse to the prefixes 'ba' and 'ca'
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    eq(grouped, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
def test_bucket_list_delimiter_prefix_underscore():
    """Keys beginning with '_' paginate and group exactly like other keys."""
    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
    delim = '/'
    # no prefix, one entry per page
    cursor = validate_bucket_list(bucket_name, '', delim, '', 1, True, ['_obj1_'], [], '_obj1_')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 1, True, [], ['_under1/'], '_under1/')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 1, False, [], ['_under2/'], None)
    # no prefix, two entries per page
    cursor = validate_bucket_list(bucket_name, '', delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 2, False, [], ['_under2/'], None)
    # restrict to the '_under1/' subtree
    cursor = validate_bucket_list(bucket_name, '_under1/', delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
    cursor = validate_bucket_list(bucket_name, '_under1/', delim, cursor, 1, False, [], ['_under1/baz/'], None)
    validate_bucket_list(bucket_name, '_under1/', delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_underscore():
    """V2: keys beginning with '_' paginate and group exactly like other keys."""
    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
    delim = '/'
    # no prefix, one entry per page
    token = validate_bucket_listv2(bucket_name, '', delim, None, 1, True, ['_obj1_'], [])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 1, True, [], ['_under1/'])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 1, False, [], ['_under2/'], last=True)
    # no prefix, two entries per page
    token = validate_bucket_listv2(bucket_name, '', delim, None, 2, True, ['_obj1_'], ['_under1/'])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 2, False, [], ['_under2/'], last=True)
    # restrict to the '_under1/' subtree
    token = validate_bucket_listv2(bucket_name, '_under1/', delim, None, 1, True, ['_under1/bar'], [])
    token = validate_bucket_listv2(bucket_name, '_under1/', delim, token, 1, False, [], ['_under1/baz/'], last=True)
    validate_bucket_listv2(bucket_name, '_under1/', delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='percentage delimiter characters')
def test_bucket_list_delimiter_percentage():
    """Listing with '%' as the delimiter groups keys on '%'."""
    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter='%')
    eq(response['Delimiter'], '%')
    keys = _get_keys(response)
    # 'foo' contains no '%' and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # b%ar, b%az and c%ab are rolled up into the prefixes 'b%' and 'c%'
    eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='percentage delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_percentage():
    """V2: listing with '%' as the delimiter groups keys on '%'."""
    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
    eq(response['Delimiter'], '%')
    keys = _get_keys(response)
    # 'foo' contains no '%' and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # b%ar, b%az and c%ab are rolled up into the prefixes 'b%' and 'c%'
    eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='whitespace delimiter characters')
def test_bucket_list_delimiter_whitespace():
    """Listing with a space as the delimiter groups keys on ' '."""
    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
    eq(response['Delimiter'], ' ')
    keys = _get_keys(response)
    # 'foo' contains no space and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # 'b ar', 'b az' and 'c ab' are rolled up into the prefixes 'b ' and 'c '
    eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='whitespace delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_whitespace():
    """V2: listing with a space as the delimiter groups keys on ' '."""
    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ')
    eq(response['Delimiter'], ' ')
    keys = _get_keys(response)
    # 'foo' contains no space and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # 'b ar', 'b az' and 'c ab' are rolled up into the prefixes 'b ' and 'c '
    eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='dot delimiter characters')
def test_bucket_list_delimiter_dot():
    """Listing with '.' as the delimiter groups keys on '.'."""
    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter='.')
    eq(response['Delimiter'], '.')
    keys = _get_keys(response)
    # 'foo' contains no '.' and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # b.ar, b.az and c.ab are rolled up into the prefixes 'b.' and 'c.'
    eq(prefixes, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='dot delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_dot():
    """V2: listing with '.' as the delimiter groups keys on '.'."""
    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.')
    eq(response['Delimiter'], '.')
    keys = _get_keys(response)
    # 'foo' contains no '.' and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # b.ar, b.az and c.ab are rolled up into the prefixes 'b.' and 'c.'
    eq(prefixes, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-printable delimiter can be specified')
def test_bucket_list_delimiter_unreadable():
    """A newline delimiter is accepted; it matches nothing, so nothing groups."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='\x0a')
    eq(resp['Delimiter'], '\x0a')
    # '\n' appears in no key: every key lists, no common prefixes form
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-printable delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_unreadable():
    """V2: a newline delimiter is accepted; it matches nothing, so nothing groups."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='\x0a')
    eq(resp['Delimiter'], '\x0a')
    # '\n' appears in no key: every key lists, no common prefixes form
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty delimiter can be specified')
def test_bucket_list_delimiter_empty():
    """An empty Delimiter is accepted but not echoed back, and groups nothing."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='')
    # an empty Delimiter value is dropped from the response
    eq('Delimiter' in resp, False)
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='empty delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_empty():
    """V2: an empty Delimiter is accepted but not echoed back, and groups nothing."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='')
    # an empty Delimiter value is dropped from the response
    eq('Delimiter' in resp, False)
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unspecified delimiter defaults to none')
def test_bucket_list_delimiter_none():
    """Omitting Delimiter entirely lists every key with no grouping."""
    key_names = ['bar', 'baz', 'cab', 'foo']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects(Bucket=bucket_name)
    # no Delimiter was supplied, so none should be echoed in the response
    eq('Delimiter' in response, False)
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, key_names)
    eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unspecified delimiter defaults to none')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_none():
    """V2: omitting Delimiter entirely lists every key with no grouping."""
    key_names = ['bar', 'baz', 'cab', 'foo']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name)
    # no Delimiter was supplied, so none should be echoed in the response
    eq('Delimiter' in response, False)
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, key_names)
    eq(prefixes, [])
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_notempty():
    """FetchOwner=True makes ListObjectsV2 include Owner on each entry."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, FetchOwner=True)
    first = resp['Contents'][0]
    eq('Owner' in first, True)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_defaultempty():
    """Without FetchOwner, ListObjectsV2 omits Owner from each entry."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket)
    first = resp['Contents'][0]
    eq('Owner' in first, False)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_empty():
    """An explicit FetchOwner=False omits Owner from each entry."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, FetchOwner=False)
    first = resp['Contents'][0]
    eq('Owner' in first, False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unused delimiter is not found')
def test_bucket_list_delimiter_not_exist():
    """A delimiter absent from every key groups nothing."""
    key_names = ['bar', 'baz', 'cab', 'foo']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
    # '/' appears in none of the keys, so nothing rolls up into prefixes
    eq(response['Delimiter'], '/')
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, key_names)
    eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unused delimiter is not found')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_not_exist():
    """V2: a delimiter absent from every key groups nothing."""
    key_names = ['bar', 'baz', 'cab', 'foo']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
    # '/' appears in none of the keys, so nothing rolls up into prefixes
    eq(response['Delimiter'], '/')
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, key_names)
    eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='list with delimiter not skip special keys')
def test_bucket_list_delimiter_not_skip_special():
    """Keys with special chars sorting after a large '0/' group are still listed."""
    special = ['1999', '1999#', '1999+', '2000']
    all_keys = ['0/'] + ['0/%s' % i for i in range(1000, 1999)] + special
    bucket = _create_objects(keys=all_keys)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/')
    eq(resp['Delimiter'], '/')
    # the entire '0/...' subtree collapses to one prefix; the rest list whole
    eq(_get_keys(resp), special)
    eq(_get_prefixes(resp), ['0/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='returns only objects under prefix')
def test_bucket_list_prefix_basic():
    """Prefix='foo/' returns only the keys underneath 'foo/'."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    # 'quux' is filtered out; no delimiter means no prefix grouping
    eq(_get_keys(resp), ['foo/bar', 'foo/baz'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='returns only objects under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_basic():
    """V2: Prefix='foo/' returns only the keys underneath 'foo/'."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    # 'quux' is filtered out; no delimiter means no prefix grouping
    eq(_get_keys(resp), ['foo/bar', 'foo/baz'])
    eq(_get_prefixes(resp), [])
# just testing that we can do the delimeter and prefix logic on non-slashes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='prefixes w/o delimiters')
def test_bucket_list_prefix_alt():
    """A non-slash prefix 'ba' matches keys by plain string prefix."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='ba')
    eq(resp['Prefix'], 'ba')
    eq(_get_keys(resp), ['bar', 'baz'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='prefixes w/o delimiters')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_alt():
    """V2: a non-slash prefix 'ba' matches keys by plain string prefix."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='ba')
    eq(resp['Prefix'], 'ba')
    eq(_get_keys(resp), ['bar', 'baz'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='empty prefix returns everything')
def test_bucket_list_prefix_empty():
    """An empty Prefix matches every key."""
    names = ['foo/bar', 'foo/baz', 'quux']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Prefix='')
    eq(resp['Prefix'], '')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='empty prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_empty():
    """V2: an empty Prefix matches every key."""
    names = ['foo/bar', 'foo/baz', 'quux']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='')
    eq(resp['Prefix'], '')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='unspecified prefix returns everything')
def test_bucket_list_prefix_none():
    """All keys are returned when the prefix filters nothing.

    NOTE(review): the test name says "unspecified prefix" but an explicit
    Prefix='' is sent, making this identical to test_bucket_list_prefix_empty
    — confirm whether Prefix should be omitted here.
    """
    key_names = ['foo/bar', 'foo/baz', 'quux']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Prefix='')
    eq(response['Prefix'], '')
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, key_names)
    eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='unspecified prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_none():
    """V2: all keys are returned when the prefix filters nothing.

    NOTE(review): the test name says "unspecified prefix" but an explicit
    Prefix='' is sent, making this identical to test_bucket_listv2_prefix_empty
    — confirm whether Prefix should be omitted here.
    """
    key_names = ['foo/bar', 'foo/baz', 'quux']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
    eq(response['Prefix'], '')
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, key_names)
    eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='nonexistent prefix returns nothing')
def test_bucket_list_prefix_not_exist():
    """A prefix matching no key returns an empty listing."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='d')
    eq(resp['Prefix'], 'd')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='nonexistent prefix returns nothing')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_not_exist():
    """V2: a prefix matching no key returns an empty listing."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='d')
    eq(resp['Prefix'], 'd')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='non-printable prefix can be specified')
def test_bucket_list_prefix_unreadable():
    """A newline prefix is accepted and (matching nothing) returns nothing."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='\x0a')
    eq(resp['Prefix'], '\x0a')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='non-printable prefix can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_unreadable():
    """V2: a newline prefix is accepted and (matching nothing) returns nothing."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='\x0a')
    eq(resp['Prefix'], '\x0a')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
def test_bucket_list_prefix_delimiter_basic():
    """Prefix + delimiter together return only direct children of the prefix."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/', Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    eq(resp['Delimiter'], '/')
    # 'foo/bar' is a direct child; the deeper subtree rolls up to 'foo/baz/'
    eq(_get_keys(resp), ['foo/bar'])
    eq(_get_prefixes(resp), ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_basic():
    """V2: prefix + delimiter together return only direct children of the prefix."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='/', Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    eq(resp['Delimiter'], '/')
    # 'foo/bar' is a direct child; the deeper subtree rolls up to 'foo/baz/'
    eq(_get_keys(resp), ['foo/bar'])
    eq(_get_prefixes(resp), ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='non-slash delimiters')
def test_bucket_list_prefix_delimiter_alt():
    """A non-slash delimiter ('a') with prefix 'ba' rolls 'bazar' up to 'baza'."""
    bucket = _create_objects(keys=['bar', 'bazar', 'cab', 'foo'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='a', Prefix='ba')
    eq(resp['Prefix'], 'ba')
    eq(resp['Delimiter'], 'a')
    eq(_get_keys(resp), ['bar'])
    eq(_get_prefixes(resp), ['baza'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='non-slash delimiters')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_alt():
    """V2 variant: non-slash delimiter ('a') with prefix 'ba'.

    Restores the resource/method/operation/assertion @attr metadata that
    every sibling test in this suite carries, so attribute-based test
    selection treats this test consistently.
    """
    key_names = ['bar', 'bazar', 'cab', 'foo']
    bucket_name = _create_objects(keys=key_names)
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba')
    eq(response['Prefix'], 'ba')
    eq(response['Delimiter'], 'a')
    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(keys, ['bar'])
    eq(prefixes, ['baza'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
def test_bucket_list_prefix_delimiter_prefix_not_exist():
    """An unmatched prefix yields neither keys nor common prefixes."""
    bucket = _create_objects(keys=['b/a/r', 'b/a/c', 'b/a/g', 'g'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='d', Prefix='/')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
    """V2 variant: an unmatched prefix returns an empty listing."""
    bucket = _create_objects(keys=['b/a/r', 'b/a/c', 'b/a/g', 'g'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='d', Prefix='/')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
def test_bucket_list_prefix_delimiter_delimiter_not_exist():
    """A delimiter absent from the keys leaves all matches as plain keys."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='z', Prefix='b')
    eq(_get_keys(resp), ['b/a/c', 'b/a/g', 'b/a/r'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
    """V2 variant: a delimiter absent from the keys groups nothing."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='z', Prefix='b')
    eq(_get_keys(resp), ['b/a/c', 'b/a/g', 'b/a/r'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
    """Unmatched prefix and delimiter together yield an empty listing."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='z', Prefix='y')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
    """V2 variant: unmatched prefix and delimiter yield an empty listing."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='z', Prefix='y')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=1, marker')
def test_bucket_list_maxkeys_one():
    """MaxKeys=1 truncates after one key; Marker resumes the listing."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    client = get_client()
    page1 = client.list_objects(Bucket=bucket, MaxKeys=1)
    eq(page1['IsTruncated'], True)
    eq(_get_keys(page1), names[0:1])
    page2 = client.list_objects(Bucket=bucket, Marker=names[0])
    eq(page2['IsTruncated'], False)
    eq(_get_keys(page2), names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=1, marker')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_one():
    """V2 variant: MaxKeys=1 truncates; StartAfter resumes the listing."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    client = get_client()
    page1 = client.list_objects_v2(Bucket=bucket, MaxKeys=1)
    eq(page1['IsTruncated'], True)
    eq(_get_keys(page1), names[0:1])
    page2 = client.list_objects_v2(Bucket=bucket, StartAfter=names[0])
    eq(page2['IsTruncated'], False)
    eq(_get_keys(page2), names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=0')
def test_bucket_list_maxkeys_zero():
    """MaxKeys=0 returns no keys and is not reported as truncated."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket, MaxKeys=0)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=0')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_zero():
    """V2 variant: MaxKeys=0 returns no keys and is not truncated."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects_v2(Bucket=bucket, MaxKeys=0)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/o max_keys')
def test_bucket_list_maxkeys_none():
    """Without MaxKeys the server defaults to 1000 and returns everything."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
    eq(resp['MaxKeys'], 1000)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/o max_keys')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_none():
    """V2 variant: default MaxKeys is 1000 and the listing is complete."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
    eq(resp['MaxKeys'], 1000)
def get_http_response_body(**kwargs):
    """botocore after-call hook: stash the raw response body in a module global."""
    global http_response_body
    resp = kwargs['http_response']
    http_response_body = resp.__dict__['_content']
def parseXmlToJson(xml):
    """Recursively convert an ElementTree element into a nested dict.

    Leaf elements map tag -> text (empty string when text is None);
    elements with children map tag -> sub-dict. Repeated sibling tags
    overwrite one another, so only the last occurrence survives.
    """
    return {
        child.tag: parseXmlToJson(child) if len(list(child)) > 0 else (child.text or '')
        for child in xml
    }
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get usage by client')
@attr(assertion='account usage api')
@attr('fails_on_aws') # the usage API is a non-standard RGW extension
def test_account_usage():
    """Query the RGW account-usage extension and verify the quota summary."""
    client = get_client()

    # append the non-standard ?usage query parameter to the ListBuckets URL
    def add_usage(**kwargs):
        kwargs['params']['url'] += "?usage"

    client.meta.events.register('before-call.s3.ListBuckets', add_usage)
    # capture the raw body so the extension XML can be parsed directly
    client.meta.events.register('after-call.s3.ListBuckets', get_http_response_body)
    client.list_buckets()
    parsed = parseXmlToJson(ET.fromstring(http_response_body.decode('utf-8')))
    summary = parsed['Summary']
    eq(summary['QuotaMaxBytes'], '-1')
    eq(summary['QuotaMaxBuckets'], '1000')
    eq(summary['QuotaMaxObjCount'], '-1')
    eq(summary['QuotaMaxBytesPerBucket'], '-1')
    eq(summary['QuotaMaxObjCountPerBucket'], '-1')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='get usage by client')
@attr(assertion='account usage by head bucket')
@attr('fails_on_aws') # the usage headers are a non-standard RGW extension
def test_head_bucket_usage():
    """HEAD bucket exposes RGW usage and quota extension headers."""
    client = get_client()
    bucket = _create_objects(keys=['foo'])
    # capture the raw HTTP response so the X-RGW-* headers are visible
    client.meta.events.register('after-call.s3.HeadBucket', get_http_response)
    client.head_bucket(Bucket=bucket)
    hdrs = http_response['headers']
    eq(hdrs['X-RGW-Object-Count'], '1')
    eq(hdrs['X-RGW-Bytes-Used'], '3')
    eq(hdrs['X-RGW-Quota-User-Size'], '-1')
    eq(hdrs['X-RGW-Quota-User-Objects'], '-1')
    eq(hdrs['X-RGW-Quota-Max-Buckets'], '1000')
    eq(hdrs['X-RGW-Quota-Bucket-Size'], '-1')
    eq(hdrs['X-RGW-Quota-Bucket-Objects'], '-1')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_bucket_list_unordered():
    """Exercise RGW's allow-unordered listing extension.

    Verifies plain retrieval, prefix filtering, marker pagination without
    overlap, and that combining allow-unordered with a delimiter fails.
    """
    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
               'xix', 'yak', 'zoo']
    bucket_name = _create_objects(keys=keys_in)
    client = get_client()

    # adds the allow-unordered query parameter to every ListObjects call
    def add_unordered(**kwargs):
        kwargs['params']['url'] += "&allow-unordered=true"
    client.meta.events.register('before-call.s3.ListObjects', add_unordered)

    # test simple retrieval: same key set, order unspecified
    response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
    unordered_keys_out = _get_keys(response)
    eq(len(keys_in), len(unordered_keys_out))
    # compare as sorted copies; the old `keys_in.sort()` form compared
    # None to None (list.sort returns None) and always passed vacuously
    eq(sorted(keys_in), sorted(unordered_keys_out))

    # test retrieval with prefix
    response = client.list_objects(Bucket=bucket_name,
                                   MaxKeys=1000,
                                   Prefix="abc/")
    unordered_keys_out = _get_keys(response)
    eq(5, len(unordered_keys_out))

    # test incremental retrieval with marker
    response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
    unordered_keys_out = _get_keys(response)
    eq(6, len(unordered_keys_out))

    # now get the next bunch
    response = client.list_objects(Bucket=bucket_name,
                                   MaxKeys=6,
                                   Marker=unordered_keys_out[-1])
    unordered_keys_out2 = _get_keys(response)
    eq(6, len(unordered_keys_out2))

    # make sure there's no overlap between the incremental retrievals
    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
    eq(0, len(intersect))

    # verify that unordered used with delimiter results in error
    e = assert_raises(ClientError,
                      client.list_objects, Bucket=bucket_name, Delimiter="/")
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
@attr('list-objects-v2')
def test_bucket_listv2_unordered():
    """V2 variant of the allow-unordered listing test.

    Fixes two defects: the before-call hook was registered on the
    ListObjects event so it never fired for list_objects_v2 calls, and
    the final delimiter-error check used the v1 API (which would not
    carry the allow-unordered parameter after the event-name fix).
    """
    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
               'xix', 'yak', 'zoo']
    bucket_name = _create_objects(keys=keys_in)
    client = get_client()

    # adds the allow-unordered query parameter to every ListObjectsV2 call;
    # must be registered on the V2 operation event to take effect
    def add_unordered(**kwargs):
        kwargs['params']['url'] += "&allow-unordered=true"
    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)

    # test simple retrieval: same key set, order unspecified
    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
    unordered_keys_out = _get_keys(response)
    eq(len(keys_in), len(unordered_keys_out))
    # compare as sorted copies; the old `keys_in.sort()` form compared
    # None to None (list.sort returns None) and always passed vacuously
    eq(sorted(keys_in), sorted(unordered_keys_out))

    # test retrieval with prefix
    response = client.list_objects_v2(Bucket=bucket_name,
                                      MaxKeys=1000,
                                      Prefix="abc/")
    unordered_keys_out = _get_keys(response)
    eq(5, len(unordered_keys_out))

    # test incremental retrieval with StartAfter
    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
    unordered_keys_out = _get_keys(response)
    eq(6, len(unordered_keys_out))

    # now get the next bunch
    response = client.list_objects_v2(Bucket=bucket_name,
                                      MaxKeys=6,
                                      StartAfter=unordered_keys_out[-1])
    unordered_keys_out2 = _get_keys(response)
    eq(6, len(unordered_keys_out2))

    # make sure there's no overlap between the incremental retrievals
    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
    eq(0, len(intersect))

    # verify that unordered used with delimiter results in error
    e = assert_raises(ClientError,
                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='invalid max_keys')
def test_bucket_list_maxkeys_invalid():
    """A non-numeric max-keys query value is rejected with InvalidArgument."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    client = get_client()

    # tack an unparseable max-keys value onto the request URL
    # before list_objects sends it
    def add_invalid_maxkeys(**kwargs):
        kwargs['params']['url'] += "&max-keys=blah"

    client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)
    e = assert_raises(ClientError, client.list_objects, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, no marker')
def test_bucket_list_marker_none():
    """Omitting Marker echoes an empty marker in the response."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket)
    eq(resp['Marker'], '')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, empty marker')
def test_bucket_list_marker_empty():
    """An explicitly empty Marker behaves like no marker at all."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Marker='')
    eq(resp['Marker'], '')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='no pagination, empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken_empty():
    """An empty ContinuationToken behaves like no token at all."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, ContinuationToken='')
    eq(resp['ContinuationToken'], '')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken():
    """NextContinuationToken from a truncated listing resumes after key one."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    client = get_client()
    first = client.list_objects_v2(Bucket=bucket, MaxKeys=1)
    token = first['NextContinuationToken']
    second = client.list_objects_v2(Bucket=bucket, ContinuationToken=token)
    eq(second['ContinuationToken'], token)
    eq(second['IsTruncated'], False)
    eq(_get_keys(second), ['baz', 'foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken and startafter')
@attr('list-objects-v2')
def test_bucket_listv2_both_continuationtoken_startafter():
    """With both ContinuationToken and StartAfter set, the token governs."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    client = get_client()
    first = client.list_objects_v2(Bucket=bucket, StartAfter='bar', MaxKeys=1)
    token = first['NextContinuationToken']
    second = client.list_objects_v2(Bucket=bucket, StartAfter='bar', ContinuationToken=token)
    eq(second['ContinuationToken'], token)
    eq(second['StartAfter'], 'bar')
    eq(second['IsTruncated'], False)
    eq(_get_keys(second), ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='non-printing marker')
def test_bucket_list_marker_unreadable():
    """A non-printable Marker is accepted and sorts before every key."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Marker='\x0a')
    eq(resp['Marker'], '\x0a')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='non-printing startafter')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_unreadable():
    """A non-printable StartAfter is accepted and sorts before every key."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, StartAfter='\x0a')
    eq(resp['StartAfter'], '\x0a')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker not-in-list')
def test_bucket_list_marker_not_in_list():
    """A Marker between existing keys resumes at the next greater key."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket, Marker='blah')
    eq(resp['Marker'], 'blah')
    eq(_get_keys(resp), [ 'foo','quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter not-in-list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_not_in_list():
    """A StartAfter between existing keys resumes at the next greater key."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects_v2(Bucket=bucket, StartAfter='blah')
    eq(resp['StartAfter'], 'blah')
    eq(_get_keys(resp), ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker after list')
def test_bucket_list_marker_after_list():
    """A Marker past the last key yields an empty, non-truncated listing."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket, Marker='zzz')
    eq(resp['Marker'], 'zzz')
    listed = _get_keys(resp)
    eq(resp['IsTruncated'], False)
    eq(listed, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter after list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_after_list():
    """A StartAfter past the last key yields an empty, non-truncated listing."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects_v2(Bucket=bucket, StartAfter='zzz')
    eq(resp['StartAfter'], 'zzz')
    listed = _get_keys(resp)
    eq(resp['IsTruncated'], False)
    eq(listed, [])
def _compare_dates(datetime1, datetime2):
    """Assert the two datetimes match after zeroing microseconds on the first.

    datetime1 carries microseconds (e.g. from a local clock) while
    datetime2 does not (e.g. parsed from an HTTP header).
    """
    eq(datetime1.replace(microsecond=0), datetime2)
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list')
@attr(assertion='return same metadata')
def test_bucket_list_return_data():
    """Listing entries agree with per-object HEAD and ACL metadata."""
    names = ['bar', 'baz', 'foo']
    bucket = _create_objects(keys=names)
    client = get_client()
    # gather the expected metadata object by object
    expected = {}
    for name in names:
        head = client.head_object(Bucket=bucket, Key=name)
        acl = client.get_object_acl(Bucket=bucket, Key=name)
        expected[name] = {
            'DisplayName': acl['Owner']['DisplayName'],
            'ID': acl['Owner']['ID'],
            'ETag': head['ETag'],
            'LastModified': head['LastModified'],
            'ContentLength': head['ContentLength'],
        }
    # every listing entry must match what HEAD/ACL reported
    for obj in client.list_objects(Bucket=bucket)['Contents']:
        want = expected[obj['Key']]
        eq(obj['ETag'], want['ETag'])
        eq(obj['Size'], want['ContentLength'])
        eq(obj['Owner']['DisplayName'], want['DisplayName'])
        eq(obj['Owner']['ID'], want['ID'])
        _compare_dates(obj['LastModified'], want['LastModified'])
def check_configure_versioning_retry(bucket_name, status, expected_string):
    """Set bucket versioning, then poll until GET reports the expected status.

    Amazon is eventually consistent, so the read side is retried up to
    five times (one second apart) before the final assertion.
    """
    client = get_client()
    client.put_bucket_versioning(
        Bucket=bucket_name,
        VersioningConfiguration={'MFADelete': 'Disabled', 'Status': status})
    read_status = None
    for _ in range(5):
        try:
            read_status = client.get_bucket_versioning(Bucket=bucket_name)['Status']
        except KeyError:
            # Status is absent until versioning has ever been configured
            read_status = None
        if expected_string == read_status:
            break
        time.sleep(1)
    eq(expected_string, read_status)
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list when bucket versioning is configured')
@attr(assertion='return same metadata')
@attr('versioning')
def test_bucket_list_return_data_versioning():
    """Version listing entries agree with per-object HEAD and ACL metadata."""
    bucket = get_new_bucket()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    names = ['bar', 'baz', 'foo']
    bucket = _create_objects(bucket_name=bucket,keys=names)
    client = get_client()
    # gather the expected metadata object by object
    expected = {}
    for name in names:
        head = client.head_object(Bucket=bucket, Key=name)
        acl = client.get_object_acl(Bucket=bucket, Key=name)
        expected[name] = {
            'ID': acl['Owner']['ID'],
            'DisplayName': acl['Owner']['DisplayName'],
            'ETag': head['ETag'],
            'LastModified': head['LastModified'],
            'ContentLength': head['ContentLength'],
            'VersionId': head['VersionId'],
        }
    # every version entry must match what HEAD/ACL reported
    for obj in client.list_object_versions(Bucket=bucket)['Versions']:
        want = expected[obj['Key']]
        eq(obj['Owner']['DisplayName'], want['DisplayName'])
        eq(obj['ETag'], want['ETag'])
        eq(obj['Size'], want['ContentLength'])
        eq(obj['Owner']['ID'], want['ID'])
        eq(obj['VersionId'], want['VersionId'])
        _compare_dates(obj['LastModified'], want['LastModified'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='succeeds')
def test_bucket_list_objects_anonymous():
    """An unauthenticated client may list a public-read bucket."""
    bucket = get_new_bucket()
    get_client().put_bucket_acl(Bucket=bucket, ACL='public-read')
    anon = get_unauthenticated_client()
    anon.list_objects(Bucket=bucket)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='succeeds')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous():
    """V2 variant: an unauthenticated client may list a public-read bucket."""
    bucket = get_new_bucket()
    get_client().put_bucket_acl(Bucket=bucket, ACL='public-read')
    anon = get_unauthenticated_client()
    anon.list_objects_v2(Bucket=bucket)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='fails')
def test_bucket_list_objects_anonymous_fail():
    """Anonymous listing of a private bucket is denied with 403."""
    bucket = get_new_bucket()
    anon = get_unauthenticated_client()
    e = assert_raises(ClientError, anon.list_objects, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='fails')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous_fail():
    """V2 variant: anonymous listing of a private bucket is denied with 403."""
    bucket = get_new_bucket()
    anon = get_unauthenticated_client()
    e = assert_raises(ClientError, anon.list_objects_v2, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_bucket_notexist():
    """Listing a never-created bucket fails with NoSuchBucket."""
    bucket = get_new_bucket_name()
    client = get_client()
    e = assert_raises(ClientError, client.list_objects, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existant bucket with list-objects-v2')
@attr(assertion='fails 404')
@attr('list-objects-v2')
def test_bucketv2_notexist():
    """V2 variant: listing a never-created bucket fails with NoSuchBucket."""
    bucket = get_new_bucket_name()
    client = get_client()
    e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_bucket_delete_notexist():
    """Deleting a never-created bucket fails with NoSuchBucket."""
    bucket = get_new_bucket_name()
    client = get_client()
    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-empty bucket')
@attr(assertion='fails 409')
def test_bucket_delete_nonempty():
    """Deleting a bucket that still holds objects fails with BucketNotEmpty."""
    bucket = _create_objects(keys=['foo'])
    client = get_client()
    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 409)
    eq(error_code, 'BucketNotEmpty')
def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
try:
client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
results[i] = True
except:
results[i] = False
def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
    """Spawn *num* threads that each set *canned_acl* on the bucket.

    Each thread records its outcome in results[i]; the started Thread
    objects are returned so the caller can join them.
    """
    threads = []
    for i in range(num):
        worker = threading.Thread(
            target=_do_set_bucket_canned_acl,
            args=(client, bucket_name, canned_acl, i, results))
        worker.start()
        threads.append(worker)
    return threads
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='bucket')
@attr(method='put')
@attr(operation='concurrent set of acls on a bucket')
@attr(assertion='works')
def test_bucket_concurrent_set_canned_acl():
    """Fifty concurrent put_bucket_acl calls on one bucket all succeed."""
    bucket = get_new_bucket()
    client = get_client()
    # boto2 retry defaults to 5, so a thread would have to fail at least
    # five times; 50 threads seems large enough to surface such a bug
    num_threads = 50
    results = [None] * num_threads
    workers = _do_set_bucket_canned_acl_concurrent(client, bucket, 'public-read', num_threads, results)
    _do_wait_completion(workers)
    for outcome in results:
        eq(outcome, True)
@attr(resource='object')
@attr(method='put')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_object_write_to_nonexist_bucket():
    """PUT into a bucket that was never created fails with NoSuchBucket.

    Drops the unused `key_names` local left over from the copy-pasted
    listing-test template.
    """
    bucket_name = 'whatchutalkinboutwillis'
    client = get_client()
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='del')
@attr(operation='deleted bucket')
@attr(assertion='fails 404')
def test_bucket_create_delete():
    """Deleting an already-deleted bucket fails with NoSuchBucket."""
    bucket = get_new_bucket()
    client = get_client()
    client.delete_bucket(Bucket=bucket)
    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written')
@attr(assertion='fails 404')
def test_object_read_not_exist():
    """GET on a key that was never written fails with NoSuchKey."""
    bucket = get_new_bucket()
    client = get_client()
    e = assert_raises(ClientError, client.get_object, Bucket=bucket, Key='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
# module-level slot filled in by the after-call hook below
http_response = None

def get_http_response(**kwargs):
    """botocore after-call hook: stash the raw HTTP response dict in a global."""
    global http_response
    resp = kwargs['http_response']
    http_response = resp.__dict__
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written to raise one error response')
@attr(assertion='RequestId appears in the error response')
def test_object_requestid_matches_header_on_error():
    """RequestId in an error body matches the one in the response metadata."""
    bucket = get_new_bucket()
    client = get_client()
    # capture the raw http response of the failing GET
    client.meta.events.register('after-call.s3.GetObject', get_http_response)
    e = assert_raises(ClientError, client.get_object, Bucket=bucket, Key='bar')
    body_xml = ET.fromstring(http_response['_content'])
    request_id = body_xml.find('.//RequestId').text
    assert request_id is not None
    eq(request_id, e.response['ResponseMetadata']['RequestId'])
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {'Key': key}
objs_list.append(obj_dict)
objs_dict = {'Objects': objs_list}
return objs_dict
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects')
@attr(assertion='deletes multiple objects with a single call')
def test_multi_object_delete():
    """delete_objects removes several keys in one call and is idempotent."""
    names = ['key0', 'key1', 'key2']
    bucket = _create_objects(keys=names)
    client = get_client()
    eq(len(client.list_objects(Bucket=bucket)['Contents']), 3)

    payload = _make_objs_dict(key_names=names)
    resp = client.delete_objects(Bucket=bucket, Delete=payload)
    eq(len(resp['Deleted']), 3)
    assert 'Errors' not in resp
    assert 'Contents' not in client.list_objects(Bucket=bucket)

    # deleting already-deleted keys still reports them all as Deleted
    resp = client.delete_objects(Bucket=bucket, Delete=payload)
    eq(len(resp['Deleted']), 3)
    assert 'Errors' not in resp
    assert 'Contents' not in client.list_objects(Bucket=bucket)
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects with list-objects-v2')
@attr(assertion='deletes multiple objects with a single call')
@attr('list-objects-v2')
def test_multi_objectv2_delete():
    """V2 variant: delete_objects removes several keys and is idempotent."""
    names = ['key0', 'key1', 'key2']
    bucket = _create_objects(keys=names)
    client = get_client()
    eq(len(client.list_objects_v2(Bucket=bucket)['Contents']), 3)

    payload = _make_objs_dict(key_names=names)
    resp = client.delete_objects(Bucket=bucket, Delete=payload)
    eq(len(resp['Deleted']), 3)
    assert 'Errors' not in resp
    assert 'Contents' not in client.list_objects_v2(Bucket=bucket)

    # deleting already-deleted keys still reports them all as Deleted
    resp = client.delete_objects(Bucket=bucket, Delete=payload)
    eq(len(resp['Deleted']), 3)
    assert 'Errors' not in resp
    assert 'Contents' not in client.list_objects_v2(Bucket=bucket)
@attr(resource='object')
@attr(method='put')
@attr(operation='write zero-byte key')
@attr(assertion='correct content length')
def test_object_head_zero_bytes():
    """HEAD on an empty object reports ContentLength 0."""
    bucket = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket, Key='foo', Body='')
    head = client.head_object(Bucket=bucket, Key='foo')
    eq(head['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct etag')
def test_object_write_check_etag():
    """PUT returns 200 and the ETag equal to the MD5 of the body."""
    bucket = get_new_bucket()
    resp = get_client().put_object(Bucket=bucket, Key='foo', Body='bar')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(resp['ETag'], '"37b51d194a7513e45b56f6524f2d51f2"')
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct cache control header')
def test_object_write_cache_control():
    """A Cache-Control value set at PUT time must be echoed back by HEAD."""
    bucket_name = get_new_bucket()
    client = get_client()
    cache_control = 'public, max-age=14400'
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar',
                      CacheControl=cache_control)
    head = client.head_object(Bucket=bucket_name, Key='foo')
    eq(head['ResponseMetadata']['HTTPHeaders']['cache-control'], cache_control)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct expires header')
def test_object_write_expires():
    """An Expires timestamp set at PUT time must round-trip through HEAD."""
    bucket_name = get_new_bucket()
    client = get_client()
    # Use a timezone-aware timestamp well in the future.
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Expires=expires)
    head = client.head_object(Bucket=bucket_name, Key='foo')
    _compare_dates(expires, head['Expires'])
def _get_body(response):
body = response['Body']
got = body.read()
if type(got) is bytes:
got = got.decode()
return got
@attr(resource='object')
@attr(method='all')
@attr(operation='complete object life cycle')
@attr(assertion='read back what we wrote and rewrote')
def test_object_write_read_update_read_delete():
    """Exercise the full object life cycle: write, read, overwrite, re-read,
    delete."""
    bucket_name = get_new_bucket()
    client = get_client()

    # Write then read back.
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    content = _get_body(client.get_object(Bucket=bucket_name, Key='foo'))
    eq(content, 'bar')

    # Overwrite then read back the new value.
    client.put_object(Bucket=bucket_name, Key='foo', Body='soup')
    content = _get_body(client.get_object(Bucket=bucket_name, Key='foo'))
    eq(content, 'soup')

    # Delete.
    client.delete_object(Bucket=bucket_name, Key='foo')
def _set_get_metadata(metadata, bucket_name=None):
"""
create a new bucket new or use an existing
name to create an object that bucket,
set the meta1 property to a specified, value,
and then re-read and return that property
"""
if bucket_name is None:
bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': metadata}
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
response = client.get_object(Bucket=bucket_name, Key='foo')
return response['Metadata']['meta1']
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='reread what we wrote')
def test_object_set_get_metadata_none_to_good():
    """A freshly written metadata value reads back unchanged."""
    eq(_set_get_metadata('mymeta'), 'mymeta')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='write empty value, returns empty value')
def test_object_set_get_metadata_none_to_empty():
    """An empty metadata value is stored and read back as empty."""
    eq(_set_get_metadata(''), '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='empty value replaces old')
def test_object_set_get_metadata_overwrite_to_empty():
    """Rewriting the object with empty metadata replaces the old value."""
    bucket_name = get_new_bucket()
    eq(_set_get_metadata('oldmeta', bucket_name), 'oldmeta')
    eq(_set_get_metadata('', bucket_name), '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='UTF-8 values passed through')
# TODO: the decoding of this unicode metadata is not happening properly for unknown reasons
@attr('fails_on_rgw')
def test_object_set_get_unicode_metadata():
    """Non-ASCII metadata injected via a raw header must survive a round trip.

    The value is planted with a ``before-call`` event hook so the non-ASCII
    header bypasses boto3's own metadata encoding.
    """
    bucket_name = get_new_bucket()
    client = get_client()

    def set_unicode_metadata(**kwargs):
        kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"

    client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    # Bug fix: the old code first called .decode('utf-8') on this value, which
    # raises AttributeError on Python 3 (metadata values are already str), and
    # left debug print()s behind. Read the str value directly.
    got = response['Metadata']['meta1']
    eq(got, u"Hello World\xe9")
def _set_get_metadata_unreadable(metadata, bucket_name=None):
"""
set and then read back a meta-data value (which presumably
includes some interesting characters), and return a list
containing the stored value AND the encoding with which it
was returned.
This should return a 400 bad request because the webserver
rejects the request.
"""
bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': metadata}
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict)
return e
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='non-UTF-8 values detected, but rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_non_utf8_metadata():
    """Non-UTF-8 metadata must be rejected at the HTTP layer (400 or 403)."""
    metadata = '\x04mymeta'
    e = _set_get_metadata_unreadable(metadata)
    status, error_code = _get_status_and_error_code(e.response)
    # Bug fix: `eq(status, 400 or 403)` only ever compared against 400,
    # because `400 or 403` evaluates to 400. Accept either status explicitly.
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing prefixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_prefix():
    """Metadata starting with a non-printing byte must be rejected (400/403)."""
    metadata = '\x04w'
    e = _set_get_metadata_unreadable(metadata)
    status, error_code = _get_status_and_error_code(e.response)
    # Bug fix: `eq(status, 400 or 403)` only ever compared against 400,
    # because `400 or 403` evaluates to 400. Accept either status explicitly.
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing suffixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_suffix():
    """Metadata ending with a non-printing byte must be rejected (400/403)."""
    metadata = 'h\x04'
    e = _set_get_metadata_unreadable(metadata)
    status, error_code = _get_status_and_error_code(e.response)
    # Bug fix: `eq(status, 400 or 403)` only ever compared against 400,
    # because `400 or 403` evaluates to 400. Accept either status explicitly.
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing in-fixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_infix():
    """Metadata with an embedded non-printing byte must be rejected (400/403)."""
    metadata = 'h\x04w'
    e = _set_get_metadata_unreadable(metadata)
    status, error_code = _get_status_and_error_code(e.response)
    # Bug fix: `eq(status, 400 or 403)` only ever compared against 400,
    # because `400 or 403` evaluates to 400. Accept either status explicitly.
    # (Also fixed the "non-priting" typo in the assertion attribute.)
    assert status in (400, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write')
@attr(assertion='replaces previous metadata')
def test_object_metadata_replaced_on_put():
    """A second PUT without metadata must wipe the metadata from the first."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar',
                      Metadata={'meta1': 'bar'})
    # Overwrite with no Metadata argument: PUT replaces, never merges.
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['Metadata'], {})
@attr(resource='object')
@attr(method='put')
@attr(operation='data write from file (w/100-Continue)')
@attr(assertion='succeeds and returns written data')
def test_object_write_file():
    """Bytes written via PUT must read back as the equivalent text."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar'.encode('utf-8'))
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
def _get_post_url(bucket_name):
    """Return the browser-based POST upload URL for *bucket_name*."""
    return '%s/%s' % (get_config_endpoint(), bucket_name)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_anonymous_request():
    """An unauthenticated browser POST to a public-read-write bucket succeeds
    and the uploaded content reads back."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    url = _get_post_url(bucket_name)
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("acl", "public-read"),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_request():
    """A policy-signed (AWS SigV2) browser POST upload succeeds and the
    uploaded content reads back."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    json_policy_document = json.JSONEncoder().encode(policy_document)
    # Bug fix: this bytes() conversion was performed twice; once is enough.
    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
    policy = base64.b64encode(bytes_json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    # Signature = base64(HMAC-SHA1(secret, base64(policy))) per the S3 POST spec.
    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, no content-type header')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_no_content_type():
    """A signed POST upload with no Content-Type field still succeeds when the
    policy places no condition on Content-Type."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    # Signature = base64(HMAC-SHA1(secret, base64(policy))).
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key="foo.txt")
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, bad access key')
@attr(assertion='fails')
def test_post_object_authenticated_request_bad_access_key():
    """A POST upload signed with the real secret but presenting a bogus access
    key must be rejected with 403."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    # 'foo' is not a valid access key id, so authentication must fail.
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", 'foo'),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 201')
def test_post_object_set_success_code():
    """success_action_status=201 makes the POST answer 201 with an XML body
    naming the uploaded key."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("acl", "public-read"),
        ("success_action_status", "201"),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 201)
    # The 201 response body is XML; its <Key> element names the object.
    message = ET.fromstring(r.content).find('Key')
    eq(message.text, 'foo.txt')
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_set_invalid_success_code():
    """An unsupported success_action_status (404) is ignored: the server falls
    back to the default 204 with an empty body."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("acl", "public-read"),
        ("success_action_status", "404"),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    eq(r.content.decode(), '')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_upload_larger_than_chunk():
    """A signed POST upload of a multi-megabyte body (larger than one transfer
    chunk) succeeds and reads back intact."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 5*1024*1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    # 3 MiB payload, within the policy's 5 MiB content-length-range.
    foo_string = 'foo' * 1024*1024
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', foo_string),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), foo_string)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_set_key_from_filename():
    """A key of "$(unknown)" is substituted with the uploaded file's filename,
    so the object lands under 'foo.txt'."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    # The file part carries a (filename, content) tuple; the key placeholder
    # is replaced with that filename server-side.
    payload = OrderedDict([
        ("key", "$(unknown)"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('foo.txt', 'bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_ignored_header():
    """An extra non-amz form field (x-ignore-foo) not covered by the policy is
    ignored and the upload still succeeds."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("x-ignore-foo", "bar"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_case_insensitive_condition_fields():
    """Policy condition names and POST form-field names are matched
    case-insensitively, so mixed-case variants still validate."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bUcKeT": bucket_name},
            ["StArTs-WiTh", "$KeY", "foo"],
            {"AcL": "private"},
            ["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    json_policy_document = json.JSONEncoder().encode(policy_document)
    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
    policy = base64.b64encode(bytes_json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())

    # Cleanup: dropped an unused 3 MB `foo_string` local that was copy-pasted
    # from the larger-than-chunk test but never referenced here.
    payload = OrderedDict([
        ("kEy", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("aCl", "private"),
        ("signature", signature),
        ("pOLICy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with escaped leading $ and returns written data')
def test_post_object_escaped_field_values():
    """A key beginning with an escaped dollar sign validates against its
    policy condition and the object reads back under that literal name."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns redirect url')
def test_post_object_success_redirect_action():
    """success_action_redirect makes the server answer with a redirect to the
    given URL, carrying bucket, key and etag query parameters."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    redirect_url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["eq", "$success_action_redirect", redirect_url],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("success_action_redirect", redirect_url),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 200)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    # requests followed the redirect; r.url is the redirect target plus the
    # bucket/key/etag query string (etag is %22-quoted).
    expected = '{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(
        rurl=redirect_url, bucket=bucket_name, key='foo.txt',
        etag=response['ETag'].strip('"'))
    eq(r.url, expected)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid signature error')
def test_post_object_invalid_signature():
    """A POST upload whose signature bytes are reversed must be rejected 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    # [::-1] corrupts the otherwise-valid signature.
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())[::-1]

    payload = OrderedDict([
        ("key", "\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with access key does not exist error')
def test_post_object_invalid_access_key():
    """A POST upload presenting a reversed (nonexistent) access key id must be
    rejected with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    # [::-1] turns the valid access key into one that does not exist.
    payload = OrderedDict([
        ("key", "\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id[::-1]),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid expiration error')
def test_post_object_invalid_date_format():
    """A policy whose expiration is not ISO-8601 (str(datetime) form) must be
    rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    # str(expires) yields "YYYY-MM-DD HH:MM:SS.ffffff+00:00", not the
    # required "%Y-%m-%dT%H:%M:%SZ" format.
    policy_document = {
        "expiration": str(expires),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing key error')
def test_post_object_no_key_specified():
    """A POST upload whose form omits the mandatory 'key' field must be
    rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    # Note: no ("key", ...) field in the form.
    payload = OrderedDict([
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing signature error')
def test_post_object_missing_signature():
    """A POST upload carrying a policy but no 'signature' field must be
    rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    # The signature is computed but deliberately left out of the form below.
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with extra input fields policy error')
def test_post_object_missing_policy_condition():
    """A policy lacking a condition for the bucket must fail validation (403),
    since every relevant form field needs a matching condition."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    # Note: no {"bucket": ...} condition.
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds using starts-with restriction on metadata header')
def test_post_object_user_specified_header():
    """A starts-with condition on x-amz-meta-foo is satisfied by 'barclamp'
    and the metadata value is stored on the object."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
            ["starts-with", "$x-amz-meta-foo", "bar"]
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('x-amz-meta-foo', 'barclamp'),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(response['Metadata']['foo'], 'barclamp')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy condition failed error due to missing field in POST request')
def test_post_object_request_missing_policy_specified_field():
    """The policy requires an x-amz-meta-foo field which the form omits, so
    validation must fail with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
            ["starts-with", "$x-amz-meta-foo", "bar"]
        ],
    }

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    # Note: no x-amz-meta-foo field, despite the policy condition above.
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with conditions must be list error')
def test_post_object_condition_is_case_sensitive():
    """Policy keys are case-sensitive: 'CONDITIONS' is not recognized as
    'conditions', so the upload must fail with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        # Deliberately wrong case; the server must treat conditions as missing.
        "CONDITIONS": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with expiration must be string error')
def test_post_object_expires_is_case_sensitive():
    """Policy keys are case-sensitive: 'EXPIRATION' is not recognized as
    'expiration', so the upload must fail with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        # Deliberately wrong case; the server must treat expiration as missing.
        "EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy expired error')
def test_post_object_expired_policy():
    """A policy whose expiration is in the past (now - 6000s) must be
    rejected with 403."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    # Negative delta puts the expiration firmly in the past.
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=-6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails using equality restriction on metadata header')
def test_post_object_invalid_request_field_value():
    """An 'eq' condition pinning x-amz-meta-foo to the empty string must
    reject a request that supplies a non-empty value (403)."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
            # Policy demands an empty value; the request sends 'barclamp'.
            ["eq", "$x-amz-meta-foo", ""],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("x-amz-meta-foo", "barclamp"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing expiration error')
def test_post_object_missing_expires_condition():
    """A policy document with no 'expiration' key must be rejected with 400.

    The original computed an unused expiration timestamp and an unused
    client; both are dropped since the policy deliberately omits expiration.
    """
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    # No "expiration" key on purpose.
    policy_document = {
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing conditions error')
def test_post_object_missing_conditions_list():
    """A policy document with no 'conditions' list must be rejected with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    # Expiration only; "conditions" is deliberately absent.
    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with allowable upload size exceeded error')
def test_post_object_upload_size_limit_exceeded():
    """content-length-range [0, 0] makes any non-empty body too large;
    the 3-byte upload must be rejected with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Maximum size of zero bytes.
            ["content-length-range", 0, 0],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid content length error')
def test_post_object_missing_content_length_argument():
    """A content-length-range condition with only one bound is malformed
    and must be rejected with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Upper bound deliberately missing.
            ["content-length-range", 0],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid JSON error')
def test_post_object_invalid_content_length_argument():
    """A content-length-range with a negative lower bound is invalid and
    must be rejected with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Negative lower bound deliberately invalid.
            ["content-length-range", -1, 0],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with upload size less than minimum allowable error')
def test_post_object_upload_size_below_minimum():
    """content-length-range [512, 1000] requires at least 512 bytes; the
    3-byte upload must be rejected with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Minimum size of 512 bytes; the body below is only 3 bytes.
            ["content-length-range", 512, 1000],
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='empty conditions return appropriate error response')
def test_post_object_empty_conditions():
    """A conditions list containing only an empty dict is invalid and must
    be rejected with 400."""
    bucket_name = get_new_bucket()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        # A single empty condition object — deliberately meaningless.
        "conditions": [
            {},
        ],
    }

    # json.dumps is the idiomatic equivalent of json.JSONEncoder().encode.
    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(aws_secret_access_key.encode('utf-8'), policy, hashlib.sha1).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", ("bar")),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: the latest ETag')
@attr(assertion='succeeds')
def test_get_object_ifmatch_good():
    """GET with If-Match equal to the object's current ETag returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()

    put_response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    current_etag = put_response['ETag']

    get_response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=current_etag)
    eq(_get_body(get_response), 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_get_object_ifmatch_failed():
    """GET with a non-matching If-Match ETag fails 412 PreconditionFailed."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    err = assert_raises(ClientError, client.get_object,
                        Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: the latest ETag')
@attr(assertion='fails 304')
def test_get_object_ifnonematch_good():
    """GET with If-None-Match equal to the current ETag fails 304 Not Modified."""
    bucket_name = get_new_bucket()
    client = get_client()

    current_etag = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')['ETag']

    err = assert_raises(ClientError, client.get_object,
                        Bucket=bucket_name, Key='foo', IfNoneMatch=current_etag)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 304)
    eq(err.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: bogus ETag')
@attr(assertion='succeeds')
def test_get_object_ifnonematch_failed():
    """GET with a non-matching If-None-Match ETag succeeds and returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    get_response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ')
    eq(_get_body(get_response), 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: before')
@attr(assertion='succeeds')
def test_get_object_ifmodifiedsince_good():
    """GET with If-Modified-Since far in the past returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    get_response = client.get_object(Bucket=bucket_name, Key='foo',
                                     IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
    eq(_get_body(get_response), 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: after')
@attr(assertion='fails 304')
def test_get_object_ifmodifiedsince_failed():
    """GET with If-Modified-Since later than the object's mtime fails 304."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')

    # Drop the timezone offset so strptime can parse the timestamp.
    timestamp = str(response['LastModified']).split('+')[0]
    mtime = datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')

    # One second past the stored mtime, formatted as an HTTP date header.
    one_second_later = mtime + datetime.timedelta(seconds=1)
    header_value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", one_second_later.timetuple())

    # Let the wall clock pass the header value before issuing the request.
    time.sleep(1)

    err = assert_raises(ClientError, client.get_object,
                        Bucket=bucket_name, Key='foo', IfModifiedSince=header_value)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 304)
    eq(err.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: before')
@attr(assertion='fails 412')
def test_get_object_ifunmodifiedsince_good():
    """GET with If-Unmodified-Since in the distant past fails 412."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    err = assert_raises(ClientError, client.get_object, Bucket=bucket_name,
                        Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: after')
@attr(assertion='succeeds')
def test_get_object_ifunmodifiedsince_failed():
    """GET with If-Unmodified-Since in the far future returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    get_response = client.get_object(Bucket=bucket_name, Key='foo',
                                     IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
    eq(_get_body(get_response), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write w/ If-Match: the latest ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_good():
    """PUT with If-Match set to the current ETag overwrites the object."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
    current_etag = response['ETag'].replace('"', '')

    # Inject the If-Match header into the next PutObject request.
    add_if_match = (lambda **kwargs:
                    kwargs['params']['headers'].update({'If-Match': current_etag}))
    client.meta.events.register('before-call.s3.PutObject', add_if_match)
    client.put_object(Bucket=bucket_name, Key='foo', Body='zar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'zar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_put_object_ifmatch_failed():
    """PUT with a non-matching If-Match ETag fails 412 and leaves the
    original object intact."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')

    # Inject a bogus If-Match header into the next PutObject request.
    add_if_match = (lambda **kwargs:
                    kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
    client.meta.events.register('before-call.s3.PutObject', add_if_match)

    err = assert_raises(ClientError, client.put_object,
                        Bucket=bucket_name, Key='foo', Body='zar')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')

    # The failed overwrite must not have touched the stored data.
    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-Match: *')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_overwrite_existed_good():
    """PUT with If-Match: * overwrites an existing object."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')

    # If-Match: * matches any existing object.
    add_if_match = (lambda **kwargs:
                    kwargs['params']['headers'].update({'If-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', add_if_match)
    client.put_object(Bucket=bucket_name, Key='foo', Body='zar')

    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifmatch_nonexisted_failed():
    """PUT with If-Match: * on a missing key fails 412 and creates nothing."""
    bucket_name = get_new_bucket()
    client = get_client()

    # If-Match: * requires the object to already exist.
    add_if_match = (lambda **kwargs:
                    kwargs['params']['headers'].update({'If-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', add_if_match)

    err = assert_raises(ClientError, client.put_object,
                        Bucket=bucket_name, Key='foo', Body='bar')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')

    # The failed PUT must not have created the key.
    err = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_good():
    """PUT with If-None-Match set to a non-matching ETag overwrites the object."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')

    # A bogus ETag never matches, so the conditional PUT proceeds.
    add_header = (lambda **kwargs:
                  kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
    client.meta.events.register('before-call.s3.PutObject', add_header)
    client.put_object(Bucket=bucket_name, Key='foo', Body='zar')

    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_failed():
    """PUT with If-None-Match equal to the current ETag fails 412 and leaves
    the stored data unchanged."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
    current_etag = response['ETag'].replace('"', '')

    # The current ETag does match, so the conditional PUT must be refused.
    add_header = (lambda **kwargs:
                  kwargs['params']['headers'].update({'If-None-Match': current_etag}))
    client.meta.events.register('before-call.s3.PutObject', add_header)

    err = assert_raises(ClientError, client.put_object,
                        Bucket=bucket_name, Key='foo', Body='zar')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')

    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-None-Match: *')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_nonexisted_good():
    """PUT with If-None-Match: * on a missing key creates the object."""
    bucket_name = get_new_bucket()
    client = get_client()

    # If-None-Match: * succeeds only when the key does not exist yet.
    add_header = (lambda **kwargs:
                  kwargs['params']['headers'].update({'If-None-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', add_header)
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_overwrite_existed_failed():
    """PUT with If-None-Match: * on an existing key fails 412 and leaves
    the stored data unchanged."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')

    # If-None-Match: * must refuse to overwrite an existing object.
    add_header = (lambda **kwargs:
                  kwargs['params']['headers'].update({'If-None-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', add_header)

    err = assert_raises(ClientError, client.put_object,
                        Bucket=bucket_name, Key='foo', Body='zar')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')

    eq(_get_body(client.get_object(Bucket=bucket_name, Key='foo')), 'bar')
def _setup_bucket_object_acl(bucket_acl, object_acl):
    """Create a new bucket with *bucket_acl*, put an empty key 'foo' in it
    with *object_acl*, and return the bucket name."""
    name = get_new_bucket_name()
    s3 = get_client()
    s3.create_bucket(ACL=bucket_acl, Bucket=name)
    s3.put_object(ACL=object_acl, Bucket=name, Key='foo')
    return name
def _setup_bucket_acl(bucket_acl=None):
    """Create a new bucket with the given ACL and return its name."""
    name = get_new_bucket_name()
    get_client().create_bucket(ACL=bucket_acl, Bucket=name)
    return name
@attr(resource='object')
@attr(method='get')
@attr(operation='publically readable bucket')
@attr(assertion='bucket is readable')
def test_object_raw_get():
    """An anonymous client can GET an object from a public-read bucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    anon_client = get_unauthenticated_client()

    resp = anon_client.get_object(Bucket=bucket_name, Key='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_get_bucket_gone():
    """Anonymous GET after the object and bucket are deleted fails 404
    NoSuchBucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    client.delete_bucket(Bucket=bucket_name)

    anon_client = get_unauthenticated_client()
    err = assert_raises(ClientError, anon_client.get_object,
                        Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_delete_key_bucket_gone():
    """Anonymous DELETE after the object and bucket are deleted fails 404
    NoSuchBucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    client.delete_bucket(Bucket=bucket_name)

    anon_client = get_unauthenticated_client()
    err = assert_raises(ClientError, anon_client.delete_object,
                        Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object')
@attr(assertion='fails 404')
def test_object_raw_get_object_gone():
    """Anonymous GET of a deleted object (bucket intact) fails 404 NoSuchKey."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    get_client().delete_object(Bucket=bucket_name, Key='foo')

    anon_client = get_unauthenticated_client()
    err = assert_raises(ClientError, anon_client.get_object,
                        Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='head bucket')
@attr(assertion='succeeds')
def test_bucket_head():
    """HEAD on an existing bucket returns 200."""
    bucket_name = get_new_bucket()
    resp = get_client().head_bucket(Bucket=bucket_name)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_bucket_head_notexist():
    """HEAD on a bucket name that was never created fails 404."""
    bucket_name = get_new_bucket_name()
    client = get_client()

    err = assert_raises(ClientError, client.head_bucket, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 404)

    # n.b., RGW does not send a response document for this operation,
    # which seems consistent with
    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
    #eq(error_code, 'NoSuchKey')
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='read bucket extended information')
@attr(assertion='extended information is getting updated')
def test_bucket_head_extended():
    """RGW-specific x-rgw-object-count / x-rgw-bytes-used headers track the
    bucket's contents (0/0 when empty, 3/9 after three 3-byte objects)."""
    bucket_name = get_new_bucket()
    client = get_client()

    headers = client.head_bucket(Bucket=bucket_name)['ResponseMetadata']['HTTPHeaders']
    eq(int(headers['x-rgw-object-count']), 0)
    eq(int(headers['x-rgw-bytes-used']), 0)

    # Three keys, each stored with its own name as a 3-byte body.
    _create_objects(bucket_name=bucket_name, keys=['foo', 'bar', 'baz'])

    headers = client.head_bucket(Bucket=bucket_name)['ResponseMetadata']['HTTPHeaders']
    eq(int(headers['x-rgw-object-count']), 3)
    eq(int(headers['x-rgw-bytes-used']), 9)
@attr(resource='bucket.acl')
@attr(method='get')
@attr(operation='unauthenticated on private bucket')
@attr(assertion='succeeds')
def test_object_raw_get_bucket_acl():
    """A public-read object is readable anonymously even in a private bucket."""
    bucket_name = _setup_bucket_object_acl('private', 'public-read')
    anon_client = get_unauthenticated_client()

    resp = anon_client.get_object(Bucket=bucket_name, Key='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object.acl')
@attr(method='get')
@attr(operation='unauthenticated on private object')
@attr(assertion='fails 403')
def test_object_raw_get_object_acl():
    """A private object in a public-read bucket is not readable anonymously
    (403 AccessDenied)."""
    bucket_name = _setup_bucket_object_acl('public-read', 'private')
    anon_client = get_unauthenticated_client()

    err = assert_raises(ClientError, anon_client.get_object,
                        Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/object')
@attr(assertion='succeeds')
def test_object_raw_authenticated():
    """An authenticated client can GET a public-read object from a
    public-read bucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    resp = get_client().get_object(Bucket=bucket_name, Key='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on private bucket/private object with modified response headers')
@attr(assertion='succeeds')
def test_object_raw_response_headers():
    """The Response* GET parameters override the corresponding response
    headers returned by the server."""
    bucket_name = _setup_bucket_object_acl('private', 'private')
    client = get_client()

    resp = client.get_object(
        Bucket=bucket_name, Key='foo',
        ResponseCacheControl='no-cache',
        ResponseContentDisposition='bla',
        ResponseContentEncoding='aaa',
        ResponseContentLanguage='esperanto',
        ResponseContentType='foo/bar',
        ResponseExpires='123')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    headers = resp['ResponseMetadata']['HTTPHeaders']
    eq(headers['content-type'], 'foo/bar')
    eq(headers['content-disposition'], 'bla')
    eq(headers['content-language'], 'esperanto')
    eq(headers['content-encoding'], 'aaa')
    eq(headers['cache-control'], 'no-cache')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on private bucket/public object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_bucket_acl():
    """An authenticated GET of a public-read object succeeds despite a private bucket ACL."""
    bucket_name = _setup_bucket_object_acl('private', 'public-read')
    resp = get_client().get_object(Bucket=bucket_name, Key='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/private object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_object_acl():
    """The owner's authenticated GET succeeds on a private object in a public-read bucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'private')
    resp = get_client().get_object(Bucket=bucket_name, Key='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_authenticated_bucket_gone():
    """GET after deleting both the object and its bucket fails with NoSuchBucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    # Remove the object first, then the (now empty) bucket.
    client.delete_object(Bucket=bucket_name, Key='foo')
    client.delete_bucket(Bucket=bucket_name)
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (404, 'NoSuchBucket'))
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object')
@attr(assertion='fails 404')
def test_object_raw_authenticated_object_gone():
    """GET after deleting the object (bucket kept) fails with NoSuchKey."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (404, 'NoSuchKey'))
@attr(resource='object')
@attr(method='get')
@attr(operation='x-amz-expires check not expired')
@attr(assertion='succeeds')
def test_object_raw_get_x_amz_expires_not_expired():
    """A presigned GET URL with a comfortably long expiry works without credentials."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    url = client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket_name, 'Key': 'foo'},
        ExpiresIn=100000,
        HttpMethod='GET')
    eq(requests.get(url).status_code, 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of range zero')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_range_zero():
    """A presigned GET URL generated with ExpiresIn=0 is rejected with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    url = client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket_name, 'Key': 'foo'},
        ExpiresIn=0,
        HttpMethod='GET')
    eq(requests.get(url).status_code, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of max range')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_max_range():
    """A presigned GET URL whose expiry exceeds the allowed maximum is rejected with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    url = client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket_name, 'Key': 'foo'},
        ExpiresIn=609901,
        HttpMethod='GET')
    eq(requests.get(url).status_code, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of positive range')
# Fixed metadata: this test asserts a 403 failure (like its two siblings above),
# but was annotated assertion='succeeds' — presumably a copy-paste slip.
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_positive_range():
    """A presigned GET URL generated with a negative ExpiresIn is rejected with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    url = client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket_name, 'Key': 'foo'},
        ExpiresIn=-7,
        HttpMethod='GET')
    eq(requests.get(url).status_code, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, no object acls')
@attr(assertion='fails 403')
def test_object_anon_put():
    """An anonymous PUT over an existing object with default ACLs is denied."""
    bucket_name = get_new_bucket()
    get_client().put_object(Bucket=bucket_name, Key='foo')

    anon = get_unauthenticated_client()
    e = assert_raises(ClientError, anon.put_object,
                      Bucket=bucket_name, Key='foo', Body='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (403, 'AccessDenied'))
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, publically writable object')
@attr(assertion='succeeds')
def test_object_anon_put_write_access():
    """An anonymous PUT succeeds when the bucket is public-read-write."""
    bucket_name = _setup_bucket_acl('public-read-write')
    get_client().put_object(Bucket=bucket_name, Key='foo')

    anon = get_unauthenticated_client()
    resp = anon.put_object(Bucket=bucket_name, Key='foo', Body='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
def test_object_put_authenticated():
    """A plain authenticated PUT into a fresh bucket succeeds."""
    resp = get_client().put_object(Bucket=get_new_bucket(), Key='foo', Body='foo')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
# Fixed metadata: the old attrs ('authenticated, no object acls' / 'succeeds')
# were copy-pasted from the previous test; this one checks an expired
# presigned PUT that must fail with 403.
@attr(operation='authenticated, expired presigned put')
@attr(assertion='fails 403')
def test_object_raw_put_authenticated_expired():
    """A PUT via an already-expired presigned URL is rejected with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo')
    url = client.generate_presigned_url(
        ClientMethod='put_object',
        Params={'Bucket': bucket_name, 'Key': 'foo'},
        ExpiresIn=-1000,
        HttpMethod='PUT')
    # generate_presigned_url's Params cannot carry a Body, so pass the
    # payload directly to requests.
    res = requests.put(url, data="foo")
    eq(res.status_code, 403)
def check_bad_bucket_name(bucket_name):
    """
    Attempt to create a bucket with the given name and confirm the
    request fails with 400 InvalidBucketName.
    """
    e = assert_raises(ClientError, get_client().create_bucket, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (400, 'InvalidBucketName'))
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='name begins with underscore')
@attr(assertion='fails with subdomain: 400')
def test_bucket_create_naming_bad_starts_nonalpha():
    """A bucket name starting with an underscore is rejected as invalid."""
    check_bad_bucket_name('_' + get_new_bucket_name())
def check_invalid_bucketname(invalid_name):
    """
    Send a CreateBucket request with an invalid bucket name, bypassing the
    client-side ParamValidationError that would normally be raised, by
    rewriting the request URL in a before-call event hook.
    Returns the (status, error_code) pair from the resulting failure.
    """
    client = get_client()
    valid_bucket_name = get_new_bucket_name()

    def _swap_bucketname(**kwargs):
        # Substitute the invalid name into the outgoing request URL.
        kwargs['params']['url'] = kwargs['params']['url'].replace(
            valid_bucket_name, invalid_name)

    client.meta.events.register('before-call.s3.CreateBucket', _swap_bucketname)
    e = assert_raises(ClientError, client.create_bucket, Bucket=invalid_name)
    return _get_status_and_error_code(e.response)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='empty name')
@attr(assertion='fails 405')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_short_empty():
    """An empty bucket name targets the service root, yielding 405 MethodNotAllowed."""
    status, error_code = check_invalid_bucketname('')
    eq((status, error_code), (405, 'MethodNotAllowed'))
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (one character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_one():
    """A one-character bucket name is below the minimum length and rejected with 400."""
    check_bad_bucket_name('a')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (two character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_two():
    """A two-character bucket name is below the minimum length and rejected with 400."""
    check_bad_bucket_name('aa')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='excessively long names')
@attr(assertion='fails with subdomain: 400')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_long():
    """Bucket names far over the length limit are rejected with 400."""
    for length in (256, 280, 3000):
        status, _ = check_invalid_bucketname('a' * length)
        eq(status, 400)
def check_good_bucket_name(name, _prefix=None):
    """
    Attempt to create a bucket with the specified name and (specified or
    default) prefix, asserting that creation succeeds with HTTP 200.
    """
    # tests using this with the default prefix must *not* rely on
    # being able to set the initial character, or exceed the max len
    # tests using this with a custom prefix are responsible for doing
    # their own setup/teardown nukes, with their custom prefix; this
    # should be very rare
    prefix = get_prefix() if _prefix is None else _prefix
    response = get_client().create_bucket(Bucket=prefix + name)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def _test_bucket_create_naming_good_long(length):
    """
    Create a bucket whose total name length (prefix included) is exactly
    *length* bytes and assert the creation succeeds.
    """
    # tests using this with the default prefix must *not* rely on
    # being able to set the initial character, or exceed the max len
    # tests using this with a custom prefix are responsible for doing
    # their own setup/teardown nukes, with their custom prefix; this
    # should be very rare
    prefix = get_new_bucket_name()
    assert len(prefix) < 63
    bucket_name = prefix + 'a' * (length - len(prefix))
    response = get_client().create_bucket(Bucket=bucket_name)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/60 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_60():
    """Creating a bucket with a 60-byte name succeeds."""
    _test_bucket_create_naming_good_long(60)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_61():
    """Creating a bucket with a 61-byte name succeeds."""
    _test_bucket_create_naming_good_long(61)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/62 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_62():
    """Creating a bucket with a 62-byte name succeeds."""
    _test_bucket_create_naming_good_long(62)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/63 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_63():
    """Creating a bucket with a 63-byte name (the documented maximum) succeeds."""
    _test_bucket_create_naming_good_long(63)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_list_long_name():
    """A bucket with a 61-byte name can be created and listed (empty)."""
    prefix = get_new_bucket_name()
    bucket_name = prefix + 'a' * (61 - len(prefix))
    bucket = get_new_bucket_resource(name=bucket_name)
    eq(_bucket_is_empty(bucket), True)
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/ip address for name')
@attr(assertion='fails on aws')
def test_bucket_create_naming_bad_ip():
    """A bucket name formatted as an IP address is rejected as invalid."""
    check_bad_bucket_name('192.168.5.123')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/! in name')
@attr(assertion='fails with subdomain')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_punctuation():
    """Bucket names containing characters outside [a-zA-Z0-9._-] are invalid."""
    status, error_code = check_invalid_bucketname('alpha!soup')
    # TODO: figure out why a 403 is coming out in boto3 but not in boto2.
    eq((status, error_code), (400, 'InvalidBucketName'))
# test_bucket_create_naming_dns_* are valid but not recommended
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/underscore in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_underscore():
    """A bucket name containing an underscore is rejected as invalid."""
    status, error_code = check_invalid_bucketname('foo_bar')
    eq((status, error_code), (400, 'InvalidBucketName'))
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/100 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
def test_bucket_create_naming_dns_long():
    """A valid max-length (63-byte) name under the default prefix is accepted."""
    prefix = get_prefix()
    assert len(prefix) < 50
    check_good_bucket_name('a' * (63 - len(prefix)))
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/dash at end of name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_at_end():
    """A bucket name ending in a dash is rejected as invalid."""
    status, error_code = check_invalid_bucketname('foo-')
    eq((status, error_code), (400, 'InvalidBucketName'))
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dot():
    """A bucket name containing consecutive dots is rejected as invalid."""
    status, error_code = check_invalid_bucketname('foo..bar')
    eq((status, error_code), (400, 'InvalidBucketName'))
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.- in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dash():
    """A bucket name containing '.-' is rejected as invalid."""
    status, error_code = check_invalid_bucketname('foo.-bar')
    eq((status, error_code), (400, 'InvalidBucketName'))
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/-. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_dot():
    """A bucket name containing '-.' is rejected as invalid."""
    status, error_code = check_invalid_bucketname('foo-.bar')
    eq((status, error_code), (400, 'InvalidBucketName'))
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create')
def test_bucket_create_exists():
    """
    Re-creating a bucket you own either succeeds (aws-s3 default region
    allows recreation) or fails with 409 BucketAlreadyOwnedByYou
    (all other regions).
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)

    try:
        client.create_bucket(Bucket=bucket_name)
    except ClientError as e:
        # BUG FIX: ClientError has no .status/.error_code attributes; the
        # original asserted on e.status/e.error_code (AttributeError) while
        # ignoring the values parsed from e.response. Use the parsed values.
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 409)
        eq(error_code, 'BucketAlreadyOwnedByYou')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get location')
def test_bucket_get_location():
    """GetBucketLocation returns the constraint the bucket was created with."""
    location_constraint = get_main_api_name()
    if not location_constraint:
        raise SkipTest
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={'LocationConstraint': location_constraint})

    response = client.get_bucket_location(Bucket=bucket_name)
    # The classic region is reported as a null LocationConstraint.
    expected = None if location_constraint == "" else location_constraint
    eq(response['LocationConstraint'], expected)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create by non-owner')
@attr(assertion='fails 409')
def test_bucket_create_exists_nonowner():
    """A second user cannot create a bucket with a name already taken.

    Bucket names are shared across a global namespace, so the alt user's
    attempt must fail with 409 BucketAlreadyExists.
    """
    bucket_name = get_new_bucket_name()
    get_client().create_bucket(Bucket=bucket_name)

    alt_client = get_alt_client()
    e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (409, 'BucketAlreadyExists'))
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create with existing acl')
@attr(assertion='fails 409')
def test_bucket_recreate_overwrite_acl():
    """Re-creating a bucket created with a canned ACL fails with 409."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ACL='public-read')
    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (409, 'BucketAlreadyExists'))
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create with new acl')
@attr(assertion='fails 409')
def test_bucket_recreate_new_acl():
    """Re-creating an existing bucket with a different canned ACL fails with 409."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)
    e = assert_raises(ClientError, client.create_bucket,
                      Bucket=bucket_name, ACL='public-read')
    status, error_code = _get_status_and_error_code(e.response)
    eq((status, error_code), (409, 'BucketAlreadyExists'))
def check_access_denied(fn, *args, **kwargs):
    """Assert that calling fn(*args, **kwargs) raises a ClientError with HTTP 403."""
    e = assert_raises(ClientError, fn, *args, **kwargs)
    eq(_get_status(e.response), 403)
def check_grants(got, want):
    """
    Check that the grants list in *got* matches the dictionaries in *want*,
    in any order.
    """
    eq(len(got), len(want))

    # FIX: the docstring promises order-independence, but zip() compared the
    # lists positionally. Sort both sides by a deterministic key so the
    # comparison really is order-insensitive (S3 implementations do not
    # guarantee grant ordering).
    def _got_key(g):
        grantee = g.get('Grantee', {})
        return (g.get('Permission') or '',
                grantee.get('ID') or '',
                grantee.get('URI') or '')

    def _want_key(w):
        return (w.get('Permission') or '',
                w.get('ID') or '',
                w.get('URI') or '')

    for g, w in zip(sorted(got, key=_got_key), sorted(want, key=_want_key)):
        w = dict(w)
        g = dict(g)
        eq(g.pop('Permission', None), w['Permission'])
        eq(g['Grantee'].pop('DisplayName', None), w['DisplayName'])
        eq(g['Grantee'].pop('ID', None), w['ID'])
        eq(g['Grantee'].pop('Type', None), w['Type'])
        eq(g['Grantee'].pop('URI', None), w['URI'])
        eq(g['Grantee'].pop('EmailAddress', None), w['EmailAddress'])
        # After popping every known field, nothing unexpected should remain.
        eq(g, {'Grantee': {}})
@attr(resource='bucket')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_bucket_acl_default():
    """A fresh bucket is owned by the main user and carries a single FULL_CONTROL grant."""
    bucket_name = get_new_bucket()
    response = get_client().get_bucket_acl(Bucket=bucket_name)

    display_name = get_main_display_name()
    user_id = get_main_user_id()
    eq(response['Owner']['DisplayName'], display_name)
    eq(response['Owner']['ID'], user_id)

    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': user_id,
        'DisplayName': display_name,
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [owner_full_control])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='public-read acl')
@attr(assertion='read back expected defaults')
@attr('fails_on_aws') # <Error><Code>IllegalLocationConstraintException</Code><Message>The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.</Message>
def test_bucket_acl_canned_during_create():
    """Creating a bucket with ACL='public-read' yields AllUsers READ plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)

    display_name = get_main_display_name()
    user_id = get_main_user_id()
    all_users_read = {
        'Permission': 'READ',
        'ID': None,
        'DisplayName': None,
        'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
        'EmailAddress': None,
        'Type': 'Group',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': user_id,
        'DisplayName': display_name,
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [all_users_read, owner_full_control])
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: public-read,private')
@attr(assertion='read back expected values')
def test_bucket_acl_canned():
    """Setting public-read then private canned ACLs reads back the expected grants."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read', Bucket=bucket_name)

    display_name = get_main_display_name()
    user_id = get_main_user_id()
    all_users_read = {
        'Permission': 'READ',
        'ID': None,
        'DisplayName': None,
        'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
        'EmailAddress': None,
        'Type': 'Group',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': user_id,
        'DisplayName': display_name,
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }

    response = client.get_bucket_acl(Bucket=bucket_name)
    check_grants(response['Grants'], [all_users_read, owner_full_control])

    # Switch back to private: only the owner grant should remain.
    client.put_bucket_acl(ACL='private', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)
    check_grants(response['Grants'], [owner_full_control])
@attr(resource='bucket.acls')
@attr(method='put')
@attr(operation='acl: public-read-write')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_publicreadwrite():
    """ACL='public-read-write' grants AllUsers READ and WRITE plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)

    display_name = get_main_display_name()
    user_id = get_main_user_id()

    def _all_users(permission):
        return {
            'Permission': permission,
            'ID': None,
            'DisplayName': None,
            'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
            'EmailAddress': None,
            'Type': 'Group',
        }

    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': user_id,
        'DisplayName': display_name,
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'],
                 [_all_users('READ'), _all_users('WRITE'), owner_full_control])
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: authenticated-read')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_authenticatedread():
    """ACL='authenticated-read' grants AuthenticatedUsers READ plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)

    display_name = get_main_display_name()
    user_id = get_main_user_id()
    auth_users_read = {
        'Permission': 'READ',
        'ID': None,
        'DisplayName': None,
        'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
        'EmailAddress': None,
        'Type': 'Group',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': user_id,
        'DisplayName': display_name,
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [auth_users_read, owner_full_control])
@attr(resource='object.acls')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_object_acl_default():
    """A freshly written object carries a single owner FULL_CONTROL grant."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')

    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_main_user_id(),
        'DisplayName': get_main_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_during_create():
    """Writing an object with ACL='public-read' yields AllUsers READ plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')

    all_users_read = {
        'Permission': 'READ',
        'ID': None,
        'DisplayName': None,
        'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
        'EmailAddress': None,
        'Type': 'Group',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_main_user_id(),
        'DisplayName': get_main_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [all_users_read, owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read,private')
@attr(assertion='read back expected values')
def test_object_acl_canned():
    """Setting public-read then private canned object ACLs reads back as expected."""
    bucket_name = get_new_bucket()
    client = get_client()
    # Objects default to private, so start from public-read.
    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')

    all_users_read = {
        'Permission': 'READ',
        'ID': None,
        'DisplayName': None,
        'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
        'EmailAddress': None,
        'Type': 'Group',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_main_user_id(),
        'DisplayName': get_main_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }

    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(response['Grants'], [all_users_read, owner_full_control])

    # Then flip back to private: only the owner grant should remain.
    client.put_object_acl(ACL='private',Bucket=bucket_name, Key='foo')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(response['Grants'], [owner_full_control])
@attr(resource='object')
@attr(method='put')
@attr(operation='acl public-read-write')
@attr(assertion='read back expected values')
def test_object_acl_canned_publicreadwrite():
    """ACL='public-read-write' on an object grants AllUsers READ and WRITE plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')

    def _all_users(permission):
        return {
            'Permission': permission,
            'ID': None,
            'DisplayName': None,
            'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
            'EmailAddress': None,
            'Type': 'Group',
        }

    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_main_user_id(),
        'DisplayName': get_main_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'],
                 [_all_users('READ'), _all_users('WRITE'), owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl authenticated-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_authenticatedread():
    """ACL='authenticated-read' on an object grants AuthenticatedUsers READ plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')

    auth_users_read = {
        'Permission': 'READ',
        'ID': None,
        'DisplayName': None,
        'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
        'EmailAddress': None,
        'Type': 'Group',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_main_user_id(),
        'DisplayName': get_main_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [auth_users_read, owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerread():
    """bucket-owner-read on an alt-user object adds a READ grant for the bucket owner."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()

    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    # The bucket owner's canonical-user grant is the third entry of the
    # public-read-write bucket ACL (after the two AllUsers group grants).
    owner_grantee = main_client.get_bucket_acl(Bucket=bucket_name)['Grants'][2]['Grantee']

    alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')

    alt_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_alt_user_id(),
        'DisplayName': get_alt_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    bucket_owner_read = {
        'Permission': 'READ',
        'ID': owner_grantee['ID'],
        'DisplayName': owner_grantee['DisplayName'],
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [alt_full_control, bucket_owner_read])
@attr(resource='object.acls')
@attr(method='put')
# Fixed metadata: was 'acl bucket-owner-read', copy-pasted from the previous
# test; this one exercises the bucket-owner-full-control canned ACL.
@attr(operation='acl bucket-owner-full-control')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerfullcontrol():
    """bucket-owner-full-control on an alt-user object adds a FULL_CONTROL grant for the bucket owner."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()

    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    # The bucket owner's canonical-user grant is the third entry of the
    # public-read-write bucket ACL (after the two AllUsers group grants).
    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']

    alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')

    alt_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': get_alt_user_id(),
        'DisplayName': get_alt_display_name(),
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    owner_full_control = {
        'Permission': 'FULL_CONTROL',
        'ID': bucket_owner_id,
        'DisplayName': bucket_owner_display_name,
        'URI': None,
        'EmailAddress': None,
        'Type': 'CanonicalUser',
    }
    check_grants(response['Grants'], [alt_full_control, owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify owner')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_acl_full_control_verify_owner():
    """Granting FULL_CONTROL and letting the grantee rewrite the ACL must not change the owner."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()

    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()  # NOTE(review): unused in original too; kept for parity
    main_user_id = get_main_user_id()
    main_display_name = get_main_display_name()

    def _policy(permission):
        # ACL granting *permission* to the alt user, owned by the main user.
        return {
            'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' },
                        'Permission': permission}],
            'Owner': {'DisplayName': main_display_name, 'ID': main_user_id},
        }

    # Main user hands FULL_CONTROL to the alt user...
    main_client.put_object_acl(Bucket=bucket_name, Key='foo',
                               AccessControlPolicy=_policy('FULL_CONTROL'))
    # ...who then rewrites the ACL; the owner must remain the main user.
    alt_client.put_object_acl(Bucket=bucket_name, Key='foo',
                              AccessControlPolicy=_policy('READ_ACP'))
    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
    eq(response['Owner']['ID'], main_user_id)
def add_obj_user_grant(bucket_name, key, grant):
    """
    Build an AccessControlPolicy dict for put_object_acl that appends
    *grant* to the object's current grants.  The object is assumed to be
    owned by the main user (the Owner stanza names the main user), not
    the alt user.

    A grant is a dictionary in the form of:
    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
    """
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    acl_response = get_client().get_object_acl(Bucket=bucket_name, Key=key)
    updated_grants = acl_response['Grants']
    updated_grants.append(grant)
    return {'Grants': updated_grants, 'Owner': {'DisplayName': owner_name, 'ID': owner_id}}
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify other attributes')
def test_object_acl_full_control_verify_attributes():
    """
    Upload an object (with an extra injected header), record its
    ContentType and ETag, grant the alt user FULL_CONTROL via a full
    ACL replacement, and verify the object's attributes are unchanged —
    putting an ACL must not rewrite the object.
    """
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    header = {'x-amz-foo': 'bar'}
    # lambda to add any header
    add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
    main_client.meta.events.register('before-call.s3.PutObject', add_header)
    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = main_client.get_object(Bucket=bucket_name, Key='foo')
    content_type = response['ContentType']
    etag = response['ETag']
    alt_user_id = get_alt_user_id()
    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
    grants = add_obj_user_grant(bucket_name, 'foo', grant)
    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants)
    # attributes must survive the ACL change
    response = main_client.get_object(Bucket=bucket_name, Key='foo')
    eq(content_type, response['ContentType'])
    eq(etag, response['ETag'])
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl private')
@attr(assertion='a private object can be set to private')
def test_bucket_acl_canned_private_to_private():
    """Re-applying the canned 'private' ACL to a fresh (already private) bucket succeeds with HTTP 200."""
    client = get_client()
    bucket_name = get_new_bucket()
    put_response = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)
def add_bucket_user_grant(bucket_name, grant):
    """
    Build an AccessControlPolicy dict for put_bucket_acl that appends
    *grant* to the bucket's current grants.  The bucket is assumed to be
    owned by the main user (the Owner stanza names the main user), not
    the alt user.

    A grant is a dictionary in the form of:
    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
    """
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    acl_response = get_client().get_bucket_acl(Bucket=bucket_name)
    updated_grants = acl_response['Grants']
    updated_grants.append(grant)
    return {'Grants': updated_grants, 'Owner': {'DisplayName': owner_name, 'ID': owner_id}}
def _check_object_acl(permission):
    """
    Sets the permission on an object then checks to see
    if it was set

    The object's single owner grant is rewritten to *permission*, the
    full policy is re-put, and the ACL is read back and verified against
    the main user's canonical id and display name.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
    policy = {}
    policy['Owner'] = response['Owner']
    policy['Grants'] = response['Grants']
    # a fresh object has exactly one grant (the owner's); overwrite it
    policy['Grants'][0]['Permission'] = permission
    client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
    grants = response['Grants']
    main_user_id = get_main_user_id()
    main_display_name = get_main_display_name()
    check_grants(
        grants,
        [
            dict(
                Permission=permission,
                ID=main_user_id,
                DisplayName=main_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl FULL_CONTROL')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl():
    """Round-trip a FULL_CONTROL object ACL and verify it reads back.

    Fixed the @attr operation tag typo ('FULL_CONTRO' -> 'FULL_CONTROL').
    """
    _check_object_acl('FULL_CONTROL')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_write():
    """Round-trip a WRITE object ACL and verify it reads back."""
    _check_object_acl('WRITE')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_writeacp():
    """Round-trip a WRITE_ACP object ACL and verify it reads back."""
    _check_object_acl('WRITE_ACP')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_read():
    """Round-trip a READ object ACL and verify it reads back."""
    _check_object_acl('READ')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_readacp():
    """Round-trip a READ_ACP object ACL and verify it reads back."""
    _check_object_acl('READ_ACP')
def _bucket_acl_grant_userid(permission):
    """
    create a new bucket, grant a specific user the specified
    permission, read back the acl and verify correct setting

    Returns the bucket name so callers can probe what the alt user can
    and cannot do with the granted permission.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    main_user_id = get_main_user_id()
    main_display_name = get_main_display_name()
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    # append the alt-user grant to the bucket's existing grants
    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
    grant = add_bucket_user_grant(bucket_name, grant)
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
    response = client.get_bucket_acl(Bucket=bucket_name)
    grants = response['Grants']
    # expect exactly two grants: the new alt-user grant plus the
    # owner's original FULL_CONTROL
    check_grants(
        grants,
        [
            dict(
                Permission=permission,
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=main_user_id,
                DisplayName=main_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
    return bucket_name
def _check_bucket_acl_grant_can_read(bucket_name):
    """
    verify ability to read the specified bucket

    head_bucket raises ClientError on denial, so simply not raising is
    the assertion; the response body is not inspected (the previously
    captured `response` local was unused).
    """
    alt_client = get_alt_client()
    alt_client.head_bucket(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_read(bucket_name):
    """Assert the alt user is denied HEAD access to *bucket_name*."""
    unauthorized = get_alt_client()
    check_access_denied(unauthorized.head_bucket, Bucket=bucket_name)
def _check_bucket_acl_grant_can_readacp(bucket_name):
    """Assert the alt user may read the ACL of *bucket_name* (raises on denial)."""
    reader = get_alt_client()
    reader.get_bucket_acl(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_readacp(bucket_name):
    """Assert the alt user is denied reading the ACL of *bucket_name*."""
    unauthorized = get_alt_client()
    check_access_denied(unauthorized.get_bucket_acl, Bucket=bucket_name)
def _check_bucket_acl_grant_can_write(bucket_name):
    """Assert the alt user may write an object into *bucket_name* (raises on denial)."""
    writer = get_alt_client()
    writer.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_cant_write(bucket_name):
    """Assert the alt user is denied writing an object into *bucket_name*."""
    unauthorized = get_alt_client()
    check_access_denied(unauthorized.put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_can_writeacp(bucket_name):
    """Assert the alt user may replace the ACL of *bucket_name* (raises on denial)."""
    writer = get_alt_client()
    writer.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
def _check_bucket_acl_grant_cant_writeacp(bucket_name):
    """Assert the alt user is denied replacing the ACL of *bucket_name*."""
    unauthorized = get_alt_client()
    check_access_denied(unauthorized.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid FULL_CONTROL')
@attr(assertion='can read/write data/acls')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_bucket_acl_grant_userid_fullcontrol():
    """FULL_CONTROL grant: alt user can read/write both data and ACLs; bucket owner is unchanged."""
    bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
    # alt user can read
    _check_bucket_acl_grant_can_read(bucket_name)
    # can read acl
    _check_bucket_acl_grant_can_readacp(bucket_name)
    # can write
    _check_bucket_acl_grant_can_write(bucket_name)
    # can write acl
    _check_bucket_acl_grant_can_writeacp(bucket_name)
    # granting FULL_CONTROL must not transfer bucket ownership
    client = get_client()
    bucket_acl_response = client.get_bucket_acl(Bucket=bucket_name)
    owner_id = bucket_acl_response['Owner']['ID']
    owner_display_name = bucket_acl_response['Owner']['DisplayName']
    main_display_name = get_main_display_name()
    main_user_id = get_main_user_id()
    eq(owner_id, main_user_id)
    eq(owner_display_name, main_display_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ')
@attr(assertion='can read data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_read():
    """READ grant: alt user can only read bucket data; ACL access and writes stay denied."""
    bucket_name = _bucket_acl_grant_userid('READ')
    # alt user can read
    _check_bucket_acl_grant_can_read(bucket_name)
    # can't read acl
    _check_bucket_acl_grant_cant_readacp(bucket_name)
    # can't write
    _check_bucket_acl_grant_cant_write(bucket_name)
    # can't write acl
    _check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ_ACP')
@attr(assertion='can read acl, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_readacp():
    """READ_ACP grant: alt user can only read the ACL; data access and all writes stay denied."""
    bucket_name = _bucket_acl_grant_userid('READ_ACP')
    # alt user can't read
    _check_bucket_acl_grant_cant_read(bucket_name)
    # can read acl
    _check_bucket_acl_grant_can_readacp(bucket_name)
    # can't write
    _check_bucket_acl_grant_cant_write(bucket_name)
    # can't write acp
    #_check_bucket_acl_grant_cant_writeacp_can_readacp(bucket)
    _check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE')
@attr(assertion='can write data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_write():
    """WRITE grant: alt user can only write bucket data; reads and ACL access stay denied."""
    bucket_name = _bucket_acl_grant_userid('WRITE')
    # alt user can't read
    _check_bucket_acl_grant_cant_read(bucket_name)
    # can't read acl
    _check_bucket_acl_grant_cant_readacp(bucket_name)
    # can write
    _check_bucket_acl_grant_can_write(bucket_name)
    # can't write acl
    _check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE_ACP')
@attr(assertion='can write acls, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_writeacp():
    """WRITE_ACP grant: alt user can only replace the ACL; data reads/writes and ACL reads stay denied."""
    bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
    # alt user can't read
    _check_bucket_acl_grant_cant_read(bucket_name)
    # can't read acl
    _check_bucket_acl_grant_cant_readacp(bucket_name)
    # can't write
    _check_bucket_acl_grant_cant_write(bucket_name)
    # can write acl
    _check_bucket_acl_grant_can_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/invalid userid')
@attr(assertion='fails 400')
def test_bucket_acl_grant_nonexist_user():
    """A grant naming a nonexistent canonical user id must be rejected with 400 InvalidArgument."""
    bucket_name = get_new_bucket()
    client = get_client()
    bogus_grant = {'Grantee': {'ID': '_foo', 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
    policy = add_bucket_user_grant(bucket_name, bogus_grant)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=policy)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
def test_bucket_acl_no_grants():
    """
    Strip every grant from a bucket's ACL and verify the owner's implicit
    rights: object reads and ACL get/set still work, object writes do not.
    Restores the original grants at the end so teardown can delete the bucket.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_bucket_acl(Bucket=bucket_name)
    old_grants = response['Grants']
    policy = {}
    policy['Owner'] = response['Owner']
    # clear grants
    policy['Grants'] = []
    # remove read/write permission
    response = client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
    # can read
    client.get_object(Bucket=bucket_name, Key='foo')
    # can't write
    check_access_denied(client.put_object, Bucket=bucket_name, Key='baz', Body='a')
    #TODO fix this test once a fix is in for same issues in
    # test_access_bucket_private_object_private
    client2 = get_client()
    # owner can read acl
    client2.get_bucket_acl(Bucket=bucket_name)
    # owner can write acl
    client2.put_bucket_acl(Bucket=bucket_name, ACL='private')
    # set policy back to original so that bucket can be cleaned up
    policy['Grants'] = old_grants
    client2.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
def _get_acl_header(user_id=None, perms=None):
all_headers = ["read", "write", "read-acp", "write-acp", "full-control"]
headers = []
if user_id == None:
user_id = get_alt_user_id()
if perms != None:
for perm in perms:
header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
headers.append(header)
else:
for perm in all_headers:
header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
headers.append(header)
return headers
@attr(resource='object')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_header_acl_grants():
    """
    PUT an object while injecting every x-amz-grant-* header for the alt
    user, then verify each permission appears as its own grant on the
    object ACL.  The headers are spliced into the raw request just before
    signing by poking botocore's internal header list — fragile across
    botocore versions, but required since put_object has no per-grant
    keyword for arbitrary header combinations.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    headers = _get_acl_header()
    def add_headers_before_sign(**kwargs):
        # kwargs['request'] is a botocore AWSRequest; append our raw headers
        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
    client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
    client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
    response = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
    grants = response['Grants']
    # one grant per injected header, all for the alt user
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='WRITE',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='READ_ACP',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='WRITE_ACP',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
@attr(resource='bucket')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_header_acl_grants():
    """
    Create a bucket while injecting every x-amz-grant-* header for the
    alt user, verify each permission appears as its own grant on the
    bucket ACL, then confirm the alt user can actually write.

    Fix: _get_acl_header() was called twice, with the first result
    discarded; now computed once.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    headers = _get_acl_header()
    def add_headers_before_sign(**kwargs):
        # kwargs['request'] is a botocore AWSRequest; append our raw headers
        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
    client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
    client.create_bucket(Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)
    grants = response['Grants']
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    # one grant per injected header, all for the alt user
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='WRITE',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='READ_ACP',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='WRITE_ACP',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
    # the WRITE grant must actually work
    alt_client = get_alt_client()
    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    # set bucket acl to public-read-write so that teardown can work
    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
# This test will fail on DH Objects. DHO allows multiple users with one account, which
# would violate the uniqueness requirement of a user's email. As such, DHO users are
# created without an email.
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add second FULL_CONTROL user')
@attr(assertion='works for S3, fails for DHO')
@attr('fails_on_aws') # <Error><Code>AmbiguousGrantByEmailAddress</Code><Message>The e-mail address you provided is associated with more than one account. Please retry your request using a different identification method or after resolving the ambiguity.</Message>
def test_bucket_acl_grant_email():
    """
    Grant FULL_CONTROL by the alt user's email address and verify the
    server resolves it to the alt user's canonical id in the ACL read-back.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    alt_email_address = get_alt_email()
    main_user_id = get_main_user_id()
    main_display_name = get_main_display_name()
    # grant by email; read-back should be a resolved CanonicalUser grant
    grant = {'Grantee': {'EmailAddress': alt_email_address, 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
    grant = add_bucket_user_grant(bucket_name, grant)
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy = grant)
    response = client.get_bucket_acl(Bucket=bucket_name)
    grants = response['Grants']
    check_grants(
        grants,
        [
            dict(
                Permission='FULL_CONTROL',
                ID=alt_user_id,
                DisplayName=alt_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=main_user_id,
                DisplayName=main_display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ]
        )
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add acl for nonexistent user')
@attr(assertion='fail 400')
def test_bucket_acl_grant_email_not_exist():
    """
    Granting by an email address matching no account must be rejected
    with 400 UnresolvableGrantByEmailAddress.  (Removed three unused
    locals: alt_user_id, alt_display_name, alt_email_address.)
    """
    # behavior not documented by amazon
    bucket_name = get_new_bucket()
    client = get_client()
    NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
    grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
    grant = add_bucket_user_grant(bucket_name, grant)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy = grant)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'UnresolvableGrantByEmailAddress')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='acls read back as empty')
def test_bucket_acl_revoke_all():
    """
    Put an empty grant list (revoking even the owner's grant) and verify
    the ACL reads back with zero grants; then restore the original
    grants so the bucket can be cleaned up.
    """
    # revoke all access, including the owner's access
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_bucket_acl(Bucket=bucket_name)
    old_grants = response['Grants']
    policy = {}
    policy['Owner'] = response['Owner']
    # clear grants
    policy['Grants'] = []
    # remove read/write permission for everyone
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
    response = client.get_bucket_acl(Bucket=bucket_name)
    eq(len(response['Grants']), 0)
    # set policy back to original so that bucket can be cleaned up
    policy['Grants'] = old_grants
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
# http://tracker.newdream.net/issues/984
@attr(resource='bucket.log')
@attr(method='put')
@attr(operation='set/enable/disable logging target')
@attr(assertion='operations succeed')
@attr('fails_on_rgw')
def test_logging_toggle():
    """
    Enable bucket logging (bucket logs to itself), read the config back,
    then disable it again.  Only checks that the API calls succeed.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    main_display_name = get_main_display_name()
    main_user_id = get_main_user_id()
    # enable: target is the bucket itself, with a FULL_CONTROL target grant
    status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
    client.get_bucket_logging(Bucket=bucket_name)
    # disable
    status = {'LoggingEnabled': {}}
    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
    # NOTE: this does not actually test whether or not logging works
def _setup_access(bucket_acl, object_acl):
    """
    Simple test fixture: create a bucket with *bucket_acl*, containing:
    - key1 ('foo'): object with the explicit *object_acl*
    - key2 ('bar'): object left with the default ACL
    Returns (bucket_name, key1, key2, newkey) where newkey ('new') is an
    as-yet-unwritten key callers use to probe bucket write access.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key1 = 'foo'
    key2 = 'bar'
    newkey = 'new'
    client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
    client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
    client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
    client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
    return bucket_name, key1, key2, newkey
def get_bucket_key_names(bucket_name):
    """Return the object key names in *bucket_name* as a frozenset."""
    # frozenset(iterable) directly; the `obj for obj in ...` genexp was a no-op
    return frozenset(get_objects_list(bucket_name))
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private')
@attr(assertion='public has no access to bucket or objects')
def test_access_bucket_private_object_private():
    """Private bucket + private object: the alt user can neither read nor write anything."""
    # all the test_access_* tests follow this template
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
    alt_client = get_alt_client()
    # acled object read fail
    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
    # default object read fail
    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
    # bucket read fail
    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
    # acled object write fail
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: The above put's causes the connection to go bad, therefore the client can't be used
    # anymore. This can be solved either by:
    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
    # 2) getting a new client hence the creation of alt_client{2,3} for the tests below
    # TODO: Test it from another host and on AWS, Report this to Amazon, if findings are identical
    alt_client2 = get_alt_client()
    # default object write fail
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    # bucket write fail
    alt_client3 = get_alt_client()
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private with list-objects-v2')
@attr(assertion='public has no access to bucket or objects')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_private():
    """Same as test_access_bucket_private_object_private but listing via list_objects_v2."""
    # all the test_access_* tests follow this template
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
    alt_client = get_alt_client()
    # acled object read fail
    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
    # default object read fail
    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
    # bucket read fail
    check_access_denied(alt_client.list_objects_v2, Bucket=bucket_name)
    # acled object write fail
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: The above put's causes the connection to go bad, therefore the client can't be used
    # anymore. This can be solved either by:
    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
    # 2) getting a new client hence the creation of alt_client{2,3} for the tests below
    # TODO: Test it from another host and on AWS, Report this to Amazon, if findings are identical
    alt_client2 = get_alt_client()
    # default object write fail
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    # bucket write fail
    alt_client3 = get_alt_client()
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read')
@attr(assertion='public can only read readable object')
def test_access_bucket_private_object_publicread():
    """Private bucket + public-read object: alt user can read only the acled object; all writes and listing fail."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
    alt_client = get_alt_client()
    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
    body = _get_body(response)
    # a should be public-read, b gets default (private)
    eq(body, 'foocontent')
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt_client2 = get_alt_client()
    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt_client3 = get_alt_client()
    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read with list-objects-v2')
@attr(assertion='public can only read readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicread():
    """Same as test_access_bucket_private_object_publicread but listing via list_objects_v2."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
    alt_client = get_alt_client()
    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
    body = _get_body(response)
    # a should be public-read, b gets default (private)
    eq(body, 'foocontent')
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt_client2 = get_alt_client()
    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt_client3 = get_alt_client()
    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write')
@attr(assertion='public can only read the readable object')
def test_access_bucket_private_object_publicreadwrite():
    """Private bucket + public-read-write object: the private bucket demotes the object to read-only for the alt user."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
    alt_client = get_alt_client()
    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
    body = _get_body(response)
    # a should be public-read-only ... because it is in a private bucket
    # b gets default (private)
    eq(body, 'foocontent')
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt_client2 = get_alt_client()
    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt_client3 = get_alt_client()
    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write with list-objects-v2')
@attr(assertion='public can only read the readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicreadwrite():
    """Same as test_access_bucket_private_object_publicreadwrite but listing via list_objects_v2."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
    alt_client = get_alt_client()
    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
    body = _get_body(response)
    # a should be public-read-only ... because it is in a private bucket
    # b gets default (private)
    eq(body, 'foocontent')
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt_client2 = get_alt_client()
    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt_client3 = get_alt_client()
    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/private')
@attr(assertion='public can only list the bucket')
def test_access_bucket_publicread_object_private():
    """Public-read bucket + private objects: alt user can only list the bucket; object reads and all writes fail."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
    alt_client = get_alt_client()
    # a should be private, b gets default (private)
    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    alt_client2 = get_alt_client()
    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt_client3 = get_alt_client()
    # bucket listing is allowed by the public-read bucket ACL
    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
    eq(objs, ['bar', 'foo'])
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicread():
    """Public-read bucket + public-read object: alt user can read the acled object and list the bucket; all writes fail."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
    alt_client = get_alt_client()
    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
    # a should be public-read, b gets default (private)
    body = _get_body(response)
    eq(body, 'foocontent')
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt_client2 = get_alt_client()
    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt_client3 = get_alt_client()
    # bucket listing is allowed by the public-read bucket ACL
    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
    eq(objs, ['bar', 'foo'])
    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read-write')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicreadwrite():
    """Public-read bucket / public-read-write object ACLs.

    key1 is effectively public-read-only because the bucket itself is r/o;
    key2 keeps the default (private) ACL; listing is allowed.
    """
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')

    c1 = get_alt_client()
    eq(_get_body(c1.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(c1.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')

    c2 = get_alt_client()
    check_access_denied(c2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(c2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')

    c3 = get_alt_client()
    eq(get_objects_list(bucket=bucket_name, client=c3), ['bar', 'foo'])
    check_access_denied(c3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/private')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_private():
    """Public-read-write bucket / private objects: GETs denied, writes allowed."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
    writer = get_alt_client()

    # both objects are private (key2 by default), so reads are denied,
    # but the r/w bucket ACL lets anyone overwrite or create keys
    check_access_denied(writer.get_object, Bucket=bucket_name, Key=key1)
    writer.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
    check_access_denied(writer.get_object, Bucket=bucket_name, Key=key2)
    writer.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')

    eq(get_objects_list(bucket=bucket_name, client=writer), ['bar', 'foo'])
    writer.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicread():
    """Public-read-write bucket / public-read object: key1 readable, everything writable."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
    writer = get_alt_client()

    # key1 is public-read; key2 keeps the default (private) ACL
    eq(_get_body(writer.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    writer.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')

    check_access_denied(writer.get_object, Bucket=bucket_name, Key=key2)
    writer.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')

    # the r/w bucket ACL allows listing and creating new keys
    eq(get_objects_list(bucket=bucket_name, client=writer), ['bar', 'foo'])
    writer.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read-write')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicreadwrite():
    """Fully public bucket and object: everything but reading key2 is allowed."""
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
    writer = get_alt_client()

    # key1 is public-read-write; key2 keeps the default (private) ACL
    eq(_get_body(writer.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    writer.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')

    check_access_denied(writer.get_object, Bucket=bucket_name, Key=key2)
    writer.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')

    eq(get_objects_list(bucket=bucket_name, client=writer), ['bar', 'foo'])
    writer.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets')
@attr(assertion='returns all expected buckets')
def test_buckets_create_then_list():
    """Create several buckets, then verify ListBuckets reports all of them.

    Raises:
        RuntimeError: if any created bucket is missing from the listing.
    """
    client = get_client()
    bucket_names = [get_new_bucket_name() for _ in range(5)]
    for name in bucket_names:
        client.create_bucket(Bucket=name)

    # (removed a dead client.list_buckets() call whose result was never used;
    # get_buckets_list() performs the listing under test)
    buckets_list = get_buckets_list()
    for name in bucket_names:
        if name not in buckets_list:
            # fix: the message previously passed %r as an extra exception arg
            # (never interpolated) and referenced the undefined name `bucket`,
            # which raised NameError instead of the intended error
            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (anonymous)')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_list_buckets_anonymous():
    """Anonymous ListBuckets succeeds and returns an empty bucket set.

    Using the framework's unauthenticated client (rather than raw httplib)
    keeps calling-format handling in play.
    """
    anon = get_unauthenticated_client()
    eq(len(anon.list_buckets()['Buckets']), 0)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_invalid_auth():
    """ListBuckets with an unknown access key fails 403 InvalidAccessKeyId."""
    err = assert_raises(ClientError, get_bad_auth_client().list_buckets)
    http_status, code = _get_status_and_error_code(err.response)
    eq(http_status, 403)
    eq(code, 'InvalidAccessKeyId')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_bad_auth():
    """A valid access key with a wrong secret fails 403 SignatureDoesNotMatch."""
    bad_client = get_bad_auth_client(aws_access_key_id=get_main_aws_access_key())
    err = assert_raises(ClientError, bad_client.list_buckets)
    http_status, code = _get_status_and_error_code(err.response)
    eq(http_status, 403)
    eq(code, 'SignatureDoesNotMatch')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with alphabetic works')
# this test goes outside the user-configure prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
    setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
    teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
    )
def test_bucket_create_naming_good_starts_alpha():
    """A bucket name beginning with a lowercase letter is accepted."""
    check_good_bucket_name('foo', _prefix='a'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with numeric works')
# this test goes outside the user-configure prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
    setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
    teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
    )
def test_bucket_create_naming_good_starts_digit():
    """A bucket name beginning with a digit is accepted."""
    check_good_bucket_name('foo', _prefix='0'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing dot works')
def test_bucket_create_naming_good_contains_period():
    """A dot is legal inside a bucket name."""
    check_good_bucket_name('aaa.111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing hyphen works')
def test_bucket_create_naming_good_contains_hyphen():
    """A hyphen is legal inside a bucket name."""
    check_good_bucket_name('aaa-111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket with objects and recreate it')
@attr(assertion='bucket recreation not overriding index')
def test_bucket_recreate_not_overriding():
    """Re-creating an existing bucket must not wipe its index or objects."""
    key_names = ['mykey1', 'mykey2']
    bucket_name = _create_objects(keys=key_names)
    eq(key_names, get_objects_list(bucket_name))

    # issue create_bucket on the same name again, then confirm the
    # listing is unchanged
    get_client().create_bucket(Bucket=bucket_name)
    eq(key_names, get_objects_list(bucket_name))
@attr(resource='object')
@attr(method='put')
@attr(operation='create and list objects with special names')
@attr(assertion='special names work')
def test_bucket_create_special_key_names():
    """Keys with special characters can be created, listed, read, and ACL'd."""
    key_names = [
        ' ',
        '"',
        '$',
        '%',
        '&',
        '\'',
        '<',
        '>',
        '_',
        '_ ',
        '_ _',
        '__',
    ]
    bucket_name = _create_objects(keys=key_names)
    listing = get_objects_list(bucket_name)
    eq(key_names, listing)

    client = get_client()
    for name in key_names:
        eq((name in listing), True)
        # each object was stored with its own key name as its body
        eq(name, _get_body(client.get_object(Bucket=bucket_name, Key=name)))
        client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create and list objects with underscore as prefix, list using prefix')
@attr(assertion='listing works correctly')
def test_bucket_list_special_prefix():
    """Listing with an underscore prefix returns only the matching keys."""
    key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
    bucket_name = _create_objects(keys=key_names)

    eq(len(get_objects_list(bucket_name)), 5)
    eq(len(get_objects_list(bucket_name, prefix='_bla/')), 4)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy zero sized object in same bucket')
@attr(assertion='works')
def test_object_copy_zero_size():
    """Copying a zero-byte object yields a zero-byte copy."""
    key = 'foo123bar'
    bucket_name = _create_objects(keys=[key])
    client = get_client()

    # overwrite with an explicitly empty body, then copy it
    client.put_object(Bucket=bucket_name, Key=key, Body=FakeWriteFile(0, ''))
    client.copy({'Bucket': bucket_name, 'Key': key}, bucket_name, 'bar321foo')

    eq(client.get_object(Bucket=bucket_name, Key='bar321foo')['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object in same bucket')
@attr(assertion='works')
def test_object_copy_same_bucket():
    """Copy within one bucket preserves the object body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')

    client.copy({'Bucket': bucket_name, 'Key': 'foo123bar'}, bucket_name, 'bar321foo')

    eq('foo', _get_body(client.get_object(Bucket=bucket_name, Key='bar321foo')))
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object with content-type')
@attr(assertion='works')
def test_object_copy_verify_contenttype():
    """Copying preserves the source object's Content-Type."""
    bucket_name = get_new_bucket()
    client = get_client()
    content_type = 'text/bla'
    client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')

    client.copy({'Bucket': bucket_name, 'Key': 'foo123bar'}, bucket_name, 'bar321foo')

    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
    eq('foo', _get_body(response))
    eq(response['ContentType'], content_type)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to itself')
@attr(assertion='fails')
def test_object_copy_to_itself():
    """Copying an object onto itself with no changes fails 400 InvalidRequest."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')

    src = {'Bucket': bucket_name, 'Key': 'foo123bar'}
    err = assert_raises(ClientError, client.copy, src, bucket_name, 'foo123bar')
    http_status, code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(code, 'InvalidRequest')
@attr(resource='object')
@attr(method='put')
@attr(operation='modify object metadata by copying')
@attr(assertion='fails')
def test_object_copy_to_itself_with_metadata():
    """Self-copy is permitted when it replaces metadata (MetadataDirective=REPLACE)."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')

    metadata = {'foo': 'bar'}
    client.copy_object(Bucket=bucket_name,
                       CopySource={'Bucket': bucket_name, 'Key': 'foo123bar'},
                       Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
    eq(client.get_object(Bucket=bucket_name, Key='foo123bar')['Metadata'], metadata)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object from different bucket')
@attr(assertion='works')
def test_object_copy_diff_bucket():
    """Copy across two buckets owned by the same user preserves the body."""
    bucket_name1 = get_new_bucket()
    bucket_name2 = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')

    client.copy({'Bucket': bucket_name1, 'Key': 'foo123bar'}, bucket_name2, 'bar321foo')

    eq('foo', _get_body(client.get_object(Bucket=bucket_name2, Key='bar321foo')))
@attr(resource='object')
@attr(method='put')
@attr(operation='copy to an inaccessible bucket')
@attr(assertion='fails w/AttributeError')
def test_object_copy_not_owned_bucket():
    """Copying a non-readable source into another user's bucket is denied (403)."""
    client = get_client()
    alt_client = get_alt_client()
    bucket_name1 = get_new_bucket_name()
    bucket_name2 = get_new_bucket_name()
    client.create_bucket(Bucket=bucket_name1)
    alt_client.create_bucket(Bucket=bucket_name2)

    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')

    err = assert_raises(ClientError, alt_client.copy,
                        {'Bucket': bucket_name1, 'Key': 'foo123bar'},
                        bucket_name2, 'bar321foo')
    http_status, code = _get_status_and_error_code(err.response)
    eq(http_status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
@attr(assertion='works')
def test_object_copy_not_owned_object_bucket():
    """With explicit FULL_CONTROL grants, an alt user can copy a non-owned object."""
    client = get_client()
    alt_client = get_alt_client()
    bucket_name = get_new_bucket_name()
    client.create_bucket(Bucket=bucket_name)
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')

    alt_user_id = get_alt_user_id()
    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}

    # grant the alt user full control on both the object and the bucket
    client.put_object_acl(Bucket=bucket_name, Key='foo123bar',
                          AccessControlPolicy=add_obj_user_grant(bucket_name, 'foo123bar', grant))
    client.put_bucket_acl(Bucket=bucket_name,
                          AccessControlPolicy=add_bucket_user_grant(bucket_name, grant))

    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
    alt_client.copy({'Bucket': bucket_name, 'Key': 'foo123bar'}, bucket_name, 'bar321foo')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and change acl')
@attr(assertion='works')
def test_object_copy_canned_acl():
    """copy_object with a canned ACL applies that ACL to the destination."""
    bucket_name = get_new_bucket()
    client = get_client()
    alt_client = get_alt_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')

    client.copy_object(Bucket=bucket_name,
                       CopySource={'Bucket': bucket_name, 'Key': 'foo123bar'},
                       Key='bar321foo', ACL='public-read')
    # the alt user can read the copy, so the canned ACL took effect
    alt_client.get_object(Bucket=bucket_name, Key='bar321foo')

    # same check when the copy also replaces metadata
    client.copy_object(ACL='public-read', Bucket=bucket_name,
                       CopySource={'Bucket': bucket_name, 'Key': 'bar321foo'},
                       Key='foo123bar', Metadata={'abc': 'def'}, MetadataDirective='REPLACE')
    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and retain metadata')
def test_object_copy_retaining_metadata():
    """copy_object without MetadataDirective keeps content type and metadata."""
    for size in [3, 1024 * 1024]:
        bucket_name = get_new_bucket()
        client = get_client()
        content_type = 'audio/ogg'
        metadata = {'key1': 'value1', 'key2': 'value2'}
        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata,
                          ContentType=content_type, Body=bytearray(size))

        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')

        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
        eq(content_type, response['ContentType'])
        eq(metadata, response['Metadata'])
        # (removed an unused `body = _get_body(response)` assignment —
        # nothing ever compared the body here)
        eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and replace metadata')
def test_object_copy_replacing_metadata():
    """copy_object with MetadataDirective=REPLACE swaps metadata and content type."""
    for size in [3, 1024 * 1024]:
        bucket_name = get_new_bucket()
        client = get_client()
        client.put_object(Bucket=bucket_name, Key='foo123bar',
                          Metadata={'key1': 'value1', 'key2': 'value2'},
                          ContentType='audio/ogg', Body=bytearray(size))

        # replacement values for the copy
        metadata = {'key3': 'value3', 'key2': 'value2'}
        content_type = 'audio/mpeg'
        client.copy_object(Bucket=bucket_name,
                           CopySource={'Bucket': bucket_name, 'Key': 'foo123bar'},
                           Key='bar321foo', Metadata=metadata,
                           MetadataDirective='REPLACE', ContentType=content_type)

        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
        eq(content_type, response['ContentType'])
        eq(metadata, response['Metadata'])
        eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent bucket')
def test_object_copy_bucket_not_found():
    """Copying from a non-existent bucket returns 404."""
    bucket_name = get_new_bucket()
    client = get_client()
    missing_src = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
    err = assert_raises(ClientError, client.copy, missing_src, bucket_name, 'bar321foo')
    eq(_get_status(err.response), 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent object')
def test_object_copy_key_not_found():
    """Copying from a non-existent key returns 404."""
    bucket_name = get_new_bucket()
    client = get_client()
    missing_src = {'Bucket': bucket_name, 'Key': 'foo123bar'}
    err = assert_raises(ClientError, client.copy, missing_src, bucket_name, 'bar321foo')
    eq(_get_status(err.response), 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_bucket():
    """Copy a specific object version within and across buckets.

    Covers: same-bucket copy, copy of a copy, copy into a second versioned
    bucket, copy into a non-versioned bucket, and copy from the non-versioned
    bucket back into the versioned one.  Each copy must reproduce the source
    body and length.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    size = 1*5
    # 5 NUL bytes; decode() gives the equivalent str used for body comparison
    data = bytearray(size)
    data_str = data.decode()
    key1 = 'foo123bar'
    client.put_object(Bucket=bucket_name, Key=key1, Body=data)
    response = client.get_object(Bucket=bucket_name, Key=key1)
    version_id = response['VersionId']
    # copy object in the same bucket
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key2 = 'bar321foo'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
    response = client.get_object(Bucket=bucket_name, Key=key2)
    body = _get_body(response)
    eq(data_str, body)
    eq(size, response['ContentLength'])
    # second copy: copy the copy we just made, addressed by its own version id
    version_id2 = response['VersionId']
    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
    key3 = 'bar321foo2'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
    response = client.get_object(Bucket=bucket_name, Key=key3)
    body = _get_body(response)
    eq(data_str, body)
    eq(size, response['ContentLength'])
    # copy to another versioned bucket
    bucket_name2 = get_new_bucket()
    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key4 = 'bar321foo3'
    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
    response = client.get_object(Bucket=bucket_name2, Key=key4)
    body = _get_body(response)
    eq(data_str, body)
    eq(size, response['ContentLength'])
    # copy to another non versioned bucket
    bucket_name3 = get_new_bucket()
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key5 = 'bar321foo4'
    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
    response = client.get_object(Bucket=bucket_name3, Key=key5)
    body = _get_body(response)
    eq(data_str, body)
    eq(size, response['ContentLength'])
    # copy from a non versioned bucket (no VersionId in the copy source)
    copy_source = {'Bucket': bucket_name3, 'Key': key5}
    key6 = 'foo123bar2'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
    response = client.get_object(Bucket=bucket_name, Key=key6)
    body = _get_body(response)
    eq(data_str, body)
    eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket with url-encoded name')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_url_encoding():
    """Version-qualified copy works for keys that need URL encoding ('?', '&')."""
    bucket = get_new_bucket_resource()
    check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")

    src = bucket.put_object(Key='foo?bar')
    src.load()  # HEAD request tests that the key exists

    # copy object in the same bucket
    dst = bucket.Object('bar&foo')
    dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
    dst.load()  # HEAD request tests that the key exists
def generate_random(size, part_size=5*1024*1024):
    """
    Yield `size` characters of random ASCII-letter data in chunks of at most
    `part_size`.  Each yielded part is a repetition of its own random 1 KB
    seed string (so each MB is a repetition of the first KB of that part).

    Args:
        size: total number of characters to generate.
        part_size: maximum length of each yielded string.
    """
    chunk = 1024
    allowed = string.ascii_letters
    for x in range(0, size, part_size):
        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
        this_part_size = min(size - x, part_size)
        # repeat the 1 KB seed, then pad with a prefix of it to the exact size
        s = strpart * (this_part_size // chunk)
        if this_part_size > len(s):
            s = s + strpart[0:this_part_size - len(s)]
        yield s
        # note: a dead `if x == size: return` was removed here — x is always
        # strictly less than size inside the loop
def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=None):
    """
    Generate a multipart upload of random data of the given size.

    Args:
        resend_parts: optional collection of 0-based part indexes to upload
            a second time (exercises part-resend behavior).

    Returns:
        (upload_id, full_body_string, parts) where `parts` is the list
        suitable for complete_multipart_upload.
    """
    if client is None:
        client = get_client()
    if resend_parts is None:
        # avoid the mutable-default-argument pitfall (`resend_parts=[]`)
        resend_parts = []

    if content_type is None and metadata is None:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    else:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
    upload_id = response['UploadId']

    s = ''
    parts = []
    for i, part in enumerate(generate_random(size, part_size)):
        # PartNumber must start at 1, while enumerate starts at 0
        part_num = i + 1
        s += part
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
        if i in resend_parts:
            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)

    return (upload_id, s, parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='test copy object of a multipart upload')
@attr(assertion='successful')
@attr('versioning')
def test_object_copy_versioning_multipart_upload():
    """Copy a multipart-uploaded object version within and across buckets.

    Mirrors test_object_copy_versioned_bucket, but the source is a 30 MiB
    multipart upload; each copy must preserve body, length, metadata, and
    content type.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key1 = "srcmultipart"
    key1_metadata = {'foo': 'bar'}
    content_type = 'text/bla'
    objlen = 30 * 1024 * 1024
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key1)
    key1_size = response['ContentLength']
    version_id = response['VersionId']
    # copy object in the same bucket
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key2 = 'dstmultipart'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
    response = client.get_object(Bucket=bucket_name, Key=key2)
    version_id2 = response['VersionId']
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # second copy: copy the copy, addressed by its own version id
    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
    key3 = 'dstmultipart2'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
    response = client.get_object(Bucket=bucket_name, Key=key3)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # copy to another versioned bucket
    bucket_name2 = get_new_bucket()
    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key4 = 'dstmultipart3'
    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
    response = client.get_object(Bucket=bucket_name2, Key=key4)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # copy to another non versioned bucket
    bucket_name3 = get_new_bucket()
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key5 = 'dstmultipart4'
    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
    response = client.get_object(Bucket=bucket_name3, Key=key5)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # copy from a non versioned bucket
    # NOTE(review): this copies within bucket_name3 (non-versioned), whereas
    # the sibling test_object_copy_versioned_bucket copies back into the
    # versioned bucket at this step — confirm which destination is intended.
    copy_source = {'Bucket': bucket_name3, 'Key': key5}
    key6 = 'dstmultipart5'
    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
    response = client.get_object(Bucket=bucket_name3, Key=key6)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart upload without parts')
def test_multipart_upload_empty():
    """Completing a multipart upload with no parts fails 400 MalformedXML."""
    bucket_name = get_new_bucket()
    client = get_client()
    key1 = "mymultipart"
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=0)

    # no MultipartUpload argument at all: the parts list is empty
    err = assert_raises(ClientError, client.complete_multipart_upload,
                        Bucket=bucket_name, Key=key1, UploadId=upload_id)
    http_status, code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(code, 'MalformedXML')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart uploads with single small part')
def test_multipart_upload_small():
    """A multipart upload with a single 1-byte part completes and is readable."""
    bucket_name = get_new_bucket()
    client = get_client()
    key1 = "mymultipart"
    objlen = 1
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
    eq(client.get_object(Bucket=bucket_name, Key=key1)['ContentLength'], objlen)
def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
    """Create `keyname` holding `size` bytes of random content.

    Returns:
        The bucket name used (freshly created when none is supplied).
    """
    if bucket_name is None:
        bucket_name = get_new_bucket()
    if client is None:  # was `== None`; `is None` is the Python idiom
        client = get_client()
    # one part covering the whole object, encoded as UTF-8 bytes
    data_str = str(next(generate_random(size, size)))
    data = bytes(data_str, 'utf-8')
    client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
    return bucket_name
def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
    """
    Start a multipart copy of `size` bytes from src to dest in ranges of
    `part_size` bytes.

    Returns:
        (upload_id, parts) suitable for complete_multipart_upload.
    """
    if client is None:  # was `== None`; `is None` is the Python idiom
        client = get_client()

    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
    upload_id = response['UploadId']

    if version_id is None:
        copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    else:
        copy_source = {'Bucket': src_bucket_name, 'Key': src_key, 'VersionId': version_id}

    parts = []
    # enumerate replaces the hand-maintained counter; PartNumber is 1-based
    for i, start_offset in enumerate(range(0, size, part_size)):
        end_offset = min(start_offset + part_size - 1, size - 1)
        part_num = i + 1
        copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
        response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
        parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})

    return (upload_id, parts)
def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
    """Assert the dest object's bytes equal the corresponding prefix of the source.

    When `version_id` is given, the source is read at that specific version.
    """
    client = get_client()

    if version_id is None:  # was `== None`; `is None` is the Python idiom
        response = client.get_object(Bucket=src_bucket_name, Key=src_key)
    else:
        response = client.get_object(Bucket=src_bucket_name, Key=src_key, VersionId=version_id)
    src_size = response['ContentLength']

    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
    dest_size = response['ContentLength']
    dest_data = _get_body(response)
    # dest may be a partial copy, but can never exceed the source
    assert(src_size >= dest_size)

    # re-read just the matching range of the source and compare bodies
    r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
    if version_id is None:
        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r)
    else:
        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
    src_data = _get_body(response)
    eq(src_data, dest_data)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_small():
    """A 1-byte multipart copy completes and matches the source prefix."""
    src_key = 'foo'
    src_bucket_name = _create_key_with_random_content(src_key)
    dest_bucket_name = get_new_bucket()
    dest_key = "mymultipart"
    size = 1
    client = get_client()

    upload_id, parts = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})

    eq(size, client.get_object(Bucket=dest_bucket_name, Key=dest_key)['ContentLength'])
    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with an invalid range')
def test_multipart_copy_invalid_range():
    """upload_part_copy with an unsatisfiable CopySourceRange fails InvalidRange."""
    client = get_client()
    src_key = 'source'
    src_bucket_name = _create_key_with_random_content(src_key, size=5)

    response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
    upload_id = response['UploadId']

    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    # the source object is only 5 bytes, so bytes=0-21 is unsatisfiable
    copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)

    e = assert_raises(ClientError, client.upload_part_copy, Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
    status, error_code = _get_status_and_error_code(e.response)
    # some implementations answer 400, others 416; both are accepted
    valid_status = [400, 416]
    if status not in valid_status:  # was `not status in` — use the idiom
        raise AssertionError("Invalid response " + str(status))
    eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copy with an improperly formatted range')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
@attr('fails_on_rgw')
def test_multipart_copy_improper_range():
    """Every malformed CopySourceRange value fails with 400 InvalidArgument."""
    client = get_client()
    src_key = 'source'
    src_bucket_name = _create_key_with_random_content(src_key, size=5)

    upload_id = client.create_multipart_upload(
        Bucket=src_bucket_name, Key='dest')['UploadId']
    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}

    bad_ranges = ['{start}-{end}'.format(start=0, end=2),
                  'bytes={start}'.format(start=0),
                  'bytes=hello-world',
                  'bytes=0-bar',
                  'bytes=hello-',
                  'bytes=0-2,3-5']
    for bad_range in bad_ranges:
        err = assert_raises(ClientError, client.upload_part_copy,
                            Bucket=src_bucket_name, Key='dest',
                            UploadId=upload_id,
                            CopySource=copy_source,
                            CopySourceRange=bad_range,
                            PartNumber=1)
        http_status, code = _get_status_and_error_code(err.response)
        eq(http_status, 400)
        eq(code, 'InvalidArgument')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies without x-amz-copy-source-range')
def test_multipart_copy_without_range():
    """upload_part_copy with no CopySourceRange copies the whole source object."""
    client = get_client()
    src_key = 'source'
    src_bucket_name = _create_key_with_random_content(src_key, size=10)
    dest_bucket_name = get_new_bucket_name()
    get_new_bucket(name=dest_bucket_name)
    dest_key = "mymultipartcopy"

    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
    upload_id = response['UploadId']
    parts = []

    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    part_num = 1
    # deliberately no CopySourceRange: the whole 10-byte object is copied
    # (removed an unused copy_source_range variable that was never passed)
    response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
    parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})

    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
    eq(response['ContentLength'], 10)
    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_special_names():
    """Multipart-copy 1-byte objects whose key names are unusual (spaces,
    underscores, a query-string look-alike) and verify the copied content."""
    src_bucket_name = get_new_bucket()
    dest_bucket_name = get_new_bucket()
    dest_key = "mymultipart"
    size = 1
    client = get_client()

    special_keys = (' ', '_', '__', '?versionId')
    for src_key in special_keys:
        _create_key_with_random_content(src_key, bucket_name=src_bucket_name)
        upload_id, parts = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
        client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
        eq(size, response['ContentLength'])
        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
def _check_content_using_range(key, bucket_name, data, step):
    """Re-read *key* via ranged GETs of at most *step* bytes each and
    check every chunk's length and content against *data*."""
    client = get_client()
    response = client.get_object(Bucket=bucket_name, Key=key)
    size = response['ContentLength']
    for ofs in range(0, size, step):
        # The final chunk may be shorter than a full step.
        toread = min(step, size - ofs)
        end = ofs + toread - 1
        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
        eq(response['ContentLength'], toread)
        body = _get_body(response)
        eq(body, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('fails_on_aws')
def test_multipart_upload():
    """Multipart-upload a 30 MiB object, then verify bucket usage headers,
    content type, metadata, full body, and ranged reads."""
    bucket_name = get_new_bucket()
    key="mymultipart"
    content_type='text/bla'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    client = get_client()
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.head_bucket(Bucket=bucket_name)
    # x-rgw-* usage headers are server extensions; default to the expected
    # value so the check is a no-op on servers that do not send them.
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
    eq(rgw_bytes_used, objlen)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
    eq(rgw_object_count, 1)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ContentType'], content_type)
    eq(response['Metadata'], metadata)
    body = _get_body(response)
    eq(len(body), response['ContentLength'])
    eq(body, data)
    # Re-read the object with two different range chunk sizes.
    _check_content_using_range(key, bucket_name, data, 1000000)
    _check_content_using_range(key, bucket_name, data, 10000000)
def check_versioning(bucket_name, status):
    """Assert the bucket's versioning Status equals *status*; a missing
    'Status' key (never configured) matches an expected status of None."""
    client = get_client()
    try:
        response = client.get_bucket_versioning(Bucket=bucket_name)
        eq(response['Status'], status)
    except KeyError:
        # No 'Status' in the response: versioning was never configured.
        eq(status, None)
# amazon is eventually consistent, so retry a bit if the first read fails
def check_configure_versioning_retry(bucket_name, status, expected_string):
    """Set bucket versioning to *status*, then poll (up to 5 times, 1s
    apart) until get_bucket_versioning reports *expected_string*."""
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
    read_status = None
    for i in range(5):
        try:
            response = client.get_bucket_versioning(Bucket=bucket_name)
            read_status = response['Status']
        except KeyError:
            # No 'Status' key yet: configuration has not propagated.
            read_status = None
        if (expected_string == read_status):
            break
        time.sleep(1)
    eq(expected_string, read_status)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies of versioned objects')
@attr('versioning')
def test_multipart_copy_versioned():
    """Write three versions of one source key, then multipart-copy each
    version by VersionId and verify the destination content matches it."""
    src_bucket_name = get_new_bucket()
    dest_bucket_name = get_new_bucket()
    dest_key = "mymultipart"
    check_versioning(src_bucket_name, None)
    src_key = 'foo'
    check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
    size = 15 * 1024 * 1024
    # Three writes to the same key create three distinct versions.
    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
    version_id = []
    client = get_client()
    response = client.list_object_versions(Bucket=src_bucket_name)
    for ver in response['Versions']:
        version_id.append(ver['VersionId'])
    for vid in version_id:
        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
        eq(size, response['ContentLength'])
        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
    """Multipart-upload *objlen* bytes, re-sending the part indexes listed
    in *resend_parts*, then verify type, metadata, body, and ranged reads."""
    content_type = 'text/bla'
    metadata = {'foo': 'bar'}
    client = get_client()
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata, resend_parts=resend_parts)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ContentType'], content_type)
    eq(response['Metadata'], metadata)
    body = _get_body(response)
    eq(len(body), response['ContentLength'])
    eq(body, data)
    # Re-read the object with two different range chunk sizes.
    _check_content_using_range(key, bucket_name, data, 1000000)
    _check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multiple multi-part upload with different sizes')
# NOTE(review): the three attrs above describe the multiple-sizes test and
# look misplaced here; the four below appear to be the intended ones — confirm.
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_resend_part():
    """Re-sending any subset of parts before completing the upload must
    still produce a correct object."""
    bucket_name = get_new_bucket()
    key="mymultipart"
    objlen = 30 * 1024 * 1024
    _check_upload_multipart_resend(bucket_name, key, objlen, [0])
    _check_upload_multipart_resend(bucket_name, key, objlen, [1])
    _check_upload_multipart_resend(bucket_name, key, objlen, [2])
    _check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
    _check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
@attr(assertion='successful')
def test_multipart_upload_multiple_sizes():
    """Complete multipart uploads at several object sizes around the 5 MiB
    part boundary; each must complete successfully.

    The six copy-pasted size cases of the original are collapsed into a loop.
    """
    bucket_name = get_new_bucket()
    key="mymultipart"
    client = get_client()
    mib = 1024*1024
    # Sizes straddle the minimum part size and non-multiple-of-part lengths.
    sizes = [
        5*mib,
        5*mib + 100*1024,
        5*mib + 600*1024,
        10*mib + 100*1024,
        10*mib + 600*1024,
        10*mib,
    ]
    for objlen in sizes:
        (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
        client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
@attr(assertion='successful')
def test_multipart_copy_multiple_sizes():
    """Multipart-copy several sizes out of a 12 MiB source object and
    verify the destination content after each copy.

    The six copy-pasted size cases of the original are collapsed into a loop.
    """
    src_key = 'foo'
    src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
    dest_bucket_name = get_new_bucket()
    dest_key="mymultipart"
    client = get_client()
    mib = 1024*1024
    # Sizes straddle the minimum part size and non-multiple-of-part lengths.
    sizes = [
        5*mib,
        5*mib + 100*1024,
        5*mib + 600*1024,
        10*mib + 100*1024,
        10*mib + 600*1024,
        10*mib,
    ]
    for size in sizes:
        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
        client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check failure on multiple multi-part upload with size too small')
@attr(assertion='fails 400')
def test_multipart_upload_size_too_small():
    """Parts below the S3 minimum part size (other than the last) must make
    complete_multipart_upload fail with 400 EntityTooSmall."""
    bucket_name = get_new_bucket()
    key="mymultipart"
    client = get_client()
    size = 100*1024
    # part_size=10*1024 forces multiple sub-minimum parts.
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'EntityTooSmall')
def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
    """Upload *num_parts* identical 5 MiB parts plus a final 3 MiB part,
    then verify the assembled object equals the concatenated payloads.

    Returns the full expected payload string.
    """
    # 5 random chars repeated 1 MiB times -> a 5 MiB repeating payload.
    payload=gen_rand_string(5)*1024*1024
    client = get_client()
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    upload_id = response['UploadId']
    parts = []
    for part_num in range(0, num_parts):
        part = bytes(payload, 'utf-8')
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
    # A distinct final part (3 MiB) so part boundaries are detectable.
    last_payload = '123'*1024*1024
    last_part = bytes(last_payload, 'utf-8')
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key)
    test_string = _get_body(response)
    all_payload = payload*num_parts + last_payload
    assert test_string == all_payload
    return all_payload
@attr(resource='object')
@attr(method='put')
@attr(operation='check contents of multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_contents():
    """End-to-end content check for a 3-part multipart upload."""
    _do_test_multipart_upload_contents(get_new_bucket(), 'mymultipart', 3)
@attr(resource='object')
@attr(method='put')
@attr(operation=' multi-part upload overwrites existing key')
@attr(assertion='successful')
def test_multipart_upload_overwrite_existing_object():
    """A completed multipart upload must replace a pre-existing object
    stored under the same key via a plain put_object."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'mymultipart'
    payload='12345'*1024*1024
    num_parts=2
    # Seed the key with a regular (non-multipart) object first.
    client.put_object(Bucket=bucket_name, Key=key, Body=payload)
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    upload_id = response['UploadId']
    parts = []
    for part_num in range(0, num_parts):
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=payload)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key)
    test_string = _get_body(response)
    # The multipart content (payload twice) must have replaced the original.
    assert test_string == payload*num_parts
@attr(resource='object')
@attr(method='put')
@attr(operation='abort multi-part upload')
@attr(assertion='successful')
def test_abort_multipart_upload():
    """Aborting an in-progress multipart upload must leave the bucket's
    usage counters at zero."""
    bucket_name = get_new_bucket()
    key="mymultipart"
    objlen = 10 * 1024 * 1024
    client = get_client()
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)
    response = client.head_bucket(Bucket=bucket_name)
    # x-rgw-* usage headers are server extensions; default to the expected
    # value so the check is a no-op on servers that do not send them.
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', 0))
    eq(rgw_bytes_used, 0)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 0))
    eq(rgw_object_count, 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='abort non-existent multi-part upload')
@attr(assertion='fails 404')
def test_abort_multipart_upload_not_found():
    """Aborting with a bogus UploadId must fail with 404 NoSuchUpload."""
    bucket_name = get_new_bucket()
    client = get_client()
    key="mymultipart"
    client.put_object(Bucket=bucket_name, Key=key)
    # '56788' was never issued by create_multipart_upload.
    e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchUpload')
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent multi-part uploads')
@attr(assertion='successful')
def test_list_multipart_upload():
    """Start three concurrent multipart uploads (two on one key, one on a
    second key), check all three IDs appear in list_multipart_uploads,
    then abort them."""
    bucket_name = get_new_bucket()
    client = get_client()
    key="mymultipart"
    mb = 1024 * 1024
    upload_ids = []
    (upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
    upload_ids.append(upload_id1)
    (upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
    upload_ids.append(upload_id2)
    key2="mymultipart2"
    (upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
    upload_ids.append(upload_id3)
    response = client.list_multipart_uploads(Bucket=bucket_name)
    # Every upload we started must be reported as in progress.
    # (Index-based loops replaced with direct iteration / a comprehension.)
    resp_uploadids = [upload['UploadId'] for upload in response['Uploads']]
    for upload_id in upload_ids:
        eq(True, (upload_id in resp_uploadids))
    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
    client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with missing part')
def test_multipart_upload_missing_part():
    """Completing an upload that names a part number that was never
    uploaded must fail with 400 InvalidPart."""
    bucket_name = get_new_bucket()
    client = get_client()
    key="mymultipart"
    # (Removed an unused local `size = 1` from the original.)
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    upload_id = response['UploadId']
    parts = []
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
    # The part was uploaded as PartNumber 1; referencing 9999 here is the
    # deliberate mismatch under test.
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 9999})
    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidPart')
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with incorrect ETag')
def test_multipart_upload_incorrect_etag():
    """Completing an upload whose part list carries a wrong ETag must
    fail with 400 InvalidPart."""
    bucket_name = get_new_bucket()
    client = get_client()
    key="mymultipart"
    # (Removed an unused local `size = 1` from the original.)
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    upload_id = response['UploadId']
    parts = []
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
    # The real ETag would be "93b885adfe0da089cdf634904fd59f71"; send junk.
    parts.append({'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1})
    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidPart')
def _simple_http_req_100_cont(host, port, is_secure, method, resource):
    """
    Send the specified request w/expect 100-continue
    and await confirmation.

    Returns the status code from the first response line (e.g. '100', '403').
    """
    req_str = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
            method=method,
            resource=resource,
            host=host,
            )
    req = bytes(req_str, 'utf-8')

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if is_secure:
        s = ssl.wrap_socket(s)
    s.settimeout(5)
    s.connect((host, port))
    # sendall() guarantees the whole request is written; plain send() may
    # write only part of it.
    s.sendall(req)

    # Keep `data` defined even if recv() raises below; previously a timeout
    # here led to a NameError on data.decode() instead of a clear failure.
    data = b''
    try:
        data = s.recv(1024)
    except socket.error as msg:
        print('got response: ', msg)
        print('most likely server doesn\'t support 100-continue')

    s.close()
    data_str = data.decode()
    l = data_str.split(' ')
    assert l[0].startswith('HTTP')
    return l[1]
@attr(resource='object')
@attr(method='put')
@attr(operation='w/expect continue')
@attr(assertion='succeeds if object is public-read-write')
@attr('100_continue')
@attr('fails_on_mod_proxy_fcgi')
def test_100_continue():
    """An Expect: 100-continue PUT must get 403 while the bucket is private
    and 100 once the bucket ACL is public-read-write."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)
    objname='testobj'
    resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
    host = get_config_host()
    port = get_config_port()
    is_secure = get_config_is_secure()
    #NOTES: this test needs to be tested when is_secure is True
    # Anonymous write to a private bucket: server rejects before the body.
    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
    eq(status, '403')
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
    # Now the server should invite the body with an interim 100 response.
    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
    eq(status, '100')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set cors')
@attr(assertion='succeeds')
@attr('cors')
def test_set_cors():
    """CORS config lifecycle: absent (404) -> put -> get matches -> delete
    -> absent again (404)."""
    bucket_name = get_new_bucket()
    client = get_client()
    allowed_methods = ['GET', 'PUT']
    allowed_origins = ['*.get', '*.put']
    cors_config ={
        'CORSRules': [
            {'AllowedMethods': allowed_methods,
             'AllowedOrigins': allowed_origins,
            },
        ]
    }
    # No CORS configuration exists yet.
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    status = _get_status(e.response)
    eq(status, 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
    response = client.get_bucket_cors(Bucket=bucket_name)
    eq(response['CORSRules'][0]['AllowedMethods'], allowed_methods)
    eq(response['CORSRules'][0]['AllowedOrigins'], allowed_origins)
    client.delete_bucket_cors(Bucket=bucket_name)
    # Deleting the configuration restores the 404 behaviour.
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    status = _get_status(e.response)
    eq(status, 404)
def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
    """Issue a request via *func* and verify the status code plus the two
    CORS response headers (None means the header must be absent)."""
    response = func(url, headers=headers)
    eq(response.status_code, expect_status)

    got_origin = response.headers.get('access-control-allow-origin', None)
    assert got_origin == expect_allow_origin
    got_methods = response.headers.get('access-control-allow-methods', None)
    assert got_methods == expect_allow_methods
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin header set')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_response():
    """Exercise CORS origin matching with suffix (*suffix), infix
    (start*end), prefix (prefix*) and method-restricted (*.put) rules,
    over plain GET/PUT requests and OPTIONS preflights."""
    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
    client = get_client()
    cors_config ={
        'CORSRules': [
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['*suffix'],
            },
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['start*end'],
            },
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['prefix*'],
            },
            {'AllowedMethods': ['PUT'],
             'AllowedOrigins': ['*.put'],
            }
        ]
    }
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    status = _get_status(e.response)
    eq(status, 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
    # Allow the new configuration to propagate before probing it.
    time.sleep(3)
    url = _get_post_url(bucket_name)
    # Plain GETs: CORS headers appear only when the Origin matches a rule.
    _cors_request_and_check(requests.get, url, None, 200, None, None)
    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
    _cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
    _cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
    _cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
    _cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
    _cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
    _cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
    _cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
    _cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
    # Requests against a (missing) object: status reflects the S3 outcome
    # (404/403), independently of whether CORS headers are returned.
    obj_url = '{u}/{o}'.format(u=url, o='bar')
    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
                                                    'content-length': '0'}, 403, 'foo.suffix', 'GET')
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
                                                    'content-length': '0'}, 403, None, None)
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
                                                    'content-length': '0'}, 403, None, None)
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
    # OPTIONS preflights: 400 without the required headers, 200 when the
    # origin+method pair matches a rule, 403 when it does not.
    _cors_request_and_check(requests.options, url, None, 400, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
    _cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
                                                        'content-length': '0'}, 200, 'foo.suffix', 'GET')
    _cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
    _cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
    _cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
    _cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
    _cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
    _cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin is set to wildcard')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_wildcard():
    """A '*' AllowedOrigins rule must return a literal '*' allow-origin
    header for any Origin."""
    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
    client = get_client()
    cors_config ={
        'CORSRules': [
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['*'],
            },
        ]
    }
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    status = _get_status(e.response)
    eq(status, 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
    # Allow the new configuration to propagate before probing it.
    time.sleep(3)
    url = _get_post_url(bucket_name)
    _cors_request_and_check(requests.get, url, None, 200, None, None)
    _cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when Access-Control-Request-Headers is set in option request')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_header_option():
    """A preflight that requests a header the CORS rule does not cover is
    expected to be denied (403)."""
    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
    client = get_client()
    cors_config ={
        'CORSRules': [
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['*'],
             'ExposeHeaders': ['x-amz-meta-header1'],
            },
        ]
    }
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    status = _get_status(e.response)
    eq(status, 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
    # Allow the new configuration to propagate before probing it.
    time.sleep(3)
    url = _get_post_url(bucket_name)
    obj_url = '{u}/{o}'.format(u=url, o='bar')
    # NOTE(review): the rule only lists x-amz-meta-header1; asking for
    # x-amz-meta-header2 is presumably why the preflight gets 403 — confirm.
    _cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='put tags')
@attr(assertion='succeeds')
@attr('tagging')
def test_set_bucket_tagging():
    """Bucket tagging lifecycle: absent (404) -> put -> get matches ->
    delete -> absent again (404)."""
    bucket_name = get_new_bucket()
    client = get_client()
    tags={
        'TagSet': [
            {
                'Key': 'Hello',
                'Value': 'World'
            },
        ]
    }
    # NOTE(review): 'NoSuchTagSetError' looks server-specific (AWS documents
    # 'NoSuchTagSet') — confirm against the target server.
    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchTagSetError')
    client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
    response = client.get_bucket_tagging(Bucket=bucket_name)
    eq(len(response['TagSet']), 1)
    eq(response['TagSet'][0]['Key'], 'Hello')
    eq(response['TagSet'][0]['Value'], 'World')
    client.delete_bucket_tagging(Bucket=bucket_name)
    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchTagSetError')
class FakeFile(object):
    """
    File-like base that tracks a seek offset and a constant fill character.
    """
    def __init__(self, char='A', interrupt=None):
        self.offset = 0
        self.char = bytes(char, 'utf-8')
        self.interrupt = interrupt

    def seek(self, offset, whence=os.SEEK_SET):
        # NOTE(review): SEEK_END relies on self.size, which only the
        # subclasses define — confirm base-class callers never use it.
        if whence == os.SEEK_CUR:
            self.offset += offset
        elif whence == os.SEEK_END:
            self.offset = self.size + offset
        elif whence == os.SEEK_SET:
            self.offset = offset

    def tell(self):
        """Return the current simulated file offset."""
        return self.offset
class FakeWriteFile(FakeFile):
    """
    file that simulates interruptable reads of constant data
    """
    def __init__(self, size, char='A', interrupt=None):
        FakeFile.__init__(self, char, interrupt)
        self.size = size

    def read(self, size=-1):
        """Return up to *size* fill bytes (all remaining bytes if size < 0),
        firing the interrupt callback just before the final chunk returns."""
        remaining = self.size - self.offset
        if size < 0:
            size = remaining
        count = min(size, remaining)
        self.offset += count

        # Sneaky! do stuff before we return (the last time)
        if self.interrupt is not None and self.offset == self.size and count > 0:
            self.interrupt()

        return self.char * count
class FakeReadFile(FakeFile):
    """
    file that simulates writes, interrupting after the second
    """
    def __init__(self, size, char='A', interrupt=None):
        FakeFile.__init__(self, char, interrupt)
        self.interrupted = False
        self.size = 0
        self.expected_size = size
    def write(self, chars):
        # Every chunk handed to us must consist solely of the fill char.
        eq(chars, self.char*len(chars))
        self.offset += len(chars)
        self.size += len(chars)
        # Sneaky! do stuff on the second seek
        # NOTE(review): the guard fires on the first write with offset > 0,
        # not strictly "the second" — confirm the comment's intent.
        if not self.interrupted and self.interrupt != None \
                and self.offset > 0:
            self.interrupt()
            self.interrupted = True
    def close(self):
        # The download must have delivered exactly the expected byte count.
        eq(self.size, self.expected_size)
class FakeFileVerifier(object):
    """
    file that verifies expected data has been written
    """
    def __init__(self, char=None):
        # char: expected fill character (str); inferred from the first
        # write() if None.
        self.char = char
        self.size = 0

    def write(self, data):
        """Assert *data* (bytes) consists solely of the expected character."""
        size = len(data)
        if self.char is None:
            # Bug fix: indexing bytes (data[0]) yields an int in Python 3,
            # which made the eq() below compare str against int. Decode a
            # one-byte slice so self.char stays a str in both code paths.
            self.char = data[0:1].decode()
        self.size += size
        eq(data.decode(), self.char*size)
def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
    """
    Make sure file is of the expected size and (simulated) content
    """
    # FakeFileVerifier asserts on every chunk as download_fileobj writes it.
    fp_verify = FakeFileVerifier(char)
    client = get_client()
    client.download_fileobj(bucket_name, key, fp_verify)
    # size < 0 means "content check only, skip the length check".
    if size >= 0:
        eq(fp_verify.size, size)
def _test_atomic_read(file_size):
    """
    Create a file of A's, use it to set_contents_from_file.
    Create a file of B's, use it to re-set_contents_from_file.
    Re-read the contents, and confirm we get B's
    """
    bucket_name = get_new_bucket()
    client = get_client()
    fp_a = FakeWriteFile(file_size, 'A')
    client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
    # fp_a2's interrupt overwrites the object with B's mid-download; the
    # in-flight read must still see a consistent object.
    fp_b = FakeWriteFile(file_size, 'B')
    fp_a2 = FakeReadFile(file_size, 'A',
        lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
        )
    # Use a second client for the read so it does not share connection
    # state with the overwriting client.
    read_client = get_client()
    read_client.download_fileobj(bucket_name, 'testobj', fp_a2)
    fp_a2.close()
    _verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='1MB successful')
def test_atomic_read_1mb():
    """Atomic-read check with a 1 MiB object."""
    _test_atomic_read(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='4MB successful')
def test_atomic_read_4mb():
    """Atomic-read check with a 4 MiB object."""
    _test_atomic_read(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='8MB successful')
def test_atomic_read_8mb():
    """Atomic-read check with an 8 MiB object."""
    _test_atomic_read(1024*1024*8)
def _test_atomic_write(file_size):
    """
    Create a file of A's, use it to set_contents_from_file.
    Verify the contents are all A's.
    Create a file of B's, use it to re-set_contents_from_file.
    Before re-set continues, verify content's still A's
    Re-read the contents, and confirm we get B's
    """
    bucket_name = get_new_bucket()
    client = get_client()
    objname = 'testobj'
    # create <file_size> file of A's
    fp_a = FakeWriteFile(file_size, 'A')
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
    # verify A's
    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
    # create <file_size> file of B's
    # but try to verify the file before we finish writing all the B's
    # (FakeWriteFile fires the lambda just before its final read chunk)
    fp_b = FakeWriteFile(file_size, 'B',
        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
        )
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
    # verify B's
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
def test_atomic_write_1mb():
    """Write atomicity with a 1 MiB object."""
    _test_atomic_write(1024**2)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='4MB successful')
def test_atomic_write_4mb():
    """Write atomicity with a 4 MiB object."""
    _test_atomic_write(4 * 1024**2)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='8MB successful')
def test_atomic_write_8mb():
    """Write atomicity with an 8 MiB object."""
    _test_atomic_write(8 * 1024**2)
def _test_atomic_dual_write(file_size):
    """
    Start writing an object from one stream and, mid-write, complete an
    overwrite from a second stream; the stored object must be entirely
    one write's content (here the outer B write wins).
    """
    bucket_name = get_new_bucket()
    objname = 'testobj'
    client = get_client()

    # Stub out the key first with an empty body.
    client.put_object(Bucket=bucket_name, Key=objname)

    # While the B upload is in flight, rewind and push a full file of A's.
    writer_a = FakeWriteFile(file_size, 'A')
    def _interleave_a_write():
        writer_a.seek(0)
        client.put_object(Bucket=bucket_name, Key=objname, Body=writer_a)
    writer_b = FakeWriteFile(file_size, 'B', _interleave_a_write)
    client.put_object(Bucket=bucket_name, Key=objname, Body=writer_b)

    # The write that completed last (B) must be readable, intact.
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
def test_atomic_dual_write_1mb():
    """Dual-write race with 1 MiB objects."""
    _test_atomic_dual_write(1024**2)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='4MB successful')
def test_atomic_dual_write_4mb():
    """Dual-write race with 4 MiB objects."""
    _test_atomic_dual_write(4 * 1024**2)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='8MB successful')
def test_atomic_dual_write_8mb():
    """Dual-write race with 8 MiB objects."""
    _test_atomic_dual_write(8 * 1024**2)
def _test_atomic_conditional_write(file_size):
    """
    Overwrite an object of A's with B's under an 'If-Match: *' condition,
    verifying mid-write that readers still see A's and afterwards that
    the object is entirely B's.
    """
    bucket_name = get_new_bucket()
    objname = 'testobj'
    client = get_client()

    # Seed the object with A's.
    client.put_object(Bucket=bucket_name, Key=objname,
                      Body=FakeWriteFile(file_size, 'A'))

    # Mid-upload of the B's, the old A content must still be readable.
    def _still_all_a():
        _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
    writer_b = FakeWriteFile(file_size, 'B', _still_all_a)

    # Attach the conditional header to the PUT via a botocore event hook.
    def _add_if_match(**kwargs):
        kwargs['params']['headers'].update({'If-Match': '*'})
    client.meta.events.register('before-call.s3.PutObject', _add_if_match)
    client.put_object(Bucket=bucket_name, Key=objname, Body=writer_b)

    # Only B's may be visible once the conditional overwrite lands.
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_conditional_write_1mb():
    """Conditional (If-Match: *) write atomicity with a 1 MiB object."""
    _test_atomic_conditional_write(1024**2)
def _test_atomic_dual_conditional_write(file_size):
    """
    Two conditional writers race on one object: both PUTs carry an
    'If-Match' pinned to the original A-object's ETag.  The inner B write
    completes first, so the outer C write must fail with 412 and the
    object must remain entirely B's.
    """
    bucket_name = get_new_bucket()
    objname = 'testobj'
    client = get_client()

    # Seed with A's and capture the resulting ETag.
    response = client.put_object(Bucket=bucket_name, Key=objname,
                                 Body=FakeWriteFile(file_size, 'A'))
    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
    etag_fp_a = response['ETag'].replace('"', '')

    # Every subsequent PUT is made conditional on the A-version ETag.
    def _match_a_etag(**kwargs):
        kwargs['params']['headers'].update({'If-Match': etag_fp_a})
    client.meta.events.register('before-call.s3.PutObject', _match_a_etag)

    # Mid-write of the C's, rewind and complete a full B upload.
    writer_b = FakeWriteFile(file_size, 'B')
    def _interleave_b_write():
        writer_b.seek(0)
        client.put_object(Bucket=bucket_name, Key=objname, Body=writer_b)
    writer_c = FakeWriteFile(file_size, 'C', _interleave_b_write)

    # The C write now races against a changed ETag and must be rejected.
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name,
                      Key=objname, Body=writer_c)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')

    # The winning write's content (B) must be intact.
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_dual_conditional_write_1mb():
    """Dual conditional-write race with 1 MiB objects."""
    _test_atomic_dual_conditional_write(1024**2)
@attr(resource='object')
@attr(method='put')
@attr(operation='write file in deleted bucket')
@attr(assertion='fail 404')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_write_bucket_gone():
    """Deleting the bucket mid-upload must fail the PUT with NoSuchBucket."""
    bucket_name = get_new_bucket()
    client = get_client()

    # FakeWriteFile invokes this callback partway through the body read,
    # removing the destination bucket underneath the in-flight PUT.
    def remove_bucket():
        client.delete_bucket(Bucket=bucket_name)

    objname = 'foo'
    body = FakeWriteFile(1024 * 1024, 'A', remove_bucket)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name,
                      Key=objname, Body=body)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='put')
@attr(operation='begin to overwrite file with multipart upload then abort')
@attr(assertion='read back original key contents')
def test_atomic_multipart_upload_write():
    """Starting then aborting a multipart overwrite must leave the
    original object contents readable throughout."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    upload_id = client.create_multipart_upload(Bucket=bucket_name, Key='foo')['UploadId']

    # With the upload merely initiated, the old value must still win.
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')

    client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)

    # And after the abort, nothing may have changed.
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
class Counter:
    """Minimal mutable integer counter, shareable with callbacks."""

    def __init__(self, default_val):
        # Current tally; read directly by callers via .val.
        self.val = default_val

    def inc(self):
        """Advance the tally by one."""
        self.val += 1
class ActionOnCount:
    """Run *action* exactly when trigger() has been called
    *trigger_count* times, stashing the action's return in .result."""

    def __init__(self, trigger_count, action):
        self.count = 0
        self.trigger_count = trigger_count
        self.action = action
        # Remains 0 until the action fires.
        self.result = 0

    def trigger(self):
        """Count one call; fire the action on the exact matching call."""
        self.count += 1
        if self.count == self.trigger_count:
            self.result = self.action()
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart check for two writes of the same part, first write finishes last')
@attr(assertion='object contains correct content')
def test_multipart_resend_first_finishes_last():
    """Upload the same part number twice so that the first upload (A)
    finishes after the second (B); the completed object must hold the
    content of the upload that finished last (A)."""
    bucket_name = get_new_bucket()
    client = get_client()
    key_name = "mymultipart"
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
    upload_id = response['UploadId']
    #file_size = 8*1024*1024
    file_size = 8
    counter = Counter(0)
    # upload_part might read multiple times from the object
    # first time when it calculates md5, second time when it writes data
    # out. We want to interject only on the last time, but we can't be
    # sure how many times it's going to read, so let's have a test run
    # and count the number of reads
    fp_dry_run = FakeWriteFile(file_size, 'C',
        lambda: counter.inc()
        )
    parts = []
    # Dry run: count body reads per upload_part so the real run can fire
    # its action on the final read.
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
    client.delete_object(Bucket=bucket_name, Key=key_name)
    # clear parts
    parts[:] = []
    # ok, now for the actual test
    fp_b = FakeWriteFile(file_size, 'B')
    def upload_fp_b():
        # Interleaved upload of the same part number with B content.
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
    # Fire the B upload on the last body read of the A upload, i.e. just
    # before A finishes, so A's write completes after B's.
    action = ActionOnCount(counter.val, lambda: upload_fp_b())
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
    upload_id = response['UploadId']
    fp_a = FakeWriteFile(file_size, 'A',
        lambda: action.trigger()
        )
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
    # Both part uploads are in `parts`; completion must keep the content
    # of the part upload that finished last (A).
    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
    _verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_response_code():
    """A bytes=4-7 range request returns exactly that slice with 206."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)

    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
    # Range bounds are inclusive, hence the slice to 8.
    eq(_get_body(response), content[4:8])
    headers = response['ResponseMetadata']['HTTPHeaders']
    eq(headers['content-range'], 'bytes 4-7/11')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
def _generate_random_string(size):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_big_request_response_code():
    """A mid-object range on an 8 MiB body returns the right slice, 206."""
    content = _generate_random_string(8*1024*1024)
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)

    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
    # Range bounds are inclusive, hence the +1 on the slice end.
    eq(_get_body(response), content[3145728:5242881])
    headers = response['ResponseMetadata']['HTTPHeaders']
    eq(headers['content-range'], 'bytes 3145728-5242880/8388608')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_skip_leading_bytes_response_code():
    """An open-ended 'bytes=4-' range returns the object's tail with 206."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)

    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
    eq(_get_body(response), content[4:])
    headers = response['ResponseMetadata']['HTTPHeaders']
    eq(headers['content-range'], 'bytes 4-10/11')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_return_trailing_bytes_response_code():
    """A suffix range 'bytes=-7' returns the last 7 bytes with 206."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)

    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
    eq(_get_body(response), content[-7:])
    headers = response['ResponseMetadata']['HTTPHeaders']
    # Last 7 bytes of an 11-byte object are positions 4-10.
    eq(headers['content-range'], 'bytes 4-10/11')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_invalid_range():
    """An unsatisfiable range fails with 416 InvalidRange."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)

    # Offsets entirely past the end of an 11-byte object.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name,
                      Key='testobj', Range='bytes=40-50')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 416)
    eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_empty_object():
    """Any byte range on a zero-length object fails with 416."""
    content = ''
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)

    # No range can be satisfied against an empty body.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name,
                      Key='testobj', Range='bytes=40-50')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 416)
    eq(error_code, 'InvalidRange')
@attr(resource='bucket')
@attr(method='create')
@attr(operation='create versioned bucket')
@attr(assertion='can create and suspend bucket versioning')
@attr('versioning')
def test_versioning_bucket_create_suspend():
    """A fresh bucket has no versioning state and can be toggled through
    Suspended -> Enabled -> Enabled -> Suspended."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)
    for state in ("Suspended", "Enabled", "Enabled", "Suspended"):
        check_configure_versioning_retry(bucket_name, state, state)
def check_obj_content(client, bucket_name, key, version_id, content):
    """Assert the given version holds *content*, or is a delete marker
    when *content* is None."""
    response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
    if content is None:
        eq(response['DeleteMarker'], True)
    else:
        eq(_get_body(response), content)
def check_obj_versions(client, bucket_name, key, version_ids, contents):
    """Assert the bucket's version listing matches the expected ids and
    contents (both given oldest-first)."""
    response = client.list_object_versions(Bucket=bucket_name)
    listed = response['Versions']
    # The listing is newest-first; flip it to line up with version_ids.
    listed.reverse()
    for idx, version in enumerate(listed):
        eq(version['VersionId'], version_ids[idx])
        eq(version['Key'], key)
        check_obj_content(client, bucket_name, key, version['VersionId'], contents[idx])
def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
    """Upload num_versions bodies to *key*, appending each body and the
    returned version id to the supplied (or fresh) lists, which are
    returned.  Optionally re-checks the version listing afterwards."""
    # NOTE: `or []` (not `is None`) is deliberate to preserve the
    # original aliasing behavior for empty-list arguments.
    contents = contents or []
    version_ids = version_ids or []
    for seq in range(num_versions):
        payload = 'content-{i}'.format(i=seq)
        resp = client.put_object(Bucket=bucket_name, Key=key, Body=payload)
        contents.append(payload)
        version_ids.append(resp['VersionId'])
    if check_versions:
        check_obj_versions(client, bucket_name, key, version_ids, contents)
    return (version_ids, contents)
def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
    """Delete the version at *index* (modulo list length), popping it from
    both bookkeeping lists and re-checking the remaining listing."""
    eq(len(version_ids), len(contents))
    index %= len(version_ids)
    rm_version_id = version_ids.pop(index)
    rm_content = contents.pop(index)
    # The doomed version must still hold its recorded content.
    check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
    if version_ids:
        check_obj_versions(client, bucket_name, key, version_ids, contents)
def clean_up_bucket(client, bucket_name, key, version_ids):
    """Delete every listed version of *key*, then the bucket itself."""
    for vid in version_ids:
        client.delete_object(Bucket=bucket_name, Key=key, VersionId=vid)
    client.delete_bucket(Bucket=bucket_name)
def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
    """Create num_versions versions of *key*, then remove them one per
    round starting at remove_start_idx and stepping by idx_inc."""
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    idx = remove_start_idx
    for _ in range(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
        idx += idx_inc
    # Anything still listed at this point is unexpected; dump it for
    # debugging (the original behavior -- no assertion here).
    response = client.list_object_versions(Bucket=bucket_name)
    if 'Versions' in response:
        print(response['Versions'])
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove():
    """Exercise version create/remove with several removal orders."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
    key = 'testobj'
    num_versions = 5
    # (start index, per-round increment) removal patterns; (-1, 0) runs
    # twice on purpose to re-test after a full drain.
    for start, step in ((-1, 0), (-1, 0), (0, 0), (1, 0), (4, -1), (3, 3)):
        _do_test_create_remove_versions(client, bucket_name, key, num_versions, start, step)
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object and head')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove_head():
    """Removing the head version exposes the previous one; a plain delete
    then adds a delete marker that shows up in the listing."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
    key = 'testobj'
    num_versions = 5
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)

    # Drop the newest version and confirm the next-newest is now served.
    removed_version_id = version_ids.pop()
    contents.pop()
    num_versions -= 1
    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), contents[-1])

    # A versionless delete on a versioned bucket creates a delete marker.
    response = client.delete_object(Bucket=bucket_name, Key=key)
    eq(response['DeleteMarker'], True)
    delete_marker_version_id = response['VersionId']
    version_ids.append(delete_marker_version_id)

    response = client.list_object_versions(Bucket=bucket_name)
    eq(len(response['Versions']), num_versions)
    eq(len(response['DeleteMarkers']), 1)
    eq(response['DeleteMarkers'][0]['VersionId'], delete_marker_version_id)

    clean_up_bucket(client, bucket_name, key, version_ids)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_removal():
    """An object created before versioning gets the 'null' version id and
    can be deleted via that id once versioning is enabled."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)

    client = get_client()
    key = 'testobjfoo'
    content = 'fooz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content)

    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')

    # The key is gone entirely -- no object and no listed versions.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite():
    """Overwriting a pre-versioning object keeps the original reachable
    as the 'null' version until that too is deleted."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)

    client = get_client()
    key = 'testobjfoo'
    content = 'fooz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content)

    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    # Overwrite under versioning: the new body becomes the head version.
    content2 = 'zzz'
    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), content2)
    version_id = response['VersionId']

    # Removing that version re-exposes the original 'null' object.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), content)

    # And deleting the 'null' version empties the bucket.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite_suspended():
    """With versioning suspended, an overwrite replaces the 'null'
    version in place instead of stacking a new version."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)

    client = get_client()
    key = 'testobjbar'
    content = 'foooz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content)

    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")

    content2 = 'zzz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content2)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), content2)

    # The suspended-mode overwrite reused the 'null' id, so exactly one
    # version is listed.
    response = client.list_object_versions(Bucket=bucket_name)
    eq(len(response['Versions']), 1)

    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
    """Delete *key* under suspended versioning and prune the 'null'
    entries from the bookkeeping lists (a suspended-mode delete removes
    the 'null' version).  Both lists are updated in place and returned.

    Bug fix: the original popped from version_ids while iterating over
    it and advanced its index even after a pop, so the element after
    each removed 'null' was skipped -- consecutive 'null' entries were
    left behind and contents could fall out of sync with version_ids.
    """
    client.delete_object(Bucket=bucket_name, Key=key)
    eq(len(version_ids), len(contents))
    # Rebuild both lists in lockstep, keeping only non-'null' entries;
    # slice assignment preserves in-place mutation for callers that
    # ignore the return value.
    kept = [(v, c) for v, c in zip(version_ids, contents) if v != 'null']
    version_ids[:] = [v for v, _ in kept]
    contents[:] = [c for _, c in kept]
    return (version_ids, contents)
def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
    """Overwrite *key* under suspended versioning and update the
    bookkeeping lists: existing 'null' entries are pruned (the overwrite
    replaces them) and the new content is appended under the 'null' id.
    Both lists are updated in place and returned.

    Bug fix: the original popped from version_ids while iterating over
    it and advanced its index even after a pop, skipping the element
    following each removal, so stale 'null' entries could survive when
    more than one was present.
    """
    client.put_object(Bucket=bucket_name, Key=key, Body=content)
    eq(len(version_ids), len(contents))
    # Rebuild both lists in lockstep without the old 'null' entries, then
    # record the new head under the 'null' id; slice assignment keeps the
    # in-place mutation contract.
    kept = [(v, c) for v, c in zip(version_ids, contents) if v != 'null']
    version_ids[:] = [v for v, _ in kept] + ['null']
    contents[:] = [c for _, c in kept] + [content]
    return (version_ids, contents)
@attr(resource='object')
@attr(method='create')
@attr(operation='suspend versioned bucket')
@attr(assertion='suspended versioning behaves correctly')
@attr('versioning')
def test_versioning_obj_suspend_versions():
    """Suspend versioning on a populated bucket, churn the 'null' version
    via deletes/overwrites, then re-enable versioning and drain every
    remaining version."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'testobj'
    num_versions = 5
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # While suspended, deletes and overwrites only touch the 'null'
    # version; the five real versions must survive untouched.
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    # Re-enable and stack three more real versions on top.
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
    num_versions += 3
    for idx in range(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
    # Everything is drained and the bookkeeping lists agree.
    eq(len(version_ids), 0)
    eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_all():
    """Create 10 versions of one key, then remove every version and check
    the bookkeeping drains to empty."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'testobj'
    num_versions = 10
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    for removal_round in range(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, removal_round)

    eq(len(version_ids), 0)
    eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_special_names():
    """Run the create/remove version churn for keys with awkward names."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    num_versions = 10
    for key in ('_testobj', '_', ':', ' '):
        (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
        for removal_round in range(num_versions):
            remove_obj_version(client, bucket_name, key, version_ids, contents, removal_round)
        eq(len(version_ids), 0)
        eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test multipart object')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_overwrite_multipart():
    """Create several versions of one key via multipart upload, verify
    the listing, then drain all versions."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'testobj'
    num_versions = 3
    contents = [_do_test_multipart_upload_contents(bucket_name, key, 3)
                for _ in range(num_versions)]

    # Collect version ids from the listing; it is newest-first, so
    # reverse to match the oldest-first contents list.
    response = client.list_object_versions(Bucket=bucket_name)
    version_ids = [version['VersionId'] for version in response['Versions']]
    version_ids.reverse()

    check_obj_versions(client, bucket_name, key, version_ids, contents)

    for removal_round in range(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, removal_round)

    eq(len(version_ids), 0)
    eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='list versioned objects')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_list_marker():
    """Versions of two keys come back grouped per key; after reversing
    the newest-first listing, key2's five versions precede key's five."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'testobj'
    key2 = 'testobj-1'
    num_versions = 5

    def _put_versions(target_key):
        # Upload num_versions bodies, returning (ids, bodies) oldest-first.
        ids, bodies = [], []
        for seq in range(num_versions):
            payload = 'content-{i}'.format(i=seq)
            resp = client.put_object(Bucket=bucket_name, Key=target_key, Body=payload)
            bodies.append(payload)
            ids.append(resp['VersionId'])
        return ids, bodies

    version_ids, contents = _put_versions(key)
    version_ids2, contents2 = _put_versions(key2)

    response = client.list_object_versions(Bucket=bucket_name)
    versions = response['Versions']
    # Listing is newest-first; reverse so index 0 is the oldest entry.
    versions.reverse()

    # After the reverse, key2's entries occupy the first five slots
    # (oldest to newest), followed by key's five.
    for n in range(5):
        version = versions[n]
        eq(version['VersionId'], version_ids2[n])
        eq(version['Key'], key2)
        check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[n])
    for n in range(5):
        version = versions[5 + n]
        eq(version['VersionId'], version_ids[n])
        eq(version['Key'], key)
        check_obj_content(client, bucket_name, key, version['VersionId'], contents[n])
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test versioned object copying')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_copy_obj_version():
    """Each version can be copied (same-bucket and cross-bucket) and an
    unversioned copy takes the head version."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'testobj'
    num_versions = 3
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)

    def _copy_and_check(dest_bucket, idx):
        # Copy one specific version and confirm the copy's body.
        dest_key = 'key_{i}'.format(i=idx)
        source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[idx]}
        client.copy_object(Bucket=dest_bucket, CopySource=source, Key=dest_key)
        response = client.get_object(Bucket=dest_bucket, Key=dest_key)
        eq(_get_body(response), contents[idx])

    # Copies within the source bucket.
    for idx in range(num_versions):
        _copy_and_check(bucket_name, idx)

    # Copies into a second bucket.
    another_bucket_name = get_new_bucket()
    for idx in range(num_versions):
        _copy_and_check(another_bucket_name, idx)

    # A copy without VersionId picks up the latest version.
    new_key_name = 'new_key'
    client.copy_object(Bucket=another_bucket_name,
                       CopySource={'Bucket': bucket_name, 'Key': key},
                       Key=new_key_name)
    response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
    eq(_get_body(response), contents[-1])
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object with a single call')
@attr('versioning')
def test_versioning_multi_object_delete():
    """Deleting each version empties the listing; repeating the exact
    same deletes must also succeed (idempotency)."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'key'
    num_versions = 2
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)

    response = client.list_object_versions(Bucket=bucket_name)
    versions = response['Versions']
    versions.reverse()

    # Two passes over the same version ids: the second pass re-deletes
    # already-gone versions and must succeed quietly.
    for _ in range(2):
        for version in versions:
            client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
        response = client.list_object_versions(Bucket=bucket_name)
        eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object and delete marker with a single call')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker():
    """Versions plus a delete marker can all be removed; re-deleting the
    same ids is idempotent."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'key'
    num_versions = 2
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)

    # Stack a delete marker on top of the two real versions.
    client.delete_object(Bucket=bucket_name, Key=key)
    response = client.list_object_versions(Bucket=bucket_name)
    versions = response['Versions']
    delete_markers = response['DeleteMarkers']
    version_ids.append(delete_markers[0]['VersionId'])
    eq(len(version_ids), 3)
    eq(len(delete_markers), 1)

    def _delete_everything():
        for version in versions:
            client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
        for marker in delete_markers:
            client.delete_object(Bucket=bucket_name, Key=key, VersionId=marker['VersionId'])

    _delete_everything()
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
    eq(('DeleteMarkers' in response), False)

    # Doing it all again must succeed thanks to delete idempotency.
    _delete_everything()
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
    eq(('DeleteMarkers' in response), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='multi delete create marker')
@attr(assertion='returns correct marker version id')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker_create():
    """A delete on a versioned bucket returns the marker's version id,
    which then appears as the sole DeleteMarker in the listing."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'key'
    delete_marker_version_id = client.delete_object(Bucket=bucket_name, Key=key)['VersionId']

    delete_markers = client.list_object_versions(Bucket=bucket_name)['DeleteMarkers']
    eq(len(delete_markers), 1)
    eq(delete_marker_version_id, delete_markers[0]['VersionId'])
    eq(key, delete_markers[0]['Key'])
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object version changes specific version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl():
    """Setting an ACL on a specific version changes only that version;
    a fresh head version reverts to the default owner-only ACL."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    key = 'xyz'
    num_versions = 3
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    version_id = version_ids[1]

    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)

    display_name = get_main_display_name()
    user_id = get_main_user_id()
    eq(response['Owner']['DisplayName'], display_name)
    eq(response['Owner']['ID'], user_id)

    owner_grant = dict(
        Permission='FULL_CONTROL',
        ID=user_id,
        DisplayName=display_name,
        URI=None,
        EmailAddress=None,
        Type='CanonicalUser',
    )
    default_policy = [owner_grant]
    check_grants(response['Grants'], default_policy)

    # public-read on the middle version adds the AllUsers READ grant.
    client.put_object_acl(ACL='public-read', Bucket=bucket_name, Key=key, VersionId=version_id)
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    public_read_grant = dict(
        Permission='READ',
        ID=None,
        DisplayName=None,
        URI='http://acs.amazonaws.com/groups/global/AllUsers',
        EmailAddress=None,
        Type='Group',
    )
    check_grants(response['Grants'], [public_read_grant, dict(owner_grant)])

    # A brand-new head version starts over with the default ACL.
    client.put_object(Bucket=bucket_name, Key=key)
    response = client.get_object_acl(Bucket=bucket_name, Key=key)
    check_grants(response['Grants'], default_policy)
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object with no version specified changes latest version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl_no_version_specified():
    """PutObjectAcl without a VersionId must change the latest version's ACL.

    Verifies by reading the ACL back with the current version's explicit id.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'xyz'
    num_versions = 3
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # capture the id of the current (latest) version
    response = client.get_object(Bucket=bucket_name, Key=key)
    version_id = response['VersionId']
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    display_name = get_main_display_name()
    user_id = get_main_user_id()
    eq(response['Owner']['DisplayName'], display_name)
    eq(response['Owner']['ID'], user_id)
    grants = response['Grants']
    # owner-only FULL_CONTROL is the default ACL
    default_policy = [
        dict(
            Permission='FULL_CONTROL',
            ID=user_id,
            DisplayName=display_name,
            URI=None,
            EmailAddress=None,
            Type='CanonicalUser',
            ),
        ]
    check_grants(grants, default_policy)
    # no VersionId here: the call must land on the latest version
    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key)
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    grants = response['Grants']
    # confirm the latest version now carries the public-read grant set
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=None,
                DisplayName=None,
                URI='http://acs.amazonaws.com/groups/global/AllUsers',
                EmailAddress=None,
                Type='Group',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
def _do_create_object(client, bucket_name, key, i):
body = 'data {i}'.format(i=i)
client.put_object(Bucket=bucket_name, Key=key, Body=body)
def _do_remove_ver(client, bucket_name, key, version_id):
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
    """Spawn one writer thread per version of *key*; return the started threads."""
    workers = []
    for ix in range(num):
        worker = threading.Thread(target=_do_create_object,
                                  args=(client, bucket_name, key, ix))
        worker.start()
        workers.append(worker)
    return workers
def _do_clear_versioned_bucket_concurrent(client, bucket_name):
    """Spawn one deleter thread per listed object version; return the threads.

    Note: only 'Versions' entries are removed, not 'DeleteMarkers'.
    """
    listing = client.list_object_versions(Bucket=bucket_name)
    workers = []
    for ver in listing.get('Versions', []):
        worker = threading.Thread(target=_do_remove_ver,
                                  args=(client, bucket_name, ver['Key'], ver['VersionId']))
        worker.start()
        workers.append(worker)
    return workers
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation of objects, concurrent removal')
@attr(assertion='works')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved
@attr('fails_on_rgw')
@attr('versioning')
def test_versioned_concurrent_object_create_concurrent_remove():
    """Each round writes N versions of one key in parallel, then deletes them
    all in parallel, and verifies the bucket ends the round empty."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'myobj'
    num_versions = 5

    for _round in range(5):
        writers = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
        _do_wait_completion(writers)

        listing = client.list_object_versions(Bucket=bucket_name)
        eq(len(listing['Versions']), num_versions)

        removers = _do_clear_versioned_bucket_concurrent(client, bucket_name)
        _do_wait_completion(removers)

        listing = client.list_object_versions(Bucket=bucket_name)
        eq(('Versions' in listing), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation and removal of objects')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_and_remove():
    """Interleave concurrent version creation with concurrent removal, then
    sweep once more and verify no versions remain."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'myobj'
    num_versions = 3

    batches = []
    for _round in range(3):
        batches.append(_do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions))
        batches.append(_do_clear_versioned_bucket_concurrent(client, bucket_name))
    for batch in batches:
        _do_wait_completion(batch)

    # final sweep: remove whatever survived the create/remove races
    leftovers = _do_clear_versioned_bucket_concurrent(client, bucket_name)
    _do_wait_completion(leftovers)

    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config')
@attr('lifecycle')
def test_lifecycle_set():
    """PutBucketLifecycleConfiguration with two simple prefix rules succeeds."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status': 'Enabled'},
        {'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status': 'Disabled'},
    ]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config')
@attr('lifecycle')
def test_lifecycle_get():
    """Lifecycle rules read back via GET must equal the rules that were PUT."""
    bucket_name = get_new_bucket()
    client = get_client()
    rules = [
        {'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status': 'Enabled'},
        {'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status': 'Enabled'},
    ]
    client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration={'Rules': rules})
    fetched = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
    eq(fetched['Rules'], rules)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config no id')
@attr('lifecycle')
def test_lifecycle_get_no_id():
    """When rules are stored without an ID, GET must return each rule's
    contents intact and with an ID field present (server-generated)."""
    bucket_name = get_new_bucket()
    client = get_client()
    rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
           {'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
    current_lc = response['Rules']
    # expected rule contents, matched below by prefix since IDs are unknown
    Rule = namedtuple('Rule',['prefix','status','days'])
    rules = {'rule1' : Rule('test1/','Enabled',31),
             'rule2' : Rule('test2/','Enabled',120)}
    for lc_rule in current_lc:
        if lc_rule['Prefix'] == rules['rule1'].prefix:
            eq(lc_rule['Expiration']['Days'], rules['rule1'].days)
            eq(lc_rule['Status'], rules['rule1'].status)
            assert 'ID' in lc_rule
        elif lc_rule['Prefix'] == rules['rule2'].prefix:
            eq(lc_rule['Expiration']['Days'], rules['rule2'].days)
            eq(lc_rule['Status'], rules['rule2'].status)
            assert 'ID' in lc_rule
        else:
            # neither of the rules we supplied was returned, something wrong
            print("rules not right")
            assert False
# The test harness for lifecycle is configured to treat days as 10 second intervals.
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration():
    """Objects under expire1/ (1 "day") and expire3/ (4 "days") must expire on
    schedule while keep2/ objects (no rule) survive; the bucket is sampled
    between the two deadlines."""
    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
    client = get_client()
    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
           {'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    response = client.list_objects(Bucket=bucket_name)
    init_objects = response['Contents']
    # T+28s: past rule1's 1-"day" (10s) deadline, before rule2's 4-"day" one
    time.sleep(28)
    response = client.list_objects(Bucket=bucket_name)
    expire1_objects = response['Contents']
    # T+38s: still before rule2's deadline; keep2/ must be untouched
    time.sleep(10)
    response = client.list_objects(Bucket=bucket_name)
    keep2_objects = response['Contents']
    # T+58s: past rule2's 40s deadline
    time.sleep(20)
    response = client.list_objects(Bucket=bucket_name)
    expire3_objects = response['Contents']
    eq(len(init_objects), 6)
    eq(len(expire1_objects), 4)
    eq(len(keep2_objects), 4)
    eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with list-objects-v2')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
@attr('list-objects-v2')
def test_lifecyclev2_expiration():
    """Same scenario as test_lifecycle_expiration, but samples the bucket with
    ListObjectsV2 instead of ListObjects (days are 10s in the harness)."""
    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
    client = get_client()
    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
           {'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    response = client.list_objects_v2(Bucket=bucket_name)
    init_objects = response['Contents']
    # T+28s: past rule1's deadline, before rule2's
    time.sleep(28)
    response = client.list_objects_v2(Bucket=bucket_name)
    expire1_objects = response['Contents']
    # T+38s: keep2/ (no rule) must be untouched
    time.sleep(10)
    response = client.list_objects_v2(Bucket=bucket_name)
    keep2_objects = response['Contents']
    # T+58s: past rule2's 40s deadline
    time.sleep(20)
    response = client.list_objects_v2(Bucket=bucket_name)
    expire3_objects = response['Contents']
    eq(len(init_objects), 6)
    eq(len(expire1_objects), 4)
    eq(len(keep2_objects), 4)
    eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration on versioning enabled bucket')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioning_enabled():
    """On a versioned bucket, expiration must leave the noncurrent version and
    the delete marker in place (one of each after the rule runs)."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    create_multiple_versions(client, bucket_name, "test1/a", 1)
    client.delete_object(Bucket=bucket_name, Key="test1/a")

    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'Days': 1},
                            'Prefix': 'test1/', 'Status': 'Enabled'}]}
    client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    # one "day" is 10s in the harness; 30s is comfortably past the deadline
    time.sleep(30)

    listing = client.list_object_versions(Bucket=bucket_name)
    eq(len(listing['Versions']), 1)
    eq(len(listing['DeleteMarkers']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with 1 tag')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_tags1():
    """A tag-filtered expiration rule must remove the single matching object.

    Bug fix: the original read ``response['Contents']`` from the
    put_bucket_lifecycle_configuration response instead of listing the bucket
    after the expiration window, so the final check never inspected the
    bucket at all.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    tom_key = 'days1/tom'
    tom_tagset = {'TagSet':
                  [{'Key': 'tom', 'Value': 'sawyer'}]}

    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')

    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
                                         Tagging=tom_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    lifecycle_config = {
        'Rules': [
            {
                'Expiration': {
                    'Days': 1,
                },
                'ID': 'rule_tag1',
                'Filter': {
                    'Prefix': 'days1/',
                    'Tag': {
                        'Key': 'tom',
                        'Value': 'sawyer'
                    },
                },
                'Status': 'Enabled',
            },
        ]
    }

    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # wait past the 1-"day" deadline (days are 10s in the test harness),
    # then list the bucket: the tagged object must be gone
    time.sleep(28)
    response = client.list_objects(Bucket=bucket_name)
    expire_objects = response.get('Contents', [])
    eq(len(expire_objects), 0)
# factor out common setup code
def setup_lifecycle_tags2(client, bucket_name):
    """Create two tagged objects under days1/ and install a 1-day expiration
    rule whose filter combines a Prefix, a Tag and an And clause.

    Returns the PutBucketLifecycleConfiguration response.
    NOTE(review): mixing Prefix/Tag with And in one Filter is unusual; AWS
    normally expects And-only when multiple predicates are given — presumably
    this exercises RGW's lenient parsing; verify against the service tested.
    """
    # object matching only the single 'tom' tag
    tom_key = 'days1/tom'
    tom_tagset = {'TagSet':
                  [{'Key': 'tom', 'Value': 'sawyer'}]}
    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
                                         Tagging=tom_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # object carrying both tags, so it matches the And clause too
    huck_key = 'days1/huck'
    huck_tagset = {
        'TagSet':
        [{'Key': 'tom', 'Value': 'sawyer'},
         {'Key': 'huck', 'Value': 'finn'}]}
    client.put_object(Bucket=bucket_name, Key=huck_key, Body='huck_body')
    response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key,
                                         Tagging=huck_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    lifecycle_config = {
        'Rules': [
            {
                'Expiration': {
                    'Days': 1,
                },
                'ID': 'rule_tag1',
                'Filter': {
                    'Prefix': 'days1/',
                    'Tag': {
                        'Key': 'tom',
                        'Value': 'sawyer'
                    },
                    'And': {
                        'Prefix': 'days1',
                        'Tags': [
                            {
                                'Key': 'huck',
                                'Value': 'finn'
                            },
                        ]
                    }
                },
                'Status': 'Enabled',
            },
        ]
    }
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    return response
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with 2 tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_tags2():
    """After the tag-filtered rule runs, only one of the two objects remains."""
    bucket_name = get_new_bucket()
    client = get_client()
    setup_lifecycle_tags2(client, bucket_name)
    # days are 10s in the harness; wait past the 1-"day" deadline
    time.sleep(28)
    listing = client.list_objects(Bucket=bucket_name)
    eq(len(listing['Contents']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with versioning and 2 tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioned_tags2():
    """Same tag-filter scenario as tags2, but with versioning enabled first."""
    bucket_name = get_new_bucket()
    client = get_client()
    # mix in versioning
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    setup_lifecycle_tags2(client, bucket_name)
    # days are 10s in the harness; wait past the 1-"day" deadline
    time.sleep(28)
    listing = client.list_objects(Bucket=bucket_name)
    eq(len(listing['Contents']), 1)
# setup for scenario based on vidushi mishra's in rhbz#1877737
def setup_lifecycle_noncur_tags(client, bucket_name, days):
    """Write 10 versions of one tagged key and install a tag-filtered
    NoncurrentVersionExpiration rule of *days* noncurrent days.

    Returns the PutBucketLifecycleConfiguration response.
    """
    # first create and tag the objects (10 versions of 1)
    key = "myobject_"
    tagset = {'TagSet':
              [{'Key': 'vidushi', 'Value': 'mishra'}]}
    for ix in range(10):
        # distinct bodies so each PUT creates a distinguishable version
        body = "%s v%d" % (key, ix)
        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
        response = client.put_object_tagging(Bucket=bucket_name, Key=key,
                                             Tagging=tagset)
        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    lifecycle_config = {
        'Rules': [
            {
                'NoncurrentVersionExpiration': {
                    'NoncurrentDays': days,
                },
                'ID': 'rule_tag1',
                'Filter': {
                    'Prefix': '',
                    'Tag': {
                        'Key': 'vidushi',
                        'Value': 'mishra'
                    },
                },
                'Status': 'Enabled',
            },
        ]
    }
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    return response
def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs):
    """Sleep *secs* seconds, then return the number of object versions left.

    An empty bucket's ListObjectVersions response carries no 'Versions' key;
    that case is reported as 0. Fix: the original wrapped the call in a bare
    ``except:`` which also silently swallowed real client errors (and
    KeyboardInterrupt); only the missing-key case is handled now, so genuine
    failures propagate.
    """
    time.sleep(secs)
    response = client.list_object_versions(Bucket=bucket_name)
    return len(response.get('Versions', []))
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle noncurrent expiration with 1 tag filter')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_noncur_tags1():
    """Tag-filtered noncurrent expiration removes the 9 noncurrent versions
    once they pass the 4-"day" threshold, keeping only the current one."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")

    # create 10 object versions (9 noncurrent) and a tag-filter
    # noncurrent version expiration at 4 "days"
    setup_lifecycle_noncur_tags(client, bucket_name, 4)

    # at T+20, nothing is old enough yet: all 10 versions remain
    eq(verify_lifecycle_expiration_noncur_tags(client, bucket_name, 20), 10)
    # at T+60, only the current object version should exist
    eq(verify_lifecycle_expiration_noncur_tags(client, bucket_name, 40), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='id too long in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_id_too_long():
    """A 256-character rule ID must be rejected with 400 InvalidArgument."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [{'ID': 'a' * 256, 'Expiration': {'Days': 2},
                            'Prefix': 'test1/', 'Status': 'Enabled'}]}
    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration,
                      Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='same id')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_same_id():
    """Two rules sharing an ID must be rejected with 400 InvalidArgument."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status': 'Enabled'},
        {'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status': 'Enabled'},
    ]}
    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration,
                      Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='invalid status in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_invalid_status():
    """Rules whose Status is not exactly 'Enabled'/'Disabled' fail with 400
    MalformedXML.

    Fix: the original checked three statuses with near-identical duplicated
    code, and the middle check inconsistently used the deprecated
    ``put_bucket_lifecycle`` call while the others used
    ``put_bucket_lifecycle_configuration``; all three now go through the
    modern call in a single loop.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    # 'Status' is case-sensitive: lowercase and arbitrary values are malformed
    for bad_status in ('enabled', 'disabled', 'invalid'):
        rules = [{'ID': 'rule1', 'Expiration': {'Days': 2},
                  'Prefix': 'test1/', 'Status': bad_status}]
        lifecycle = {'Rules': rules}
        e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration,
                          Bucket=bucket_name, LifecycleConfiguration=lifecycle)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 400)
        eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with expiration date')
@attr('lifecycle')
def test_lifecycle_set_date():
    """An ISO8601 expiration Date in a rule is accepted (HTTP 200)."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'},
                            'Prefix': 'test1/', 'Status': 'Enabled'}]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with not iso8601 date')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_set_invalid_date():
    """A non-ISO8601 expiration Date must be rejected with HTTP 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'Date': '20200101'},
                            'Prefix': 'test1/', 'Status': 'Enabled'}]}
    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration,
                      Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with date')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_date():
    """A rule dated in the past expires its object; a far-future one keeps it."""
    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
    client = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status': 'Enabled'},
        {'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status': 'Enabled'},
    ]}
    client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)

    init_objects = client.list_objects(Bucket=bucket_name)['Contents']
    # give the lifecycle worker time to run
    time.sleep(20)
    expire_objects = client.list_objects(Bucket=bucket_name)['Contents']

    eq(len(init_objects), 2)
    eq(len(expire_objects), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration days 0')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_days0():
    """Expiration with Days=0 must be rejected as InvalidArgument."""
    bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
    client = get_client()
    lifecycle = {'Rules': [{'Expiration': {'Days': 0}, 'ID': 'rule1',
                            'Prefix': 'days0/', 'Status': 'Enabled'}]}
    # days: 0 is legal in a transition rule, but not legal in an
    # expiration rule
    response_code = ""
    try:
        client.put_bucket_lifecycle_configuration(
            Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    except botocore.exceptions.ClientError as e:
        response_code = e.response['Error']['Code']
    eq(response_code, 'InvalidArgument')
def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days,
                               rule_prefix):
    """Install a single Days-based expiration rule, then PUT one object under
    the rule's prefix and return that PutObject response."""
    lifecycle = {'Rules': [{'ID': rule_id,
                            'Expiration': {'Days': delta_days},
                            'Prefix': rule_prefix,
                            'Status': 'Enabled'}]}
    put_lc = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(put_lc['ResponseMetadata']['HTTPStatusCode'], 200)

    put_obj = client.put_object(Bucket=bucket_name, Key=rule_prefix + 'foo',
                                Body='bar')
    eq(put_obj['ResponseMetadata']['HTTPStatusCode'], 200)
    return put_obj
def check_lifecycle_expiration_header(response, start_time, rule_id,
                                      delta_days):
    """Return True iff the response's x-amz-expiration header is present and
    encodes both the expected expiry-day offset and the expected rule id."""
    headers = response['ResponseMetadata']['HTTPHeaders']
    if 'x-amz-expiration' not in headers:
        return False
    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"',
                  headers['x-amz-expiration'])
    expiry = dateutil.parser.parse(m.group(1))
    # compare as naive datetimes: start_time is naive, expiry carries a tz
    days_ok = (expiry.replace(tzinfo=None) - start_time).days == delta_days
    return days_ok and m.group(2) == rule_id
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration header put')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_put():
    """The PutObject response must carry an x-amz-expiration header matching
    the installed 1-day rule."""
    bucket_name = get_new_bucket()
    client = get_client()
    now = datetime.datetime.now(None)
    response = setup_lifecycle_expiration(client, bucket_name, 'rule1', 1, 'days1/')
    eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_head():
    """HeadObject on a rule-covered key must return the x-amz-expiration
    header matching the installed 1-day rule."""
    bucket_name = get_new_bucket()
    client = get_client()
    now = datetime.datetime.now(None)
    setup_lifecycle_expiration(client, bucket_name, 'rule1', 1, 'days1/')

    # stat the object, check header
    head = client.head_object(Bucket=bucket_name, Key='days1/' + 'foo')
    eq(head['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(check_lifecycle_expiration_header(head, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head with tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_tags_head():
    """x-amz-expiration must appear on HEAD when a tag-filtered rule matches
    the object's tags, and disappear once the rule's tag no longer matches."""
    bucket_name = get_new_bucket()
    client = get_client()
    # rule matches objects tagged key1=tag1
    lifecycle={
        "Rules": [
        {
            "Filter": {
                "Tag": {"Key": "key1", "Value": "tag1"}
            },
            "Status": "Enabled",
            "Expiration": {
                "Days": 1
            },
            "ID": "rule1"
            },
        ]
    }
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    key1 = "obj_key1"
    body1 = "obj_key1_body"
    # object carries the matching key1=tag1 (plus an unrelated tag)
    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
          {'Key': 'key5','Value': 'tag5'}]}
    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
    # stat the object, check header
    response = client.head_object(Bucket=bucket_name, Key=key1)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), True)

    # test that header is not returning when it should not
    # replace the rule with one keyed on key2, which the object lacks
    lifecycle={
        "Rules": [
        {
            "Filter": {
                "Tag": {"Key": "key2", "Value": "tag1"}
            },
            "Status": "Enabled",
            "Expiration": {
                "Days": 1
            },
            "ID": "rule1"
            },
        ]
    }
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    # stat the object, check header
    response = client.head_object(Bucket=bucket_name, Key=key1)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head with tags and And')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_and_tags_head():
    """With an And filter requiring key1=tag1 AND key5=tag6, an object tagged
    key5=tag5 matches only partially, so no x-amz-expiration header appears."""
    now = datetime.datetime.now(None)
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle={
        "Rules": [
        {
            "Filter": {
                "And": {
                    "Tags": [
                        {
                            "Key": "key1",
                            "Value": "tag1"
                        },
                        {
                            "Key": "key5",
                            "Value": "tag6"
                        }
                    ]
                }
            },
            "Status": "Enabled",
            "Expiration": {
                "Days": 1
            },
            "ID": "rule1"
            },
        ]
    }
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    key1 = "obj_key1"
    body1 = "obj_key1_body"
    # key5 is tag5 here, not the tag6 the rule demands — rule must not match
    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
          {'Key': 'key5','Value': 'tag5'}]}
    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
    # stat the object, check header
    response = client.head_object(Bucket=bucket_name, Key=key1)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with noncurrent version expiration')
@attr('lifecycle')
def test_lifecycle_set_noncurrent():
    """NoncurrentVersionExpiration rules are accepted (HTTP 200)."""
    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
    client = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2},
         'Prefix': 'past/', 'Status': 'Enabled'},
        {'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3},
         'Prefix': 'future/', 'Status': 'Enabled'},
    ]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle non-current version expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_noncur_expiration():
    """Noncurrent expiration removes only noncurrent versions under the
    rule's prefix; objects outside the prefix keep all their versions."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    create_multiple_versions(client, bucket_name, "test1/a", 3)
    # not checking the object contents on the second run, because the function doesn't support multiple checks
    create_multiple_versions(client, bucket_name, "test2/abc", 3, check_versions=False)

    init_versions = client.list_object_versions(Bucket=bucket_name)['Versions']

    lifecycle = {'Rules': [{'ID': 'rule1',
                            'NoncurrentVersionExpiration': {'NoncurrentDays': 2},
                            'Prefix': 'test1/', 'Status': 'Enabled'}]}
    client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    # 2 "days" = 20s in the harness; 50s comfortably exceeds it
    time.sleep(50)

    expire_versions = client.list_object_versions(Bucket=bucket_name)['Versions']
    eq(len(init_versions), 6)
    # test1/ lost its 2 noncurrent versions; test2/ keeps all 3
    eq(len(expire_versions), 4)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with delete marker expiration')
@attr('lifecycle')
def test_lifecycle_set_deletemarker():
    """An ExpiredObjectDeleteMarker rule is accepted (HTTP 200)."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1',
                            'Expiration': {'ExpiredObjectDeleteMarker': True},
                            'Prefix': 'test1/', 'Status': 'Enabled'}]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with Filter')
@attr('lifecycle')
def test_lifecycle_set_filter():
    """A rule using a Filter/Prefix (instead of top-level Prefix) is accepted."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1',
                            'Expiration': {'ExpiredObjectDeleteMarker': True},
                            'Filter': {'Prefix': 'foo'}, 'Status': 'Enabled'}]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with empty Filter')
@attr('lifecycle')
def test_lifecycle_set_empty_filter():
    """A rule with an empty Filter dict is accepted (HTTP 200)."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1',
                            'Expiration': {'ExpiredObjectDeleteMarker': True},
                            'Filter': {}, 'Status': 'Enabled'}]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle delete marker expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_deletemarker_expiration():
    """Under test1/, noncurrent expiration plus ExpiredObjectDeleteMarker must
    remove both the noncurrent version and the now-orphaned delete marker,
    leaving only test2/'s version and marker."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    create_multiple_versions(client, bucket_name, "test1/a", 1)
    create_multiple_versions(client, bucket_name, "test2/abc", 1, check_versions=False)
    # deleting each key makes its single version noncurrent and adds a marker
    client.delete_object(Bucket=bucket_name, Key="test1/a")
    client.delete_object(Bucket=bucket_name, Key="test2/abc")
    response = client.list_object_versions(Bucket=bucket_name)
    init_versions = response['Versions']
    deleted_versions = response['DeleteMarkers']
    total_init_versions = init_versions + deleted_versions
    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    # days are 10s in the harness; 50s covers the noncurrent-day threshold
    # plus the follow-up marker cleanup pass
    time.sleep(50)
    response = client.list_object_versions(Bucket=bucket_name)
    init_versions = response['Versions']
    deleted_versions = response['DeleteMarkers']
    total_expire_versions = init_versions + deleted_versions
    # before: 2 versions + 2 markers; after: only test2/'s version + marker
    eq(len(total_init_versions), 4)
    eq(len(total_expire_versions), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with multipart expiration')
@attr('lifecycle')
def test_lifecycle_set_multipart():
    """AbortIncompleteMultipartUpload rules are accepted (HTTP 200)."""
    bucket_name = get_new_bucket()
    client = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}},
    ]}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle multipart expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_multipart_expiration():
    """AbortIncompleteMultipartUpload must reap only uploads under its prefix."""
    bucket_name = get_new_bucket()
    client = get_client()

    upload_ids = []
    for key in ['test1/a', 'test2/']:
        created = client.create_multipart_upload(Bucket=bucket_name, Key=key)
        upload_ids.append(created['UploadId'])

    init_uploads = client.list_multipart_uploads(Bucket=bucket_name)['Uploads']

    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
    ]}
    client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    # 2 "days" = 20s in the harness; 50s is comfortably past that
    time.sleep(50)

    expired_uploads = client.list_multipart_uploads(Bucket=bucket_name)['Uploads']
    eq(len(init_uploads), 2)
    # only the test1/ upload is aborted; test2/ remains
    eq(len(expired_uploads), 1)
def _test_encryption_sse_customer_write(file_size):
    """Round-trip ``file_size`` bytes through SSE-C.

    Uploads a body of A's with customer-provided-key headers, downloads it
    with the same headers, and verifies the content survives unchanged.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*file_size
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    client.put_object(Bucket=bucket_name, Key=key, Body=data)

    # SSE-C objects can only be read back with the same key headers.
    client.meta.events.register(
        'before-call.s3.GetObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1b():
    """SSE-C round-trip of a 1-byte object."""
    size = 1
    _test_encryption_sse_customer_write(size)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1kb():
    """SSE-C round-trip of a 1 KiB object."""
    size = 1024
    _test_encryption_sse_customer_write(size)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1MB():
    """SSE-C round-trip of a 1 MiB object."""
    size = 1024*1024
    _test_encryption_sse_customer_write(size)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_13b():
    """SSE-C round-trip of a 13-byte object (non-power-of-two size)."""
    size = 13
    _test_encryption_sse_customer_write(size)
@attr(assertion='success')
@attr('encryption')
def test_encryption_sse_c_method_head():
    """HEAD on an SSE-C object fails without the key headers and succeeds
    with them."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*1000
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    client.put_object(Bucket=bucket_name, Key=key, Body=data)

    # HEAD without the customer key headers is rejected.
    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)

    # HEAD with the key headers succeeds.
    client.meta.events.register(
        'before-call.s3.HeadObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C and read without SSE-C')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_present():
    """Reading an SSE-C object without supplying the key must fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*1000
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    client.put_object(Bucket=bucket_name, Key=key, Body=data)

    # Plain GET — no SSE-C headers registered for GetObject.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C but read with other key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_other_key():
    """Reading an SSE-C object with a different (valid) customer key must
    fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*100
    sse_client_headers_A = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    sse_client_headers_B = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    # Write with key A ...
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers_A))
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    # ... then attempt to read with key B.
    client.meta.events.register(
        'before-call.s3.GetObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers_B))
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but md5 is bad')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_invalid_md5():
    """PUT with an SSE-C key whose MD5 header does not match must fail 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*100
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but dont provide MD5')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_md5():
    """PUT with an SSE-C key but no key-MD5 header must be rejected."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*100
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-C but do not provide key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_key():
    """PUT declaring the SSE-C algorithm without a key must be rejected."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*100
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-C but provide key and MD5')
@attr(assertion='operation successfull, no encryption')
@attr('encryption')
def test_encryption_key_no_sse_c():
    """PUT with SSE-C key/MD5 headers but no algorithm declared fails 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testobj'
    data = 'A'*100
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_client_headers))
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
    """Drive a multipart upload of ``size`` random bytes with custom headers.

    ``init_headers`` are injected into CreateMultipartUpload and
    ``part_headers`` into every UploadPart (SSE-C/SSE-KMS tests use this to
    attach — or deliberately mismatch — encryption headers).  Parts whose
    zero-based index appears in ``resend_parts`` are uploaded twice.

    Returns ``(upload_id, concatenated_body, parts)`` where ``parts`` is the
    list of ``{'ETag', 'PartNumber'}`` dicts for CompleteMultipartUpload.
    """
    if client is None:  # was `== None`; identity comparison is the idiom
        client = get_client()
    lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
    client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
    if metadata is None:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    else:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
    upload_id = response['UploadId']
    s = ''
    parts = []
    # Register the part-header injector once.  The original registered a
    # fresh identical lambda on every iteration (and again on resends),
    # accumulating duplicate handlers on the client's event system.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
    client.meta.events.register('before-call.s3.UploadPart', lf)
    for i, part in enumerate(generate_random(size, part_size)):
        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
        part_num = i+1
        s += part
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
        if i in resend_parts:
            # resend the same part; the handler registered above still fires
            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
    return (upload_id, s, parts)
def _check_content_using_range_enc(client, bucket_name, key, data, step, enc_headers=None):
    """Read ``key`` back in ``step``-sized ranged GETs and compare every
    slice against ``data``.

    ``enc_headers``, when given, are injected into the ranged GETs — SSE-C
    requires the customer-key headers on range reads too.  The original
    version re-registered a fresh handler lambda on every loop iteration
    (duplicating handlers) and would crash on ``dict.update(None)`` if
    ``enc_headers`` was omitted; both are fixed here.
    """
    response = client.get_object(Bucket=bucket_name, Key=key)
    size = response['ContentLength']
    if enc_headers is not None:
        # register the header injector once, not once per range request
        client.meta.events.register(
            'before-call.s3.GetObject',
            lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
    for ofs in range(0, size, step):
        toread = min(size - ofs, step)
        end = ofs + toread - 1
        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
        read_range = response['ContentLength']
        body = _get_body(response)
        eq(read_range, toread)
        eq(body, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('encryption')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_encryption_sse_c_multipart_upload():
    """Full SSE-C multipart upload: complete it, check bucket accounting,
    and read the object back (whole and by ranges) with the same key."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    enc_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    resend_parts = []

    def inject(headers):
        # build a botocore before-call handler that adds `headers`
        return lambda **kw: kw['params']['headers'].update(headers)

    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)

    client.meta.events.register('before-call.s3.CompleteMultipartUpload', inject(enc_headers))
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})

    # RGW-specific accounting headers; fall back to the expected value on AWS.
    response = client.head_bucket(Bucket=bucket_name)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
    eq(rgw_object_count, 1)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
    eq(rgw_bytes_used, objlen)

    client.meta.events.register('before-call.s3.GetObject', inject(enc_headers))
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['Metadata'], metadata)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
    body = _get_body(response)
    eq(body, data)
    eq(len(body), response['ContentLength'])

    _check_content_using_range_enc(client, bucket_name, key, data, 1000000, enc_headers=enc_headers)
    _check_content_using_range_enc(client, bucket_name, key, data, 10000000, enc_headers=enc_headers)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad key for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_1():
    """Uploading parts with a different SSE-C key than the one used to
    initiate the multipart upload must fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    # valid second key — but not the one the upload was initiated with
    part_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    resend_parts = []
    e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers,
            part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad md5 for chunks')
@attr(assertion='successful')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_2():
    """Uploading parts whose key-MD5 header does not match the key must
    fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    # same key, deliberately bogus MD5
    part_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
    }
    resend_parts = []
    e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers,
            part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload and download with bad key')
@attr(assertion='successful')
@attr('encryption')
def test_encryption_sse_c_multipart_bad_download():
    """SSE-C multipart object reads back with the uploading key, but a GET
    with a different customer key is rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    put_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    get_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    resend_parts = []

    def inject(headers):
        # build a botocore before-call handler that adds `headers`
        return lambda **kw: kw['params']['headers'].update(headers)

    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
            part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)

    client.meta.events.register('before-call.s3.CompleteMultipartUpload', inject(put_headers))
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})

    # RGW-specific accounting headers; fall back to the expected value on AWS.
    response = client.head_bucket(Bucket=bucket_name)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
    eq(rgw_object_count, 1)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
    eq(rgw_bytes_used, objlen)

    # GET with the correct key succeeds ...
    client.meta.events.register('before-call.s3.GetObject', inject(put_headers))
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['Metadata'], metadata)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)

    # ... GET with the wrong key is rejected.
    client.meta.events.register('before-call.s3.GetObject', inject(get_headers))
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_encryption_sse_c_post_object_authenticated_request():
    """Browser-style POST upload carrying SSE-C fields, then GET with the
    same customer key to verify the stored content."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""],
            ["starts-with", "$x-amz-server-side-encryption-customer-key", ""],
            ["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""],
            ["content-length-range", 0, 1024],
        ],
    }
    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(bytes(json_policy_document, 'utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
    # field order matters: 'file' must come last
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('x-amz-server-side-encryption-customer-algorithm', 'AES256'),
        ('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='),
        ('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)

    get_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    client.meta.events.register(
        'before-call.s3.GetObject',
        lambda **kw: kw['params']['headers'].update(get_headers))
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(assertion='success')
@attr('encryption')
def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
    """Round-trip ``file_size`` bytes through SSE-KMS using ``key_id``.

    The PUT carries the KMS headers; the GET needs none, since the server
    decrypts transparently.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': key_id
    }
    data = 'A'*file_size
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
    response = client.get_object(Bucket=bucket_name, Key='testobj')
    eq(_get_body(response), data)
@attr(resource='object')
@attr(method='head')
@attr(operation='Test SSE-KMS encrypted does perform head properly')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_method_head():
    """Plain HEAD on an SSE-KMS object reports the encryption headers;
    sending SSE headers on the HEAD request itself is rejected."""
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
    }
    key = 'testobj'
    data = 'A'*1000
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    client.put_object(Bucket=bucket_name, Key=key, Body=data)

    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)

    # HEAD with the SSE headers attached is invalid.
    client.meta.events.register(
        'before-call.s3.HeadObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-KMS and read without SSE-KMS')
@attr(assertion='operation success')
@attr('encryption')
def test_sse_kms_present():
    """An SSE-KMS object can be read back with a plain GET — the server
    decrypts transparently."""
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
    }
    key = 'testobj'
    data = 'A'*100
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-KMS but do not provide key_id')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_no_key():
    """PUT declaring aws:kms without a key id must be rejected."""
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
    }
    key = 'testobj'
    data = 'A'*100
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-KMS but provide key_id')
@attr(assertion='operation successfull, no encryption')
@attr('encryption')
def test_sse_kms_not_declared():
    """PUT with a KMS key id but without declaring aws:kms fails with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
    }
    key = 'testobj'
    data = 'A'*100
    client.meta.events.register(
        'before-call.s3.PutObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete KMS multi-part upload')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_upload():
    """Multipart upload with SSE-KMS headers completes and reads back intact.

    Unlike SSE-C, the GET path needs no encryption headers — the server
    decrypts transparently with the KMS key.
    """
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    enc_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
        'Content-Type': content_type
    }
    resend_parts = []
    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})

    # RGW-specific accounting headers; fall back to the expected value on AWS.
    response = client.head_bucket(Bucket=bucket_name)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
    eq(rgw_object_count, 1)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
    eq(rgw_bytes_used, objlen)

    # BUG FIX: the original registered an UploadPart handler here whose
    # lambda referenced `part_headers`, a name undefined in this function —
    # a latent NameError copy-pasted from the SSE-C test.  A plain GET is
    # correct for SSE-KMS, so the stale registration is simply dropped.
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['Metadata'], metadata)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
    body = _get_body(response)
    eq(body, data)
    size = response['ContentLength']
    eq(len(body), size)

    _check_content_using_range(key, bucket_name, data, 1000000)
    _check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with bad key_id for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_1():
    """Parts uploaded with a different (but valid) KMS key id than the
    initiating request are accepted — the init key wins."""
    kms_keyid = get_main_kms_keyid()
    kms_keyid2 = get_secondary_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/bla'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
        'Content-Type': content_type
    }
    part_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
    }
    resend_parts = []
    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
            resend_parts=resend_parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with unexistent key_id for chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_2():
    """Parts uploaded with a nonexistent KMS key id are still accepted —
    the key id from the initiating request is authoritative."""
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
        'Content-Type': content_type
    }
    part_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
    }
    resend_parts = []
    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
            resend_parts=resend_parts)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated KMS browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_sse_kms_post_object_authenticated_request():
    """Browser-style POST upload carrying SSE-KMS fields; a plain GET then
    verifies the stored content (KMS decrypts server-side)."""
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["starts-with", "$x-amz-server-side-encryption", ""],
            ["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""],
            ["content-length-range", 0, 1024],
        ],
    }
    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(bytes(json_policy_document, 'utf-8'))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(
        hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
    # field order matters: 'file' must come last
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('x-amz-server-side-encryption', 'aws:kms'),
        ('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)

    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1b():
    """SSE-KMS round-trip of a 1-byte object; skipped without a KMS key."""
    kms_keyid = get_main_kms_keyid()
    if kms_keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(1, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1kb():
    """SSE-KMS round-trip of a 1 KiB object; skipped without a KMS key."""
    kms_keyid = get_main_kms_keyid()
    if kms_keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(1024, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1MB():
    """SSE-KMS round-trip of a 1 MiB object; skipped without a KMS key."""
    kms_keyid = get_main_kms_keyid()
    if kms_keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(1024*1024, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_13b():
    """SSE-KMS round-trip of a 13-byte object; skipped without a KMS key."""
    kms_keyid = get_main_kms_keyid()
    if kms_keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(13, key_id = kms_keyid)
@attr(resource='object')
@attr(method='get')
@attr(operation='write encrypted with SSE-KMS and read with SSE-KMS')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_read_declare():
    """Sending SSE-KMS headers on a GET request is invalid and must fail."""
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
    }
    key = 'testobj'
    data = 'A'*100
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    client.meta.events.register(
        'before-call.s3.GetObject',
        lambda **kw: kw['params']['headers'].update(sse_kms_client_headers))
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy():
    """A wildcard-principal Allow policy lets the alt user list the bucket."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "arn:aws:s3:::" + bucket_name,
                "arn:aws:s3:::" + bucket_name + "/*",
            ]
        }]
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    alt_client = get_alt_client()
    response = alt_client.list_objects(Bucket=bucket_name)
    eq(len(response['Contents']), 1)
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy():
    """Same as test_bucket_policy but exercising ListObjectsV2."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "arn:aws:s3:::{}".format(bucket_name),
                "arn:aws:s3:::{}/*".format(bucket_name),
            ],
        }],
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # The alt user is not the owner; only the policy grants this access.
    response = get_alt_client().list_objects_v2(Bucket=bucket_name)
    eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL')
@attr(assertion='fails')
@attr('bucket-policy')
def test_bucket_policy_acl():
    """An explicit Deny policy overrides a permissive bucket ACL."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Deny",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "arn:aws:s3:::{}".format(bucket_name),
                "arn:aws:s3:::{}/*".format(bucket_name),
            ],
        }],
    })
    # ACL would allow any authenticated user, but the Deny statement wins.
    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    alt_client = get_alt_client()
    e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')

    # Drop the Deny so the suite teardown can clean up the bucket.
    client.delete_bucket_policy(Bucket=bucket_name)
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL with list-objects-v2')
@attr(assertion='fails')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_acl():
    """Same as test_bucket_policy_acl but exercising ListObjectsV2."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Deny",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "arn:aws:s3:::{}".format(bucket_name),
                "arn:aws:s3:::{}/*".format(bucket_name),
            ],
        }],
    })
    # ACL would allow any authenticated user, but the Deny statement wins.
    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    alt_client = get_alt_client()
    e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')

    # Drop the Deny so the suite teardown can clean up the bucket.
    client.delete_bucket_policy(Bucket=bucket_name)
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_different_tenant():
    """A wildcard-tenant policy should let a user from another tenant list the bucket.

    The ARNs use "arn:aws:s3::*:bucket" (wildcard in the account/tenant slot).
    A before-call hook rewrites the request so the bucket is addressed as
    ":<name>" (empty tenant prefix) — this is a work in progress, hence the
    fails_on_rgw tag and the leftover debug prints.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'asdf'
    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
    # Wildcard tenant in the ARN: "arn:aws:s3::*:bucket" rather than "arn:aws:s3:::bucket".
    resource1 = "arn:aws:s3::*:" + bucket_name
    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "*"},
        "Action": "s3:ListBucket",
        "Resource": [
            "{}".format(resource1),
            "{}".format(resource2)
            ]
        }]
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # TODO: figure out how to change the bucketname
    def change_bucket_name(**kwargs):
        # Rewrite URL, path and signing context so the request addresses the
        # bucket as ":<bucket>" (empty tenant). NOTE(review): the endpoint is
        # hard-coded to http://localhost:8000 — confirm against the test config.
        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
        print(kwargs['request_signer'])
        print(kwargs)
    #bucket_name = ":" + bucket_name
    tenant_client = get_tenant_client()
    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
    response = tenant_client.list_objects(Bucket=bucket_name)
    #alt_client = get_alt_client()
    #response = alt_client.list_objects(Bucket=bucket_name)
    eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
@attr('list-objects-v2')
def test_bucketv2_policy_different_tenant():
    """A wildcard-tenant policy should let a user from another tenant list the
    bucket via ListObjectsV2.

    The ARNs use "arn:aws:s3::*:bucket" (wildcard in the account/tenant slot).
    A before-call hook rewrites the request so the bucket is addressed as
    ":<name>" (empty tenant prefix) — work in progress, hence fails_on_rgw.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'asdf'
    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
    # Wildcard tenant in the ARN: "arn:aws:s3::*:bucket" rather than "arn:aws:s3:::bucket".
    resource1 = "arn:aws:s3::*:" + bucket_name
    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "*"},
        "Action": "s3:ListBucket",
        "Resource": [
            "{}".format(resource1),
            "{}".format(resource2)
            ]
        }]
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # TODO: figure out how to change the bucketname
    def change_bucket_name(**kwargs):
        # Rewrite URL, path and signing context so the request addresses the
        # bucket as ":<bucket>" (empty tenant). NOTE(review): the endpoint is
        # hard-coded to http://localhost:8000 — confirm against the test config.
        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
        print(kwargs['request_signer'])
        print(kwargs)
    tenant_client = get_tenant_client()
    # BUG FIX: list_objects_v2 emits the 'ListObjectsV2' event, not
    # 'ListObjects'; with the v1 event name the hook never fired for this call.
    tenant_client.meta.events.register('before-call.s3.ListObjectsV2', change_bucket_name)
    response = tenant_client.list_objects_v2(Bucket=bucket_name)
    eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy_another_bucket():
    """A wildcard-resource policy read back from one bucket works when installed on another."""
    bucket_name = get_new_bucket()
    bucket_name2 = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    client.put_object(Bucket=bucket_name2, Key='abcd', Body='abcd')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "arn:aws:s3:::*",
                "arn:aws:s3:::*/*"
            ],
        }],
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Fetch the stored policy and install the identical document on bucket 2.
    stored_policy = client.get_bucket_policy(Bucket=bucket_name)['Policy']
    client.put_bucket_policy(Bucket=bucket_name2, Policy=stored_policy)

    # Both buckets are now listable by the non-owner alt user.
    alt_client = get_alt_client()
    for name in (bucket_name, bucket_name2):
        response = alt_client.list_objects(Bucket=name)
        eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket with list-objects-v2')
@attr(assertion='succeeds')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_another_bucket():
    """Same as test_bucket_policy_another_bucket but exercising ListObjectsV2."""
    bucket_name = get_new_bucket()
    bucket_name2 = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    client.put_object(Bucket=bucket_name2, Key='abcd', Body='abcd')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                "arn:aws:s3:::*",
                "arn:aws:s3:::*/*"
            ],
        }],
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Fetch the stored policy and install the identical document on bucket 2.
    stored_policy = client.get_bucket_policy(Bucket=bucket_name)['Policy']
    client.put_bucket_policy(Bucket=bucket_name2, Policy=stored_policy)

    # Both buckets are now listable by the non-owner alt user.
    alt_client = get_alt_client()
    for name in (bucket_name, bucket_name2):
        response = alt_client.list_objects_v2(Bucket=name)
        eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put condition operator end with ifExists')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_set_condition_operator_end_with_IfExists():
    """GetObject gated by a StringLikeIfExists condition on aws:Referer.

    Requests whose Referer matches http://www.example.com/* are allowed;
    a Referer that is present but does not match fails the condition, so no
    Allow applies and the request is denied with 403.

    Fix: removed leftover debug aids — boto3.set_stream_logger(name='botocore'),
    which globally enabled botocore wire logging for every subsequent test in
    the run, and a trailing get_bucket_policy + print.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'foo'
    client.put_object(Bucket=bucket_name, Key=key)
    policy = '''{
      "Version":"2012-10-17",
      "Statement": [{
        "Sid": "Allow Public Access to All Objects",
        "Effect": "Allow",
        "Principal": "*",
        "Action": "s3:GetObject",
        "Condition": {
                    "StringLikeIfExists": {
                        "aws:Referer": "http://www.example.com/*"
                    }
        },
        "Resource": "arn:aws:s3:::%s/*"
      }
     ]
    }''' % bucket_name
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy)

    # Matching referer: allowed.
    request_headers={'referer': 'http://www.example.com/'}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # The trailing wildcard also matches a longer path.
    request_headers={'referer': 'http://www.example.com/index.html'}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # the 'referer' headers need to be removed for this one
    #response = client.get_object(Bucket=bucket_name, Key=key)
    #eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Non-matching referer: the IfExists condition fails, so access is denied.
    request_headers={'referer': 'http://example.com'}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    # TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
def _create_simple_tagset(count):
tagset = []
for i in range(count):
tagset.append({'Key': str(i), 'Value': str(i)})
return {'TagSet': tagset}
def _make_random_string(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Get/PutObjTagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_tagging():
    """Tags written via PutObjectTagging are returned verbatim by GetObjectTagging."""
    key = 'testputtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = _create_simple_tagset(2)
    put_resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(put_resp['ResponseMetadata']['HTTPStatusCode'], 200)

    get_resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(get_resp['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test HEAD obj tagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_head_tagging():
    """HEAD on a tagged object reports the tag count in x-amz-tagging-count."""
    key = 'testputtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()
    count = 2

    resp = client.put_object_tagging(Bucket=bucket_name, Key=key,
                                     Tagging=_create_simple_tagset(count))
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    resp = client.head_object(Bucket=bucket_name, Key=key)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    # The header carries the count as a string.
    eq(resp['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed tags')
@attr(assertion='success')
@attr('tagging')
def test_put_max_tags():
    """The maximum of 10 tags per object is accepted and round-trips."""
    key = 'testputmaxtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = _create_simple_tagset(10)
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed tags')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_tags():
    """An 11-tag set exceeds the 10-tag limit and is rejected with InvalidTag."""
    key = 'testputmaxtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = _create_simple_tagset(11)
    e = assert_raises(ClientError, client.put_object_tagging,
                      Bucket=bucket_name, Key=key, Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidTag')

    # The rejected request must not have stored anything.
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(resp['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed k-v size')
@attr(assertion='success')
@attr('tagging')
def test_put_max_kvsize_tags():
    """Tags at the size limits (128-char keys, 256-char values) are accepted."""
    key = 'testputmaxkeysize'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = {'TagSet': [
        {'Key': _make_random_string(128), 'Value': _make_random_string(256)}
        for _ in range(10)
    ]}
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    # Every returned tag must be one we stored (order not asserted).
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    for kv_pair in resp['TagSet']:
        eq((kv_pair in input_tagset['TagSet']), True)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed key size')
@attr(assertion='success')
@attr('tagging')
def test_put_excess_key_tags():
    """A 129-char tag key exceeds the 128-char limit and is rejected with InvalidTag."""
    key = 'testputexcesskeytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = {'TagSet': [
        {'Key': _make_random_string(129), 'Value': _make_random_string(256)}
        for _ in range(10)
    ]}
    e = assert_raises(ClientError, client.put_object_tagging,
                      Bucket=bucket_name, Key=key, Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidTag')

    # The rejected request must not have stored anything.
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(resp['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed val size')
@attr(assertion='success')
@attr('tagging')
def test_put_excess_val_tags():
    """A 257-char tag value exceeds the 256-char limit and is rejected with InvalidTag."""
    key = 'testputexcesskeytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = {'TagSet': [
        {'Key': _make_random_string(128), 'Value': _make_random_string(257)}
        for _ in range(10)
    ]}
    e = assert_raises(ClientError, client.put_object_tagging,
                      Bucket=bucket_name, Key=key, Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidTag')

    # The rejected request must not have stored anything.
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(resp['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PUT modifies existing tags')
@attr(assertion='success')
@attr('tagging')
def test_put_modify_tags():
    """A second PutObjectTagging replaces (does not merge with) the existing set."""
    key = 'testputmodifytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    first = {'TagSet': [{'Key': 'key', 'Value': 'val'},
                        {'Key': 'key2', 'Value': 'val2'}]}
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=first)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['TagSet'], first['TagSet'])

    # Overwrite with a completely different, smaller set.
    second = {'TagSet': [{'Key': 'key3', 'Value': 'val3'}]}
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=second)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['TagSet'], second['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Delete tags')
@attr(assertion='success')
@attr('tagging')
def test_put_delete_tags():
    """DeleteObjectTagging returns 204 and leaves the object with no tags."""
    key = 'testputmodifytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = _create_simple_tagset(2)
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['TagSet'], input_tagset['TagSet'])

    resp = client.delete_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 204)
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(resp['TagSet']), 0)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_anonymous_request():
    """Anonymous browser-style POST with a 'tagging' form field stores the tags."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    url = _get_post_url(bucket_name)
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    key_name = "foo.txt"

    input_tagset = _create_simple_tagset(2)
    # xml_input_tagset is input_tagset rendered by hand as XML; there is no
    # simple way to convert a boto3 tagset to XML like there was in boto2.
    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"

    # Form-field order matters for browser-style POST: 'file' must come last.
    payload = OrderedDict([
        ("key" , key_name),
        ("acl" , "public-read"),
        ("Content-Type" , "text/plain"),
        ("tagging", xml_input_tagset),
        ('file', ('bar')),
    ])
    r = requests.post(url, files = payload)
    eq(r.status_code, 204)

    response = client.get_object(Bucket=bucket_name, Key=key_name)
    eq(_get_body(response), 'bar')
    response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
    eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_authenticated_request():
    """Authenticated (V2-signed policy) browser-style POST with a tagging field.

    Builds a POST policy that whitelists the 'tagging' form field, signs it
    with HMAC-SHA1 (AWS signature v2 POST form flow), uploads, and verifies
    the stored body.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    # Policy valid for 100 minutes from now.
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
    "conditions": [
    {"bucket": bucket_name},
    ["starts-with", "$key", "foo"],
    {"acl": "private"},
    ["starts-with", "$Content-Type", "text/plain"],
    ["content-length-range", 0, 1024],
    # The policy must mention every form field it allows; permit any tagging value.
    ["starts-with", "$tagging", ""]
    ]}
    # xml_input_tagset is the same as `input_tagset = _create_simple_tagset(2)` in xml
    # There is not a simple way to change input_tagset to xml like there is in the boto2 tetss
    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"

    # Base64-encode the policy, then sign it with HMAC-SHA1 of the secret key.
    json_policy_document = json.JSONEncoder().encode(policy_document)
    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
    policy = base64.b64encode(bytes_json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())

    # Field order matters for browser-style POST; 'file' must come last.
    payload = OrderedDict([
    ("key" , "foo.txt"),
    ("AWSAccessKeyId" , aws_access_key_id),\
    ("acl" , "private"),("signature" , signature),("policy" , policy),\
    ("tagging", xml_input_tagset),
    ("Content-Type" , "text/plain"),
    ('file', ('bar'))])
    r = requests.post(url, files = payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test PutObj with tagging headers')
@attr(assertion='success')
@attr('tagging')
def test_put_obj_with_tags():
    """PutObject with an 'x-amz-tagging' header stores the parsed tags.

    The header value 'foo=bar&bar' encodes two tags: foo=bar, and bar with an
    empty value. GetObjectTagging must return exactly those tags.

    Fix: removed the dead 'tagset = tagset' self-assignment from the original.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testtagobj1'
    data = 'A'*100

    # Expected TagSet as returned by the server for the header below.
    tagset = [
        {'Key': 'bar', 'Value': ''},
        {'Key': 'foo', 'Value': 'bar'},
    ]
    put_obj_tag_headers = {
        'x-amz-tagging' : 'foo=bar&bar'
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)

    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), data)

    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(response['TagSet'], tagset)
def _make_arn_resource(path="*"):
return "arn:aws:s3:::{}".format(path)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test GetObjTagging public read')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_get_tags_acl_public():
    """A GetObjectTagging-only policy lets the non-owner alt user read tags."""
    key = 'testputtagsacl'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
    client.put_bucket_policy(Bucket=bucket_name,
                             Policy=make_json_policy("s3:GetObjectTagging", resource))

    input_tagset = _create_simple_tagset(10)
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    resp = get_alt_client().get_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PutObjTagging public write')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_put_tags_acl_public():
    """A PutObjectTagging-only policy lets the non-owner alt user write tags.

    Fix: corrected the attr operation description typo 'public wrote' ->
    'public write'.
    """
    key = 'testputtagsacl'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
    policy_document = make_json_policy("s3:PutObjectTagging", resource)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    input_tagset = _create_simple_tagset(10)
    alt_client = get_alt_client()
    response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # The owner reads back what the alt user wrote.
    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='test deleteobjtagging public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_delete_tags_obj_public():
    """A DeleteObjectTagging-only policy lets the non-owner alt user delete tags."""
    key = 'testputtagsacl'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
    client.put_bucket_policy(Bucket=bucket_name,
                             Policy=make_json_policy("s3:DeleteObjectTagging", resource))

    input_tagset = _create_simple_tagset(10)
    resp = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    resp = get_alt_client().delete_object_tagging(Bucket=bucket_name, Key=key)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 204)

    # All tags must be gone.
    resp = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(resp['TagSet']), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_atomic_upload_return_version_id():
    """PutObject returns a VersionId only on versioning-enabled buckets."""
    # Versioning enabled: a version id is returned and matches the listing.
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'bar'
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    response = client.put_object(Bucket=bucket_name, Key=key)
    version_id = response['VersionId']
    for version in client.list_object_versions(Bucket=bucket_name)['Versions']:
        eq(version['VersionId'], version_id)

    # Versioning never configured: no VersionId in the response.
    bucket_name = get_new_bucket()
    response = client.put_object(Bucket=bucket_name, Key='baz')
    eq(('VersionId' in response), False)

    # Versioning suspended: likewise no VersionId.
    bucket_name = get_new_bucket()
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    response = client.put_object(Bucket=bucket_name, Key='baz')
    eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_multipart_upload_return_version_id():
    """CompleteMultipartUpload returns a VersionId only on versioning-enabled buckets."""
    content_type = 'text/bla'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'baz'}
    client = get_client()

    def upload(bucket, key):
        # Run a full multipart upload and return the completion response.
        (upload_id, data, parts) = _multipart_upload(bucket_name=bucket, key=key,
                                                     size=objlen, client=client,
                                                     content_type=content_type,
                                                     metadata=metadata)
        return client.complete_multipart_upload(Bucket=bucket, Key=key,
                                                UploadId=upload_id,
                                                MultipartUpload={'Parts': parts})

    # Versioning enabled: a version id is returned and matches the listing.
    bucket_name = get_new_bucket()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    response = upload(bucket_name, 'bar')
    version_id = response['VersionId']
    for version in client.list_object_versions(Bucket=bucket_name)['Versions']:
        eq(version['VersionId'], version_id)

    # Versioning never configured: no VersionId in the response.
    bucket_name = get_new_bucket()
    response = upload(bucket_name, 'baz')
    eq(('VersionId' in response), False)

    # Versioning suspended: likewise no VersionId.
    bucket_name = get_new_bucket()
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    response = upload(bucket_name, 'foo')
    eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_existing_tag():
    """GetObject allowed only when the object carries tag security=public."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObject", resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Tag the three objects: only 'publictag' satisfies the condition
    # ('invalidtag' has the right value under the wrong tag key).
    per_key_tags = (
        ('publictag', [{'Key': 'security', 'Value': 'public'},
                       {'Key': 'foo', 'Value': 'bar'}]),
        ('privatetag', [{'Key': 'security', 'Value': 'private'}]),
        ('invalidtag', [{'Key': 'security1', 'Value': 'public'}]),
    )
    for obj_key, tags in per_key_tags:
        resp = client.put_object_tagging(Bucket=bucket_name, Key=obj_key,
                                         Tagging={'TagSet': tags})
        eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    resp = alt_client.get_object(Bucket=bucket_name, Key='publictag')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    for denied_key in ('privatetag', 'invalidtag'):
        e = assert_raises(ClientError, alt_client.get_object,
                          Bucket=bucket_name, Key=denied_key)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_tagging_existing_tag():
    """GetObjectTagging allowed only when the object carries tag security=public."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObjectTagging", resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Tag the three objects: only 'publictag' satisfies the condition
    # ('invalidtag' has the right value under the wrong tag key).
    per_key_tags = (
        ('publictag', [{'Key': 'security', 'Value': 'public'},
                       {'Key': 'foo', 'Value': 'bar'}]),
        ('privatetag', [{'Key': 'security', 'Value': 'private'}]),
        ('invalidtag', [{'Key': 'security1', 'Value': 'public'}]),
    )
    for obj_key, tags in per_key_tags:
        resp = client.put_object_tagging(Bucket=bucket_name, Key=obj_key,
                                         Tagging={'TagSet': tags})
        eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    resp = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    # A get object itself should fail since we allowed only GetObjectTagging.
    e = assert_raises(ClientError, alt_client.get_object,
                      Bucket=bucket_name, Key='publictag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)

    for denied_key in ('privatetag', 'invalidtag'):
        e = assert_raises(ClientError, alt_client.get_object_tagging,
                          Bucket=bucket_name, Key=denied_key)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on put object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_tagging_existing_tag():
    """PutObjectTagging is granted only for objects already tagged
    security=public (s3:ExistingObjectTag condition)."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    # Allow s3:PutObjectTagging on any key whose current 'security' tag
    # equals 'public'.
    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:PutObjectTagging", resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Seed the initial tags as the bucket owner.
    input_tagset = {'TagSet': [{'Key': 'security', 'Value': 'public'},
                               {'Key': 'foo', 'Value': 'bar'}]}
    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag',
                                         Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    input_tagset = {'TagSet': [{'Key': 'security', 'Value': 'private'}]}
    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag',
                                         Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    # NOTE: PutObjectTagging replaces the whole tag set, so a PUT that
    # drops the tag required by the condition makes every later tagging
    # PUT on that object fail the policy check.
    testtagset1 = [{'Key': 'security', 'Value': 'public'},
                   {'Key': 'foo', 'Value': 'bar'}]
    input_tagset = {'TagSet': testtagset1}
    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag',
                                             Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # 'privatetag' does not carry security=public, so the alt user is denied.
    e = assert_raises(ClientError, alt_client.put_object_tagging,
                      Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)

    # Flip the tag to private; still allowed because the object is tagged
    # public at the time of this request.
    testtagset2 = [{'Key': 'security', 'Value': 'private'}]
    input_tagset = {'TagSet': testtagset2}
    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag',
                                             Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Restoring the original tags must now fail: the object is no longer public.
    input_tagset = {'TagSet': testtagset1}
    e = assert_raises(ClientError, alt_client.put_object_tagging,
                      Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source():
    """Copies into the destination bucket are allowed only when the copy
    source matches <src-bucket>/public/* (s3:x-amz-copy-source)."""
    bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
    client = get_client()

    # Make the source bucket world-readable.
    src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    client.put_bucket_policy(Bucket=bucket_name,
                             Policy=make_json_policy("s3:GetObject", src_resource))

    # Destination bucket: PUTs allowed only when copying from public/.
    bucket_name2 = get_new_bucket()
    tag_conditional = {"StringLike": {
        "s3:x-amz-copy-source" : bucket_name + "/public/*"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
    policy_document = make_json_policy("s3:PutObject", resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document)

    alt_client = get_alt_client()
    alt_client.copy_object(Bucket=bucket_name2,
                           CopySource={'Bucket': bucket_name, 'Key': 'public/foo'},
                           Key='new_foo')
    # Readable because the alt user still owns the copied object; see the
    # grants-with-policy tests for transferring ownership properly.
    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
    eq(_get_body(response), 'public/foo')

    alt_client.copy_object(Bucket=bucket_name2,
                           CopySource={'Bucket': bucket_name, 'Key': 'public/bar'},
                           Key='new_foo2')
    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
    eq(_get_body(response), 'public/bar')

    # A private/ source does not match the StringLike pattern.
    check_access_denied(alt_client.copy_object, Bucket=bucket_name2,
                        CopySource={'Bucket': bucket_name, 'Key': 'private/foo'},
                        Key='new_foo2')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source_meta():
    """Copies are allowed only when the request carries
    x-amz-metadata-directive: COPY (StringEquals condition)."""
    src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
    client = get_client()

    # Make the source bucket world-readable.
    src_resource = _make_arn_resource("{}/{}".format(src_bucket_name, "*"))
    client.put_bucket_policy(Bucket=src_bucket_name,
                             Policy=make_json_policy("s3:GetObject", src_resource))

    # Destination bucket: PUT only with the COPY metadata directive.
    bucket_name = get_new_bucket()
    tag_conditional = {"StringEquals": {
        "s3:x-amz-metadata-directive" : "COPY"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:PutObject", resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    alt_client = get_alt_client()

    # Force the COPY directive header onto the request.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(
        {"x-amz-metadata-directive": "COPY"}))
    alt_client.meta.events.register('before-call.s3.CopyObject', lf)
    alt_client.copy_object(Bucket=bucket_name,
                           CopySource={'Bucket': src_bucket_name, 'Key': 'public/foo'},
                           Key='new_foo')
    # Readable because the alt user still owns the copied object; see the
    # grants-with-policy tests for transferring ownership properly.
    response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
    eq(_get_body(response), 'public/foo')

    # Strip the directive again; without it the condition is not met.
    def remove_header(**kwargs):
        if ("x-amz-metadata-directive" in kwargs['params']['headers']):
            del kwargs['params']['headers']["x-amz-metadata-directive"]
    alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)

    check_access_denied(alt_client.copy_object, Bucket=bucket_name,
                        CopySource={'Bucket': src_bucket_name, 'Key': 'public/bar'},
                        Key='new_foo2', Metadata={"foo": "bar"})
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with canned-acl not to be public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_acl():
    """Allow PutObject in general but Deny it whenever the request carries
    a public-* canned ACL (StringLike on s3:x-amz-acl)."""
    bucket_name = get_new_bucket()
    client = get_client()

    # The Allow statement grants PutObject; the Deny statement rejects any
    # request whose x-amz-acl header matches public* (public-read or
    # public-read-write).
    conditional = {"StringLike": {
        "s3:x-amz-acl" : "public*"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy = Policy()
    allow_put = Statement("s3:PutObject", resource)
    deny_public = Statement("s3:PutObject", resource, effect="Deny",
                            condition=conditional)
    policy_document = policy.add_statement(allow_put).add_statement(deny_public).to_json()
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    alt_client = get_alt_client()

    # No ACL header at all: the Deny does not apply, so the PUT succeeds.
    key1 = 'private-key'
    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Inject a public-read canned ACL header: the Deny statement wins.
    key2 = 'public-key'
    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-acl": "public-read"}))
    alt_client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name,
                      Key=key2, Body=key2)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with amz-grant back to bucket-owner')
@attr(assertion='success')
@attr('bucket-policy')
def test_bucket_policy_put_obj_grant():
    """Bucket 1 only accepts uploads that grant full control back to the
    bucket owner; bucket 2 has no such condition, so the uploader keeps
    ownership of its key."""
    bucket_name = get_new_bucket()
    bucket_name2 = get_new_bucket()
    client = get_client()

    # Normally the uploader owns the uploaded key. The first bucket's
    # policy conditions PutObject on an x-amz-grant-full-control header
    # naming the main (bucket-owning) user; the second bucket's policy is
    # unconditional.
    main_user_id = get_main_user_id()
    alt_user_id = get_alt_user_id()
    owner_id_str = "id=" + main_user_id

    s3_conditional = {"StringEquals": {
        "s3:x-amz-grant-full-control" : owner_id_str
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:PutObject", resource,
                                       conditions=s3_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    resource2 = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
    client.put_bucket_policy(Bucket=bucket_name2,
                             Policy=make_json_policy("s3:PutObject", resource2))

    alt_client = get_alt_client()

    # Upload to bucket 1 with the grant header -> allowed.
    key1 = 'key1'
    lf = (lambda **kwargs: kwargs['params']['headers'].update(
        {"x-amz-grant-full-control" : owner_id_str}))
    alt_client.meta.events.register('before-call.s3.PutObject', lf)
    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Drop the grant header before uploading to bucket 2.
    def remove_header(**kwargs):
        if ("x-amz-grant-full-control" in kwargs['params']['headers']):
            del kwargs['params']['headers']["x-amz-grant-full-control"]
    alt_client.meta.events.register('before-call.s3.PutObject', remove_header)

    key2 = 'key2'
    response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # The main user can read key1's ACL (control was granted back) ...
    acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
    # ... but not key2's, whose ownership stayed with the alt user.
    check_access_denied(client.get_object_acl, Bucket=bucket_name2, Key=key2)
    acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)

    eq(acl1_response['Grants'][0]['Grantee']['ID'], main_user_id)
    eq(acl2_response['Grants'][0]['Grantee']['ID'], alt_user_id)
@attr(resource='object')
@attr(method='put')
@attr(operation='Deny put obj requests without encryption')
@attr(assertion='success')
@attr('encryption')
@attr('bucket-policy')
# TODO: remove this 'fails_on_rgw' once I get the test passing
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_enc():
    """Deny PutObject requests that are not encrypted with AES256.

    Two Deny statements: one for requests whose
    x-amz-server-side-encryption header is not AES256, and one (Null
    condition) for requests carrying no such header at all.

    Fix: removed a leftover ``boto3.set_stream_logger(name='botocore')``
    debug call — it globally enabled botocore wire logging for every
    subsequent test in the run — along with commented-out py2 debug code.
    """
    bucket_name = get_new_bucket()
    client = get_v2_client()

    # Deny any PUT whose SSE algorithm is not AES256 ...
    deny_incorrect_algo = {
        "StringNotEquals": {
            "s3:x-amz-server-side-encryption": "AES256"
        }
    }
    # ... and any PUT that omits the SSE header entirely.
    deny_unencrypted_obj = {
        "Null" : {
            "s3:x-amz-server-side-encryption": "true"
        }
    }
    p = Policy()
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
    policy_document = p.add_statement(s1).add_statement(s2).to_json()
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    key1_str = 'testobj'
    # No encryption headers -> denied by the Null condition.
    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)

    sse_client_headers = {
        'x-amz-server-side-encryption' : 'AES256',
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    # TODO: this currently comes back as a 400 on the rgw instead of
    # succeeding — compare against a boto2 request trace to debug.
    client.put_object(Bucket=bucket_name, Key=key1_str)
@attr(resource='object')
@attr(method='put')
@attr(operation='put obj with RequestObjectTag')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_request_obj_tag():
    """PutObject is allowed only when the request tags the object with
    security=public (s3:RequestObjectTag condition).

    Bug fix: the x-amz-tagging header hook was registered on the owner's
    ``client`` while the PUT was issued with ``alt_client``, so the
    request never carried the tag and could not satisfy the condition.
    """
    bucket_name = get_new_bucket()
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:RequestObjectTag/security" : "public"
    }}
    p = Policy()
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
    policy_document = p.add_statement(s1).to_json()
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    alt_client = get_alt_client()
    key1_str = 'testobj'
    # Without the tagging header the Allow condition is not met.
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)

    # Attach the tag header to the client that actually issues the PUT.
    headers = {"x-amz-tagging" : "security=public"}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    alt_client.meta.events.register('before-call.s3.PutObject', lf)
    alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object acl')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_acl_existing_tag():
    """GetObjectAcl is allowed only for objects tagged security=public.

    Fixes a copy-paste from the GetObjectTagging variant of this test:
    the denied-path checks now call get_object_acl — the operation this
    policy actually covers — instead of get_object_tagging, and the
    comment no longer claims the policy allows GetObjectTagging.
    """
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObjectAcl",
                                       resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Tag the three objects: matching value, wrong value, wrong key.
    input_tagset = {'TagSet': [{'Key': 'security', 'Value': 'public'},
                               {'Key': 'foo', 'Value': 'bar'}]}
    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag',
                                         Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    input_tagset = {'TagSet': [{'Key': 'security', 'Value': 'private'}]}
    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag',
                                         Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    input_tagset = {'TagSet': [{'Key': 'security1', 'Value': 'public'}]}
    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag',
                                         Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # A plain GET must fail: the policy allows only GetObjectAcl.
    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)

    # ACL reads on objects that do not satisfy the condition are denied.
    e = assert_raises(ClientError, alt_client.get_object_acl, Bucket=bucket_name, Key='privatetag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)

    e = assert_raises(ClientError, alt_client.get_object_acl, Bucket=bucket_name, Key='invalidtag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with defalut retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_lock():
    """GOVERNANCE/Days and COMPLIANCE/Years default-retention configs are
    both accepted, and object lock implies versioning is enabled."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}}}
    response = client.put_object_lock_configuration(
        Bucket=bucket_name, ObjectLockConfiguration=conf)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'COMPLIANCE', 'Years': 1}}}
    response = client.put_object_lock_configuration(
        Bucket=bucket_name, ObjectLockConfiguration=conf)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Creating the bucket with object lock turned versioning on.
    response = client.get_bucket_versioning(Bucket=bucket_name)
    eq(response['Status'], 'Enabled')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_bucket():
    """Setting a lock configuration on a bucket created without object
    lock fails with 409 InvalidBucketState."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)  # note: no object lock

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}}}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 409)
    eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with days and years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_with_days_and_years():
    """A DefaultRetention specifying both Days and Years is rejected as
    MalformedXML — the period must be one or the other."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE',
                                          'Days': 1,
                                          'Years': 1}}}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid days')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_days():
    """A retention period of 0 days is rejected with
    InvalidRetentionPeriod — Days must be a positive integer."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 0}}}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_years():
    """A negative Years value is rejected with InvalidRetentionPeriod."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Years': -1}}}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_mode():
    """An unknown or lowercase retention Mode is rejected as MalformedXML.

    Bug fix: this test was named test_object_lock_put_obj_lock_invalid_years,
    duplicating the preceding test's name; the later definition shadowed
    the real invalid-years test so it was never collected or run.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    # Completely unknown mode string.
    conf = {'ObjectLockEnabled':'Enabled',
            'Rule': {
                'DefaultRetention':{
                    'Mode':'abc',
                    'Years':1
                }
            }}
    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')

    # Modes are case-sensitive: 'governance' is not 'GOVERNANCE'.
    conf = {'ObjectLockEnabled':'Enabled',
            'Rule': {
                'DefaultRetention':{
                    'Mode':'governance',
                    'Years':1
                }
            }}
    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_status():
    """ObjectLockEnabled must be 'Enabled'; 'Disabled' is MalformedXML.

    Bug fix: the first decorator was written ``attr(resource='bucket')``
    without the leading '@' — a no-op expression statement — so the
    resource attribute was never applied to this test.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    conf = {'ObjectLockEnabled':'Disabled',
            'Rule': {
                'DefaultRetention':{
                    'Mode':'GOVERNANCE',
                    'Years':1
                }
            }}
    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test suspend versioning when object lock enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_suspend_versioning():
    """Versioning cannot be suspended on an object-lock bucket: expect
    409 InvalidBucketState.

    Bug fix: the first decorator was written ``attr(resource='bucket')``
    without the leading '@' — a no-op expression statement — so the
    resource attribute was never applied to this test.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 409)
    eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_lock():
    """get_object_lock_configuration returns exactly the stored config."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}}}
    client.put_object_lock_configuration(Bucket=bucket_name,
                                         ObjectLockConfiguration=conf)

    response = client.get_object_lock_configuration(Bucket=bucket_name)
    eq(response['ObjectLockConfiguration'], conf)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_lock_invalid_bucket():
    """Reading the lock configuration of a bucket created without object
    lock fails with 404 ObjectLockConfigurationNotFoundError."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)  # note: no object lock

    e = assert_raises(ClientError, client.get_object_lock_configuration,
                      Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'ObjectLockConfigurationNotFoundError')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention():
    """put_object_retention succeeds on an object-lock-enabled bucket.

    Metadata fix: the @attr(method=...) tag said 'get' although this is
    a PUT test; corrected to 'put' so attribute filtering works.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']
    # GOVERNANCE mode so cleanup can bypass the retention below.
    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
    response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_bucket():
    """put_object_retention on a bucket without object lock fails with
    400 InvalidRequest."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)  # note: no object lock
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    e = assert_raises(ClientError, client.put_object_retention,
                      Bucket=bucket_name, Key=key, Retention=retention)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_mode():
    """Retention modes are a fixed, case-sensitive set: both a lowercase
    'governance' and an unknown 'abc' are rejected as MalformedXML."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)

    until = datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)
    for bad_mode in ('governance', 'abc'):
        retention = {'Mode': bad_mode, 'RetainUntilDate': until}
        e = assert_raises(ClientError, client.put_object_retention,
                          Bucket=bucket_name, Key=key, Retention=retention)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 400)
        eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_retention():
    """get_object_retention returns exactly the retention that was set."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
    response = client.get_object_retention(Bucket=bucket_name, Key=key)
    eq(response['Retention'], retention)

    # Cleanup: GOVERNANCE retention can be bypassed for the delete.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_retention_invalid_bucket():
    """get_object_retention on a bucket without object lock fails with
    400 InvalidRequest."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)  # note: no object lock
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)

    e = assert_raises(ClientError, client.get_object_retention,
                      Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with version id')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_versionid():
    """Retention set with an explicit VersionId applies to that version."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    # Two PUTs so the key has multiple versions; lock only the latest.
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key,
                                VersionId=version_id, Retention=retention)
    response = client.get_object_retention(Bucket=bucket_name, Key=key,
                                           VersionId=version_id)
    eq(response['Retention'], retention)

    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to override default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_override_default_retention():
    """A per-object retention overrides the bucket's default retention."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)

    # Bucket default: 1 day of GOVERNANCE retention.
    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}}}
    client.put_object_lock_configuration(Bucket=bucket_name,
                                         ObjectLockConfiguration=conf)

    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']

    # Explicit per-object retention replaces the inherited default.
    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
    response = client.get_object_retention(Bucket=bucket_name, Key=key)
    eq(response['Retention'], retention)

    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to increase retention period')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_increase_period():
    """Extending a retention period is always allowed."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']

    retention1 = {'Mode': 'GOVERNANCE',
                  'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention1)
    # A later RetainUntilDate extends the period — no bypass needed.
    retention2 = {'Mode': 'GOVERNANCE',
                  'RetainUntilDate': datetime.datetime(2030, 1, 3, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)

    response = client.get_object_retention(Bucket=bucket_name, Key=key)
    eq(response['Retention'], retention2)
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period():
    """Shortening a GOVERNANCE retention without the bypass header is
    denied with 403 AccessDenied."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 3, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)

    # Attempt to pull the date earlier without BypassGovernanceRetention.
    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    e = assert_raises(ClientError, client.put_object_retention,
                      Bucket=bucket_name, Key=key, Retention=retention)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')

    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period with bypass header')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period_bypass():
    """Shortening a GOVERNANCE retention succeeds when the request sets
    BypassGovernanceRetention."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 3, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)

    # With the bypass flag the earlier date is accepted.
    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention,
                                BypassGovernanceRetention=True)
    response = client.get_object_retention(Bucket=bucket_name, Key=key)
    eq(response['Retention'], retention)

    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with retention')
@attr(assertion='retention period make effects')
@attr('object-lock')
def test_object_lock_delete_object_with_retention():
    """Deleting a retained version is denied unless the request bypasses
    GOVERNANCE retention."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)

    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)

    # Plain versioned delete is blocked by the retention.
    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name,
                      Key=key, VersionId=response['VersionId'])
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')

    # Bypassing governance retention lets the delete through.
    response = client.delete_object(Bucket=bucket_name, Key=key,
                                    VersionId=response['VersionId'],
                                    BypassGovernanceRetention=True)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_legal_hold():
    """Legal hold can be switched ON and back OFF on an object-lock bucket."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    legal_hold = {'Status': 'ON'}
    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # Turning the hold back OFF must also succeed (and unblocks later cleanup).
    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_bucket():
    """Putting a legal hold on a bucket created WITHOUT object lock must fail
    with 400 InvalidRequest."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    # Note: no ObjectLockEnabledForBucket here — that is the point of the test.
    client.create_bucket(Bucket=bucket_name)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    legal_hold = {'Status': 'ON'}
    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_status():
    """A LegalHold status other than ON/OFF must be rejected as MalformedXML."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    legal_hold = {'Status': 'abc'}  # invalid on purpose
    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_legal_hold():
    """get_object_legal_hold must reflect the most recently stored status."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    legal_hold = {'Status': 'ON'}
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
    eq(response['LegalHold'], legal_hold)
    # Flip it OFF and verify the read-back follows.
    legal_hold_off = {'Status': 'OFF'}
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
    eq(response['LegalHold'], legal_hold_off)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_legal_hold_invalid_bucket():
    """Reading a legal hold from a bucket without object lock must fail
    with 400 InvalidRequest."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    # Bucket intentionally created without ObjectLockEnabledForBucket.
    client.create_bucket(Bucket=bucket_name)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold on')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_on():
    """While a legal hold is ON, a versioned delete must fail with 403."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
    # Release the hold so the bucket can be garbage-collected by test teardown.
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold off')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_off():
    """With legal hold explicitly OFF a versioned delete succeeds (204)."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object metadata')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_metadata():
    """head_object must surface ObjectLockMode/RetainUntilDate/LegalHoldStatus."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    legal_hold = {'Status': 'ON'}
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ObjectLockMode'], retention['Mode'])
    eq(response['ObjectLockRetainUntilDate'], retention['RetainUntilDate'])
    eq(response['ObjectLockLegalHoldStatus'], legal_hold['Status'])
    # Cleanup: drop the hold, then bypass governance to delete the version
    # (head_object on a versioned bucket includes VersionId).
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold and retention when uploading object')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_uploading_obj():
    """Lock mode, retain-until date and legal hold supplied on put_object
    itself must round-trip through head_object."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
                      ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ObjectLockMode'], 'GOVERNANCE')
    eq(response['ObjectLockRetainUntilDate'], datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
    eq(response['ObjectLockLegalHoldStatus'], 'ON')
    # Cleanup: release the hold, then bypass the governance retention.
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: the latest ETag')
@attr(assertion='succeeds')
def test_copy_object_ifmatch_good():
    """Copy with CopySourceIfMatch set to the source's real ETag must succeed."""
    bucket_name = get_new_bucket()
    client = get_client()
    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
    response = client.get_object(Bucket=bucket_name, Key='bar')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: bogus ETag')
@attr(assertion='fails 412')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
@attr('fails_on_rgw')
def test_copy_object_ifmatch_failed():
    """Copy with a non-matching CopySourceIfMatch ETag must fail 412."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: the latest ETag')
@attr(assertion='fails 412')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
@attr('fails_on_rgw')
def test_copy_object_ifnonematch_good():
    """Copy with CopySourceIfNoneMatch equal to the real ETag must fail 412."""
    bucket_name = get_new_bucket()
    client = get_client()
    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: bogus ETag')
@attr(assertion='succeeds')
def test_copy_object_ifnonematch_failed():
    """Copy with a non-matching CopySourceIfNoneMatch ETag must succeed."""
    bucket_name = get_new_bucket()
    client = get_client()
    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
    response = client.get_object(Bucket=bucket_name, Key='bar')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='read to invalid key')
@attr(assertion='fails 400')
# TODO: results in a 404 instead of 400 on the RGW
@attr('fails_on_rgw')
def test_object_read_unreadable():
    """GET on a key containing raw non-UTF8 bytes must be rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(e.response['Error']['Message'], 'Couldn\'t parse the specified URI.')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test User Policy')
@attr(assertion='succeeds')
@attr('user-policy')
def test_user_policy():
    """Attaching an allow-all IAM user policy via the tenant IAM client
    must succeed (success == no exception raised)."""
    client = get_tenant_iam_client()
    policy_document = json.dumps(
        {"Version":"2012-10-17",
         "Statement": {
             "Effect":"Allow",
             "Action":"*",
             "Resource":"*"}}
    )
    client.put_user_policy(
        PolicyDocument= policy_document,
        PolicyName='AllAccessPolicy',
        UserName=get_tenant_user_id(),
    )
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a new bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_bucket_policy_status():
    """A freshly created bucket must report IsPublic == False."""
    bucket_name = get_new_bucket()
    client = get_client()
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public acl bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_public_acl_bucket_policy_status():
    """A public-read canned ACL must make the bucket report IsPublic == True."""
    bucket_name = get_new_bucket()
    client = get_client()  # fix: dropped duplicated get_client() call
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a authenticated acl bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_authpublic_acl_bucket_policy_status():
    """An authenticated-read canned ACL also counts as public (IsPublic True)."""
    bucket_name = get_new_bucket()
    client = get_client()  # fix: dropped duplicated get_client() call
    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_publicpolicy_acl_bucket_policy_status():
    """A bucket policy with Principal "*" and no Condition must flip
    IsPublic from False to True."""
    bucket_name = get_new_bucket()
    client = get_client()  # fix: dropped duplicated get_client() call
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],False)
    resource1 = "arn:aws:s3:::" + bucket_name
    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                resource1,  # fix: redundant "{}".format() wrappers removed
                resource2
            ]
        }]
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_nonpublicpolicy_acl_bucket_policy_status():
    """A Principal "*" policy restricted by an IpAddress Condition is NOT
    public: IsPublic must stay False."""
    bucket_name = get_new_bucket()
    client = get_client()  # fix: dropped duplicated get_client() call
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],False)
    resource1 = "arn:aws:s3:::" + bucket_name
    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [
                resource1,  # fix: redundant "{}".format() wrappers removed
                resource2
            ],
            "Condition": {
                "IpAddress":
                {"aws:SourceIp": "10.0.0.0/32"}
            }
        }]
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_nonpublicpolicy_deny_bucket_policy_status():
    """An Allow statement with NotPrincipal effectively grants everyone else
    access, so the bucket must report IsPublic == True."""
    bucket_name = get_new_bucket()
    client = get_client()
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],False)
    resource1 = "arn:aws:s3:::" + bucket_name
    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "NotPrincipal": {"AWS": "arn:aws:iam::s3tenant1:root"},
            "Action": "s3:ListBucket",
            "Resource": [
                "{}".format(resource1),
                "{}".format(resource2)
            ],
        }]
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    resp = client.get_bucket_policy_status(Bucket=bucket_name)
    eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_default_public_block():
    """A new bucket's PublicAccessBlock configuration defaults to all False."""
    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
    bucket_name = get_new_bucket()
    client = get_client()
    resp = client.get_public_access_block(Bucket=bucket_name)
    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], False)
    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], False)
    eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], False)
    eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_put_public_block():
    """put_public_access_block settings must round-trip through the GET call."""
    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
    bucket_name = get_new_bucket()
    client = get_client()
    access_conf = {'BlockPublicAcls': True,
                   'IgnorePublicAcls': True,
                   'BlockPublicPolicy': True,
                   'RestrictPublicBuckets': False}
    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
    resp = client.get_public_access_block(Bucket=bucket_name)
    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
    eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], access_conf['IgnorePublicAcls'])
    eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], access_conf['RestrictPublicBuckets'])
@attr(resource='bucket')
@attr(method='put')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_put_bucket_acls():
    """With BlockPublicAcls enabled, every public canned bucket ACL
    (public-read, public-read-write, authenticated-read) must be refused 403."""
    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
    bucket_name = get_new_bucket()
    client = get_client()
    access_conf = {'BlockPublicAcls': True,
                   'IgnorePublicAcls': False,
                   'BlockPublicPolicy': True,
                   'RestrictPublicBuckets': False}
    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
    resp = client.get_public_access_block(Bucket=bucket_name)
    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_object_canned_acls():
    """With BlockPublicAcls enabled, object uploads carrying any public canned
    ACL (public-read, public-read-write, authenticated-read) must be refused 403.

    Fix: the second upload previously repeated 'public-read', leaving
    'public-read-write' untested (the bucket-ACL twin of this test covers
    the full trio)."""
    bucket_name = get_new_bucket()
    client = get_client()
    access_conf = {'BlockPublicAcls': True,
                   'IgnorePublicAcls': False,
                   'BlockPublicPolicy': False,
                   'RestrictPublicBuckets': False}
    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
    # resp = client.get_public_access_block(Bucket=bucket_name)
    # eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
    # eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
    #FIXME: use empty body until #42208
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read-write')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_policy():
    """With BlockPublicPolicy enabled, attaching a public bucket policy
    must be denied."""
    bucket_name = get_new_bucket()
    client = get_client()
    access_conf = {'BlockPublicAcls': False,
                   'IgnorePublicAcls': False,
                   'BlockPublicPolicy': True,
                   'RestrictPublicBuckets': False}
    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObject",
                                       resource)
    check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='ignore public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_ignore_public_acls():
    """With IgnorePublicAcls enabled, previously-public ACLs stop granting
    anonymous/alt access even though the ACLs themselves remain set."""
    bucket_name = get_new_bucket()
    client = get_client()
    alt_client = get_alt_client()
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
    # Public bucket should be accessible
    alt_client.list_objects(Bucket=bucket_name)
    client.put_object(Bucket=bucket_name,Key='key1',Body='abcde',ACL='public-read')
    resp=alt_client.get_object(Bucket=bucket_name, Key='key1')
    eq(_get_body(resp), 'abcde')
    access_conf = {'BlockPublicAcls': False,
                   'IgnorePublicAcls': True,
                   'BlockPublicPolicy': False,
                   'RestrictPublicBuckets': False}
    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
    # IgnorePublicACLs is true, so regardless this should behave as a private bucket
    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1')
|
autorunner.py | import random
from multiprocessing import Process
from parser import drawState, from_python
from states import State, JoinResult
from orbiter import OrbiterStrategy
from swarmer import SwarmerStrategy
from interaction import send2
from orbit_util import sign, trace_orbit
# Handshake with the game server: request [1, 0] returns two player
# attach payloads (keys) that are later passed to the player processes.
_, [p1, p2] = send2([1, 0])
def survivor_strategy(state):
    """Simple survival bot: thrust each of our ships away from the planet's
    dominant axis and shoot at a random enemy ship's predicted position.

    `state` is the raw (nested-list) game state; indices below follow the
    ICFPC 2020 protocol layout.
    """
    pid = state[2][1]  # our player id
    actions = []
    my_ships = []
    enemy_ships = []
    # state[3][2] is the list of (ship, applied-commands) pairs.
    for obj in state[3][2]:
        if obj[0][0] == pid:
            print(obj)
            my_ships.append(obj)
        else:
            enemy_ships.append(obj)
    for my_ship in my_ships:
        my_pos = my_ship[0][2]
        # Burn against the larger coordinate to fight gravity toward the planet.
        thrust = (-sign(my_pos[0]), 0) if abs(my_pos[0]) > abs(my_pos[1]) else (0, -sign(my_pos[1]))
        actions.append([0, my_ship[0][1], thrust])
        # NOTE(review): indentation was lost in this copy; the shoot block is
        # assumed to be inside the per-ship loop (it references my_ship) — confirm.
        if enemy_ships:
            enemy_ship = random.choice(enemy_ships)
            enemy_pos = enemy_ship[0][2]
            enemy_speed = enemy_ship[0][3]
            # Command 2 = shoot at the enemy's position extrapolated one tick.
            actions.append([2, my_ship[0][1], (enemy_pos[0] + enemy_speed[0], enemy_pos[1] + enemy_speed[1]), 5])
    return actions
def id_strategy(state):
    """Debug strategy: pretty-print the parsed state and issue no commands."""
    print('= ID STRATEGY =')
    State.parse(state)
    print('===============')
    return []
def die_strategy(state):
    """Debug strategy: immediately self-destruct our first ship."""
    print('=====HANG======')
    st = State.parse(state)
    ship = st.player_ships(st.me)[0]
    print('===============')
    return [ship.do_explode()]
def move_towards(x, vx, tx):
    """1-D bang-bang controller for integer position/velocity.

    x  - current position; vx - current velocity; tx - target position.
    Returns the optimal thrust value for this tick: speeds up only while a
    later full stop is still possible without overshooting the target, and
    brakes when continuing would overshoot.
    """
    if x == tx:
        # Already on target: thrust against any residual velocity.
        return sign(vx)
    direction = sign(tx - x)
    if direction == -1:
        # Mirror the problem so we always move in the +x direction.
        x, vx, tx = -x, -vx, -tx

    def stoppable(pos, vel):
        # Distance covered while decelerating from vel to 0 is vel*(vel-1)/2.
        return pos + vel * (vel - 1) // 2 <= tx

    if stoppable(x + vx + 1, vx + 1):
        return -direction   # safe to accelerate
    if stoppable(x + vx, vx):
        return 0            # coast
    return direction        # must brake

# Sanity checks for the controller (run at import time).
assert move_towards(1, 0, 2) == -1
assert move_towards(1, 1, 2) == 0
assert move_towards(1, 3, 2) == 1
assert move_towards(1, 3, 7) == 0
assert move_towards(1, 3, 6) == 1
assert move_towards(1, 3, 20) == -1
class RotatingStrategy(object):
    """Strategy that steers the first of our ships along a circular orbit
    midway between the planet surface and the field edge, rotating 90°
    ahead of its current position each tick."""

    def __init__(self):
        self.field1 = []
        self.field2 = {}

    def apply(self, state):
        self.field1.append('blablabla')
        self.field2['abc'] = 'def'
        print('=====ROTATE====')
        parsed = State.parse(state)
        print(parsed)
        ship = parsed.player_ships(parsed.me)[0]
        # Target radius: halfway between planet surface and field boundary.
        orbit_radius = (parsed.field_size + parsed.planet_size) / 2
        # Rotate our position 90° to get the next waypoint, then scale it
        # onto the target orbit (Chebyshev normalization).
        target_x, target_y = -ship.y, ship.x
        scale = max(abs(target_x), abs(target_y))
        target_x = orbit_radius * target_x / scale
        target_y = orbit_radius * target_y / scale
        thrust_x = move_towards(ship.x, ship.vx, target_x)
        thrust_y = move_towards(ship.y, ship.vy, target_y)
        print('===============')
        if (thrust_x or thrust_y) and ship.fuel:
            return [ship.do_thrust(thrust_x, thrust_y)]
        return []
def player(id, key, strategy):
    """Run one player's game loop: join, pick starting stats, then apply
    `strategy` each tick until the server reports game over (state[1] == 2).

    Args:
        id: player index, used only for output file naming.
        key: attach key obtained from the [1, 0] handshake.
        strategy: object with pick_stats(join_result) and apply(state).
    """
    res = send2([2, key, [103652820, 192496425430]])  # JOIN with magic numbers
    joinres = JoinResult.parse(res)
    total = joinres.budget
    # Fake state used only for (currently disabled) gif rendering below.
    fake_state = from_python(
        [6, [0, 10, -1, id, 0, 2, [], [], 4, [], [256, 1, [total, 1, 64], [16, 128], []], [], []], 9, []])
    print(f'Send 2 res: {res}, available: {total}')
    initial_stats = strategy.pick_stats(res)
    state = send2([3, key, initial_stats])  # START with chosen stats
    images = []
    T = 0
    while True:
        T += 1
        state = send2([4, key, strategy.apply(state)])  # COMMANDS for this tick
        # images.append(drawState(fake_state, from_python(state))[1])
        # intermediate gif saves
        # if T % 10 == 0:
        #     images[0].save(f'player{id}.gif', save_all=True, append_images=images[1:])
        if state[1] == 2:  # 2 == game finished
            print('done')
            break
    # images[0].save(f'player{id}.gif', save_all=True, append_images=images[1:])
    # print(send2([122, 203, 410, 164, 444, 484, 202, 77, 251, 56, 456, 435, 28, 329, 257, 265, 501, 18, 190, 423, 384, 434, 266, 69, 34, 437, 203, 152, 160, 425, 245, 428, 99, 107, 192, 372, 346, 344, 169, 478, 393, 502, 201, 497, 313, 32, 281, 510, 436, 22, 237, 80, 325, 405, 184, 358, 57, 276, 359, 189, 284, 277, 198, 244]))
# Run both players in parallel processes: player 1 orbits, player 2 swarms.
strategy2 = SwarmerStrategy(printships=False)
strategy1 = OrbiterStrategy(do_laser=True, printships=True, duplicate=False)
p1 = Process(target=player, args=p1 + [strategy1])
p2 = Process(target=player, args=p2 + [strategy2])
p1.start()
p2.start()
p1.join()
p2.join()
|
flow_control.py | import logging
import sys
import threading
import time
from parsl.dataflow.task_status_poller import TaskStatusPoller
logger = logging.getLogger(__name__)
class FlowNoControl(object):
    """No-op stand-in for FlowControl.

    Exposes the same interface (notify/close) with null behavior so callers
    can use either implementation interchangeably.
    """

    def __init__(self, dfk, *args, threshold=2, interval=2):
        """Accept and ignore the FlowControl constructor arguments.

        Args:
            - dfk (DataFlowKernel) : DFK object to track parsl progress
        KWargs:
            - threshold (int) : Tasks after which the callback is triggered
            - interval (int) : seconds after which timer expires
        """

    def notify(self, event_id):
        """Ignore the event notification."""

    def close(self):
        """Nothing to shut down."""
class FlowControl(object):
    """Implements threshold-interval based flow control.
    The overall goal is to trap the flow of apps from the
    workflow, measure it and redirect it the appropriate executors for
    processing.
    This is based on the following logic:
    .. code-block:: none
        BEGIN (INTERVAL, THRESHOLD, callback) :
            start = current_time()
            while (current_time()-start < INTERVAL) :
                count = get_events_since(start)
                if count >= THRESHOLD :
                    break
            callback()
    This logic ensures that the callbacks are activated with a maximum delay
    of `interval` for systems with infrequent events as well as systems which would
    generate large bursts of events.
    Once a callback is triggered, the callback generally runs a strategy
    method on the sites available as well asqeuque
    TODO: When the debug logs are enabled this module emits duplicate messages.
    This issue needs more debugging. What I've learnt so far is that the duplicate
    messages are present only when the timer thread is started, so this could be
    from a duplicate logger being added by the thread.
    """

    def __init__(self, dfk, *args, threshold=20, interval=5):
        """Initialize the flowcontrol object.
        We start the timer thread here
        Args:
            - dfk (DataFlowKernel) : DFK object to track parsl progress
        KWargs:
            - threshold (int) : Tasks after which the callback is triggered
            - interval (int) : seconds after which timer expires
        """
        self.dfk = dfk
        self.threshold = threshold
        self.interval = interval
        self.cb_args = args
        self.task_status_poller = TaskStatusPoller(dfk)
        self.callback = self.task_status_poller.poll
        self._handle = None
        # Count/buffer of events seen since the last callback.
        self._event_count = 0
        self._event_buffer = []
        # First wake-up is 1s out regardless of `interval`.
        self._wake_up_time = time.time() + 1
        self._kill_event = threading.Event()
        self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,), name="FlowControl-Thread")
        self._thread.daemon = True
        self._thread.start()

    def _wake_up_timer(self, kill_event):
        """Internal. This is the function that the thread will execute.
        waits on an event so that the thread can make a quick exit when close() is called
        Args:
            - kill_event (threading.Event) : Event to wait on
        """
        while True:
            prev = self._wake_up_time
            # Waiting for the event returns True only when the event
            # is set, usually by the parent thread
            time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
            if time_to_die:
                return
            if prev == self._wake_up_time:
                # Nobody rescheduled us while we slept: fire the timer callback.
                self.make_callback(kind='timer')
            else:
                # make_callback() pushed _wake_up_time forward; loop and re-wait.
                print("Sleeping a bit more")

    def notify(self, event_id):
        """Let the FlowControl system know that there is an event."""
        self._event_buffer.extend([event_id])
        self._event_count += 1
        if self._event_count >= self.threshold:
            logger.debug("Eventcount >= threshold")
            self.make_callback(kind="event")

    def make_callback(self, kind=None):
        """Makes the callback and resets the timer.
        KWargs:
            - kind (str): Default=None, used to pass information on what
              triggered the callback
        """
        # Reschedule the timer before running the callback so the timer thread
        # sees the change and sleeps again instead of double-firing.
        self._wake_up_time = time.time() + self.interval
        try:
            self.callback(tasks=self._event_buffer, kind=kind)
        except Exception:
            logger.error("Flow control callback threw an exception - logging and proceeding anyway", exc_info=True)
        self._event_buffer = []

    def add_executors(self, executors):
        # Delegate executor registration to the task status poller.
        self.task_status_poller.add_executors(executors)

    def close(self):
        """Merge the threads and terminate."""
        self._kill_event.set()
        self._thread.join()
class Timer(object):
    """This timer is a simplified version of the FlowControl timer.
    This timer does not employ notify events.
    This is based on the following logic :
    .. code-block:: none
        BEGIN (INTERVAL, THRESHOLD, callback) :
            start = current_time()
            while (current_time()-start < INTERVAL) :
                 wait()
                 break
            callback()
    """

    def __init__(self, callback, *args, interval=5, name=None):
        """Initialize the timer and start its daemon thread.

        Args:
            - callback (callable) : invoked with *args every `interval` seconds
        KWargs:
            - interval (int) : seconds after which timer expires
            - name (str) : a base name to use when naming the started thread
        """
        self.interval = interval
        self.cb_args = args
        self.callback = callback
        # First wake-up is fixed at 1s out, regardless of `interval`.
        self._wake_up_time = time.time() + 1
        self._kill_event = threading.Event()
        base = "Timer-Thread-{}".format(id(self))
        thread_name = base if name is None else "{}-{}".format(name, base)
        self._thread = threading.Thread(target=self._wake_up_timer,
                                        args=(self._kill_event,),
                                        name=thread_name)
        self._thread.daemon = True
        self._thread.start()

    def _wake_up_timer(self, kill_event):
        """Thread body: sleep until the scheduled wake-up, fire the callback,
        and exit promptly when `kill_event` is set by close().

        Args:
            - kill_event (threading.Event) : Event to wait on
        """
        while True:
            scheduled = self._wake_up_time
            # wait() returns True only when the event is set (by close()).
            if kill_event.wait(float(max(scheduled - time.time(), 0))):
                return
            if scheduled == self._wake_up_time:
                self.make_callback(kind='timer')
            else:
                # Wake-up was rescheduled while sleeping; wait again.
                print("Sleeping a bit more")

    def make_callback(self, kind=None):
        """Invoke the callback and schedule the next wake-up."""
        self._wake_up_time = time.time() + self.interval
        self.callback(*self.cb_args)

    def close(self):
        """Signal the timer thread to exit and join it."""
        self._kill_event.set()
        self._thread.join()
if __name__ == "__main__":
def foo():
print("Callback made at :", time.time())
timer = Timer(foo)
time.sleep(60)
timer.close()
exit(0)
print("This is broken")
def cback(*args):
print("*" * 40)
print("Callback at {0} with args : {1}".format(time.time(), args))
print("*" * 40)
fc = FlowControl(cback)
print("Testing")
print("Press E(Enter) to create and event, X(Enter) to exit")
while True:
x = sys.stdin.read(1)
if x.lower() == 'e':
print("Event")
fc.notify()
elif x.lower() == 'x':
print("Exiting ...")
break
else:
print("Continuing.. got[%s]", x)
|
app.py | #!/usr/bin/env python
import argparse
import logging
import settings
import pydoc
import os
import sys
import threading
def import_plugins(plugin_package, plugin_modules):
    """Locate plugin objects by dotted path beneath *plugin_package*.

    Args:
        plugin_package: package name the plugins live under (e.g. 'parsers').
        plugin_modules: iterable of dotted paths relative to the package.

    Returns:
        List of the objects pydoc.locate resolved; paths that fail to
        resolve are logged as warnings and skipped.
    """
    plugins = []
    for plugin_path in plugin_modules:
        plugin_class = pydoc.locate('{}.{}'.format(plugin_package, plugin_path))
        if not plugin_class:
            # Bug fix: log the path that failed to resolve; the old code
            # formatted plugin_class, which is always None/falsy here.
            logging.warning('Plugin "{}" not found'.format(plugin_path))
        else:
            plugins.append(plugin_class)
    return plugins
def is_valid_file(parser, arg):
    """Argparse `type=` helper: return *arg* if it names an existing path,
    otherwise abort argument parsing via parser.error()."""
    if os.path.exists(arg):
        return arg
    parser.error('The file {} does not exist!'.format(arg))
def main(argv):
    """Entry point: parse CLI args, run every parser plugin over the input
    file in worker threads, then serialize the merged result.

    Args:
        argv: argument vector (without the program name).

    Raises:
        LookupError / UnicodeError: re-raised after logging, e.g. for a bad
        input-file encoding.
    """
    parent_args_parser = argparse.ArgumentParser()
    # Plugin classes come from settings; serializer instances register their
    # own CLI options against the shared parser on construction.
    parser_classes = import_plugins('parsers', settings.PARSERS_AVAILABLE)
    serializers_classes = import_plugins('serializers', settings.SERIALIZERS_AVAILABLE)
    serializers_pool = {cl.serializer_name: cl(parent_args_parser) for cl in serializers_classes}
    parent_args_parser.add_argument('--format', '-f', dest='output_format',
                                    choices=serializers_pool.keys(),
                                    default=serializers_classes[0].serializer_name,
                                    help="Output format")
    parent_args_parser.add_argument('--if', '-i', dest='input_file',
                                    required=True,
                                    help='Input SIMATIC HW CFG file',
                                    metavar='INPUT_FILE',
                                    type=lambda x: is_valid_file(parent_args_parser, x))
    parent_args_parser.add_argument('--of', '-o',
                                    dest='output_file',
                                    required=True,
                                    type=argparse.FileType('wb', 0),
                                    help='Output file name',
                                    metavar="OUTPUT_FILE")
    parent_args_parser.add_argument('--encoding', '-e',
                                    dest='input_file_encoding',
                                    default='cp1251',
                                    help='Input SIMATIC HW CFG file encoding')
    args = parent_args_parser.parse_args(argv)
    try:
        threads = []
        # Shared dict every parser instance writes its results into.
        result = {}
        with open(args.input_file, mode='r', encoding=args.input_file_encoding) as f:
            content = f.read()
        for cl in parser_classes:
            parser_instance = cl(result)
            threads.append(threading.Thread(target=parser_instance.parse, args=(content,)))
        for thread in threads:
            thread.daemon = True
            thread.start()
        for thread in threads:
            try:
                # Old (pre-3.9) spelling of the liveness check.
                while thread.isAlive():
                    thread.join(5)
            except AttributeError:
                # so, using python 3.9 name (isAlive was removed in 3.9)
                while thread.is_alive():
                    thread.join(5)
        serializer = serializers_pool.get(args.output_format)
        serializer.to_serial(args, result)
    except (LookupError, UnicodeError) as e:
        logging.error('Exception while parse file, {}'.format(e))
        raise
if __name__ == '__main__': # pragma: no cover
main(sys.argv[1:])
|
test_lock.py | import os
from subprocess import Popen
from tempfile import mktemp
from threading import Thread
import time
from unittest import TestCase
from combo_lock import ComboLock
class TestComboLock(TestCase):
    """Integration tests for ComboLock's thread-level and process-level
    mutual exclusion."""

    def setUp(self):
        # Fresh lock-file path per test.
        self.lock_file = mktemp()

    def tearDown(self):
        os.remove(self.lock_file)

    def test_thread_lock(self):
        # Thread A acquires the lock first and holds it for 0.2s; B starts
        # 0.1s later and must wait, so the recorded order is a then b.
        lock = ComboLock(self.lock_file)
        call_order = []

        def thread_a():
            nonlocal call_order
            with lock:
                time.sleep(0.2)
                call_order.append('a')

        def thread_b():
            nonlocal call_order
            time.sleep(0.1)  # start after A has taken the lock
            with lock:
                call_order.append('b')

        a = Thread(target=thread_a)
        b = Thread(target=thread_b)
        b.start()
        a.start()
        b.join()
        a.join()
        self.assertEqual(call_order, ['a', 'b'])

    def test_process_lock(self):
        # Same ordering check across two separate processes; process.py
        # appends its name to test_output under the shared lock.
        test_process = os.path.join(os.path.dirname(__file__), 'process.py')
        test_output = '/tmp/test_process.lock'
        # Run processes
        a = Popen(['python3', test_process, 'a', self.lock_file, '0', '0.2'])
        b = Popen(['python3', test_process, 'b', self.lock_file, '0.1', '0'])
        a.wait()
        b.wait()
        # Read result
        with open(test_output, 'r') as f:
            result = f.read()
        os.remove(test_output)
        # Verify that the written order is correct
        self.assertEqual(result, 'ab')
|
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module
from test.script_helper import assert_python_ok
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
from test.script_helper import assert_python_ok, assert_python_failure
import subprocess
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable counter.

    Not synchronized by itself; callers guard it with their own mutex.
    """

    def __init__(self):
        self.value = 0

    def inc(self):
        """Increase the stored value by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the stored value by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current value."""
        return self.value
class TestThread(threading.Thread):
    """Worker used by the various_ops tests: runs briefly under a bounded
    semaphore and records how many tasks are concurrently running."""

    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase  # TestCase used for assertions
        self.sema = sema          # BoundedSemaphore limiting concurrency
        self.mutex = mutex        # guards nrunning
        self.nrunning = nrunning  # shared Counter of currently running tasks

    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))

        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore bounds concurrency at 3 (see test setup).
                self.testcase.assertTrue(self.nrunning.get() <= 3)

            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')

            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Common fixture: snapshot threading state before each test and clean
    up leftover threads and child processes afterwards."""

    def setUp(self):
        self._threads = test.support.threading_setup()

    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx', 'hp-ux11')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_6_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
class ThreadingExceptionTests(BaseTestCase):
    """Misuses of the threading API that must raise RuntimeError."""

    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)

    def test_joining_current_thread(self):
        # A thread may not join itself.
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);

    def test_joining_inactive_thread(self):
        # join() before start() is an error.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)

    def test_daemonize_active_thread(self):
        # The daemon flag may only be set before start().
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)

    def test_releasing_unacquired_lock(self):
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)

    @unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
                         'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.callback_args = []                 # snapshots of spy call args
        self.callback_event = threading.Event() # set each time the spy fires

    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate timer1's args/kwargs; timer2 must not observe the change.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])

    def _callback_spy(self, *args, **kwargs):
        # Record a snapshot of the call arguments and signal completion.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
|
train.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
from official.vision.detection.tools.data_mapper import data_mapper
from official.vision.detection.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
    """Build the command-line argument parser for detection training.

    Options: net description file, optional pretrained weights, gpu count,
    per-gpu batch size, and the dataset root directory.
    """
    arg_specs = (
        (("-f", "--file"),
         dict(default="net.py", type=str, help="net description file")),
        (("-w", "--weight_file"),
         dict(default=None, type=str, help="weights file")),
        (("-n", "--ngpus"),
         dict(default=1, type=int, help="total number of gpus for training")),
        (("-b", "--batch_size"),
         dict(default=2, type=int, help="batch size for training")),
        (("-d", "--dataset_dir"),
         dict(default="/data/datasets", type=str)),
    )
    parser = argparse.ArgumentParser()
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser
def main():
    """Parse CLI args and launch training: one worker process per GPU when
    ngpus > 1, otherwise a single in-process worker."""
    parser = make_parser()
    args = parser.parse_args()

    # ------------------------ begin training -------------------------- #
    logger.info("Device Count = %d", args.ngpus)

    # Per-net log directory, e.g. "log-of-net" for net.py.
    log_dir = "log-of-{}".format(os.path.basename(args.file).split(".")[0])
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    if args.ngpus > 1:
        # Multi-GPU: start a distributed server, then one process per rank.
        master_ip = "localhost"
        port = dist.get_free_ports(1)[0]
        dist.Server(port)

        processes = list()
        for rank in range(args.ngpus):
            process = mp.Process(
                target=worker, args=(master_ip, port, args.ngpus, rank, args)
            )
            process.start()
            processes.append(process)

        for p in processes:
            p.join()
    else:
        worker(None, None, 1, 0, args)
def worker(master_ip, port, world_size, rank, args):
    """Per-process training entry point.

    Joins the distributed process group (when world_size > 1), builds the
    network/optimizer/dataloader, trains for cfg.max_epoch epochs and
    checkpoints once per epoch on rank 0.

    Args:
        master_ip: distributed master address (None for single-process runs).
        port: master port (None for single-process runs).
        world_size: total number of participating processes.
        rank: this process's rank; also used as the device index.
        args: parsed CLI namespace from make_parser().
    """
    if world_size > 1:
        dist.init_process_group(
            master_ip=master_ip,
            port=port,
            world_size=world_size,
            rank=rank,
            device=rank,
        )
        logger.info("Init process group for gpu{} done".format(rank))

    current_network = import_from_file(args.file)
    model = current_network.Net(current_network.Cfg())
    model.train()

    if dist.get_rank() == 0:
        logger.info(get_config_info(model.cfg))
        logger.info(repr(model))

    params_with_grad = []
    for name, param in model.named_parameters():
        # Honour backbone freezing: skip parameters of frozen stages.
        if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
            continue
        if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
            continue
        params_with_grad.append(param)

    # lr scales with batch size; weight decay scales with world size.
    opt = SGD(
        params_with_grad,
        lr=model.cfg.basic_lr * args.batch_size,
        momentum=model.cfg.momentum,
        weight_decay=model.cfg.weight_decay * dist.get_world_size(),
    )

    gm = GradManager()
    if dist.get_world_size() > 1:
        # All-reduce gradients across ranks during backward.
        gm.attach(
            params_with_grad,
            callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
        )
    else:
        gm.attach(params_with_grad)

    if args.weight_file is not None:
        # Load pretrained backbone weights (non-strict: heads may differ).
        weights = mge.load(args.weight_file)
        model.backbone.bottom_up.load_state_dict(weights, strict=False)
    if dist.get_world_size() > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)  # sync parameters

    if dist.get_rank() == 0:
        logger.info("Prepare dataset")
    train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))

    for epoch in range(model.cfg.max_epoch):
        train_one_epoch(model, train_loader, opt, gm, epoch, args)
        if dist.get_rank() == 0:
            save_path = "log-of-{}/epoch_{}.pkl".format(
                os.path.basename(args.file).split(".")[0], epoch
            )
            mge.save(
                {"epoch": epoch, "state_dict": model.state_dict()}, save_path,
            )
            logger.info("dump weights to %s", save_path)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
    """Run one epoch of optimisation, logging losses and timings on rank 0.

    Args:
        model: network in training mode; model.cfg supplies the schedule.
        data_queue: iterator yielding mini-batch dicts with keys
            "data", "im_info", "gt_boxes".
        opt: SGD optimizer.
        gm: GradManager recording gradients for backward.
        epoch: current epoch index (used for lr schedule).
        args: parsed CLI namespace (batch_size).
    """
    def train_func(image, im_info, gt_boxes):
        # Forward + backward under the GradManager, then an optimiser step.
        with gm:
            loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
            gm.backward(loss_dict["total_loss"])
            loss_list = list(loss_dict.values())
        opt.step().clear_grad()
        return loss_list

    meter = AverageMeter(record_len=model.cfg.num_losses)
    time_meter = AverageMeter(record_len=2)  # [train_time, data_time]
    log_interval = model.cfg.log_interval
    tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
    for step in range(tot_step):
        adjust_learning_rate(opt, epoch, step, model.cfg, args)

        data_tik = time.time()
        mini_batch = next(data_queue)
        data_tok = time.time()

        tik = time.time()
        loss_list = train_func(
            image=mge.tensor(mini_batch["data"]),
            im_info=mge.tensor(mini_batch["im_info"]),
            gt_boxes=mge.tensor(mini_batch["gt_boxes"])
        )
        tok = time.time()

        time_meter.update([tok - tik, data_tok - data_tik])

        if dist.get_rank() == 0:
            # Assemble the log format string: epoch/step/lr, one "%f" slot
            # per configured loss key, then timing info.
            info_str = "e%d, %d/%d, lr:%f, "
            loss_str = ", ".join(
                ["{}:%f".format(loss) for loss in model.cfg.losses_keys]
            )
            time_str = ", train_time:%.3fs, data_time:%.3fs"
            log_info_str = info_str + loss_str + time_str
            meter.update([loss.numpy() for loss in loss_list])
            if step % log_interval == 0:
                logger.info(
                    log_info_str,
                    epoch,
                    step,
                    tot_step,
                    opt.param_groups[0]["lr"],
                    *meter.average(),
                    *time_meter.average()
                )
                # Meters are reset after each log line, so averages cover
                # only the most recent log_interval steps.
                meter.reset()
                time_meter.reset()
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
    """Set the learning rate of every optimizer param group for this step.

    The schedule is a step decay: the base lr (scaled by batch size) is
    multiplied by ``cfg.lr_decay_rate`` once for each decay stage already
    passed, plus a linear warm-up over the first ``cfg.warm_iters``
    iterations of epoch 0.
    """
    decay_exponent = bisect.bisect_right(cfg.lr_decay_stages, epoch)
    decayed_lr = cfg.basic_lr * args.batch_size * cfg.lr_decay_rate ** decay_exponent
    # Linear warm-up only during the first iterations of the first epoch.
    if epoch == 0 and step < cfg.warm_iters:
        warm_factor = (step + 1.0) / cfg.warm_iters
    else:
        warm_factor = 1.0
    for group in optimizer.param_groups:
        group["lr"] = decayed_lr * warm_factor
def build_dataset(dataset_dir, cfg):
    """Instantiate the training dataset described by ``cfg.train_dataset``.

    Deep-copies the config dict so ``cfg`` is left untouched, resolves the
    ``root`` (and optional ``ann_file``) paths relative to
    ``dataset_dir/<name>``, fixes the sample field order, and looks up the
    dataset class by name in ``data_mapper``.
    """
    kwargs = copy.deepcopy(cfg.train_dataset)
    name = kwargs.pop("name")
    base_dir = os.path.join(dataset_dir, name)
    kwargs["root"] = os.path.join(base_dir, kwargs["root"])
    if "ann_file" in kwargs:
        kwargs["ann_file"] = os.path.join(base_dir, kwargs["ann_file"])
    kwargs["order"] = ["image", "boxes", "boxes_category", "info"]
    return data_mapper[name](**kwargs)
def build_sampler(train_dataset, batch_size, aspect_grouping=(1,)):
    """Build an infinite batch sampler over ``train_dataset``.

    Images are bucketed by aspect ratio so each batch contains
    similarly-shaped images (less padding waste). An empty
    ``aspect_grouping`` disables grouping and falls back to plain random
    sampling.

    Args:
        train_dataset: dataset exposing ``__len__`` and ``get_img_info(i)``
            (a dict with "height" and "width").
        batch_size: per-process batch size.
        aspect_grouping: bin edges for the height/width ratio. An immutable
            tuple default replaces the previous mutable-list default
            (behavior is unchanged; the list was never mutated).

    Returns:
        An ``Infinite`` wrapper around a grouped (or plain) random sampler.
    """
    def _compute_aspect_ratios(dataset):
        # height/width per image, from metadata only (no image decoding).
        aspect_ratios = []
        for i in range(len(dataset)):
            info = dataset.get_img_info(i)
            aspect_ratios.append(info["height"] / info["width"])
        return aspect_ratios
    def _quantize(x, bins):
        # Map each ratio to the index of its bin (bins sorted ascending).
        return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
    if len(aspect_grouping) == 0:
        return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
    aspect_ratios = _compute_aspect_ratios(train_dataset)
    group_ids = _quantize(aspect_ratios, aspect_grouping)
    return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg):
    """Create the training DataLoader: dataset + grouped sampler + transforms.

    Augmentation is a shortest-edge resize (random "choice" of short size),
    a random horizontal flip, and a mode conversion; batches are padded to a
    common shape by ``DetectionPadCollator``.
    """
    dataset = build_dataset(dataset_dir, cfg)
    sampler = build_sampler(dataset, batch_size)
    augmentations = T.Compose(
        transforms=[
            T.ShortestEdgeResize(
                cfg.train_image_short_size,
                cfg.train_image_max_size,
                sample_style="choice",
            ),
            T.RandomHorizontalFlip(),
            T.ToMode(),
        ],
        order=["image", "boxes", "boxes_category"],
    )
    return DataLoader(
        dataset,
        sampler=sampler,
        transform=augmentations,
        collator=DetectionPadCollator(),
        num_workers=2,
    )
if __name__ == "__main__":
main()
|
controlsd.py | #!/usr/bin/env python3
import os
import math
import requests
import threading
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
# from common.travis_checker import gh_actions
# import selfdrive.crash as crash
# from selfdrive.version import is_fork_remote
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_model import LatControlModel
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
from selfdrive.controls.lib.dynamic_follow.df_manager import dfManager
from common.op_params import opParams
SOFT_DISABLE_TIME = 3 # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad",
"statsd", "shutdownd"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel
IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
CSID_MAP = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError, "2": EventName.driverCameraError}
# def log_fingerprint(candidate, timeout=15):
# if not gh_actions and is_fork_remote:
# try:
# requests.get('https://sentry.io', timeout=timeout)
# crash.init()
# crash.capture_message("fingerprinted {}".format(candidate), level='info')
# return
# except:
# pass
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
self.op_params = opParams()
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode")
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.sm_smiskol = messaging.SubMaster(['radarState', 'dynamicFollowData', 'liveTracks', 'dynamicFollowButton',
'laneSpeed', 'dynamicCameraOffset', 'modelLongButton'])
self.op_params = opParams()
self.df_manager = dfManager()
self.last_model_long = False
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP, candidate = get_car(self.can_sock, self.pm.sock['sendcan'])
# threading.Thread(target=log_fingerprint, args=[candidate]).start()
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
self.read_only = not car_recognized or not controller_available or self.CP.dashcamOnly
if self.read_only:
safety_config = car.CarParams.SafetyConfig.new_message()
safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
self.CP.safetyConfigs = [safety_config]
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'model':
self.LaC = LatControlModel(self.CP, self.CI)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.cruise_mismatch_counter = 0
self.can_rcv_error_counter = 0
self.last_blinker_frame = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
self.last_actuators = car.CarControl.Actuators.new_message()
self.lat_delay_offset = 0.0
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0, self.CP)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
if len(self.CP.carFw) > 0:
set_offroad_alert("Offroad_CarUnrecognized", True)
else:
set_offroad_alert("Offroad_NoFirmware", True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
# Add startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Create events for battery, temperature, disk space, and memory
if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
self.events.add(EventName.lowMemory)
# TODO: enable this once loggerd CPU usage is more reasonable
#cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
#if max(cpus, default=0) > 95 and not SIMULATION:
# self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['peripheralState'].pandaType in (PandaType.uno, PandaType.dos):
if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in (LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing):
self.events.add(EventName.laneChange)
if not CS.canValid:
self.events.add(EventName.canError)
for i, pandaState in enumerate(self.sm['pandaStates']):
# All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
if i < len(self.CP.safetyConfigs):
safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or \
pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam or \
pandaState.unsafeMode != self.CP.unsafeMode
else:
safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
# Check for HW or system issues
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaStates"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid() or self.can_rcv_error:
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
invalid = [s for s, valid in self.sm.valid.items() if not valid]
not_alive = [s for s, alive in self.sm.alive.items() if not alive]
cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive, can_error=self.can_rcv_error, error=True)
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(3. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)
# Check for FCW
stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
if planner_fcw or model_fcw:
self.events.add(EventName.fcw)
if TICI:
for m in messaging.drain_sock(self.log_sock, wait_for_one=False):
try:
msg = m.androidLog.message
if any(err in msg for err in ("ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED")):
csid = msg.split("CSID:")[-1].split(" ")[0]
evt = CSID_MAP.get(csid, None)
if evt is not None:
self.events.add(evt)
except UnicodeDecodeError:
pass
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
# Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Check if all manager processes are running
not_running = {p.name for p in self.sm['managerState'].processes if not p.running}
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
if CS.brakePressed and v_future >= self.CP.vEgoStarting \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3 and not self.last_model_long:
self.events.add(EventName.noTarget)
self.add_stock_additions_alerts(CS)
def add_stock_additions_alerts(self, CS):
self.AM.SA_set_frame(self.sm.frame)
self.AM.SA_set_enabled(self.enabled)
# alert priority is defined by code location, keeping is highest, then lane speed alert, then auto-df alert
if self.sm_smiskol['modelLongButton'].enabled != self.last_model_long:
extra_text_1 = 'disabled!' if self.last_model_long else 'enabled!'
extra_text_2 = '' if self.last_model_long else ', model may behave unexpectedly'
self.AM.SA_add('modelLongAlert', extra_text_1=extra_text_1, extra_text_2=extra_text_2)
return
if self.sm_smiskol['dynamicCameraOffset'].keepingLeft:
self.AM.SA_add('laneSpeedKeeping', extra_text_1='LEFT', extra_text_2='Oncoming traffic in right lane')
return
elif self.sm_smiskol['dynamicCameraOffset'].keepingRight:
self.AM.SA_add('laneSpeedKeeping', extra_text_1='RIGHT', extra_text_2='Oncoming traffic in left lane')
return
ls_state = self.sm_smiskol['laneSpeed'].state
if ls_state != '':
self.AM.SA_add('lsButtonAlert', extra_text_1=ls_state)
return
faster_lane = self.sm_smiskol['laneSpeed'].fastestLane
if faster_lane in ['left', 'right']:
ls_alert = 'laneSpeedAlert'
if not self.sm_smiskol['laneSpeed'].new:
ls_alert += 'Silent'
self.AM.SA_add(ls_alert, extra_text_1='{} lane faster'.format(faster_lane).upper(), extra_text_2='Change lanes to faster {} lane'.format(faster_lane))
return
df_out = self.df_manager.update()
if df_out.changed:
df_alert = 'dfButtonAlert'
if df_out.is_auto and df_out.last_is_auto:
# only show auto alert if engaged, not hiding auto, and time since lane speed alert not showing
if CS.cruiseState.enabled and not self.op_params.get('hide_auto_df_alerts'):
df_alert += 'Silent'
self.AM.SA_add(df_alert, extra_text_1=df_out.model_profile_text + ' (auto)')
return
elif self.op_params.get('df_button_alerts').strip().lower() == 'off':
return
else:
if self.op_params.get('df_button_alerts').strip().lower() == 'silent':
df_alert += 'Silent'
self.AM.SA_add(df_alert, extra_text_1=df_out.user_profile_text, extra_text_2='Dynamic follow: {} profile active'.format(df_out.user_profile_text))
return
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
self.sm_smiskol.update(0)
if not self.initialized:
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION:
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
if REPLAY and self.sm['pandaStates'][0].controlsAllowed:
self.state = State.enabled
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_rcv_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if self.enabled and any(not ps.controlsAllowed for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.pcmCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
elif CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrement the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
sr = max(params.steerRatio, 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
actuators.longControlState = self.LoC.long_control_state
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
extras_loc = {'lead_one': self.sm_smiskol['radarState'].leadOne, 'mpc_TR': self.sm_smiskol['dynamicFollowData'].mpcTR, # TODO: just pass the services
'live_tracks': self.sm_smiskol['liveTracks'], 'has_lead': long_plan.hasLead}
# accel PID loop
pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits, extras_loc)
# interpolate lat plan to 100hz
self.lat_delay_offset += DT_CTRL
if self.sm.updated['lateralPlan']:
self.lat_delay_offset = 0.
# Steering PID loop and lateral MPC
lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates,
self.lat_delay_offset)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params, self.last_actuators,
desired_curvature, desired_curvature_rate)
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = True
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = steer
lac_log.saturated = abs(steer) >= 0.9
# Send a "steering required alert" if saturation count has reached the limit
if lac_log.active and lac_log.saturated and not CS.steeringPressed:
dpath_points = lat_plan.dPathPoints
if len(dpath_points):
# Check if we deviated from the path
# TODO use desired vs actual curvature
left_deviation = actuators.steer > 0 and dpath_points[0] < -0.20
right_deviation = actuators.steer < 0 and dpath_points[0] > 0.20
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)
if not isinstance(attr, Number):
continue
if not math.isfinite(attr):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return actuators, lac_log
def update_button_timers(self, buttonEvents):
# increment timer for buttons still pressed
for k in self.button_timers:
if self.button_timers[k] > 0:
self.button_timers[k] += 1
for b in buttonEvents:
if b.type.raw in self.button_timers:
self.button_timers[b.type.raw] = 1 if b.pressed else 0
def publish_logs(self, CS, start_time, actuators, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.active = self.active
CC.actuators = actuators
orientation_value = self.sm['liveLocationKalman'].orientationNED.value
if len(orientation_value) > 2:
CC.roll = orientation_value[0]
CC.pitch = orientation_value[1]
CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
hudControl = CC.hudControl
hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
hudControl.rightLaneVisible = True
hudControl.leftLaneVisible = True
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
model_v2 = self.sm['modelV2']
desire_prediction = model_v2.meta.desirePrediction
if len(desire_prediction) and ldw_allowed:
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]
lane_lines = model_v2.laneLines
CAMERA_OFFSET = self.sm['lateralPlan'].cameraOffset
l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))
hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event_types = set()
if ET.WARNING not in self.current_alert_types:
clear_event_types.add(ET.WARNING)
if self.enabled:
clear_event_types.add(ET.NO_ENTRY)
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
self.AM.add_many(self.sm.frame, alerts)
current_alert = self.AM.process_alerts(self.sm.frame, clear_event_types)
if current_alert:
hudControl.visualAlert = current_alert.visual_alert
self.last_model_long = self.sm_smiskol['modelLongButton'].enabled
if not self.read_only and self.initialized:
# send car controls over can
self.last_actuators, can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
CC.actuatorsOutput = self.last_actuators
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
if current_alert:
controlsState.alertText1 = current_alert.alert_text_1
controlsState.alertText2 = current_alert.alert_text_2
controlsState.alertSize = current_alert.alert_size
controlsState.alertStatus = current_alert.alert_status
controlsState.alertBlinkingRate = current_alert.alert_rate
controlsState.alertType = current_alert.alert_type
controlsState.alertSound = current_alert.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_rcv_error_counter
lat_tuning = self.CP.lateralTuning.which()
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif lat_tuning == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif lat_tuning == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif lat_tuning == 'indi':
controlsState.lateralControlState.indiState = lac_log
elif lat_tuning == 'model':
controlsState.lateralControlState.modelState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
    """Run one control iteration: sample inputs, advance the state
    machine, compute actuator commands, and publish all log messages.

    NOTE(review): statement order matters — events must be updated from
    the freshly sampled CS before the state transition, and publishing
    uses the actuators computed this cycle. Do not reorder.
    """
    start_time = sec_since_boot()
    self.prof.checkpoint("Ratekeeper", ignore=True)

    # Sample data from sockets and get a carState
    CS = self.data_sample()
    self.prof.checkpoint("Sample")

    self.update_events(CS)

    if not self.read_only and self.initialized:
        # Update control state
        self.state_transition(CS)
        self.prof.checkpoint("State transition")

    # Compute actuators (runs PID loops and lateral MPC)
    actuators, lac_log = self.state_control(CS)
    self.prof.checkpoint("State Control")

    # Publish data
    self.publish_logs(CS, start_time, actuators, lac_log)
    self.prof.checkpoint("Sent")

    self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
    """Main loop: run control steps forever.

    Pacing comes from the Ratekeeper (``self.rk``); ``monitor_time``
    tracks lag rather than sleeping here — presumably the sleep happens
    inside ``data_sample``'s socket wait (TODO confirm).
    """
    while True:
        self.step()
        self.rk.monitor_time()
        self.prof.display()
def main(sm=None, pm=None, logcan=None):
    # Entry point. When sm/pm/logcan are None, Controls presumably
    # creates its own messaging objects — confirm in Controls.__init__.
    controls = Controls(sm, pm, logcan)
    controls.controlsd_thread()


if __name__ == "__main__":
    main()
|
cli.py | import ast
import inspect
import os
import platform
import re
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
class NoAppException(click.UsageError):
    """Raised if an application cannot be found or loaded.

    Subclasses ``click.UsageError`` so the CLI reports the message as a
    usage error instead of printing a traceback.
    """
def find_best_app(module):
    """Given a module instance this tries to find the best possible
    application in the module or raises an exception.
    """
    from . import Flask

    # Try the conventional attribute names before scanning everything.
    for attr_name in ("app", "application"):
        candidate = getattr(module, attr_name, None)

        if isinstance(candidate, Flask):
            return candidate

    # Otherwise scan the module for Flask instances.
    instances = [v for v in module.__dict__.values() if isinstance(v, Flask)]

    if len(instances) == 1:
        return instances[0]

    if len(instances) > 1:
        raise NoAppException(
            "Detected multiple Flask applications in module"
            f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
            f" to specify the correct one."
        )

    # No instance found — look for a zero-argument app factory.
    for attr_name in ("create_app", "make_app"):
        factory = getattr(module, attr_name, None)

        if not inspect.isfunction(factory):
            continue

        try:
            candidate = factory()

            if isinstance(candidate, Flask):
                return candidate
        except TypeError as e:
            # Only swallow the TypeError when the *call* failed; an
            # error raised inside the factory must propagate.
            if not _called_with_wrong_args(factory):
                raise

            raise NoAppException(
                f"Detected factory {attr_name!r} in module {module.__name__!r},"
                " but could not call it without arguments. Use"
                f" \"FLASK_APP='{module.__name__}:{attr_name}(args)'\""
                " to specify arguments."
            ) from e

    raise NoAppException(
        "Failed to find Flask application or factory in module"
        f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
        " to specify one."
    )
def _called_with_wrong_args(f):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(module, app_name):
    """Check if the given string is a variable name or a function. Call
    a function to get the app instance, or return the variable directly.

    :param module: the imported module to look in.
    :param app_name: an attribute name such as ``"app"`` or a call
        expression such as ``"create_app('prod')"``.
    :raises NoAppException: if parsing, lookup, or the factory call fails,
        or the result is not a Flask instance.
    """
    from . import Flask

    # Parse app_name as a single expression to determine if it's a valid
    # attribute name or function call.
    try:
        expr = ast.parse(app_name.strip(), mode="eval").body
    except SyntaxError:
        raise NoAppException(
            f"Failed to parse {app_name!r} as an attribute name or function call."
        ) from None

    if isinstance(expr, ast.Name):
        # Plain attribute reference, e.g. "app".
        name = expr.id
        args = []
        kwargs = {}
    elif isinstance(expr, ast.Call):
        # Ensure the function name is an attribute name only.
        if not isinstance(expr.func, ast.Name):
            raise NoAppException(
                f"Function reference must be a simple name: {app_name!r}."
            )

        name = expr.func.id

        # Parse the positional and keyword arguments as literals.
        try:
            args = [ast.literal_eval(arg) for arg in expr.args]
            kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
        except ValueError:
            # literal_eval gives cryptic error messages, show a generic
            # message with the full expression instead.
            raise NoAppException(
                f"Failed to parse arguments as literal values: {app_name!r}."
            ) from None
    else:
        raise NoAppException(
            f"Failed to parse {app_name!r} as an attribute name or function call."
        )

    try:
        attr = getattr(module, name)
    except AttributeError as e:
        raise NoAppException(
            f"Failed to find attribute {name!r} in {module.__name__!r}."
        ) from e

    # If the attribute is a function, call it with any args and kwargs
    # to get the real application.
    if inspect.isfunction(attr):
        try:
            app = attr(*args, **kwargs)
        except TypeError as e:
            # Distinguish "called with wrong arguments" from a TypeError
            # raised inside the factory itself, which must propagate.
            if not _called_with_wrong_args(attr):
                raise

            raise NoAppException(
                f"The factory {app_name!r} in module"
                f" {module.__name__!r} could not be called with the"
                " specified arguments."
            ) from e
    else:
        app = attr

    if isinstance(app, Flask):
        return app

    raise NoAppException(
        "A valid Flask application was not obtained from"
        f" '{module.__name__}:{app_name}'."
    )
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(module_name, app_name, raise_if_not_found=True):
    """Import ``module_name`` and return an application found in it.

    When ``app_name`` is ``None`` the module is searched heuristically,
    otherwise the name/call expression is resolved explicitly.
    """
    __traceback_hide__ = True  # noqa: F841

    try:
        __import__(module_name)
    except ImportError:
        # Reraise the ImportError if it occurred within the imported module.
        # Determine this by checking whether the trace has a depth > 1.
        if sys.exc_info()[2].tb_next:
            raise NoAppException(
                f"While importing {module_name!r}, an ImportError was"
                f" raised:\n\n{traceback.format_exc()}"
            ) from None

        if raise_if_not_found:
            raise NoAppException(f"Could not import {module_name!r}.") from None

        return None

    module = sys.modules[module_name]

    if app_name is None:
        return find_best_app(module)

    return find_app_by_string(module, app_name)
def get_version(ctx, param, value):
    # Eager click callback for --version: print version info and exit
    # before any app loading happens.
    if not value or ctx.resilient_parsing:
        return

    import werkzeug
    from . import __version__

    click.echo(
        f"Python {platform.python_version()}\n"
        f"Flask {__version__}\n"
        f"Werkzeug {werkzeug.__version__}",
        color=ctx.color,
    )
    ctx.exit()


# Reusable ``--version`` option; eager so it runs before other callbacks.
version_option = click.Option(
    ["--version"],
    help="Show the flask version",
    expose_value=False,
    callback=get_version,
    is_flag=True,
    is_eager=True,
)
class DispatchingApp:
    """Special application that dispatches to a Flask application which
    is imported by name in a background thread. If an error happens
    it is recorded and shown as part of the WSGI handling which in case
    of the Werkzeug debugger means that it shows up in the browser.
    """

    def __init__(self, loader, use_eager_loading=None):
        # loader: zero-argument callable that returns the real WSGI app.
        self.loader = loader
        self._app = None
        # Serializes loading between the background thread and requests.
        self._lock = Lock()
        # Exception captured by the background loader, re-raised later.
        self._bg_loading_exc = None

        if use_eager_loading is None:
            # Load eagerly except in the reloader parent process, so an
            # import error surfaces in the request instead of killing the
            # watcher process.
            use_eager_loading = os.environ.get("WERKZEUG_RUN_MAIN") != "true"

        if use_eager_loading:
            self._load_unlocked()
        else:
            self._load_in_background()

    def _load_in_background(self):
        # Store the Click context and push it in the loader thread so
        # script_info is still available.
        ctx = click.get_current_context(silent=True)

        def _load_app():
            __traceback_hide__ = True  # noqa: F841

            with self._lock:
                if ctx is not None:
                    click.globals.push_context(ctx)

                try:
                    self._load_unlocked()
                except Exception as e:
                    # Remember the failure; re-raised on the next request.
                    self._bg_loading_exc = e

        t = Thread(target=_load_app, args=())
        t.start()

    def _flush_bg_loading_exception(self):
        # Re-raise (once) any exception recorded by the background loader.
        __traceback_hide__ = True  # noqa: F841
        exc = self._bg_loading_exc

        if exc is not None:
            self._bg_loading_exc = None
            raise exc

    def _load_unlocked(self):
        # Caller must hold _lock (or be the only thread, as in __init__).
        __traceback_hide__ = True  # noqa: F841
        self._app = rv = self.loader()
        self._bg_loading_exc = None
        return rv

    def __call__(self, environ, start_response):
        # WSGI entry point: dispatch to the loaded app, loading it on
        # first use if the background load hasn't finished yet.
        __traceback_hide__ = True  # noqa: F841

        if self._app is not None:
            return self._app(environ, start_response)

        self._flush_bg_loading_exception()

        with self._lock:
            if self._app is not None:
                rv = self._app
            else:
                rv = self._load_unlocked()

        return rv(environ, start_response)
class ScriptInfo:
    """Helper object to deal with Flask applications. This is usually not
    necessary to interface with as it's used internally in the dispatching
    to click. In future versions of Flask this object will most likely play
    a bigger role. Typically it's created automatically by the
    :class:`FlaskGroup` but you can also manually create it and pass it
    onwards as click object.
    """

    def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
        #: Optionally the import path for the Flask application.
        self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
        #: Optionally a function that is passed the script info to create
        #: the instance of the application.
        self.create_app = create_app
        #: A dictionary with arbitrary data that can be associated with
        #: this script info.
        self.data = {}
        # Whether load_app() should sync app.debug with the environment.
        self.set_debug_flag = set_debug_flag
        # Cache so repeated load_app() calls return the same instance.
        self._loaded_app = None

    def load_app(self):
        """Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just result in the already loaded app to
        be returned.

        :raises NoAppException: if no application can be located.
        """
        __traceback_hide__ = True  # noqa: F841

        if self._loaded_app is not None:
            return self._loaded_app

        if self.create_app is not None:
            app = self.create_app()
        else:
            if self.app_import_path:
                # Split "module:name" once; the lookahead avoids splitting
                # on drive/path separators such as "C:\...".
                path, name = (
                    re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
                )[:2]
                import_name = prepare_import(path)
                app = locate_app(import_name, name)
            else:
                # No FLASK_APP given: try the conventional module names.
                for path in ("wsgi.py", "app.py"):
                    import_name = prepare_import(path)
                    app = locate_app(import_name, None, raise_if_not_found=False)

                    if app:
                        break

        if not app:
            raise NoAppException(
                "Could not locate a Flask application. You did not provide "
                'the "FLASK_APP" environment variable, and a "wsgi.py" or '
                '"app.py" module was not found in the current directory.'
            )

        if self.set_debug_flag:
            # Update the app's debug flag through the descriptor so that
            # other values repopulate as well.
            app.debug = get_debug_flag()

        self._loaded_app = app
        return app
# Click decorator that passes the closest ScriptInfo object to the
# command callback, creating one if necessary (ensure=True).
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
    """Wraps a callback so that it's guaranteed to be executed with the
    script's application context. If callbacks are registered directly
    to the ``app.cli`` object then they are wrapped with this function
    by default unless it's disabled.
    """

    @click.pass_context
    def decorator(__ctx, *args, **kwargs):
        # Load (or reuse) the app via ScriptInfo, then run the callback
        # inside its application context.
        app = __ctx.ensure_object(ScriptInfo).load_app()

        with app.app_context():
            return __ctx.invoke(f, *args, **kwargs)

    return update_wrapper(decorator, f)
class AppGroup(click.Group):
    """This works similar to a regular click :class:`~click.Group` but it
    changes the behavior of the :meth:`command` decorator so that it
    automatically wraps the functions in :func:`with_appcontext`.

    Not to be confused with :class:`FlaskGroup`.
    """

    def command(self, *args, **kwargs):
        """Like :meth:`click.Group.command` but wraps the callback in
        :func:`with_appcontext` unless ``with_appcontext=False`` is passed.
        """
        wrap = kwargs.pop("with_appcontext", True)

        def decorator(f):
            callback = with_appcontext(f) if wrap else f
            return click.Group.command(self, *args, **kwargs)(callback)

        return decorator

    def group(self, *args, **kwargs):
        """Like :meth:`click.Group.group` but defaults the group class to
        :class:`AppGroup` so nested groups keep the same behavior.
        """
        kwargs.setdefault("cls", AppGroup)
        return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
    """Special subclass of the :class:`AppGroup` group that supports
    loading more commands from the configured Flask app. Normally a
    developer does not have to interface with this class but there are
    some very advanced use cases for which it makes sense to create an
    instance of this. see :ref:`custom-scripts`.

    :param add_default_commands: if this is True then the default run and
        shell commands will be added.
    :param add_version_option: adds the ``--version`` option.
    :param create_app: an optional callback that is passed the script info and
        returns the loaded app.
    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
        files to set environment variables. Will also change the working
        directory to the directory containing the first file found.
    :param set_debug_flag: Set the app's debug flag based on the active
        environment

    .. versionchanged:: 1.0
        If installed, python-dotenv will be used to load environment variables
        from :file:`.env` and :file:`.flaskenv` files.
    """

    def __init__(
        self,
        add_default_commands=True,
        create_app=None,
        add_version_option=True,
        load_dotenv=True,
        set_debug_flag=True,
        **extra,
    ):
        # Preserve any params the caller supplied and optionally append
        # the shared --version option.
        params = list(extra.pop("params", None) or ())

        if add_version_option:
            params.append(version_option)

        AppGroup.__init__(self, params=params, **extra)
        self.create_app = create_app
        self.load_dotenv = load_dotenv
        self.set_debug_flag = set_debug_flag

        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)
            self.add_command(routes_command)

        # Entry-point commands are registered lazily, exactly once.
        self._loaded_plugin_commands = False

    def _load_plugin_commands(self):
        # Register commands advertised by installed packages under the
        # "flask.commands" entry-point group.
        if self._loaded_plugin_commands:
            return

        if sys.version_info >= (3, 10):
            from importlib import metadata
        else:
            # Use a backport on Python < 3.10. We technically have
            # importlib.metadata on 3.8+, but the API changed in 3.10,
            # so use the backport for consistency.
            import importlib_metadata as metadata

        for ep in metadata.entry_points(group="flask.commands"):
            self.add_command(ep.load(), ep.name)

        self._loaded_plugin_commands = True

    def get_command(self, ctx, name):
        """Resolve ``name``, checking built-ins and plugins before the
        commands registered on the loaded application."""
        self._load_plugin_commands()

        # Look up built-in and plugin commands, which should be
        # available even if the app fails to load.
        rv = super().get_command(ctx, name)

        if rv is not None:
            return rv

        info = ctx.ensure_object(ScriptInfo)

        # Look up commands provided by the app, showing an error and
        # continuing if the app couldn't be loaded.
        try:
            return info.load_app().cli.get_command(ctx, name)
        except NoAppException as e:
            click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")

    def list_commands(self, ctx):
        """Return the sorted union of built-in, plugin, and app commands."""
        self._load_plugin_commands()

        # Start with the built-in and plugin commands.
        rv = set(super().list_commands(ctx))
        info = ctx.ensure_object(ScriptInfo)

        # Add commands provided by the app, showing an error and
        # continuing if the app couldn't be loaded.
        try:
            rv.update(info.load_app().cli.list_commands(ctx))
        except NoAppException as e:
            # When an app couldn't be loaded, show the error message
            # without the traceback.
            click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
        except Exception:
            # When any other errors occurred during loading, show the
            # full traceback.
            click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")

        return sorted(rv)

    def main(self, *args, **kwargs):
        # Set a global flag that indicates that we were invoked from the
        # command line interface. This is detected by Flask.run to make the
        # call into a no-op. This is necessary to avoid ugly errors when the
        # script that is loaded here also attempts to start a server.
        os.environ["FLASK_RUN_FROM_CLI"] = "true"

        if get_load_dotenv(self.load_dotenv):
            load_dotenv()

        obj = kwargs.get("obj")

        if obj is None:
            obj = ScriptInfo(
                create_app=self.create_app, set_debug_flag=self.set_debug_flag
            )

        kwargs["obj"] = obj
        # Allow options to also be supplied via FLASK_* env vars.
        kwargs.setdefault("auto_envvar_prefix", "FLASK")
        return super().main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(path=None):
    """Load "dotenv" files in order of precedence to set environment variables.

    If an env var is already set it is not overwritten, so earlier files in the
    list are preferred over later files.

    This is a no-op if `python-dotenv`_ is not installed.

    .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme

    :param path: Load the file at this location instead of searching.
    :return: ``True`` if a file was loaded.

    .. versionchanged:: 1.1.0
        Returns ``False`` when python-dotenv is not installed, or when
        the given path isn't a file.

    .. versionchanged:: 2.0
        When loading the env files, set the default encoding to UTF-8.

    .. versionadded:: 1.0
    """
    try:
        import dotenv
    except ImportError:
        # python-dotenv is an optional dependency; hint at it only when
        # there are files it would actually load.
        if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
            click.secho(
                " * Tip: There are .env or .flaskenv files present."
                ' Do "pip install python-dotenv" to use them.',
                fg="yellow",
                err=True,
            )

        return False

    # if the given path specifies the actual file then return True,
    # else False
    if path is not None:
        if os.path.isfile(path):
            return dotenv.load_dotenv(path, encoding="utf-8")

        return False

    new_dir = None

    # .env takes precedence over .flaskenv because already-set variables
    # are not overwritten.
    for name in (".env", ".flaskenv"):
        path = dotenv.find_dotenv(name, usecwd=True)

        if not path:
            continue

        if new_dir is None:
            new_dir = os.path.dirname(path)

        dotenv.load_dotenv(path, encoding="utf-8")

    return new_dir is not None  # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
    """Show extra startup messages the first time the server is run,
    ignoring the reloader.

    :param env: environment name printed in the banner.
    :param debug: debug flag; ``None`` omits the debug line entirely.
    :param app_import_path: import path shown in the banner, if known.
    :param eager_loading: when falsy, the app is marked as lazily loaded.
    """
    # The reloader child process sets this var; skip the banner there.
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        return

    if app_import_path is not None:
        message = f" * Serving Flask app {app_import_path!r}"

        if not eager_loading:
            message += " (lazy loading)"

        click.echo(message)

    click.echo(f" * Environment: {env}")

    if env == "production":
        # Warn loudly against using the development server in production.
        click.secho(
            " WARNING: This is a development server. Do not use it in"
            " a production deployment.",
            fg="red",
        )
        click.secho(" Use a production WSGI server instead.", dim=True)

    if debug is not None:
        click.echo(f" * Debug mode: {'on' if debug else 'off'}")
class CertParamType(click.ParamType):
    """Click option type for the ``--cert`` option. Allows either an
    existing file, the string ``'adhoc'``, or an import for a
    :class:`~ssl.SSLContext` object.
    """

    name = "path"

    def __init__(self):
        # Reused to validate the "existing file" form of --cert.
        self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)

    def convert(self, value, param, ctx):
        """Convert ``value`` to a file path, ``'adhoc'``, or an
        ``SSLContext``; raise ``click.BadParameter`` otherwise."""
        try:
            import ssl
        except ImportError:
            raise click.BadParameter(
                'Using "--cert" requires Python to be compiled with SSL support.',
                ctx,
                param,
            ) from None

        try:
            return self.path_type(value, param, ctx)
        except click.BadParameter:
            # Not an existing file — fall through to the special forms.
            value = click.STRING(value, param, ctx).lower()

            if value == "adhoc":
                try:
                    import cryptography  # noqa: F401
                except ImportError:
                    raise click.BadParameter(
                        "Using ad-hoc certificates requires the cryptography library.",
                        ctx,
                        param,
                    ) from None

                return value

            # Try importing the value as a dotted path to an SSLContext.
            obj = import_string(value, silent=True)

            if isinstance(obj, ssl.SSLContext):
                return obj

            # Re-raise the original "not a file" BadParameter.
            raise
def _validate_key(ctx, param, value):
    """The ``--key`` option must be specified when ``--cert`` is a file.
    Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.

    :param ctx: click context; ``ctx.params["cert"]`` is read and may be
        rewritten to the ``(cert, key)`` tuple werkzeug expects.
    :param param: the ``--key`` option being validated.
    :param value: the ``--key`` path, or ``None`` when not given.
    :raises click.BadParameter: on any invalid cert/key combination.
    """
    cert = ctx.params.get("cert")
    is_adhoc = cert == "adhoc"

    # ssl may be missing from this Python build; then cert cannot be a
    # context object.
    try:
        import ssl
    except ImportError:
        is_context = False
    else:
        is_context = isinstance(cert, ssl.SSLContext)

    if value is not None:
        if is_adhoc:
            raise click.BadParameter(
                'When "--cert" is "adhoc", "--key" is not used.', ctx, param
            )

        if is_context:
            # Bug fix: the closing quote after --key was missing in the
            # original message ('"--key is not used.').
            raise click.BadParameter(
                'When "--cert" is an SSLContext object, "--key" is not used.',
                ctx,
                param,
            )

        if not cert:
            raise click.BadParameter('"--cert" must also be specified.', ctx, param)

        # Pair the cert and key files for werkzeug's ssl_context argument.
        ctx.params["cert"] = cert, value
    else:
        if cert and not (is_adhoc or is_context):
            raise click.BadParameter('Required when using "--cert".', ctx, param)

    return value
class SeparatedPathType(click.Path):
    """Click option type that accepts a list of values separated by the
    OS's path separator (``:``, ``;`` on Windows). Each value is
    validated as a :class:`click.Path` type.
    """

    def convert(self, value, param, ctx):
        # Validate each separated item individually with the base type.
        convert_one = super().convert
        return [
            convert_one(item, param, ctx)
            for item in self.split_envvar_value(value)
        ]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
    "--cert",
    type=CertParamType(),
    help="Specify a certificate file to use HTTPS.",
    is_eager=True,
)
@click.option(
    "--key",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    callback=_validate_key,
    expose_value=False,
    help="The key file to use when specifying a certificate.",
)
@click.option(
    "--reload/--no-reload",
    default=None,
    help="Enable or disable the reloader. By default the reloader "
    "is active if debug is enabled.",
)
@click.option(
    "--debugger/--no-debugger",
    default=None,
    help="Enable or disable the debugger. By default the debugger "
    "is active if debug is enabled.",
)
@click.option(
    "--eager-loading/--lazy-loading",
    default=None,
    help="Enable or disable eager loading. By default eager "
    "loading is enabled if the reloader is disabled.",
)
@click.option(
    "--with-threads/--without-threads",
    default=True,
    help="Enable or disable multithreading.",
)
@click.option(
    "--extra-files",
    default=None,
    type=SeparatedPathType(),
    help=(
        "Extra files that trigger a reload on change. Multiple paths"
        f" are separated by {os.path.pathsep!r}."
    ),
)
@click.option(
    "--exclude-patterns",
    default=None,
    type=SeparatedPathType(),
    help=(
        "Files matching these fnmatch patterns will not trigger a reload"
        " on change. Multiple patterns are separated by"
        f" {os.path.pathsep!r}."
    ),
)
@pass_script_info
def run_command(
    info,
    host,
    port,
    reload,
    debugger,
    eager_loading,
    with_threads,
    cert,
    extra_files,
    exclude_patterns,
):
    """Run a local development server.

    This server is for development purposes only. It does not provide
    the stability, security, or performance of production WSGI servers.

    The reloader and debugger are enabled by default if
    FLASK_ENV=development or FLASK_DEBUG=1.
    """
    debug = get_debug_flag()

    # Reloader and debugger both default to the debug flag when not
    # given explicitly.
    if reload is None:
        reload = debug

    if debugger is None:
        debugger = debug

    show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
    # Wrapper that imports the app lazily so import errors show up in
    # the browser (via the debugger) instead of killing the process.
    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    from werkzeug.serving import run_simple

    run_simple(
        host,
        port,
        app,
        use_reloader=reload,
        use_debugger=debugger,
        threaded=with_threads,
        ssl_context=cert,
        extra_files=extra_files,
        exclude_patterns=exclude_patterns,
    )
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
    """Run an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to its configuration.

    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    import code

    from .globals import _app_ctx_stack

    app = _app_ctx_stack.top.app
    banner = (
        f"Python {sys.version} on {sys.platform}\n"
        f"App: {app.import_name} [{app.env}]\n"
        f"Instance: {app.instance_path}"
    )
    ctx: dict = {}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get("PYTHONSTARTUP")

    if startup and os.path.isfile(startup):
        with open(startup) as f:
            eval(compile(f.read(), startup, "exec"), ctx)

    ctx.update(app.make_shell_context())

    # Site, customize, or startup script can set a hook to call when
    # entering interactive mode. The default one sets up readline with
    # tab and history completion.
    interactive_hook = getattr(sys, "__interactivehook__", None)

    if interactive_hook is not None:
        try:
            import readline
            from rlcompleter import Completer
        except ImportError:
            pass
        else:
            # rlcompleter uses __main__.__dict__ by default, which is
            # flask.__main__. Use the shell context instead.
            readline.set_completer(Completer(ctx).complete)

        interactive_hook()

    code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
    "--sort",
    "-s",
    type=click.Choice(("endpoint", "methods", "rule", "match")),
    default="endpoint",
    help=(
        'Method to sort routes by. "match" is the order that Flask will match '
        "routes when dispatching a request."
    ),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
    """Show all registered routes with endpoints and methods."""
    rules = list(current_app.url_map.iter_rules())

    if not rules:
        click.echo("No routes were registered.")
        return

    # HEAD/OPTIONS are hidden by default since they exist on most routes.
    ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))

    if sort in ("endpoint", "rule"):
        rules = sorted(rules, key=attrgetter(sort))
    elif sort == "methods":
        rules = sorted(rules, key=lambda rule: sorted(rule.methods))  # type: ignore
    # "match" keeps the url_map's own dispatch order.

    rule_methods = [
        ", ".join(sorted(rule.methods - ignored_methods))  # type: ignore
        for rule in rules
    ]

    headers = ("Endpoint", "Methods", "Rule")
    # Column widths: the widest cell per column, at least the header width.
    widths = (
        max(len(rule.endpoint) for rule in rules),
        max(len(methods) for methods in rule_methods),
        max(len(rule.rule) for rule in rules),
    )
    widths = [max(len(h), w) for h, w in zip(headers, widths)]
    row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)

    click.echo(row.format(*headers).strip())
    click.echo(row.format(*("-" * width for width in widths)))

    for rule, methods in zip(rules, rule_methods):
        click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
# The ``flask`` command-line entry point. The help text adapts the
# examples to the current platform's shell (export vs set, $ vs >).
cli = FlaskGroup(
    help="""\
A general utility script for Flask applications.

Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.

\b
  {prefix}{cmd} FLASK_APP=hello.py
  {prefix}{cmd} FLASK_ENV=development
  {prefix}flask run
""".format(
        cmd="export" if os.name == "posix" else "set",
        prefix="$ " if os.name == "posix" else "> ",
    )
)


def main() -> None:
    # Console-script entry point registered as ``flask``.
    cli.main()


if __name__ == "__main__":
    main()
|
threadlocal.py | import threading
# One threading.local instance shared by all threads; each thread sees
# its own independent ``student`` attribute.
local_school = threading.local()


def process_student():
    """Print the current thread's ``student`` value.

    Fixed: the original used a Python 2 ``print`` statement, which is a
    SyntaxError on Python 3; converted to the print() function. The
    printed text is unchanged.
    """
    print('Hello, %s (in %s)\n' % (local_school.student, threading.current_thread().name))
def process_thread(name):
    # Thread target: bind this thread's own copy of ``student`` on the
    # shared threading.local, then print it.
    local_school.student = name
    process_student()
# Start two threads; each prints its own student name, demonstrating
# that threading.local keeps the values independent per thread.
t1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
|
test_logging.py | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import select
import os, sys, string, struct, types, cPickle, cStringIO
import socket, threading, time
import logging, logging.handlers, logging.config
# Format string for test-section banners (test name, expected tag).
BANNER = "-- %-10s %-6s ---------------------------------------------------\n"
# Sentinel log message; the socket receiver aborts when it sees it.
FINISH_UP = "Finish up, it's closing time. Messages should bear numbers 0 through 24."

#----------------------------------------------------------------------------
# Log receiver
#----------------------------------------------------------------------------

# NOTE(review): defined here but not referenced in this chunk — confirm
# its use (likely a wait timeout for the receiver) elsewhere in the file.
TIMEOUT = 10
from SocketServer import ThreadingTCPServer, StreamRequestHandler
class LogRecordStreamHandler(StreamRequestHandler):
    """
    Handler for a streaming logging request. It basically logs the record
    using whatever logging policy is configured locally.

    NOTE: this module is Python 2 code (cPickle, SocketServer imports).
    """

    def handle(self):
        """
        Handle multiple requests - each expected to be a 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally.
        """
        while 1:
            try:
                # Read the 4-byte big-endian length prefix.
                chunk = self.connection.recv(4)
                if len(chunk) < 4:
                    break
                slen = struct.unpack(">L", chunk)[0]
                # Read the pickled record, looping until slen bytes arrive.
                chunk = self.connection.recv(slen)
                while len(chunk) < slen:
                    chunk = chunk + self.connection.recv(slen - len(chunk))
                obj = self.unPickle(chunk)
                record = logging.makeLogRecord(obj)
                self.handleLogRecord(record)
            except:
                # NOTE(review): bare except that only re-raises is a
                # no-op wrapper — likely a leftover debugging hook.
                raise

    def unPickle(self, data):
        # Deserialize the LogRecord dict (Python 2 cPickle).
        return cPickle.loads(data)

    def handleLogRecord(self, record):
        # Route the record through a local logger namespaced by sender.
        logname = "logrecv.tcp." + record.name
        # If the end-of-messages sentinel is seen, tell the server to terminate
        if record.msg == FINISH_UP:
            self.server.abort = 1
        record.msg = record.msg + " (via " + logname + ")"
        logger = logging.getLogger(logname)
        logger.handle(record)
# Event the receiver sets when it's done, so the main thread can wait
# for all socket data to be processed before checking results.
socketDataProcessed = threading.Event()
class LogRecordSocketReceiver(ThreadingTCPServer):
    """
    A simple-minded TCP socket-based logging receiver suitable for test
    purposes.
    """

    # Allow quick restarts without waiting for TIME_WAIT to expire.
    allow_reuse_address = 1

    def __init__(self, host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        ThreadingTCPServer.__init__(self, (host, port), handler)
        # Set by a handler (on FINISH_UP) to stop serve_until_stopped().
        self.abort = 0
        # select() timeout in seconds, so the abort flag is polled.
        self.timeout = 1

    def serve_until_stopped(self):
        # Poll the listening socket until the abort flag is raised.
        abort = 0
        while not abort:
            rd, wr, ex = select.select([self.socket.fileno()],
                                       [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
            abort = self.abort
        # notify the main thread that we're about to exit
        socketDataProcessed.set()

    def process_request(self, request, client_address):
        # Handle each connection in its own thread.
        t = threading.Thread(target=self.finish_request,
                             args=(request, client_address))
        t.start()
def runTCP(tcpserver):
    # Thread target: run the receiver until its abort flag is set.
    tcpserver.serve_until_stopped()
#----------------------------------------------------------------------------
# Test 0
#----------------------------------------------------------------------------
# Monotonically increasing counter used to number the test messages.
msgcount = 0


def nextmessage():
    """Return the next numbered test message and advance the counter."""
    global msgcount
    message = "Message %d" % msgcount
    msgcount += 1
    return message
def test0():
    """Exercise logger-level thresholds across a small logger hierarchy.

    The message numbers are consumed in order by nextmessage(), so the
    sequence of calls must not be reordered. Ends by logging FINISH_UP,
    which tells the socket receiver to shut down.
    """
    # Loggers with explicit levels.
    ERR = logging.getLogger("ERR")
    ERR.setLevel(logging.ERROR)
    INF = logging.getLogger("INF")
    INF.setLevel(logging.INFO)
    INF_ERR = logging.getLogger("INF.ERR")
    INF_ERR.setLevel(logging.ERROR)
    DEB = logging.getLogger("DEB")
    DEB.setLevel(logging.DEBUG)

    # Loggers with no explicit level; they inherit from their ancestors.
    INF_UNDEF = logging.getLogger("INF.UNDEF")
    INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
    UNDEF = logging.getLogger("UNDEF")

    # Child created before its parent, to exercise placeholder fix-up.
    GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
    CHILD = logging.getLogger("INF.BADPARENT")

    # These should log
    ERR.log(logging.FATAL, nextmessage())
    ERR.error(nextmessage())
    INF.log(logging.FATAL, nextmessage())
    INF.error(nextmessage())
    INF.warn(nextmessage())
    INF.info(nextmessage())
    INF_UNDEF.log(logging.FATAL, nextmessage())
    INF_UNDEF.error(nextmessage())
    INF_UNDEF.warn(nextmessage())
    INF_UNDEF.info(nextmessage())
    INF_ERR.log(logging.FATAL, nextmessage())
    INF_ERR.error(nextmessage())
    INF_ERR_UNDEF.log(logging.FATAL, nextmessage())
    INF_ERR_UNDEF.error(nextmessage())
    DEB.log(logging.FATAL, nextmessage())
    DEB.error(nextmessage())
    DEB.warn(nextmessage())
    DEB.info(nextmessage())
    DEB.debug(nextmessage())
    UNDEF.log(logging.FATAL, nextmessage())
    UNDEF.error(nextmessage())
    UNDEF.warn(nextmessage())
    UNDEF.info(nextmessage())
    GRANDCHILD.log(logging.FATAL, nextmessage())
    CHILD.log(logging.FATAL, nextmessage())

    # These should not log (below the effective level)
    ERR.warn(nextmessage())
    ERR.info(nextmessage())
    ERR.debug(nextmessage())
    INF.debug(nextmessage())
    INF_UNDEF.debug(nextmessage())
    INF_ERR.warn(nextmessage())
    INF_ERR.info(nextmessage())
    INF_ERR.debug(nextmessage())
    INF_ERR_UNDEF.warn(nextmessage())
    INF_ERR_UNDEF.info(nextmessage())
    INF_ERR_UNDEF.debug(nextmessage())

    # Sentinel: tells the socket receiver to shut down.
    INF.info(FINISH_UP)
#----------------------------------------------------------------------------
# Test 1
#----------------------------------------------------------------------------
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
# Custom verbosity levels: larger value == less information logged.
SILENT = 10
TACITURN = 9
TERSE = 8
EFFUSIVE = 7
SOCIABLE = 6
VERBOSE = 5
TALKATIVE = 4
GARRULOUS = 3
CHATTERBOX = 2
BORING = 1
# All valid custom levels, inclusive of both ends.
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT : 'Silent',
    TACITURN : 'Taciturn',
    TERSE : 'Terse',
    EFFUSIVE : 'Effusive',
    SOCIABLE : 'Sociable',
    VERBOSE : 'Verbose',
    TALKATIVE : 'Talkative',
    GARRULOUS : 'Garrulous',
    CHATTERBOX : 'Chatterbox',
    BORING : 'Boring',
}
#
# Now, to demonstrate filtering: suppose for some perverse reason we only
# want to print out all except GARRULOUS messages. Let's create a filter for
# this purpose...
#
class SpecificLevelFilter(logging.Filter):
    """Filter that drops records logged at exactly *lvl* and passes all others."""

    def __init__(self, lvl):
        # FIX: initialise the base Filter so its name-matching state
        # (``name``/``nlen``) is set; the original skipped this, leaving the
        # base attributes unset on instances.
        logging.Filter.__init__(self)
        self.level = lvl

    def filter(self, record):
        """Return True (keep the record) unless its level equals self.level."""
        return self.level != record.levelno
class GarrulousFilter(SpecificLevelFilter):
    """A SpecificLevelFilter preconfigured to drop GARRULOUS-level records."""

    def __init__(self):
        SpecificLevelFilter.__init__(self, GARRULOUS)
#
# Now, let's demonstrate filtering at the logger. This time, use a filter
# which excludes SOCIABLE and TACITURN messages. Note that GARRULOUS events
# are still excluded.
#
class VerySpecificFilter(logging.Filter):
    """Filter that drops records at the SOCIABLE or TACITURN levels."""

    def filter(self, record):
        # Keep everything except the two excluded levels.
        return record.levelno != SOCIABLE and record.levelno != TACITURN
def message(s):
    """Write *s* plus a newline straight to stdout, bypassing logging."""
    line = "%s\n" % s
    sys.stdout.write(line)

SHOULD1 = "This should only be seen at the '%s' logging level (or lower)"
def test1():
    """Regression test: custom level names plus handler- and logger-level filters."""
    #
    # Now, tell the logging system to associate names with our levels.
    #
    for lvl in my_logging_levels.keys():
        logging.addLevelName(lvl, my_logging_levels[lvl])
    #
    # Now, define a test function which logs an event at each of our levels.
    #
    def doLog(log):
        for lvl in LEVEL_RANGE:
            log.log(lvl, SHOULD1, logging.getLevelName(lvl))
    log = logging.getLogger("")
    # Assumes the root logger already has at least one handler installed
    # (test_main_inner sets this up before calling us).
    hdlr = log.handlers[0]
    #
    # Set the logging level to each different value and call the utility
    # function to log events.
    # In the output, you should see that each time round the loop, the number of
    # logging events which are actually output decreases.
    #
    for lvl in LEVEL_RANGE:
        message("-- setting logging level to '%s' -----" %
                logging.getLevelName(lvl))
        log.setLevel(lvl)
        doLog(log)
    #
    # Now, we demonstrate level filtering at the handler level. Tell the
    # handler defined above to filter at level 'SOCIABLE', and repeat the
    # above loop. Compare the output from the two runs.
    #
    hdlr.setLevel(SOCIABLE)
    message("-- Filtering at handler level to SOCIABLE --")
    for lvl in LEVEL_RANGE:
        message("-- setting logging level to '%s' -----" %
                logging.getLevelName(lvl))
        log.setLevel(lvl)
        doLog(log)
    hdlr.setLevel(0) #turn off level filtering at the handler
    garr = GarrulousFilter()
    hdlr.addFilter(garr)
    message("-- Filtering using GARRULOUS filter --")
    for lvl in LEVEL_RANGE:
        message("-- setting logging level to '%s' -----" %
                logging.getLevelName(lvl))
        log.setLevel(lvl)
        doLog(log)
    spec = VerySpecificFilter()
    log.addFilter(spec)
    message("-- Filtering using specific filter for SOCIABLE, TACITURN --")
    for lvl in LEVEL_RANGE:
        message("-- setting logging level to '%s' -----" %
                logging.getLevelName(lvl))
        log.setLevel(lvl)
        doLog(log)
    log.removeFilter(spec)
    hdlr.removeFilter(garr)
    #Undo the one level which clashes...for regression tests
    logging.addLevelName(logging.DEBUG, "DEBUG")
#----------------------------------------------------------------------------
# Test 2
#----------------------------------------------------------------------------
MSG = "-- logging %d at INFO, messages should be seen every 10 events --"

def test2():
    """Regression test: MemoryHandler buffering (flush every 10 records or on WARNING).

    NOTE: this file targets Python 2 (uses xrange / logger.warn).
    """
    logger = logging.getLogger("")
    # Temporarily swap the root's stream handler for a MemoryHandler that
    # buffers 10 records (or flushes immediately at WARNING+) and forwards
    # to the original handler.
    sh = logger.handlers[0]
    sh.close()
    logger.removeHandler(sh)
    mh = logging.handlers.MemoryHandler(10,logging.WARNING, sh)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(mh)
    message("-- logging at DEBUG, nothing should be seen yet --")
    logger.debug("Debug message")
    message("-- logging at INFO, nothing should be seen yet --")
    logger.info("Info message")
    message("-- logging at WARNING, 3 messages should be seen --")
    logger.warn("Warn message")
    for i in xrange(102):
        message(MSG % i)
        logger.info("Info index = %d", i)
    mh.close()
    logger.removeHandler(mh)
    # Restore the original handler for the tests that follow.
    logger.addHandler(sh)
#----------------------------------------------------------------------------
# Test 3
#----------------------------------------------------------------------------
FILTER = "a.b"

def doLog3():
    """Emit one INFO record from each of ten differently-named loggers."""
    names = ("a", "a.b", "a.c", "a.b.c", "a.b.c.d",
             "a.bb.c", "b", "b.a", "c.a.b", "a.bb")
    for index, name in enumerate(names, 1):
        logging.getLogger(name).info("Info %d" % index)
def test3():
    """Regression test: name-based Filter('a.b') keeps only 'a.b' and its descendants."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    # Assumes a handler is already installed on the root logger.
    hand = root.handlers[0]
    message("Unfiltered...")
    doLog3()
    message("Filtered with '%s'..." % FILTER)
    filt = logging.Filter(FILTER)
    hand.addFilter(filt)
    doLog3()
    hand.removeFilter(filt)
#----------------------------------------------------------------------------
# Test Harness
#----------------------------------------------------------------------------
def banner(nm, typ):
    """Print a separator banner (BANNER % (name, type)) and flush stdout."""
    sys.stdout.write(BANNER % (nm, typ))
    sys.stdout.flush()
def test_main_inner():
    """Drive all four sub-tests, with a TCP log receiver running in a thread.

    Output written by the receiver side is buffered in memory (sockOut) and
    printed after all tests complete.  NOTE: Python 2 era code (cStringIO);
    LogRecordSocketReceiver, BANNER and socketDataProcessed are defined
    earlier in this file.
    """
    rootLogger = logging.getLogger("")
    rootLogger.setLevel(logging.DEBUG)
    hdlr = logging.StreamHandler(sys.stdout)
    fmt = logging.Formatter(logging.BASIC_FORMAT)
    hdlr.setFormatter(fmt)
    rootLogger.addHandler(hdlr)
    #Set up a handler such that all events are sent via a socket to the log
    #receiver (logrecv).
    #The handler will only be added to the rootLogger for some of the tests
    hdlr = logging.handlers.SocketHandler('localhost',
                                          logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    #Configure the logger for logrecv so events do not propagate beyond it.
    #The sockLogger output is buffered in memory until the end of the test,
    #and printed at the end.
    sockOut = cStringIO.StringIO()
    sockLogger = logging.getLogger("logrecv")
    sockLogger.setLevel(logging.DEBUG)
    sockhdlr = logging.StreamHandler(sockOut)
    sockhdlr.setFormatter(logging.Formatter(
        "%(name)s -> %(levelname)s: %(message)s"))
    sockLogger.addHandler(sockhdlr)
    sockLogger.propagate = 0
    #Set up servers
    threads = []
    tcpserver = LogRecordSocketReceiver()
    #sys.stdout.write("About to start TCP server...\n")
    threads.append(threading.Thread(target=runTCP, args=(tcpserver,)))
    for thread in threads:
        thread.start()
    try:
        # Only test0 runs with the socket handler attached.
        banner("log_test0", "begin")
        rootLogger.addHandler(hdlr)
        test0()
        hdlr.close()
        rootLogger.removeHandler(hdlr)
        banner("log_test0", "end")
        banner("log_test1", "begin")
        test1()
        banner("log_test1", "end")
        banner("log_test2", "begin")
        test2()
        banner("log_test2", "end")
        banner("log_test3", "begin")
        test3()
        banner("log_test3", "end")
    finally:
        #wait for TCP receiver to terminate
        socketDataProcessed.wait()
        for thread in threads:
            thread.join()
        banner("logrecv output", "begin")
        sys.stdout.write(sockOut.getvalue())
        sockOut.close()
        banner("logrecv output", "end")
        sys.stdout.flush()
def test_main():
    """Run the logging tests under the platform default locale, restoring it afterwards."""
    import locale

    saved = None
    try:
        saved = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, '')
    except (ValueError, locale.Error):
        # Some boxes (minimal Solaris, older Mac OS X) support only "C".
        saved = None
    try:
        test_main_inner()
    finally:
        if saved is not None:
            locale.setlocale(locale.LC_ALL, saved)
# Allow running this regression test directly from the command line.
if __name__ == "__main__":
    sys.stdout.write("test_logging\n")
    test_main()
|
spoof.py | import time
import threading
from scapy.all import ARP, send # pylint: disable=no-name-in-module
from .host import Host
from evillimiter.common.globals import BROADCAST
class ARPSpoofer(object):
    """Periodically sends spoofed ARP replies to poison the caches of the
    targeted hosts and the gateway, diverting their traffic through us."""

    def __init__(self, interface, gateway_ip, gateway_mac):
        self.interface = interface
        self.gateway_ip = gateway_ip
        self.gateway_mac = gateway_mac
        # interval in s spoofed ARP packets are sent to targets
        self.interval = 2
        self._hosts = set()
        self._hosts_lock = threading.Lock()
        self._running = False

    def add(self, host):
        """Add *host* to the spoof set; it is poisoned on the next cycle."""
        # Use the lock as a context manager so it is released even if
        # set.add were ever to raise (the manual acquire/release did not).
        with self._hosts_lock:
            self._hosts.add(host)
        host.spoofed = True

    def remove(self, host):
        """Stop spoofing *host* and restore the correct ARP mappings."""
        with self._hosts_lock:
            self._hosts.discard(host)
        self._restore(host)
        host.spoofed = False

    def start(self):
        """Launch the background spoofing loop on its own thread."""
        thread = threading.Thread(target=self._spoof, args=[])
        self._running = True
        thread.start()

    def stop(self):
        """Ask the spoofing loop to exit after its current cycle."""
        self._running = False

    def _spoof(self):
        """Main loop: re-poison every registered host each interval."""
        while self._running:
            # Copy under the lock so packet I/O happens without holding it.
            with self._hosts_lock:
                hosts = self._hosts.copy()
            for host in hosts:
                if not self._running:
                    return
                self._send_spoofed_packets(host)
            time.sleep(self.interval)

    def _send_spoofed_packets(self, host):
        """Tell the host we are the gateway, and the gateway we are the host."""
        # 2 packets = 1 gateway packet, 1 host packet
        packets = [
            ARP(op=2, psrc=host.ip, pdst=self.gateway_ip, hwdst=self.gateway_mac),
            ARP(op=2, psrc=self.gateway_ip, pdst=host.ip, hwdst=host.mac)
        ]
        # Plain loop instead of a side-effect list comprehension.
        for packet in packets:
            send(packet, verbose=0, iface=self.interface)

    def _restore(self, host):
        """
        Remaps host and gateway to their actual addresses
        """
        # 2 packets = 1 gateway packet, 1 host packet
        packets = [
            ARP(op=2, psrc=host.ip, hwsrc=host.mac, pdst=self.gateway_ip, hwdst=BROADCAST),
            ARP(op=2, psrc=self.gateway_ip, hwsrc=self.gateway_mac, pdst=host.ip, hwdst=BROADCAST)
        ]
        for packet in packets:
            send(packet, verbose=0, iface=self.interface, count=3)
armFreqServer.py | from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import subprocess
from time import sleep
import smbus
import time
# for RPI version 1, use "bus = smbus.SMBus(0)"
bus = smbus.SMBus(1)  # I2C bus 1 (newer Pi models; see note above)
def acceptConnections():
    """Accept TCP clients forever; each client is served on its own thread."""
    while True:
        client, address = SERVER.accept()
        print("%s:%s has connected." % address)
        Thread(target=handle_client, args=(client, )).start()
def StringToBytes(val):
    """Return the ordinal value of each character of *val* as a list of ints."""
    # Comprehension replaces the manual append loop (same result, idiomatic).
    return [ord(ch) for ch in val]
def writeToBus(addr, deg, client):
    """Forward command *deg* to I2C device *addr* and reply to *client*.

    A command whose first character is '9' is treated as a read request:
    word register 0 is read back and sent to the client; any other command
    just gets an acknowledgement.
    """
    address = int(addr)  # addr arrives as a string over the socket protocol
    bus.write_i2c_block_data(address, 0x00, StringToBytes(deg))
    if deg[0] == '9':
        # FIX: the read must use the numeric address too; the original passed
        # the raw string, which smbus rejects.
        block = bus.read_word_data(address, 0)
        # NOTE(review): sending a raw int here; confirm the client expects
        # that rather than bytes/str.
        client.send(block)
    else:
        client.send("Successful")
def handle_client(client):
    """Receive 'addr,deg' commands from *client* until it disconnects."""
    while True:
        data = client.recv(4096)
        if not data:
            # FIX: a closed peer makes recv return an empty payload; the
            # original looped forever re-parsing it. Close and stop instead.
            client.close()
            break
        value = data.split(',')
        writeToBus(value[0], value[1], client)
clients = {}

HOST = ''
PORT = 9080
BUFSIZ = 4096
ADDR = (HOST, PORT)

SERVER = socket(AF_INET, SOCK_STREAM)

# Keep retrying the bind; if the port is held by a stale process, kill it.
while True:
    try:
        SERVER.bind(ADDR)
        break
    except OSError:
        # FIX: the original killed listeners on port 9090, but this server
        # binds PORT (9080) -- target the holder of OUR port instead.
        subprocess.call('sudo lsof -t -i tcp:%d | xargs kill -9' % PORT, shell=True)
if __name__ == "__main__":
    SERVER.listen(5)  # backlog of up to 5 pending connections
    print("Waiting for connection...")
    # Run the accept loop on a worker thread and block until it ends.
    ACCEPT_THREAD = Thread(target=acceptConnections)
    ACCEPT_THREAD.start()
    ACCEPT_THREAD.join()
    SERVER.close()
|
timed_subprocess.py | # -*- coding: utf-8 -*-
"""
For running command line executables with a timeout
"""
from __future__ import absolute_import, print_function, unicode_literals

import shlex
import subprocess
import threading

import salt.exceptions
import salt.utils.data
import salt.utils.stringutils
from salt.ext import six
class TimedProc(object):
    """
    Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs
    """

    def __init__(self, args, **kwargs):
        # bg=True means "fire and forget": no waiting, no pipes.
        self.wait = not kwargs.pop("bg", False)
        self.stdin = kwargs.pop("stdin", None)
        self.with_communicate = kwargs.pop("with_communicate", self.wait)
        self.timeout = kwargs.pop("timeout", None)
        self.stdin_raw_newlines = kwargs.pop("stdin_raw_newlines", False)

        # If you're not willing to wait for the process
        # you can't define any stdin, stdout or stderr
        if not self.wait:
            self.stdin = kwargs["stdin"] = None
            self.with_communicate = False
        elif self.stdin is not None:
            if not self.stdin_raw_newlines:
                # Translate a newline submitted as '\n' on the CLI to an actual
                # newline character.
                self.stdin = salt.utils.stringutils.to_bytes(
                    self.stdin.replace("\\n", "\n")
                )
            kwargs["stdin"] = subprocess.PIPE

        if not self.with_communicate:
            self.stdout = kwargs["stdout"] = None
            self.stderr = kwargs["stderr"] = None

        if self.timeout and not isinstance(self.timeout, (int, float)):
            raise salt.exceptions.TimedProcTimeoutError(
                "Error: timeout {0} must be a number".format(self.timeout)
            )
        if kwargs.get("shell", False):
            args = salt.utils.data.decode(args, to_str=True)

        try:
            self.process = subprocess.Popen(args, **kwargs)
        except (AttributeError, TypeError):
            # Popen choked on the args; normalise them and retry once.
            if not kwargs.get("shell", False):
                if not isinstance(args, (list, tuple)):
                    try:
                        args = shlex.split(args)
                    except AttributeError:
                        args = shlex.split(six.text_type(args))
                str_args = []
                for arg in args:
                    if not isinstance(arg, six.string_types):
                        str_args.append(six.text_type(arg))
                    else:
                        str_args.append(arg)
                args = str_args
            else:
                if not isinstance(args, (list, tuple, six.string_types)):
                    # Handle corner case where someone does a 'cmd.run 3'
                    args = six.text_type(args)
            # Ensure that environment variables are strings
            for key, val in six.iteritems(kwargs.get("env", {})):
                if not isinstance(val, six.string_types):
                    kwargs["env"][key] = six.text_type(val)
                if not isinstance(key, six.string_types):
                    kwargs["env"][six.text_type(key)] = kwargs["env"].pop(key)
            if six.PY2 and "env" in kwargs:
                # Ensure no unicode in custom env dict, as it can cause
                # problems with subprocess.
                kwargs["env"] = salt.utils.data.encode_dict(kwargs["env"])
            args = salt.utils.data.decode(args)
            self.process = subprocess.Popen(args, **kwargs)
        self.command = args

    def run(self):
        """
        wait for subprocess to terminate and return subprocess' return code.
        If timeout is reached, throw TimedProcTimeoutError
        """

        def receive():
            if self.with_communicate:
                self.stdout, self.stderr = self.process.communicate(input=self.stdin)
            elif self.wait:
                self.process.wait()

        if not self.timeout:
            receive()
        else:
            rt = threading.Thread(target=receive)
            rt.start()
            rt.join(self.timeout)
            # FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
            # has been available since Python 2.6, so this stays compatible.
            if rt.is_alive():
                # Subprocess cleanup (best effort)
                self.process.kill()

                def terminate():
                    if rt.is_alive():
                        self.process.terminate()

                threading.Timer(10, terminate).start()
                raise salt.exceptions.TimedProcTimeoutError(
                    "{0} : Timed out after {1} seconds".format(
                        self.command, six.text_type(self.timeout),
                    )
                )
        return self.process.returncode
|
docker_ctl.py | from __future__ import annotations
import os
import threading
import time
from collections import defaultdict
from typing import List, Tuple, Dict
import uuid
from assemblyline.odm.models.service import DependencyConfig, DockerConfig
from .interface import ControllerInterface, ServiceControlError
# Where to find the update directory inside this container.
# NOTE(review): the comment above appears to refer to UPDATE_PATH (set in
# _start); the list below is the set of proxy variables passed through from
# this process into launched service containers.
INHERITED_VARIABLES = ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY', 'http_proxy', 'https_proxy', 'no_proxy']
# Every this many seconds, check that the services can actually reach the service server.
NETWORK_REFRESH_INTERVAL = 60 * 3
# Label used to detect when a dependency container's spec has changed.
CHANGE_KEY_NAME = 'al_change_key'
class DockerController(ControllerInterface):
    """A controller for *non* swarm mode docker."""

    def __init__(self, logger, prefix='', labels: dict[str, str] = None, cpu_overallocation=1, memory_overallocation=1, log_level="INFO"):
        """
        :param logger: A logger to report status and debug information.
        :param prefix: A prefix used to distinguish containers launched by this controller.
        :param labels: Labels applied to every container this controller launches.
        :param cpu_overallocation: A multiplier on CPU usage. (2 means act like there are twice as many CPU present)
        :param memory_overallocation: A multiplier on memory usage. (2 means act like there is twice as much memory)
        :param log_level: LOG_LEVEL value injected into launched containers.
        """
        # Connect to the host docker port
        import docker
        self.client = docker.from_env()
        self.log = logger
        self.log_level = log_level
        self.global_mounts: List[Tuple[str, str]] = []
        self.core_mounts: List[Tuple[str, str]] = []
        self._prefix: str = prefix
        self._labels: dict[str, str] = labels or {}
        self.prune_lock = threading.Lock()
        self._service_limited_env: dict[str, dict[str, str]] = defaultdict(dict)
        # Reuse the 'external' network if present, otherwise create it.
        for network in self.client.networks.list(names=['external']):
            self.external_network = network
            break
        else:
            self.external_network = self.client.networks.create(name='external', internal=False)
        self.networks = {}
        # CPU and memory reserved for the host
        self._reserved_cpu = 0.3
        self._reserved_mem = 500
        self.cpu_overallocation = cpu_overallocation
        self.memory_overallocation = memory_overallocation
        self._profiles = {}
        # Blocks until the service server container is found.
        self.service_server = self.find_service_server()
        # Prefetch some info that shouldn't change while we are running
        self._info = self.client.info()
        # We aren't checking for swarm nodes
        assert not self._info['Swarm']['NodeID']
        # Start a background thread to keep the service server connected
        threading.Thread(target=self._refresh_service_networks, daemon=True).start()
        self._flush_containers()  # Clear out any containers that are left over from a previous run
def find_service_server(self):
service_server_container = None
while service_server_container is None:
for container in self.client.containers.list():
if 'service_server' in container.name:
service_server_container = container
self.log.info(f'Found the service server at: {container.id} [{container.name}]')
break
if not service_server_container:
time.sleep(1)
return service_server_container
    def _refresh_service_networks(self):
        """Background loop: keep the service server attached to every service network.

        Blocks on the current service server container's exit; when it dies,
        finds its replacement and repeats. Runs for the life of the process
        (started as a daemon thread from __init__).
        """
        while True:
            # noinspection PyBroadException
            try:
                # Make sure the server is attached to all networks
                for service_name in self.networks:
                    network = self._get_network(service_name)
                    if self.service_server.name not in {c.name for c in network.containers}:
                        self.networks[service_name].connect(self.service_server, aliases=['service-server'])
                # As long as the current service server is still running, just block on its exit code in this thread
                self.service_server.wait()
                # If it does return, find the new service server
                self.service_server = self.find_service_server()
            except Exception:
                self.log.exception("An error occurred while watching the service server.")
    def stop(self):
        """Shut down: kill and prune every container this controller launched."""
        self._flush_containers()
    def _flush_containers(self):
        """Kill all containers carrying this controller's labels, then prune
        stopped containers and dangling volumes."""
        with self.prune_lock:
            from docker.errors import APIError
            labels = [f'{name}={value}' for name, value in self._labels.items()]
            if labels:
                for container in self.client.containers.list(filters={'label': labels}, ignore_removed=True):
                    try:
                        container.kill()
                    except APIError:
                        # Best effort: the container may already be gone.
                        pass
            self.client.containers.prune()
            self.client.volumes.prune()
    def add_profile(self, profile, scale=0):
        """Tell the controller about a service profile it needs to manage."""
        # NOTE(review): *scale* is accepted for interface compatibility but is
        # not used in this implementation.
        self._profiles[profile.name] = profile
        self._pull_image(profile)
    def _start(self, service_name):
        """Launch a docker container in a manner suitable for Assemblyline."""
        container_name = self._name_container(service_name)
        prof = self._profiles[service_name]
        cfg = prof.container_config
        # Set the list of labels
        labels = dict(self._labels)
        labels.update({'component': service_name})
        # Prepare the volumes and folders (all global mounts are read-only)
        volumes = {row[0]: {'bind': row[1], 'mode': 'ro'} for row in self.global_mounts}
        # Define environment variables: service config, update path, inherited
        # proxy settings, log level, then per-service dependency variables.
        env = [f'{_e.name}={_e.value}' for _e in cfg.environment]
        env += ['UPDATE_PATH=/mount/updates/']
        env += [f'{name}={os.environ[name]}' for name in INHERITED_VARIABLES if name in os.environ]
        env += [f'LOG_LEVEL={self.log_level}']
        env += [f'{_n}={_v}' for _n, _v in self._service_limited_env[service_name].items()]
        container = self.client.containers.run(
            image=cfg.image,
            name=container_name,
            cpu_period=100000,
            cpu_quota=int(100000*cfg.cpu_cores),
            mem_limit=f'{cfg.ram_mb}m',
            labels=labels,
            # 'always' makes docker itself keep the service up; get_target
            # relies on this to count desired instances.
            restart_policy={'Name': 'always'},
            command=cfg.command,
            volumes=volumes,
            network=self._get_network(service_name).name,
            environment=env,
            detach=True,
        )
        if cfg.allow_internet_access:
            self.external_network.connect(container)
    def _start_container(self, service_name, name, labels, volumes, cfg: DockerConfig, network, hostname, core_container=False):
        """Launch a docker container (used for stateful dependency containers)."""
        # Take the port strings and convert them to a dictionary
        ports = {}
        for port_string in cfg.ports:
            # It might just be a port number, try that
            try:
                port_number = int(port_string)
                ports[port_number] = port_number
                continue
            except ValueError:
                pass

            # Then it might be "number:number"
            if ':' in port_string:
                a, b = port_string.split(':')
                ports[int(a)] = int(b)
                continue

            self.log.warning(f"Not sure how to parse port string {port_string} for container {name} not using it...")

        # Put together the environment variables
        env = []
        if core_container:
            # Core containers inherit datastore/filestore/UI connection info.
            env += [f'{_n}={_v}' for _n, _v in os.environ.items()
                    if any(term in _n for term in ['ELASTIC', 'FILESTORE', 'UI_SERVER'])]
        env += [f'{_e.name}={_e.value}' for _e in cfg.environment]
        env += [f'{name}={os.environ[name]}' for name in INHERITED_VARIABLES if name in os.environ]
        env += [f'LOG_LEVEL={self.log_level}', f'AL_SERVICE_NAME={service_name}']

        container = self.client.containers.run(
            image=cfg.image,
            name=name,
            cpu_period=100000,
            cpu_quota=int(100000*cfg.cpu_cores),
            mem_limit=f'{cfg.ram_mb}m',
            mem_reservation=f'{min(cfg.ram_mb_min, cfg.ram_mb)}m',
            labels=labels,
            restart_policy={'Name': 'always'},
            command=cfg.command,
            volumes=volumes,
            network=network,
            environment=env,
            detach=True,
            # NOTE(review): the ports dict is computed above but publishing is
            # deliberately disabled here -- confirm before re-enabling.
            # ports=ports,
        )
        if cfg.allow_internet_access:
            self.external_network.connect(container, aliases=[hostname])
def _name_container(self, service_name):
"""Find an unused name for a container.
Container names must be unique, but we want our names to be predictable and informative.
Cycle through the pattern we want until we find the lowest free numerical suffix.
"""
# Load all container names on the system now
used_names = []
for container in self.client.containers.list(all=True, ignore_removed=True):
used_names.append(container.name)
# Try names until one works
used_names = set(used_names)
index = 0
while True:
name = f'{service_name}_{index}'
if self._prefix:
name = self._prefix + '_' + name
if name not in used_names:
return name
index += 1
    def cpu_info(self):
        """Try to estimate how much CPU the docker host has unreserved.

        Returns (free_cpu, total_cpu). Total applies the overallocation
        multiplier minus the host reserve; each container's quota/period
        ratio is subtracted from the free amount.
        NOTE: There is probably a better way to do this.
        """
        total_cpu = cpu = self._info['NCPU'] * self.cpu_overallocation - self._reserved_cpu
        for container in self.client.containers.list(ignore_removed=True):
            if container.attrs['HostConfig']['CpuPeriod']:
                cpu -= container.attrs['HostConfig']['CpuQuota']/container.attrs['HostConfig']['CpuPeriod']
        self.log.debug(f'Total CPU available {cpu}/{self._info["NCPU"]}')
        return cpu, total_cpu
    def memory_info(self):
        """Try to estimate how much RAM the docker host has unreserved.

        Returns (free_mb, total_mb). Total applies the overallocation
        multiplier minus the host reserve; each container's memory limit
        is subtracted from the free amount.
        NOTE: There is probably a better way to do this.
        """
        mega = 2**20
        total_mem = mem = self._info['MemTotal']/mega * self.memory_overallocation - self._reserved_mem
        for container in self.client.containers.list(ignore_removed=True):
            mem -= container.attrs['HostConfig']['Memory']/mega
        self.log.debug(f'Total Memory available {mem}/{self._info["MemTotal"]/mega}')
        return mem, total_mem
def get_target(self, service_name: str) -> int:
"""Get how many instances of a service we expect to be running.
Since we start our containers with 'restart always' we just need to count how many
docker is currently trying to keep running.
"""
running = 0
filters = {'label': f'component={service_name}'}
for container in self.client.containers.list(filters=filters, ignore_removed=True):
if container.status in {'restarting', 'running'}:
running += 1
elif container.status in {'created', 'removing', 'paused', 'exited', 'dead'}:
pass
else:
self.log.warning(f"Unknown docker status string: {container.status}")
return running
    def get_targets(self) -> Dict[str, int]:
        """Return the expected instance count for every managed service profile."""
        return {name: self.get_target(name) for name in self._profiles.keys()}
    def set_target(self, service_name, target):
        """Change how many instances of a service docker is trying to keep up.

        This is managed by killing extra containers at random, or launching new ones.
        Raises ServiceControlError on any failure.
        """
        try:
            running = self.get_target(service_name)
            self.log.debug(f"New target for {service_name}: {running} -> {target}")
            delta = target - running

            if delta < 0:
                # Kill off -delta instances of the service
                filters = {'label': f'component={service_name}'}
                running = [container for container in self.client.containers.list(filters=filters, ignore_removed=True)
                           if container.status in {'restarting', 'running'}]
                # Keep the first `target` containers; kill the surplus tail.
                running = running[0:-delta]
                for container in running:
                    container.kill()

            if delta > 0:
                # Start delta instances of the service
                for _ in range(delta):
                    self._start(service_name)

            # Every time we change our container allocation do a little clean up to keep things fresh
            with self.prune_lock:
                self.client.containers.prune()
                self.client.volumes.prune()
        except Exception as error:
            raise ServiceControlError(str(error), service_name)
    def stop_container(self, service_name, container_id):
        """Kill one running container of *service_name*, looked up by full id,
        id prefix, or name. Silently does nothing if no safe match is found."""
        import docker.errors
        container = None
        try:
            # First try the given container id in case its actually correct
            container = self.client.containers.get(container_id)
        except docker.errors.NotFound:
            # Fall back to scanning this service's containers for a partial match.
            filters = {'label': f'component={service_name}'}
            for possible_container in self.client.containers.list(filters=filters, ignore_removed=True):
                if possible_container.id.startswith(container_id) or possible_container.name == container_id:
                    container = possible_container
                    break
        # Only kill if the match really belongs to this service and is running.
        if container and container.labels.get('component') == service_name and container.status == 'running':
            container.kill()
    def restart(self, service):
        """Pull the service's (possibly updated) image, then kill its containers;
        docker's 'always' restart policy brings them back on the new image."""
        self._pull_image(service)
        filters = {'label': f'component={service.name}'}
        for container in self.client.containers.list(filters=filters, ignore_removed=True):
            container.kill()
def get_running_container_names(self):
out = []
for container in self.client.containers.list(ignore_removed=True):
out.append(container.id)
out.append(container.id[:12])
out.append(container.name)
return out
def start_stateful_container(self, service_name: str, container_name: str, spec: DependencyConfig,
labels: dict[str, str], change_key: str):
import docker.errors
deployment_name = f'{service_name}-dep-{container_name}'
change_check = change_key + service_name + container_name + str(spec)
try:
old_container = self.client.containers.get(deployment_name)
instance_key = old_container.attrs["Config"]["Env"]['AL_INSTANCE_KEY']
if old_container.labels.get(CHANGE_KEY_NAME) == change_check and old_container.status == 'running':
self._service_limited_env[service_name][f'{container_name}_host'] = deployment_name
self._service_limited_env[service_name][f'{container_name}_key'] = instance_key
if spec.container.ports:
self._service_limited_env[service_name][f'{container_name}_port'] = spec.container.ports[0]
return
else:
old_container.kill()
except docker.errors.NotFound:
instance_key = uuid.uuid4().hex
volumes = {_n: {'bind': _v.mount_path, 'mode': 'rw'} for _n, _v in spec.volumes.items()}
if spec.run_as_core:
volumes.update({row[0]: {'bind': row[1], 'mode': 'ro'} for row in self.core_mounts})
all_labels = dict(self._labels)
all_labels.update({'component': service_name, CHANGE_KEY_NAME: change_check})
all_labels.update(labels)
spec.container.environment.append({'name': 'AL_INSTANCE_KEY', 'value': instance_key})
self._service_limited_env[service_name][f'{container_name}_host'] = deployment_name
self._service_limited_env[service_name][f'{container_name}_key'] = instance_key
if spec.container.ports:
self._service_limited_env[service_name][f'{container_name}_port'] = spec.container.ports[0]
self._start_container(service_name=service_name, name=deployment_name, labels=all_labels, volumes=volumes, hostname=container_name,
cfg=spec.container, core_container=spec.run_as_core, network=self._get_network(service_name).name)
def stop_containers(self, labels):
label_strings = [f'{name}={value}' for name, value in labels.items()]
for container in self.client.containers.list(filters={'label': label_strings}, ignore_removed=True):
container.stop()
    def _get_network(self, service_name):
        """Get a reference to the network a service uses.

        Since we need a reference to networks in docker we will do this setup
        dynamically rather than in prepare_network. The service server is
        attached to the network (as alias 'service-server') if it isn't already.
        """
        from docker.errors import NotFound
        # Create network for service
        network_name = f'service-net-{service_name}'
        try:
            self.networks[service_name] = network = self.client.networks.get(network_name)
            network.reload()
        except NotFound:
            # Internal network: no direct internet access from the service.
            network = self.networks[service_name] = self.client.networks.create(name=network_name, internal=True)

        if self.service_server.name not in {c.name for c in network.containers}:
            self.networks[service_name].connect(self.service_server, aliases=['service-server'])

        return network
    def prepare_network(self, service_name, internet):
        """Ensure the service's network exists. The *internet* flag is unused
        here; internet access is granted per container at launch time instead."""
        self._get_network(service_name)
def _pull_image(self, service):
"""Pull the image before we try to use it locally.
This lets us override the auth_config on a per image basis.
"""
from docker.errors import ImageNotFound
# Split the image string into "[registry/]image_name" and "tag"
repository, _, tag = service.container_config.image.rpartition(':')
if '/' in tag:
# if there is a '/' in the tag it is invalid. We have split ':' on a registry
# port not a tag, there can't be a tag in this image string. Put the registry
# string back together, and use a default tag
repository += ':' + tag
tag = 'latest'
# Add auth info if we have it
auth_config = None
if service.container_config.registry_username or service.container_config.registry_password:
auth_config = {
'username': service.container_config.registry_username,
'password': service.container_config.registry_password
}
try:
self.client.images.pull(repository, tag, auth_config=auth_config)
except ImageNotFound:
self.log.error(f"Couldn't pull image {repository}:{tag} check authentication settings. "
"Will try to use local copy.")
try:
self.client.images.get(repository + ':' + tag)
except ImageNotFound:
self.log.error(f"Couldn't find local image {repository}:{tag}")
|
Loops.py | import os
import os.path
import sys
import threading
import asyncio
import subprocess
import shutil
from multiprocessing.pool import ThreadPool
from osbot_utils.testing.Duration import Duration
from osbot_utils.utils.Files import create_folder, folder_exists, folder_delete_all
from cdr_plugin_folder_to_folder.common_settings.Config import Config, API_VERSION
from cdr_plugin_folder_to_folder.processing.Events_Log import Events_Log
from cdr_plugin_folder_to_folder.processing.Events_Log_Elastic import Events_Log_Elastic
from cdr_plugin_folder_to_folder.processing.File_Processing import File_Processing
from cdr_plugin_folder_to_folder.metadata.Metadata_Service import Metadata_Service
from cdr_plugin_folder_to_folder.pre_processing.Status import Status, FileStatus
from cdr_plugin_folder_to_folder.pre_processing.Hash_Json import Hash_Json
from cdr_plugin_folder_to_folder.processing.Report_Elastic import Report_Elastic
from cdr_plugin_folder_to_folder.storage.Storage import Storage
from elasticsearch import Elasticsearch
from datetime import datetime
from cdr_plugin_folder_to_folder.utils.Log_Duration import log_duration
from cdr_plugin_folder_to_folder.utils.Logging import log_error, log_info
from cdr_plugin_folder_to_folder.processing.Analysis_Elastic import Analysis_Elastic
class Loops(object):
    """Drives the folder-to-folder file processing loops."""

    # Class-wide flags shared by all instances.
    continue_processing = False  # set False to request a stop
    processing_started = False   # True while a loop is running
    # Presumably guards concurrent processing runs -- acquired elsewhere.
    lock = asyncio.Lock()

    def __init__(self):
        """Wire up the configuration, storage, status and reporting services."""
        self.use_es = False
        self.config = Config()
        self.status = Status()
        self.storage = Storage()
        self.hash_json = Hash_Json()
        self.events = Events_Log(self.config.hd2_status_location)
        self.events_elastic = Events_Log_Elastic()
        self.hash=None
        self.report_elastic = Report_Elastic()
        self.analysis_elastic = Analysis_Elastic()

        self.report_elastic.setup()
        self.analysis_elastic.setup()

        # Make sure the output folders exist before processing starts.
        create_folder(self.storage.hd2_processed())
        create_folder(self.storage.hd2_not_processed())
def IsProcessing(self):
return Loops.processing_started
def StopProcessing(self):
Loops.continue_processing = False
def HasBeenStopped(self):
return not Loops.continue_processing
def git_commit(self):
git_commit = 'Not available'
try:
git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode("utf-8").rstrip()
except Exception as e:
pass
return git_commit
def ProcessDirectoryWithEndpoint(self, itempath, file_hash, endpoint_index):
if not os.path.isdir(itempath):
return False
log_info(message=f"Starting ProcessDirectoryWithEndpoint on endpoint # {endpoint_index} for file {file_hash}")
meta_service = Metadata_Service()
original_file_path = meta_service.get_original_file_paths(itempath)
events = Events_Log(itempath)
endpoint = "http://" + self.config.endpoints['Endpoints'][endpoint_index]['IP'] + ":" + self.config.endpoints['Endpoints'][endpoint_index]['Port']
events.add_log("Processing with: " + endpoint)
meta_service.set_f2f_plugin_version(itempath, API_VERSION)
meta_service.set_f2f_plugin_git_commit(itempath, self.git_commit())
try:
file_processing = File_Processing(events, self.events_elastic, self.report_elastic, self.analysis_elastic, meta_service)
if not file_processing.processDirectory(endpoint, itempath):
events.add_log("CANNOT be processed")
return False
log_data = {
'file': original_file_path,
'status': FileStatus.COMPLETED,
'error': 'none',
'timestamp': datetime.now(),
}
log_info('ProcessDirectoryWithEndpoint', data=log_data)
meta_service.set_error(itempath, "none")
meta_service.set_status(itempath, FileStatus.COMPLETED)
self.hash_json.update_status(file_hash, FileStatus.COMPLETED)
events.add_log("Has been processed")
return True
except Exception as error:
log_data = {
'file': original_file_path,
'status': FileStatus.FAILED,
'error': str(error),
}
log_error(message='error in ProcessDirectoryWithEndpoint', data=log_data)
meta_service.set_error(itempath, str(error))
meta_service.set_status(itempath, FileStatus.FAILED)
self.hash_json.update_status(file_hash, FileStatus.FAILED)
events.add_log("ERROR:" + str(error))
return False
def ProcessDirectory(self, thread_data):
(itempath, file_hash, process_index) = thread_data
endpoint_index = process_index % self.config.endpoints_count
if not Loops.continue_processing:
return False
tik = datetime.now()
process_result = self.ProcessDirectoryWithEndpoint(itempath, file_hash, endpoint_index)
if process_result:
self.status.add_completed()
tok = datetime.now()
delta = tok - tik
meta_service = Metadata_Service()
meta_service.set_hd2_to_hd3_copy_time(itempath, delta.total_seconds())
else:
self.status.add_failed()
return process_result
# note: removing retries from this method (it should not be handled like this
#for idx in range(self.config.endpoints_count):
# if self.ProcessDirectoryWithEndpoint(itempath, file_hash, endpoint_index):
# return
# # The Endpoint failed to process the file
# # Retry it with the next one
# endpoint_index = (endpoint_index + 1) % self.config.endpoints_count
def updateHashJson(self):
self.hash_json.reset()
meta_service = Metadata_Service()
for hash_folder in os.listdir(self.storage.hd2_data()):
metadata_folder = self.storage.hd2_data(hash_folder)
if not os.path.isdir(metadata_folder):
continue
metadata = meta_service.get_from_file(metadata_folder)
file_name = metadata.get_file_name()
original_hash = metadata.get_original_hash()
status = metadata.get_rebuild_status()
if status != FileStatus.COMPLETED:
self.hash_json.add_file(original_hash, file_name)
self.hash_json.save()
self.status.set_processing_counters(len(self.hash_json.data()))
return self.hash_json.data()
def moveProcessedFiles(self):
json_list = self.hash_json.data()
for key in json_list:
source_path = self.storage.hd2_data(key)
if (FileStatus.COMPLETED == json_list[key]["file_status"]):
destination_path = self.storage.hd2_processed(key)
if folder_exists(destination_path):
folder_delete_all(destination_path)
shutil.move(source_path, destination_path)
if (FileStatus.FAILED == json_list[key]["file_status"]):
meta_service = Metadata_Service()
meta_service.get_from_file(source_path)
metadata = meta_service.metadata
if ("Engine response could not be decoded" == metadata.get_error()) and \
metadata.get_original_file_extension() in ['.xml', '.json']:
destination_path = self.storage.hd2_not_processed(key)
if folder_exists(destination_path):
folder_delete_all(destination_path)
shutil.move(source_path, destination_path)
def LoopHashDirectoriesInternal(self, thread_count, do_single):
if folder_exists(self.storage.hd2_data()) is False:
log_message = "ERROR: rootdir does not exist: " + self.storage.hd2_data()
log_error(log_message)
return False
if not isinstance(thread_count,int):
raise TypeError("thread_count must be a integer")
if not isinstance(do_single,bool):
raise TypeError("thread_count must be a integer")
log_message = f"LoopHashDirectoriesInternal started with {thread_count} threads"
self.events.add_log(log_message)
log_info(log_message)
json_list = self.updateHashJson()
log_message = f"LoopHashDirectoriesInternal started with {thread_count} threads"
self.events.add_log(log_message)
log_info(log_message)
threads = list()
process_index = 0
log_info(message=f'before Mapping thread_data for {len(json_list)} files')
thread_data = []
for key in json_list:
file_hash = key
itempath = self.storage.hd2_data(key)
if (FileStatus.COMPLETED == json_list[key]["file_status"]):
self.events.add_log(f"The file processing has been already completed")
continue
if not os.path.exists(itempath):
self.events.add_log(f"ERROR: Path \"{itempath}\" does not exist")
json_list[key]["file_status"] = FileStatus.FAILED
continue
process_index += 1
thread_data.append((itempath, file_hash, process_index,))
# # limit the number of parallel threads
#
# if process_index % int(thread_count) == 0: # todo: refactor this workflow to use multiprocess and queues
# # Clean up the threads
# for index, thread in enumerate(threads): # todo: since at the moment this will block allocating new threads until
# thread.join() # all have finishing execution
#
# process_index += 1
# log_info(message=f"in LoopHashDirectoriesInternal process_index={process_index} , thread #{process_index % int(thread_count) }")
# x = threading.Thread(target=self.ProcessDirectory, args=(itempath, file_hash, process_index,))
# threads.append(x)
# x.start()
#
# if do_single:
# break
#
# if not Loops.continue_processing:
# break
# for index, thread in enumerate(threads):
# thread.join()
log_info(message=f'after mapped thread_data, there are {len(thread_data)} mapped items')
#thread_data = thread_data[:500]
#log_info(message=f'to start with only processing {len(thread_data)} thread_data items')
pool = ThreadPool(thread_count)
results = pool.map(self.ProcessDirectory, thread_data)
pool.close()
pool.join()
self.moveProcessedFiles()
self.events.add_log("LoopHashDirectoriesInternal finished")
return True
async def LoopHashDirectoriesAsync(self, thread_count, do_single = False):
await Loops.lock.acquire()
try:
Loops.continue_processing = True
Loops.processing_started = True
self.status.set_started()
self.LoopHashDirectoriesInternal(thread_count, do_single)
finally:
Loops.processing_started = False
Loops.lock.release()
self.status.set_stopped()
self.hash_json.save()
@log_duration
def LoopHashDirectories(self, thread_count=None):
#Allow only a single loop to be run at a time
if self.IsProcessing():
log_error(message="ERROR: Attempt to start processing while processing is in progress")
return False
self.status.StartStatusThread()
thread_count = thread_count or self.config.thread_count
log_info(message="in LoopHashDirectories, about to start main loop")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.LoopHashDirectoriesAsync(thread_count))
log_info(message="in LoopHashDirectories, Loop completed")
self.status.StopStatusThread()
return True
@log_duration
def LoopHashDirectoriesSequential(self):
#Allow only a single loop to be run at a time
if self.IsProcessing():
log_error("ERROR: Attempt to start processing while processing is in progress")
return False
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.LoopHashDirectoriesAsync(1))
return True
@log_duration
def ProcessSingleFile(self):
if self.IsProcessing():
log_error("ERROR: Attempt to start processing while processing is in progress")
return False
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.LoopHashDirectoriesAsync(1, True))
return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.