from __future__ import print_function
import pandas as pd
import numpy as np
import os
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten, LocallyConnected1D
from tensorflow.keras.models import Sequential, model_from_json, model_from_yaml
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import CSVLogger, ReduceLROnPlateau
from sklearn.preprocessing import MaxAbsScaler
import nt3 as bmk
import candle
def initialize_parameters(default_model='nt3_default_model.txt'):
import os # ADD THIS LINE
# Build benchmark object
nt3Bmk = bmk.BenchmarkNT3(
bmk.file_path,
# default_model, # ORIGINAL LINE
os.getenv('CANDLE_DEFAULT_MODEL_FILE'), # NEW LINE
'keras',
prog='nt3_baseline',
desc='1D CNN to classify RNA sequence data in normal or tumor classes')
# Initialize parameters
gParameters = candle.finalize_parameters(nt3Bmk)
return gParameters
def load_data(train_path, test_path, gParameters):
print('Loading data...')
df_train = (pd.read_csv(train_path, header=None).values).astype('float32')
df_test = (pd.read_csv(test_path, header=None).values).astype('float32')
print('done')
print('df_train shape:', df_train.shape)
print('df_test shape:', df_test.shape)
seqlen = df_train.shape[1]
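# Layout assumed by this loader: column 0 of each row is the integer class
# label; columns 1..seqlen-1 are the RNA-seq expression features.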
df_y_train = df_train[:, 0].astype('int')
df_y_test = df_test[:, 0].astype('int')
# only training set has noise
Y_train = to_categorical(df_y_train, gParameters['classes'])
Y_test = to_categorical(df_y_test, gParameters['classes'])
df_x_train = df_train[:, 1:seqlen].astype(np.float32)
df_x_test = df_test[:, 1:seqlen].astype(np.float32)
X_train = df_x_train
X_test = df_x_test
scaler = MaxAbsScaler()
mat = np.concatenate((X_train, X_test), axis=0)
mat = scaler.fit_transform(mat)
X_train = mat[:X_train.shape[0], :]
X_test = mat[X_train.shape[0]:, :]
# TODO: Add better names for the noise booleans; make a feature for applying both RNA-seq and label noise together
# check if label noise is enabled
if gParameters['add_noise']:
# check if we want noise correlated with a feature
if gParameters['noise_correlated']:
Y_train, y_train_noise_gen = candle.label_flip_correlated(Y_train,
gParameters['label_noise'], X_train,
gParameters['feature_col'],
gParameters['feature_threshold'])
# else add uncorrelated noise
else:
Y_train, y_train_noise_gen = candle.label_flip(Y_train, gParameters['label_noise'])
# check if noise is on for RNA-seq data
elif gParameters['noise_gaussian']:
X_train = candle.add_gaussian_noise(X_train, 0, gParameters['std_dev'])
return X_train, Y_train, X_test, Y_test
def run(gParameters):
print('Params:', gParameters)
file_train = gParameters['train_data']
file_test = gParameters['test_data']
url = gParameters['data_url']
train_file = candle.get_file(file_train, url + file_train, cache_subdir='Pilot1')
test_file = candle.get_file(file_test, url + file_test, cache_subdir='Pilot1')
X_train, Y_train, X_test, Y_test = load_data(train_file, test_file, gParameters)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
x_train_len = X_train.shape[1]
# this reshaping is critical for the Conv1D to work
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
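# e.g. X_train goes from (num_samples, seqlen - 1) to (num_samples, seqlen - 1, 1),
# matching the (batch, steps, channels) input expected by Conv1D/LocallyConnected1D.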
# Have to add the K.clear_session() call below; otherwise, as ALW found on 2020-11-15, Supervisor jobs using canonically CANDLE-compliant model scripts die as soon as a particular task is used a second time:
# EXCEPTION:
# InvalidArgumentError() ...
# File "<string>", line 23, in <module>
# File "/gpfs/alpine/med106/world-shared/candle/2020-11-11/checkouts/Supervisor/workflows/common/python/model_runner.py", line 241, in run_model
# result, history = run(hyper_parameter_map, obj_return)
# File "/gpfs/alpine/med106/world-shared/candle/2020-11-11/checkouts/Supervisor/workflows/common/python/model_runner.py", line 169, in run
# history = pkg.run(params)
# File "/gpfs/alpine/med106/world-shared/candle/2020-11-11/checkouts/Benchmarks/Pilot1/NT3/nt3_candle_wrappers_baseline_keras2.py", line 211, in run
# callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/engine/training.py", line 1178, in fit
# validation_freq=validation_freq)
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/engine/training_arrays.py", line 204, in fit_loop
# outs = fit_function(ins_batch)
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2979, in __call__
# return self._call(inputs)
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2933, in _call
# session)
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2885, in _make_callable
# callable_fn = session._make_callable_from_options(callable_opts)
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/tensorflow_core/python/client/session.py", line 1505, in _make_callable_from_options
# return BaseSession._Callable(self, callable_options)
# File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/tensorflow_core/python/client/session.py", line 1460, in __init__
# session._session, options_ptr)
K.clear_session()
model = Sequential()
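# gParameters['conv'] is a flat list of (filters, kernel_size, stride) triples,
# e.g. [128, 20, 1, 128, 10, 1] for the reference case below, so step through it
# three entries at a time.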
layer_list = list(range(0, len(gParameters['conv']), 3))
for i in layer_list:
filters = gParameters['conv'][i]
filter_len = gParameters['conv'][i + 1]
stride = gParameters['conv'][i + 2]
print(int(i / 3), filters, filter_len, stride)
if gParameters['pool']:
pool_list = gParameters['pool']
if not isinstance(pool_list, (list, tuple)):
    pool_list = [pool_list]  # wrap a scalar pool size so it can be indexed per conv block
if filters <= 0 or filter_len <= 0 or stride <= 0:
break
if 'locally_connected' in gParameters:
model.add(LocallyConnected1D(filters, filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
else:
# input layer
if i == 0:
model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
else:
model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid'))
model.add(Activation(gParameters['activation']))
if gParameters['pool']:
model.add(MaxPooling1D(pool_size=pool_list[int(i / 3)]))
model.add(Flatten())
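# gParameters['dense'] is a list of hidden-layer widths (e.g. [200, 20] in the
# reference case below); falsy entries such as 0 are skipped.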
for layer in gParameters['dense']:
if layer:
model.add(Dense(layer))
model.add(Activation(gParameters['activation']))
if gParameters['dropout']:
model.add(Dropout(gParameters['dropout']))
model.add(Dense(gParameters['classes']))
model.add(Activation(gParameters['out_activation']))
# Reference case
# model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
# model.add(Activation('relu'))
# model.add(MaxPooling1D(pool_size=1))
# model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
# model.add(Activation('relu'))
# model.add(MaxPooling1D(pool_size=10))
# model.add(Flatten())
# model.add(Dense(200))
# model.add(Activation('relu'))
# model.add(Dropout(0.1))
# model.add(Dense(20))
# model.add(Activation('relu'))
# model.add(Dropout(0.1))
# model.add(Dense(CLASSES))
# model.add(Activation('softmax'))
kerasDefaults = candle.keras_default_config()
# Define optimizer
optimizer = candle.build_optimizer(gParameters['optimizer'],
gParameters['learning_rate'],
kerasDefaults)
model.summary()
model.compile(loss=gParameters['loss'],
optimizer=optimizer,
metrics=[gParameters['metrics']])
output_dir = gParameters['output_dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# calculate trainable and non-trainable params
gParameters.update(candle.compute_trainable_params(model))
# set up a bunch of callbacks to do work during model training
model_name = gParameters['model_name']
# path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
# checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
csv_logger = CSVLogger('{}/training.log'.format(output_dir))
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])
history = model.fit(X_train, Y_train,
batch_size=gParameters['batch_size'],
epochs=gParameters['epochs'],
verbose=1,
validation_data=(X_test, Y_test),
callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])
score = model.evaluate(X_test, Y_test, verbose=0)
if False:
print('Test score:', score[0])
print('Test accuracy:', score[1])
# serialize model to JSON
model_json = model.to_json()
with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
json_file.write(model_json)
# serialize model to YAML
model_yaml = model.to_yaml()
with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
print("Saved model to disk")
# load json and create model
json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model_json = model_from_json(loaded_model_json)
# load yaml and create model
yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model_yaml = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model_json.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
print("Loaded json model from disk")
# evaluate json loaded model on test data
loaded_model_json.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
print('json Test score:', score_json[0])
print('json Test accuracy:', score_json[1])
print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1] * 100))
# load weights into new model
loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
print("Loaded yaml model from disk")
# evaluate loaded model on test data
loaded_model_yaml.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)
print('yaml Test score:', score_yaml[0])
print('yaml Test accuracy:', score_yaml[1])
print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))
return history
def main():
gParameters = initialize_parameters()
run(gParameters)
if __name__ == '__main__':
main()
try:
K.clear_session()
except AttributeError: # theano does not have this function
pass
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Text, Union
from mypy_extensions import TypedDict
from django.utils.translation import ugettext as _
from django.conf import settings
from collections import deque
import os
import time
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
from zerver.models import UserProfile, Client
from zerver.decorator import cachify
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
import copy
requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
# This seems like the only working solution to ignore proxy in
# requests library.
requests_client.trust_env = False
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
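# connect_handler() below schedules the heartbeat at
# HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10) seconds, i.e. 45-55s per connection.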
class ClientDescriptor:
def __init__(self, user_profile_id, user_profile_email, realm_id, event_queue,
event_types, client_type_name, apply_markdown=True, client_gravatar=True,
all_public_streams=False, lifespan_secs=0, narrow=[]):
# type: (int, Text, int, 'EventQueue', Optional[Sequence[str]], Text, bool, bool, bool, int, Iterable[Sequence[Text]]) -> None
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.user_profile_email = user_profile_email
self.realm_id = realm_id
self.current_handler_id = None # type: Optional[int]
self.current_client_name = None # type: Optional[Text]
self.event_queue = event_queue
self.queue_timeout = lifespan_secs
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.client_gravatar = client_gravatar
self.all_public_streams = all_public_streams
self.client_type_name = client_type_name
self._timeout_handle = None # type: Any # TODO: should be return type of ioloop.call_later
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Clamp queue_timeout to between minimum and maximum timeouts
self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS,
min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))
def to_dict(self) -> Dict[str, Any]:
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
user_profile_email=self.user_profile_email,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
client_gravatar=self.client_gravatar,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type_name=self.client_type_name)
def __repr__(self) -> str:
return "ClientDescriptor<%s>" % (self.event_queue.id,)
@classmethod
def from_dict(cls, d: MutableMapping[str, Any]) -> 'ClientDescriptor':
if 'user_profile_email' not in d:
# Temporary migration for the addition of the new user_profile_email field
from zerver.models import get_user_profile_by_id
d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
if 'client_type' in d:
# Temporary migration for the rename of client_type to client_type_name
d['client_type_name'] = d['client_type']
if 'client_gravatar' not in d:
# Temporary migration for the addition of the client_gravatar field
d['client_gravatar'] = False
ret = cls(
d['user_profile_id'],
d['user_profile_email'],
d['realm_id'],
EventQueue.from_dict(d['event_queue']),
d['event_types'],
d['client_type_name'],
d['apply_markdown'],
d['client_gravatar'],
d['all_public_streams'],
d['queue_timeout'],
d.get('narrow', [])
)
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self) -> None:
self.current_handler_id = None
self._timeout_handle = None
def add_event(self, event: Dict[str, Any]) -> None:
if self.current_handler_id is not None:
handler = get_handler_by_id(self.current_handler_id)
async_request_restart(handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self) -> bool:
if self.current_handler_id is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
finish_handler(self.current_handler_id, self.event_queue.id,
self.event_queue.contents(), self.apply_markdown)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event: Mapping[str, Any]) -> bool:
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self) -> bool:
return self.event_types is None or "message" in self.event_types
def idle(self, now: float) -> bool:
if not hasattr(self, 'queue_timeout'):
self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
return (self.current_handler_id is None and
now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler_id: int, client_name: Text) -> None:
self.current_handler_id = handler_id
self.current_client_name = client_name
set_descriptor_by_handler_id(handler_id, self)
self.last_connection_time = time.time()
def timeout_callback() -> None:
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
ioloop = tornado.ioloop.IOLoop.instance()
interval = HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
if self.client_type_name != 'API: heartbeat test':
self._timeout_handle = ioloop.call_later(interval, timeout_callback)
def disconnect_handler(self, client_closed: bool=False) -> None:
if self.current_handler_id:
clear_descriptor_by_handler_id(self.current_handler_id, None)
clear_handler_by_id(self.current_handler_id)
if client_closed:
logging.info("Client disconnected for queue %s (%s via %s)" %
(self.event_queue.id, self.user_profile_email,
self.current_client_name))
self.current_handler_id = None
self.current_client_name = None
if self._timeout_handle is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
def cleanup(self) -> None:
# Before we can GC the event queue, we need to disconnect the
# handler and notify the client (or connection server) so that
# they can cleanup their own state related to the GC'd event
# queue. Finishing the handler before we GC ensures the
# invariant that event queues are idle when passed to
# `do_gc_event_queues` is preserved.
self.finish_current_handler()
do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
{self.realm_id})
def compute_full_event_type(event: Mapping[str, Any]) -> str:
if event["type"] == "update_message_flags":
if event["all"]:
# Put the "all" case in its own category
return "all_flags/%s/%s" % (event["flag"], event["operation"])
return "flags/%s/%s" % (event["operation"], event["flag"])
return event["type"]
class EventQueue:
def __init__(self, id: str) -> None:
self.queue = deque() # type: ignore # Should be Deque[Dict[str, Any]], but Deque isn't available in Python 3.4
self.next_event_id = 0 # type: int
self.id = id # type: str
self.virtual_events = {} # type: Dict[str, Dict[str, Any]]
def to_dict(self) -> Dict[str, Any]:
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(id=self.id,
next_event_id=self.next_event_id,
queue=list(self.queue),
virtual_events=self.virtual_events)
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> 'EventQueue':
ret = cls(d['id'])
ret.next_event_id = d['next_event_id']
ret.queue = deque(d['queue'])
ret.virtual_events = d.get("virtual_events", {})
return ret
def push(self, event: Dict[str, Any]) -> None:
event['id'] = self.next_event_id
self.next_event_id += 1
full_event_type = compute_full_event_type(event)
if (full_event_type in ["pointer", "restart"] or
full_event_type.startswith("flags/")):
if full_event_type not in self.virtual_events:
self.virtual_events[full_event_type] = copy.deepcopy(event)
return
# Update the virtual event with the values from the event
virtual_event = self.virtual_events[full_event_type]
virtual_event["id"] = event["id"]
if "timestamp" in event:
virtual_event["timestamp"] = event["timestamp"]
if full_event_type == "pointer":
virtual_event["pointer"] = event["pointer"]
elif full_event_type == "restart":
virtual_event["server_generation"] = event["server_generation"]
elif full_event_type.startswith("flags/"):
virtual_event["messages"] += event["messages"]
else:
self.queue.append(event)
# Note that pop ignores virtual events. This is fine in our
# current usage since virtual events should always be resolved to
# a real event before being given to users.
def pop(self) -> Dict[str, Any]:
return self.queue.popleft()
def empty(self) -> bool:
return len(self.queue) == 0 and len(self.virtual_events) == 0
# See the comment on pop; that applies here as well
def prune(self, through_id: int) -> None:
while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
self.pop()
def contents(self) -> List[Dict[str, Any]]:
contents = [] # type: List[Dict[str, Any]]
virtual_id_map = {} # type: Dict[int, Dict[str, Any]]
for event_type in self.virtual_events:
virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
virtual_ids = sorted(list(virtual_id_map.keys()))
# Merge the virtual events into their final place in the queue
index = 0
length = len(virtual_ids)
for event in self.queue:
while index < length and virtual_ids[index] < event["id"]:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
contents.append(event)
while index < length:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
self.virtual_events = {}
self.queue = deque(contents)
return contents
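# Illustrative sketch (not part of the module): successive flag events with the
# same operation/flag collapse into a single virtual event whose message ids
# accumulate, and contents() folds it back into the returned list:
#
#   q = EventQueue('42:0')
#   q.push(dict(type='update_message_flags', all=False, operation='add',
#               flag='read', messages=[1, 2]))
#   q.push(dict(type='update_message_flags', all=False, operation='add',
#               flag='read', messages=[3]))
#   q.contents()  # -> [{'type': ..., 'id': 1, 'messages': [1, 2, 3], ...}]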
# maps queue ids to client descriptors
clients = {} # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {} # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {} # type: Dict[int, List[ClientDescriptor]]
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id that is about to be deleted
gc_hooks = [] # type: List[Callable[[int, ClientDescriptor, bool], None]]
next_queue_id = 0
def clear_client_event_queues_for_testing() -> None:
assert(settings.TEST_SUITE)
clients.clear()
user_clients.clear()
realm_clients_all_streams.clear()
gc_hooks.clear()
global next_queue_id
next_queue_id = 0
def add_client_gc_hook(hook: Callable[[int, ClientDescriptor, bool], None]) -> None:
gc_hooks.append(hook)
def get_client_descriptor(queue_id: str) -> Optional[ClientDescriptor]:
return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id: int) -> List[ClientDescriptor]:
return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id: int) -> List[ClientDescriptor]:
return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client: ClientDescriptor) -> None:
user_clients.setdefault(client.user_profile_id, []).append(client)
if client.all_public_streams or client.narrow != []:
realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(new_queue_data: MutableMapping[str, Any]) -> ClientDescriptor:
global next_queue_id
queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
next_queue_id += 1
new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
client = ClientDescriptor.from_dict(new_queue_data)
clients[queue_id] = client
add_to_client_dicts(client)
return client
def do_gc_event_queues(to_remove: AbstractSet[str], affected_users: AbstractSet[int],
affected_realms: AbstractSet[int]) -> None:
def filter_client_dict(client_dict: MutableMapping[int, List[ClientDescriptor]], key: int) -> None:
if key not in client_dict:
return
new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues() -> None:
start = time.time()
to_remove = set() # type: Set[str]
affected_users = set() # type: Set[int]
affected_realms = set() # type: Set[int]
for (id, client) in clients.items():
if client.idle(start):
to_remove.add(id)
affected_users.add(client.user_profile_id)
affected_realms.add(client.realm_id)
# We don't need to call e.g. finish_current_handler on the clients
# being removed because they are guaranteed to be idle and thus
# not have a current handler.
do_gc_event_queues(to_remove, affected_users, affected_realms)
if settings.PRODUCTION:
logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.' +
' Now %d active queues, %s')
% (len(to_remove), len(affected_users), time.time() - start,
len(clients), handler_stats_string()))
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
def dump_event_queues() -> None:
start = time.time()
with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.items()],
stored_queues)
logging.info('Tornado dumped %d event queues in %.3fs'
% (len(clients), time.time() - start))
def load_event_queues() -> None:
global clients
start = time.time()
# ujson chokes on bad input pretty easily. We separate out the actual
# file reading from the loading so that we don't silently fail if we get
# bad input.
try:
with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
json_data = stored_queues.read()
try:
clients = dict((qid, ClientDescriptor.from_dict(client))
for (qid, client) in ujson.loads(json_data))
except Exception:
logging.exception("Could not deserialize event queues")
except (IOError, EOFError):
pass
for client in clients.values():
# Put code for migrations due to event queue data format changes here
add_to_client_dicts(client)
logging.info('Tornado loaded %d event queues in %.3fs'
% (len(clients), time.time() - start))
def send_restart_events(immediate: bool=False) -> None:
event = dict(type='restart', server_generation=settings.SERVER_GENERATION) # type: Dict[str, Any]
if immediate:
event['immediate'] = True
for client in clients.values():
if client.accepts_event(event):
client.add_event(event.copy())
def setup_event_queue() -> None:
if not settings.TEST_SUITE:
load_event_queues()
atexit.register(dump_event_queues)
# Make sure we dump event queues even if we exit via signal
signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
tornado.autoreload.add_reload_hook(dump_event_queues)
try:
os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
except OSError:
pass
# Set up event queue garbage collection
ioloop = tornado.ioloop.IOLoop.instance()
pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
pc.start()
send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
queue_id = query["queue_id"] # type: str
dont_block = query["dont_block"] # type: bool
last_event_id = query["last_event_id"] # type: int
user_profile_id = query["user_profile_id"] # type: int
new_queue_data = query.get("new_queue_data") # type: Optional[MutableMapping[str, Any]]
user_profile_email = query["user_profile_email"] # type: Text
client_type_name = query["client_type_name"] # type: Text
handler_id = query["handler_id"] # type: int
try:
was_connected = False
orig_queue_id = queue_id
extra_log_data = ""
if queue_id is None:
if dont_block:
client = allocate_client_descriptor(new_queue_data)
queue_id = client.event_queue.id
else:
raise JsonableError(_("Missing 'queue_id' argument"))
else:
if last_event_id is None:
raise JsonableError(_("Missing 'last_event_id' argument"))
client = get_client_descriptor(queue_id)
if client is None:
raise BadEventQueueIdError(queue_id)
if user_profile_id != client.user_profile_id:
raise JsonableError(_("You are not authorized to get events from this queue"))
client.event_queue.prune(last_event_id)
was_connected = client.finish_current_handler()
if not client.event_queue.empty() or dont_block:
response = dict(events=client.event_queue.contents(),
handler_id=handler_id) # type: Dict[str, Any]
if orig_queue_id is None:
response['queue_id'] = queue_id
if len(response["events"]) == 1:
extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
response["events"][0]["type"])
else:
extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
if was_connected:
extra_log_data += " [was connected]"
return dict(type="response", response=response, extra_log_data=extra_log_data)
# After this point, dont_block=False, the queue is empty, and we
# have a pre-existing queue, so we wait for new events.
if was_connected:
logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
client_type_name))
except JsonableError as e:
return dict(type="error", exception=e)
client.connect_handler(handler_id, client_type_name)
return dict(type="async")
# The following functions are called from Django
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp: requests.Response) -> Dict[str, Any]:
if requests_json_is_function:
return resp.json()
else:
return resp.json # type: ignore # mypy trusts the stub, not the runtime type checking of this fn
def request_event_queue(user_profile, user_client, apply_markdown, client_gravatar,
queue_lifespan_secs, event_types=None, all_public_streams=False,
narrow=[]):
# type: (UserProfile, Client, bool, bool, int, Optional[Iterable[str]], bool, Iterable[Sequence[Text]]) -> Optional[str]
if settings.TORNADO_SERVER:
req = {'dont_block': 'true',
'apply_markdown': ujson.dumps(apply_markdown),
'client_gravatar': ujson.dumps(client_gravatar),
'all_public_streams': ujson.dumps(all_public_streams),
'client': 'internal',
'user_client': user_client.name,
'narrow': ujson.dumps(narrow),
'lifespan_secs': queue_lifespan_secs}
if event_types is not None:
req['event_types'] = ujson.dumps(event_types)
try:
resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(
user_profile.email, user_profile.api_key),
params=req)
except requests.adapters.ConnectionError:
logging.error('Tornado server does not seem to be running, check %s '
'and %s for more information.' %
(settings.ERROR_FILE_LOG_PATH, "tornado.log"))
raise requests.adapters.ConnectionError(
"Django cannot connect to Tornado server (%s); try restarting" %
(settings.TORNADO_SERVER))
resp.raise_for_status()
return extract_json_response(resp)['queue_id']
return None
def get_user_events(user_profile: UserProfile, queue_id: str, last_event_id: int) -> List[Dict[Any, Any]]:
if settings.TORNADO_SERVER:
resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(
user_profile.email, user_profile.api_key),
params={'queue_id': queue_id,
'last_event_id': last_event_id,
'dont_block': 'true',
'client': 'internal'})
resp.raise_for_status()
return extract_json_response(resp)['events']
return []
# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id: int, message_id: int) -> Dict[str, Any]:
return {"user_profile_id": user_profile_id,
"message_id": message_id,
"timestamp": time.time()}
def missedmessage_hook(user_profile_id: int, client: ClientDescriptor, last_for_client: bool) -> None:
"""The receiver_is_off_zulip logic used to determine whether a user
has no active client suffers from a somewhat fundamental race
condition. If the client is no longer on the Internet,
receiver_is_off_zulip will still return true for
IDLE_EVENT_QUEUE_TIMEOUT_SECS, until the queue is
garbage-collected. This would cause us to reliably miss
push/email notifying users for messages arriving during the
IDLE_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for
example). We address this by, when the queue is garbage-collected
at the end of those 10 minutes, checking to see if it's the last
one, and if so, potentially triggering notifications to the user
at that time, resulting in at most a IDLE_EVENT_QUEUE_TIMEOUT_SECS
delay in the arrival of their notifications.
As Zulip's APIs get more popular and the mobile apps start using
long-lived event queues for perf optimization, future versions of
this will likely need to replace checking `last_for_client` with
something more complicated, so that we only consider clients like
web browsers, not the mobile apps or random API scripts.
"""
# Only process missedmessage hook when the last queue for a
# client has been garbage collected
if not last_for_client:
return
for event in client.event_queue.contents():
if event['type'] != 'message':
continue
assert 'flags' in event
flags = event['flags']
mentioned = 'mentioned' in flags and 'read' not in flags
private_message = event['message']['type'] == 'private'
# stream_push_notify is set in process_message_event.
stream_push_notify = event.get('stream_push_notify', False)
stream_name = None
if not private_message:
stream_name = event['message']['display_recipient']
# Since one is by definition idle, we don't need to check always_push_notify
always_push_notify = False
# Since we just GC'd the last event queue, the user is definitely idle.
idle = True
message_id = event['message']['id']
# Pass on the information on whether a push or email notification was already sent.
already_notified = dict(
push_notified = event.get("push_notified", False),
email_notified = event.get("email_notified", False),
)
maybe_enqueue_notifications(user_profile_id, message_id, private_message, mentioned,
stream_push_notify, stream_name, always_push_notify, idle,
already_notified)
def receiver_is_off_zulip(user_profile_id: int) -> bool:
# If a user has no message-receiving event queues, they've got no open zulip
# session so we notify them
all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
off_zulip = len(message_event_queues) == 0
return off_zulip
def maybe_enqueue_notifications(user_profile_id, message_id, private_message,
mentioned, stream_push_notify, stream_name,
always_push_notify, idle, already_notified):
# type: (int, int, bool, bool, bool, Optional[str], bool, bool, Dict[str, bool]) -> Dict[str, bool]
"""This function has a complete unit test suite in
`test_enqueue_notifications` that should be expanded as we add
more features here."""
notified = dict() # type: Dict[str, bool]
if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
notice = build_offline_notification(user_profile_id, message_id)
if private_message:
notice['trigger'] = 'private_message'
elif mentioned:
notice['trigger'] = 'mentioned'
elif stream_push_notify:
notice['trigger'] = 'stream_push_notify'
else:
raise AssertionError("Unknown notification trigger!")
notice['stream_name'] = stream_name
if not already_notified.get("push_notified"):
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
notified['push_notified'] = True
# Send missed_message emails if a private message or a
# mention. Eventually, we'll add settings to allow email
# notifications to match the model of push notifications
# above.
if idle and (private_message or mentioned):
# We require RabbitMQ to do this, as we can't call the email handler
# from the Tornado process. So if there's no rabbitmq support do nothing
if not already_notified.get("email_notified"):
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
notified['email_notified'] = True
return notified
ClientInfo = TypedDict('ClientInfo', {
'client': ClientDescriptor,
'flags': Optional[Iterable[str]],
'is_sender': bool,
})
def get_client_info_for_message_event(event_template: Mapping[str, Any],
users: Iterable[Mapping[str, Any]]) -> Dict[str, ClientInfo]:
'''
Return client info for all the clients interested in a message.
This basically includes clients for users who are recipients
of the message, with some nuances for bots that auto-subscribe
to all streams, plus users who may be mentioned, etc.
'''
send_to_clients = {} # type: Dict[str, ClientInfo]
sender_queue_id = event_template.get('sender_queue_id', None) # type: Optional[str]
def is_sender_client(client: ClientDescriptor) -> bool:
return (sender_queue_id is not None) and client.event_queue.id == sender_queue_id
# If we're on a public stream, look for clients (typically belonging to
# bots) that are registered to get events for ALL streams.
if 'stream_name' in event_template and not event_template.get("invite_only"):
realm_id = event_template['realm_id']
for client in get_client_descriptors_for_realm_all_streams(realm_id):
send_to_clients[client.event_queue.id] = dict(
client=client,
flags=None,
is_sender=is_sender_client(client)
)
for user_data in users:
user_profile_id = user_data['id'] # type: int
flags = user_data.get('flags', []) # type: Iterable[str]
for client in get_client_descriptors_for_user(user_profile_id):
send_to_clients[client.event_queue.id] = dict(
client=client,
flags=flags,
is_sender=is_sender_client(client)
)
return send_to_clients
def process_message_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
send_to_clients = get_client_info_for_message_event(event_template, users)
presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
wide_dict = event_template['message_dict'] # type: Dict[str, Any]
sender_id = wide_dict['sender_id'] # type: int
message_id = wide_dict['id'] # type: int
message_type = wide_dict['type'] # type: str
sending_client = wide_dict['client'] # type: Text
@cachify
def get_client_payload(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
dct = copy.deepcopy(wide_dict)
MessageDict.finalize_payload(dct, apply_markdown, client_gravatar)
return dct
# Extra user-specific data to include
extra_user_data = {} # type: Dict[int, Any]
for user_data in users:
user_profile_id = user_data['id'] # type: int
flags = user_data.get('flags', []) # type: Iterable[str]
# If the recipient was offline and the message was a single or group PM to them,
# or they were @-notified, potentially notify more immediately
private_message = message_type == "private" and user_profile_id != sender_id
mentioned = 'mentioned' in flags and 'read' not in flags
stream_push_notify = user_data.get('stream_push_notify', False)
# We first check if a message is potentially mentionable,
# since receiver_is_off_zulip is somewhat expensive.
if private_message or mentioned or stream_push_notify:
idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in presence_idle_user_ids)
always_push_notify = user_data.get('always_push_notify', False)
stream_name = event_template.get('stream_name')
result = maybe_enqueue_notifications(user_profile_id, message_id, private_message,
mentioned, stream_push_notify, stream_name,
always_push_notify, idle, {})
result['stream_push_notify'] = stream_push_notify
extra_user_data[user_profile_id] = result
for client_data in send_to_clients.values():
client = client_data['client']
flags = client_data['flags']
is_sender = client_data.get('is_sender', False) # type: bool
extra_data = extra_user_data.get(client.user_profile_id, None) # type: Optional[Mapping[str, bool]]
if not client.accepts_messages():
# The actual check is the accepts_event() check below;
# this line is just an optimization to avoid copying
# message data unnecessarily
continue
message_dict = get_client_payload(client.apply_markdown, client.client_gravatar)
# Make sure Zephyr mirroring bots know whether stream is invite-only
if "mirror" in client.client_type_name and event_template.get("invite_only"):
message_dict = message_dict.copy()
message_dict["invite_only_stream"] = True
user_event = dict(type='message', message=message_dict, flags=flags) # type: Dict[str, Any]
if extra_data is not None:
user_event.update(extra_data)
if is_sender:
local_message_id = event_template.get('local_id', None)
if local_message_id is not None:
user_event["local_message_id"] = local_message_id
if not client.accepts_event(user_event):
continue
# The below prevents (Zephyr) mirroring loops.
if ('mirror' in sending_client and
sending_client.lower() == client.client_type_name.lower()):
continue
client.add_event(user_event)
def process_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
for user_profile_id in users:
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(event):
client.add_event(dict(event))
def process_userdata_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
for user_data in users:
user_profile_id = user_data['id']
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def process_message_update_event(event_template: Mapping[str, Any],
users: Iterable[Mapping[str, Any]]) -> None:
prior_mention_user_ids = set(event_template.get('prior_mention_user_ids', []))
mention_user_ids = set(event_template.get('mention_user_ids', []))
presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
stream_push_user_ids = set(event_template.get('stream_push_user_ids', []))
push_notify_user_ids = set(event_template.get('push_notify_user_ids', []))
stream_name = event_template.get('stream_name')
message_id = event_template['message_id']
for user_data in users:
user_profile_id = user_data['id']
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
maybe_enqueue_notifications_for_message_update(
user_profile_id=user_profile_id,
message_id=message_id,
stream_name=stream_name,
prior_mention_user_ids=prior_mention_user_ids,
mention_user_ids=mention_user_ids,
presence_idle_user_ids=presence_idle_user_ids,
stream_push_user_ids=stream_push_user_ids,
push_notify_user_ids=push_notify_user_ids,
)
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def maybe_enqueue_notifications_for_message_update(user_profile_id,
message_id,
stream_name,
prior_mention_user_ids,
mention_user_ids,
presence_idle_user_ids,
stream_push_user_ids,
push_notify_user_ids):
# type: (int, int, Optional[Text], Set[int], Set[int], Set[int], Set[int], Set[int]) -> None
private_message = (stream_name is None)
if private_message:
# We don't do offline notifications for PMs, because
# we already notified the user of the original message
return
if (user_profile_id in prior_mention_user_ids):
# Don't spam people with duplicate mentions. This is
# especially important considering that most message
# edits are simple typo corrections.
return
stream_push_notify = (user_profile_id in stream_push_user_ids)
if stream_push_notify:
# Currently we assume that if this flag is set to True, then
# the user already was notified about the earlier message,
# so we short circuit. We may handle this more rigorously
# in the future by looking at something like an AlreadyNotified
# model.
return
# We can have newly mentioned people in an updated message.
mentioned = (user_profile_id in mention_user_ids)
always_push_notify = user_profile_id in push_notify_user_ids
idle = (user_profile_id in presence_idle_user_ids) or \
receiver_is_off_zulip(user_profile_id)
maybe_enqueue_notifications(
user_profile_id=user_profile_id,
message_id=message_id,
private_message=private_message,
mentioned=mentioned,
stream_push_notify=stream_push_notify,
stream_name=stream_name,
always_push_notify=always_push_notify,
idle=idle,
already_notified={},
)
def process_notification(notice: Mapping[str, Any]) -> None:
event = notice['event'] # type: Mapping[str, Any]
users = notice['users'] # type: Union[List[int], List[Mapping[str, Any]]]
start_time = time.time()
if event['type'] == "message":
process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
elif event['type'] == "update_message":
process_message_update_event(event, cast(Iterable[Mapping[str, Any]], users))
elif event['type'] == "delete_message":
process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
else:
process_event(event, cast(Iterable[int], users))
logging.debug("Tornado: Event %s for %s users took %sms" % (
event['type'], len(users), int(1000 * (time.time() - start_time))))
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(data: Mapping[str, Any]) -> None:
if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
requests_client.post(settings.TORNADO_SERVER + '/notify_tornado', data=dict(
data = ujson.dumps(data),
secret = settings.SHARED_SECRET))
else:
process_notification(data)
def send_notification(data: Dict[str, Any]) -> None:
queue_json_publish("notify_tornado", data, send_notification_http)
def send_event(event: Mapping[str, Any],
users: Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None:
"""`users` is a list of user IDs, or in the case of `message` type
events, a list of dicts describing the users and metadata about
the user/message pair."""
queue_json_publish("notify_tornado",
dict(event=event, users=users),
send_notification_http)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Tests for the util.py module'''
import sys
import pytest
from omnisharp.exceptions import BadResponseError, ServerConnectionError
from omnisharp.util import (UtilCtx, find_free_port, formatPathForClient,
formatPathForServer, getResponse,
quickfixes_from_response)
@pytest.fixture(scope='module')
def ctx():
return UtilCtx()
def test_get_response_no_server(ctx):
'''Test that getResponse raises ServerConnectionError when there is no server'''
port = find_free_port()
with pytest.raises(ServerConnectionError):
getResponse(ctx, "http://localhost:%d" % port)
def test_get_response_mocked_server(ctx, mocker):
'''Test that we can get a response from the server'''
build_opener = mocker.patch('omnisharp.util.request.build_opener')
expected_response = 'Mocked response with UTF-8 BOM'
if sys.version_info >= (3, 0):
mocked_response = (
'\xef\xbb\xbf' + expected_response).encode('utf-8')
else:
mocked_response = '\xef\xbb\xbf' + expected_response
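# The '\xef\xbb\xbf' prefix simulates a BOM at the start of the server payload;
# getResponse is expected to strip it, so the assertion below compares against
# the bare expected_response.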
build_opener \
.return_value.open \
.return_value.read \
.return_value = mocked_response
response = getResponse(ctx, "http://my_endpoint")
assert expected_response == response
def test_get_json_response_mocked_server(ctx, mocker):
'''Test that we can get a JSON response from the server'''
build_opener = mocker.patch('omnisharp.util.request.build_opener')
expected_response = '{"foo": "bar"}'
if sys.version_info >= (3, 0):
mocked_response = (
'\xef\xbb\xbf' + expected_response).encode('utf-8')
else:
mocked_response = '\xef\xbb\xbf' + expected_response
build_opener \
.return_value.open \
.return_value.read \
.return_value = mocked_response
response = getResponse(ctx, "http://my_endpoint", json=True)
assert {'foo': 'bar'} == response
def test_get_bad_json_response(ctx, mocker):
'''Malformed json response throws BadResponseError'''
build_opener = mocker.patch('omnisharp.util.request.build_opener')
expected_response = '{"foo": "bar"'
if sys.version_info >= (3, 0):
mocked_response = (
'\xef\xbb\xbf' + expected_response).encode('utf-8')
else:
mocked_response = '\xef\xbb\xbf' + expected_response
build_opener \
.return_value.open \
.return_value.read \
.return_value = mocked_response
with pytest.raises(BadResponseError):
getResponse(ctx, "http://my_endpoint", json=True)
def test_format_no_translate(ctx):
ctx.translate_cygwin_wsl = False
path = '/foo/bar/baz'
assert formatPathForClient(ctx, path) == path
path = '/foo/bar/baz'
assert formatPathForServer(ctx, path) == path
def test_format_client_relative(ctx):
ctx.translate_cygwin_wsl = False
ctx.cwd = '/foo'
path = '/foo/bar/baz'
assert formatPathForClient(ctx, path) == 'bar/baz'
def test_translate_for_server(ctx):
ctx.translate_cygwin_wsl = True
ctx.is_msys = True
path = '/c/foo/bar'
assert formatPathForServer(ctx, path) == r'C:\foo\bar'
ctx.is_msys = False
ctx.is_cygwin = True
path = '/cygdrive/c/foo/bar'
assert formatPathForServer(ctx, path) == r'C:\foo\bar'
ctx.is_cygwin = False
ctx.is_wsl = True
path = '/mnt/c/foo/bar'
assert formatPathForServer(ctx, path) == r'C:\foo\bar'
def test_translate_for_client(ctx):
ctx.translate_cygwin_wsl = True
ctx.is_msys = True
path = r'C:\foo\bar'
assert formatPathForClient(ctx, path) == '/c/foo/bar'
ctx.is_msys = False
ctx.is_cygwin = True
assert formatPathForClient(ctx, path) == '/cygdrive/c/foo/bar'
ctx.is_cygwin = False
ctx.is_wsl = True
assert formatPathForClient(ctx, path) == '/mnt/c/foo/bar'
def test_quickfixes_from_response(ctx):
ctx.translate_cygwin_wsl = False
response = [
{
'FileName': 'foo.cs',
'Text': 'some text',
'Line': 5,
'Column': 8,
},
]
qf = quickfixes_from_response(ctx, response)
expected = [
{
'filename': 'foo.cs',
'text': 'some text',
'lnum': 5,
'col': 8,
'vcol': 0,
},
]
assert qf == expected
ctx.buffer_name = 'myfile.cs'
response = [
{
'Message': 'some text',
'Line': 5,
'Column': 8,
'LogLevel': 'Error',
},
]
qf = quickfixes_from_response(ctx, response)
expected = [
{
'filename': ctx.buffer_name,
'text': 'some text',
'lnum': 5,
'col': 8,
'vcol': 0,
'type': 'E',
},
]
assert qf == expected
response = [
{
'FileName': 'foo.cs',
'Text': 'some text',
'Line': 5,
'Column': 8,
'LogLevel': 'Hidden',
},
]
qf = quickfixes_from_response(ctx, response)
expected = [
{
'filename': 'foo.cs',
'text': 'some text',
'lnum': 5,
'col': 8,
'vcol': 0,
'type': 'W',
'subtype': 'Style',
},
]
assert qf == expected
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for random_feature."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import edward2.jax as ed
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
def exp_quadratic(x1, x2):
return jnp.exp(-jnp.sum((x1 - x2)**2) / 2.)
def linear(x1, x2):
return jnp.sum(x1 * x2)
def cov_map(xs, xs2=None, cov_func=None):
"""Compute a covariance matrix from a covariance function and data points.
Args:
xs: array of data points, stacked along the leading dimension.
xs2: optional second array of data points, also stacked along the leading dimension.
cov_func: callable function, maps pairs of data points to scalars.
Returns:
A 2d array `a` such that `a[i, j] = cov_func(xs[i], xs2[j])` (or `cov_func(xs[i], xs[j])` when `xs2` is None).
"""
if xs2 is None:
return jax.vmap(lambda x: jax.vmap(lambda y: cov_func(x, y))(xs))(xs)
else:
return jax.vmap(lambda x: jax.vmap(lambda y: cov_func(x, y))(xs))(xs2).T
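# e.g. cov_map(xs, cov_func=linear) on xs of shape (n, d) returns an (n, n) Gram
# matrix; cov_map(xs, xs2, cov_func=linear) with xs2 of shape (m, d) returns (n, m).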
def _compute_posterior_kernel(x_tr, x_ts, ridge_penalty, kernel_func=None):
"""Computes the posterior covariance matrix of a Gaussian process."""
if kernel_func is None:
kernel_func = functools.partial(cov_map, cov_func=linear)
num_sample = x_tr.shape[0]
k_tt = kernel_func(x_tr)
k_tt_ridge = k_tt + ridge_penalty * jnp.eye(num_sample)
k_ts = kernel_func(x_tr, x_ts)
k_tt_inv_k_ts = jnp.linalg.solve(k_tt_ridge, k_ts)
k_ss = kernel_func(x_ts)
return k_ss - jnp.matmul(jnp.transpose(k_ts), k_tt_inv_k_ts)
def _generate_normal_data(num_sample, num_dim, loc=0., seed=None):
"""Generates random data sampled from i.i.d. normal distribution."""
np.random.seed(seed)
return np.random.normal(
size=(num_sample, num_dim), loc=loc, scale=1. / np.sqrt(num_dim))
class RandomFeatureGaussianProcessTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = 0
self.ridge_penalty = 1.
self.num_data_dim = 4
self.num_test_sample = 16
self.num_train_sample = 2000
self.num_random_features = 1024
self.rbf_func = functools.partial(cov_map, cov_func=exp_quadratic)
self.x_train = _generate_normal_data(
self.num_train_sample, self.num_data_dim, seed=12)
self.x_test = _generate_normal_data(
self.num_test_sample, self.num_data_dim, seed=21)
# Uses classic RBF random feature distribution.
self.hidden_kwargs = dict(
kernel_init=nn.initializers.normal(stddev=1.), feature_scale=None)
self.rbf_approx_maximum_tol = 5e-3
self.rbf_approx_average_tol = 5e-4
self.primal_dual_maximum_diff = 1e-6
self.primal_dual_average_diff = 1e-7
def one_step_rfgp_result(self, train_data, test_data, **eval_kwargs):
"""Returns the RFGP result after one-step covariance update."""
rfgp = ed.nn.RandomFeatureGaussianProcess(
features=1,
hidden_features=self.num_random_features,
normalize_input=False,
hidden_kwargs=self.hidden_kwargs,
covmat_kwargs=dict(ridge_penalty=self.ridge_penalty))
# Computes posterior covariance on test data.
init_key = jax.random.PRNGKey(self.seed)
init_variables = rfgp.init(init_key, inputs=train_data)
state, params = init_variables.pop('params')
del init_variables
# Perform one-step update on training data.
unused_rfgp_logits_train, updated_state = rfgp.apply(
{
'params': params,
**state
},
inputs=train_data,
mutable=list(state.keys()))
del unused_rfgp_logits_train
# Return the evaluation result on test data.
# Note we don't specify a mutable collection during eval.
updated_variables = {'params': params, **updated_state}
return rfgp.apply(updated_variables, inputs=test_data, **eval_kwargs)
def test_rfgp_posterior_approximation_exact_rbf(self):
"""Tests if posterior covmat approximates that from a RBF model."""
# Evaluates on test data.
_, rfgp_covmat_test = self.one_step_rfgp_result(
self.x_train, self.x_test, return_full_covmat=True)
# Compares with exact RBF posterior covariance.
rbf_covmat_test = _compute_posterior_kernel(self.x_train, self.x_test,
self.ridge_penalty,
self.rbf_func)
covmat_maximum_diff = jnp.max(jnp.abs(rbf_covmat_test - rfgp_covmat_test))
covmat_average_diff = jnp.mean(jnp.abs(rbf_covmat_test - rfgp_covmat_test))
self.assertLess(covmat_maximum_diff, self.rbf_approx_maximum_tol)
self.assertLess(covmat_average_diff, self.rbf_approx_average_tol)
def test_rfgp_posterior_approximation_dual_form(self):
"""Tests if the primal-form posterior matches with the dual form."""
# Computes the covariance matrix using primal-form formula.
x_train = _generate_normal_data(128, self.num_data_dim)
x_test = _generate_normal_data(64, self.num_data_dim)
_, _, rfgp_features_train = self.one_step_rfgp_result(
train_data=x_train, test_data=x_train,
return_full_covmat=True, return_random_features=True)
_, rfgp_covmat_primal, rfgp_features_test = self.one_step_rfgp_result(
train_data=x_train, test_data=x_test,
return_full_covmat=True, return_random_features=True)
# Computes the random-feature posterior covariance using the dual (kernel-based) formula.
linear_kernel_func = functools.partial(cov_map, cov_func=linear)
rfgp_covmat_dual = _compute_posterior_kernel(
rfgp_features_train, rfgp_features_test,
ridge_penalty=self.ridge_penalty,
kernel_func=linear_kernel_func)
covmat_diff = jnp.abs(rfgp_covmat_dual - rfgp_covmat_primal)
covmat_maximum_diff = jnp.max(covmat_diff)
covmat_average_diff = jnp.mean(covmat_diff)
self.assertLess(covmat_maximum_diff, self.primal_dual_maximum_diff)
self.assertLess(covmat_average_diff, self.primal_dual_average_diff)
@parameterized.named_parameters(
('diag_covmat_no_rff', False, False),
('diag_covmat_with_rff', False, True),
('full_covmat_no_rff', True, False),
('full_covmat_with_rff', True, True),
)
def test_rfgp_output_shape(self, return_full_covmat, return_random_features):
"""Tests if the shape of output covmat and random features are correct."""
rfgp_results = self.one_step_rfgp_result(
train_data=self.x_train,
test_data=self.x_test,
return_full_covmat=return_full_covmat,
return_random_features=return_random_features)
expected_results_len = 2 + return_random_features
observed_covmat_shape = rfgp_results[1].shape
expected_covmat_shape = ((self.num_test_sample,) if not return_full_covmat
else (self.num_test_sample, self.num_test_sample))
self.assertLen(rfgp_results, expected_results_len)
self.assertEqual(observed_covmat_shape, expected_covmat_shape)
if return_random_features:
expected_feature_shape = (self.num_test_sample, self.num_random_features)
observed_feature_shape = rfgp_results[2].shape
self.assertEqual(expected_feature_shape, observed_feature_shape)
def test_rfgp_default_parameter_collections(self):
rfgp = ed.nn.RandomFeatureGaussianProcess(
features=1, hidden_features=self.num_random_features)
# Computes posterior covariance on test data.
init_key = jax.random.PRNGKey(self.seed)
init_variables = rfgp.init(init_key, inputs=self.x_train)
state, params = init_variables.pop('params')
del init_variables
# Note: the norm_layer should not show up in `params`
# since by default it does not have trainable parameters.
self.assertEqual(list(params.keys()), ['output_layer'])
self.assertEqual(
list(state.keys()), ['random_features', 'laplace_covariance'])
class RandomFeatureTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = 0
self.collection_name = 'random_fourier_features'
self.num_data_dim = 128
self.num_train_sample = 512
self.num_random_features = 10240
self.rbf_kern_func = functools.partial(cov_map, cov_func=exp_quadratic)
self.x_train = _generate_normal_data(self.num_train_sample,
self.num_data_dim)
self.x_test = _generate_normal_data(self.num_train_sample,
self.num_data_dim)
# Uses classic RBF random feature distribution.
self.hidden_kwargs = dict(
kernel_init=nn.initializers.normal(stddev=1.), feature_scale=None)
self.kernel_approx_tolerance = dict(atol=5e-2, rtol=1e-2)
def test_random_feature_mutable_collection(self):
"""Tests if RFF variables are properly nested under a mutable collection."""
rng = jax.random.PRNGKey(self.seed)
rff_layer = ed.nn.RandomFourierFeatures(
features=self.num_random_features,
collection_name=self.collection_name,
**self.hidden_kwargs)
# Computes forward pass with mutable collection specified.
init_vars = rff_layer.init(rng, self.x_train)
_, mutable_vars = rff_layer.apply(
init_vars, self.x_train, mutable=[self.collection_name])
# Check if random feature variables are in the mutable variable collection.
rff_vars = mutable_vars[self.collection_name]
rff_kernel = rff_vars['kernel']
rff_bias = rff_vars['bias']
self.assertEqual(rff_kernel.shape,
(self.num_data_dim, self.num_random_features))
self.assertEqual(rff_bias.shape, (self.num_random_features,))
@parameterized.named_parameters(
('3d_input_tensor', (8, 12, 64)), # 3-Dimensional input
('4d_input_tensor', (8, 6, 16, 32)), # 4-Dimensional input
)
def test_random_feature_nd_input(self, input_shape):
rng = jax.random.PRNGKey(self.seed)
x = jnp.ones(input_shape)
rff_layer = ed.nn.RandomFourierFeatures(
features=self.num_random_features, **self.hidden_kwargs)
y, _ = rff_layer.init_with_output(rng, x)
expected_output_shape = input_shape[:-1] + (self.num_random_features,)
self.assertEqual(y.shape, expected_output_shape)
def test_random_feature_kernel_approximation(self):
"""Tests if default RFF layer approximates a RBF kernel matrix."""
rng = jax.random.PRNGKey(self.seed)
rff_layer = ed.nn.RandomFourierFeatures(
features=self.num_random_features,
collection_name=self.collection_name,
**self.hidden_kwargs)
# Extracts random features by computing forward pass.
init_vars = rff_layer.init(rng, self.x_train)
random_feature, _ = rff_layer.apply(
init_vars, self.x_train, mutable=[self.collection_name])
# Computes the approximated and the exact kernel matrices.
prior_kernel_computed = random_feature.dot(random_feature.T)
prior_kernel_expected = self.rbf_kern_func(self.x_train, self.x_train)
np.testing.assert_allclose(prior_kernel_computed, prior_kernel_expected,
**self.kernel_approx_tolerance)
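# Background note (an assumption based on the classic random Fourier feature
# construction of Rahimi & Recht, not taken from this module): with
# kernel_init=normal(stddev=1.) and feature_scale=None, the layer is expected
# to compute roughly
#
#     phi(x) = sqrt(2 / D) * cos(W x + b),  W_ij ~ N(0, 1),  b_j ~ U(0, 2*pi)
#
# so that phi(x) . phi(y) approximates the unit-lengthscale RBF kernel
# exp(-||x - y||^2 / 2), which is what the assertion above checks.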
class LaplaceRandomFeatureCovarianceTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = 0
self.collection_name = 'laplace_covariance'
self.num_random_features = 1024
self.batch_size = 31
self.ridge_penalty = 0.32
self.kernel_approx_tolerance = dict(atol=5e-2, rtol=1e-2)
@parameterized.named_parameters(('gaussian_multi_class', 'gaussian', 42),
('binary_univariate', 'binary_logistic', 1),
('poisson_univariate', 'poisson', 1))
def test_laplace_covariance_shape(self, likelihood, logit_dim):
"""Tests if the shape of the covariance matrix is correct."""
rng = jax.random.PRNGKey(self.seed)
rff_key, logit_key, init_key = jax.random.split(rng, 3)
gp_features = jax.random.uniform(
rff_key, (self.batch_size, self.num_random_features))
gp_logits = jax.random.uniform(logit_key, (self.batch_size, logit_dim))
cov_layer = ed.nn.LaplaceRandomFeatureCovariance(
hidden_features=self.num_random_features,
likelihood=likelihood,
)
# Initialize and apply one update.
init_vars = cov_layer.init(init_key, gp_features, gp_logits)
cov_null, mutated_vars = cov_layer.apply(
init_vars, gp_features, gp_logits, mutable=[cov_layer.collection_name])
# Evaluate covariance.
cov_diag = cov_layer.apply(
mutated_vars, gp_features, gp_logits, diagonal_only=True)
cov_mat = cov_layer.apply(
mutated_vars, gp_features, gp_logits, diagonal_only=False)
# No covariance is returned during mutable update.
self.assertIsNone(cov_null)
# Shape of returned covariance depends on diagonal_only=True / False.
self.assertEqual(cov_diag.shape, (self.batch_size,))
self.assertEqual(cov_mat.shape, (self.batch_size, self.batch_size))
@parameterized.named_parameters(
('binary_multivariate_logit', 'binary_logistic', 3),
('binary_no_logit', 'binary_logistic', None),
('poisson_multivariate_logit', 'poisson', 2),
('poisson_no_logit', 'poisson', None))
def test_laplace_covariance_likelihood_error(self, likelihood, logit_dim):
"""Tests that non-Gaussian models raise an error for multivariate / null logits."""
rng = jax.random.PRNGKey(self.seed)
rff_key, logit_key, init_key = jax.random.split(rng, 3)
gp_features = jax.random.uniform(
rff_key, (self.batch_size, self.num_random_features))
gp_logits = jax.random.uniform(logit_key,
(self.batch_size,
logit_dim)) if logit_dim else None
cov_layer = ed.nn.LaplaceRandomFeatureCovariance(
hidden_features=self.num_random_features,
likelihood=likelihood,
)
init_vars = cov_layer.init(init_key, gp_features, gp_logits)
with self.assertRaises(ValueError):
_ = cov_layer.apply(
init_vars,
gp_features,
gp_logits,
mutable=[cov_layer.collection_name])
def test_laplace_covariance_gaussian_update(self):
"""Tests if orthogonal data leads to an identity covariance matrix."""
sample_size = self.num_random_features * 2
rng = jax.random.PRNGKey(self.seed)
rff_key, init_key = jax.random.split(rng, 2)
# Make orthogonal data using SVD.
gp_features = jax.random.uniform(
rff_key, (sample_size, self.num_random_features))
gp_features_ortho, _, _ = jnp.linalg.svd(gp_features, full_matrices=False)
cov_layer = ed.nn.LaplaceRandomFeatureCovariance(
hidden_features=self.num_random_features,
likelihood='gaussian',
ridge_penalty=self.ridge_penalty)
# Initialize and apply one update.
init_vars = cov_layer.init(init_key, gp_features_ortho)
_, mutated_vars = cov_layer.apply(
init_vars, gp_features_ortho, mutable=[cov_layer.collection_name])
# Check precision matrices after update.
# Under an exact update with a Gaussian likelihood, the precision matrix should
# be (U^T @ U + ridge * I); since U has orthonormal columns, U^T @ U = I, so
# this equals (1 + ridge) * I.
updated_mat_computed = mutated_vars[
cov_layer.collection_name]['precision_matrix']
updated_mat_expected = jnp.eye(
self.num_random_features) * (1. + self.ridge_penalty)
np.testing.assert_allclose(updated_mat_computed, updated_mat_expected,
rtol=1e-5, atol=1e-5)
@parameterized.named_parameters(('gaussian_multi_class', 'gaussian', 4),
('binary_univariate', 'binary_logistic', 1),
('poisson_univariate', 'poisson', 1))
def test_laplace_covariance_exact_update(self, likelihood, logit_dim):
"""Tests if exact update returns correct result."""
# Perform exact update by setting momentum to `None`.
momentum = None
rng = jax.random.PRNGKey(self.seed)
rff_key, logit_key, init_key = jax.random.split(rng, 3)
gp_features = jax.random.uniform(
rff_key, (self.batch_size, self.num_random_features))
gp_logits = jax.random.uniform(logit_key,
(self.batch_size,
logit_dim)) if logit_dim else None
cov_layer = ed.nn.LaplaceRandomFeatureCovariance(
hidden_features=self.num_random_features,
likelihood=likelihood,
ridge_penalty=self.ridge_penalty,
momentum=momentum)
# Initialize and apply one update.
init_vars = cov_layer.init(init_key, gp_features, gp_logits)
_, mutated_vars = cov_layer.apply(
init_vars, gp_features, gp_logits, mutable=[cov_layer.collection_name])
# Check precision matrices at initialization and after update.
init_mat_computed = init_vars[cov_layer.collection_name]['precision_matrix']
init_mat_expected = jnp.eye(self.num_random_features) * self.ridge_penalty
updated_mat_computed = mutated_vars[
cov_layer.collection_name]['precision_matrix']
updated_mat_expected = cov_layer.update_precision_matrix(
gp_features, gp_logits, 0.) + init_mat_expected
np.testing.assert_allclose(init_mat_computed, init_mat_expected)
np.testing.assert_allclose(updated_mat_computed, updated_mat_expected)
@parameterized.named_parameters(
('gaussian_multi_class_0', 'gaussian', 4, 0.),
('gaussian_multi_class_0.52', 'gaussian', 4, .52),
('gaussian_multi_class_1', 'gaussian', 4, 1.),
('binary_univariate_0', 'binary_logistic', 1, 0.),
('binary_univariate_0.18', 'binary_logistic', 1, .18),
('binary_univariate_1', 'binary_logistic', 1, 1.),
('poisson_univariate_0', 'poisson', 1, 0.),
('poisson_univariate_0.73', 'poisson', 1, .73),
('poisson_univariate_1', 'poisson', 1, 1.))
def test_laplace_covariance_momentum_update(self, likelihood, logit_dim,
momentum):
"""Tests if momentum update is correct."""
rng = jax.random.PRNGKey(self.seed)
rff_key, logit_key, init_key = jax.random.split(rng, 3)
gp_features = jax.random.uniform(
rff_key, (self.batch_size, self.num_random_features))
gp_logits = jax.random.uniform(logit_key,
(self.batch_size,
logit_dim)) if logit_dim else None
cov_layer = ed.nn.LaplaceRandomFeatureCovariance(
hidden_features=self.num_random_features,
likelihood=likelihood,
ridge_penalty=self.ridge_penalty,
momentum=momentum)
# Initialize and apply one update.
init_vars = cov_layer.init(init_key, gp_features, gp_logits)
_, mutated_vars = cov_layer.apply(
init_vars, gp_features, gp_logits, mutable=[cov_layer.collection_name])
# Check precision matrices at initialization and after update.
init_mat_computed = init_vars[cov_layer.collection_name]['precision_matrix']
init_mat_expected = jnp.eye(self.num_random_features) * self.ridge_penalty
updated_mat_computed = mutated_vars[
cov_layer.collection_name]['precision_matrix']
updated_mat_expected = cov_layer.update_precision_matrix(
gp_features, gp_logits, 0.) + momentum * init_mat_expected
np.testing.assert_allclose(init_mat_computed, init_mat_expected)
np.testing.assert_allclose(updated_mat_computed, updated_mat_expected)
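# As exercised by the two tests above, the precision-matrix update appears to
# follow (an inference from the expected values, not from the layer's source):
#
#   exact update (momentum=None): P_new = P_init + batch_contribution
#   momentum m:                   P_new = m * P_init + batch_contribution
#
# where batch_contribution = update_precision_matrix(features, logits, 0.).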
@parameterized.named_parameters(('gaussian_multi_class', 'gaussian', 4),
('binary_univariate', 'binary_logistic', 1),
('poisson_univariate', 'poisson', 1))
def test_laplace_covariance_diagonal_covariance(self, likelihood, logit_dim):
"""Tests if computed predictive variance is the diagonal of covar matrix."""
rng = jax.random.PRNGKey(self.seed)
rff_key, logit_key, init_key = jax.random.split(rng, 3)
gp_features = jax.random.uniform(
rff_key, (self.batch_size, self.num_random_features))
gp_logits = jax.random.uniform(logit_key, (self.batch_size, logit_dim))
cov_layer = ed.nn.LaplaceRandomFeatureCovariance(
hidden_features=self.num_random_features,
likelihood=likelihood,
ridge_penalty=self.ridge_penalty)
# Initialize and apply one update.
init_vars = cov_layer.init(init_key, gp_features, gp_logits)
_, mutated_vars = cov_layer.apply(
init_vars, gp_features, gp_logits, mutable=[cov_layer.collection_name])
# Evaluate covariance.
cov_diag = cov_layer.apply(
mutated_vars, gp_features, gp_logits, diagonal_only=True)
cov_mat = cov_layer.apply(
mutated_vars, gp_features, gp_logits, diagonal_only=False)
np.testing.assert_allclose(
cov_diag, jnp.diag(cov_mat), rtol=1e-6, atol=1e-6)
if __name__ == '__main__':
absltest.main()
|
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension that adds an autosummary:: directive, which can be
used to generate function/method/attribute/etc. summary lists, similar
to those output e.g. by Epydoc and other API doc generation tools.
An :autolink: role is also provided.
autosummary directive
---------------------
The autosummary directive has the form::
.. autosummary::
:nosignatures:
:toctree: generated/
module.function_1
module.function_2
...
and it generates an output table (containing signatures, optionally)
======================== =============================================
module.function_1(args) Summary line from the docstring of function_1
module.function_2(args) Summary line from the docstring
...
======================== =============================================
If the :toctree: option is specified, files matching the function names
are inserted into the toctree with the given prefix:
generated/module.function_1
generated/module.function_2
...
Note: The file names contain the module:: or currentmodule:: prefixes.
.. seealso:: autosummary_generate.py
autolink role
-------------
The autolink role functions as ``:obj:`` when the name referred can be
resolved to a Python object, and otherwise it becomes simple emphasis.
This can be used as the default role to make links 'smart'.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
import posixpath
from six import string_types
from types import ModuleType
from six import text_type
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from docutils import nodes
import sphinx
from sphinx import addnodes
from sphinx.util import import_object, rst
from sphinx.util.compat import Directive
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.ext.autodoc import Options
# -- autosummary_toc node ------------------------------------------------------
class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
crawled = {}
def crawl_toc(node, depth=1):
crawled[node] = True
for j, subnode in enumerate(node):
try:
if (isinstance(subnode, autosummary_toc) and
isinstance(subnode[0], addnodes.toctree)):
env.note_toctree(env.docname, subnode[0])
continue
except IndexError:
continue
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
crawl_toc(subnode, depth + 1)
crawl_toc(doctree)
def autosummary_toc_visit_html(self, node):
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
pass
# -- autosummary_table node ----------------------------------------------------
class autosummary_table(nodes.comment):
pass
def autosummary_table_visit_html(self, node):
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
for row in tbody:
col1_entry = row[0]
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
new_text = text_type(subnode.astext())
new_text = new_text.replace(u" ", u"\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
# -- autodoc integration -------------------------------------------------------
class FakeDirective(object):
env = {}
genopt = Options()
def get_documenter(obj, parent):
"""Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is
another Python object (e.g. a module or a class) to which *obj*
belongs.
"""
from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
ModuleDocumenter
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
# Get the correct documenter class for *obj*
classes = [cls for cls in AutoDirective._registry.values()
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter
# -- .. autosummary:: ----------------------------------------------------------
class Autosummary(Directive):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec = {
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'template': directives.unchanged,
}
def warn(self, msg):
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
self.env = env = self.state.document.settings.env
self.genopt = Options()
self.warnings = []
self.result = ViewList()
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
nodes = self.get_table(items)
if 'toctree' in self.options:
dirname = posixpath.dirname(env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
for name, sig, summary, real_name in items:
docname = posixpath.join(tree_prefix, real_name)
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
self.warn('toctree references unknown document %r'
% docname)
docnames.append(docname)
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docn) for docn in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode = autosummary_toc('', '', tocnode)
nodes.append(tocnode)
return self.warnings + nodes
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = []
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent, modname = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
self.result = ViewList() # initialize for each documenter
full_name = real_name
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
# of inner classes can be documented
full_name = modname + '::' + full_name[len(modname) + 1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, full_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if documenter.options.members and not documenter.check_module():
continue
# try to also get a source code analyzer for attribute docs
try:
documenter.analyzer = ModuleAnalyzer.for_module(
documenter.get_real_modname())
# parse right now, to get PycodeErrors on parsing (results will
# be cached anyway)
documenter.analyzer.find_attr_docs()
except PycodeError as err:
documenter.env.app.debug(
'[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
documenter.analyzer = None
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
# -- Grab the summary
documenter.add_content(None)
doc = list(documenter.process_doc([self.result.data]))
while doc and not doc[0].strip():
doc.pop(0)
# If there's a blank line, then we can assume the first sentence /
# paragraph has ended, so anything after shouldn't be part of the
# summary
for i, piece in enumerate(doc):
if not piece.strip():
doc = doc[:i]
break
# Try to find the "first sentence", which may span multiple lines
m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = r'p{0.5\linewidth}p{0.5\linewidth}'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars - 2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars - 4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars - len(sig) - 4 - 2)
return u"(%s)" % sig
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
Returns: joined_string
"""
full_str = sep.join(items)
if len(full_str) < max_chars:
return full_str
n_chars = 0
n_items = 0
for j, item in enumerate(items):
n_chars += len(item) + len(sep)
if n_chars < max_chars - len(overflow_marker):
n_items += 1
else:
break
return sep.join(list(items[:n_items]) + [overflow_marker])
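# Illustrative examples (not part of the original module):
#   limited_join(", ", ["aaa", "bbb"], max_chars=30)        -> "aaa, bbb"
#   limited_join(", ", ["aaa", "bbb", "ccc"], max_chars=12) -> "aaa, ..."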
# -- Importing items -----------------------------------------------------------
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.ref_context.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.ref_context.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes
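# For example, with ``py:module`` set to "foo" and ``py:class`` set to "Bar",
# the returned prefixes are ["foo.Bar", "foo", None] (illustrative note only).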
def import_by_name(name, prefixes=[None]):
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = []
for prefix in prefixes:
try:
if prefix:
prefixed_name = '.'.join([prefix, name])
else:
prefixed_name = name
obj, parent, modname = _import_by_name(prefixed_name)
return prefixed_name, obj, parent, modname
except ImportError:
tried.append(prefixed_name)
raise ImportError('no module named %s' % ' or '.join(tried))
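# Illustrative usage (an assumption, not part of the original module):
#   import_by_name("path.join", prefixes=["os", None])
#     -> ("os.path.join", <function join>, <module 'os.path'>, "os.path")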
def _import_by_name(name):
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# first, try to interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
__import__(modname)
mod = sys.modules[modname]
return getattr(mod, name_parts[-1]), mod, modname
except (ImportError, IndexError, AttributeError):
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(range(1, len(name_parts) + 1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:
__import__(modname)
except ImportError:
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent, modname
else:
return sys.modules[modname], None, modname
except (ValueError, ImportError, AttributeError, KeyError) as e:
raise ImportError(*e.args)
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
classes=content['classes'])
return r
def get_rst_suffix(app):
def get_supported_format(suffix):
parser_class = app.config.source_parsers.get(suffix)
if parser_class is None:
return ('restructuredtext',)
if isinstance(parser_class, string_types):
parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
return None
def process_generate_options(app):
genfiles = app.config.autosummary_generate
if genfiles and not hasattr(genfiles, '__len__'):
env = app.builder.env
genfiles = [env.doc2path(x, base=None) for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
if not genfiles:
return
from sphinx.ext.autosummary.generate import generate_autosummary_docs
ext = app.config.source_suffix
genfiles = [genfile + (not genfile.endswith(tuple(ext)) and ext[0] or '')
for genfile in genfiles]
suffix = get_rst_suffix(app)
if suffix is None:
app.warn('autosummary generates .rst files internally. '
'But your source_suffix does not contain .rst. Skipped.')
return
generate_autosummary_docs(genfiles, builder=app.builder,
warn=app.warn, info=app.info, suffix=suffix,
base_path=app.srcdir)
def setup(app):
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
html=(autosummary_toc_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_node(autosummary_table,
html=(autosummary_table_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_directive('autosummary', Autosummary)
app.add_role('autolink', autolink_role)
app.connect('doctree-read', process_autosummary_toc)
app.connect('builder-inited', process_generate_options)
app.add_config_value('autosummary_generate', [], True, [bool])
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
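# Illustrative conf.py snippet for enabling this extension (an assumption about
# typical usage, not part of this module):
#   extensions = ['sphinx.ext.autosummary']
#   autosummary_generate = True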
|
|
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
import dendrogenous as dg
import dendrogenous.core
import numpy as np
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
import os
import shutil
import sys
import pytest
import pickle
import time
from socket import gethostname
from Bio import SeqIO
from Bio import Phylo
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from dendrogenous.test.base import BaseTestCase
class TestCoreInit(BaseTestCase):
"""
Unit tests for the core dendrogeous init and reform methods
"""
def setUp(self):
#self.test_class = dg.core.Dendrogenous(self.test_record,
# self.settings)
self.test_record = SeqRecord(\
Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEGGEEEEVAVF",
IUPAC.protein),
id="YP_025292_1", name="HokC",
description="toxic membrane protein, small")
dir_paths = {"run_data": "0.run_data",
"blast_hits": "1.blast_hits",
"blast_fail": os.path.join("1.blast_hits", "insufficient_hits"),
"alignment": "2.alignment",
"mask": "3.mask",
"mask_fail": os.path.join("3.mask", "insufficient_sites"),
"tree": "4.phylogeny",
"named": "5.name"}
for key in dir_paths.keys():
dir_paths[key] = os.path.join('testdir', dir_paths[key])
self.dir_paths = dir_paths
def init_core(self):
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.logger_name = "test"
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
return test_class
def test_init_clean(self):
"""
Ensure class init works when provided a seqrecord and settings file
"""
settings = mock.Mock(dg.settings.Settings)
settings.dir_paths = self.dir_paths
settings.logger_name = "test"
test_class = dg.core.Dendrogenous(self.test_record,
settings)
expected_name = "YP_025292_1"
expected_seed = (">YP_025292_1\n"
"MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEGGEEEEVAVF\n")
self.assertEqual(test_class.seed, expected_seed)
self.assertEqual(test_class.seq_name, expected_name)
self.assertEqual(test_class.settings.dir_paths, self.dir_paths)
self.assertEqual(test_class.aligned_seqs, os.path.join(self.dir_paths['alignment'],
expected_name + ".afa"))
self.assertEqual(test_class.masked_seqs, os.path.join(self.dir_paths['mask'],
expected_name + ".mask"))
self.assertEqual(test_class.phylogeny, os.path.join(self.dir_paths['tree'],
expected_name + ".tre"))
self.assertEqual(test_class.named_phylogeny, os.path.join(self.dir_paths['named'],
expected_name + ".named_tre"))
def test_init_bad_seqrecord(self):
"""
Ensure ValueError is raised if class is instantiated without valid seqrecord
"""
settings = mock.Mock(dg.settings.Settings)
invalid_seq = ""
with self.assertRaises(ValueError):
dg.core.Dendrogenous(invalid_seq,
settings)
def test_init_bad_settings(self):
"""
Ensure ValueError is raised if class is instantiated without valid settings
"""
invalid_settings = ""
with self.assertRaises(ValueError):
dg.core.Dendrogenous(self.test_record,
invalid_settings)
class TestCoreGetSeqs(BaseTestCase):
"""
Test the components of the get_seqs method and the _blast and _parse
functions it relies on
"""
def setUp(self):
#self.test_class = dg.core.Dendrogenous(self.test_record,
# self.settings)
self.test_record = SeqRecord(\
Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEGGEEEEVAVF",
IUPAC.protein),
id="YP_025292_1", name="HokC",
description="toxic membrane protein, small")
dir_paths = {"run_data": "0.run_data",
"blast_hits": "1.blast_hits",
"blast_fail": os.path.join("1.blast_hits", "insufficient_hits"),
"alignment": "2.alignment",
"mask": "3.mask",
"mask_fail": os.path.join("3.mask", "insufficient_sites"),
"tree": "4.phylogeny",
"named": "5.name"}
for key in dir_paths.keys():
dir_paths[key] = os.path.join('testdir', dir_paths[key])
self.dir_paths = dir_paths
def test__blast_runs(self):
"""
Make sure the _blast method correctly runs and returns decoded XML output
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'blastp': os.path.join(self.binary_path, "blastp")}
mock_settings.blast_settings = {'num_seqs': 1,
'evalue': 1e-5}
mock_settings.logger_name = "test"
genome = os.path.join(self.test_resources, "Escherichia_coli_O157_H7_str._Sakai.fas")
binary_paths = os.path.join("dendrogenous", "dependencies")
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
expected_output = self.parse_file(os.path.join(self.test_resources,
"expected_core_blastp_output.xml"))
blast_output = test_class._blast(genome)
self.assertEqual(blast_output.split(os.linesep), expected_output)
@pytest.mark.skipif("gethostname() != 'zorya'")
def test__parse_blast(self):
"""
Ensure correct parsing of hits locally when connected to server
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.logger_name = "test"
with open(os.path.join(self.test_resources, 'secret.pickle'), 'rb') as secret:
mock_settings.dbconfig = pickle.load(secret)
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
blastp_xml = self.parse_file(os.path.join(self.test_resources,
"expected_core_blastp_output.xml"))
blastp_output = "\n".join(blastp_xml)
parsed_blast = test_class._parse_blast(blastp_output)
expected_parsed_hit_id = '15829270'
expected_parsed_seq = Seq('MLNTC', IUPAC.IUPACProtein())
self.assertEqual(parsed_blast[0].id, expected_parsed_hit_id)
self.assertEqual(parsed_blast[0].seq[:5], expected_parsed_seq)
@pytest.mark.skipif("gethostname() != 'zorya'")
def test__parse_blast_broken(self):
"""
Ensure parsing of a malformed BLAST XML output returns an empty hit list
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.logger_name = "test"
with open(os.path.join(self.test_resources, 'secret.pickle'), 'rb') as secret:
mock_settings.dbconfig = pickle.load(secret)
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
blastp_xml = self.parse_file(os.path.join(self.test_resources,
"broken_blastp_output.xml"))
blastp_output = "\n".join(blastp_xml)
parsed_blast = test_class._parse_blast(blastp_output)
expected_output = []
self.assertEqual(parsed_blast, expected_output)
@pytest.mark.skipif("gethostname() != 'zorya'")
def test_get_seqs(self):
"""
Test dendrogenous.get_seqs() works correctly and writes a file of seqs
to the appropriate directory - integration test with _blast and _parse
"""
#configure all the dependencies in a mock settings object
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'blastp': os.path.join(self.binary_path, "blastp")}
mock_settings.minimums = {'min_seqs': 3}
mock_settings.blast_settings = {'num_seqs': 2,
'evalue': 5}
mock_settings.genomes= [os.path.join(self.test_resources, "Escherichia_coli_O157_H7_str._Sakai.fas"),
os.path.join(self.test_resources, "Escherichia_coli_IAI39.fas"),
os.path.join(self.test_resources, "Nanoarchaeum_equitans_Kin4-M.fas")]
mock_settings.logger_name = "test"
# make output dir that is normally done by runner
os.mkdir('testdir')
os.mkdir(mock_settings.dir_paths['blast_hits'])
# load db settings from secret pickle file
with open(os.path.join(self.test_resources, 'secret.pickle'), 'rb') as secret:
mock_settings.dbconfig = pickle.load(secret)
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
test_class.get_seqs()
expected_output_file = os.path.join(self.dir_paths['blast_hits'], 'YP_025292_1.fas')
self.assertTrue(os.path.exists(expected_output_file))
with open(expected_output_file, 'r') as out_fh:
seqs = list(SeqIO.parse(out_fh, 'fasta'))
print(seqs)
self.assertEqual(len(seqs), 6)
@pytest.mark.skipif("gethostname() != 'zorya'")
def test_get_seqs_fails_correctly(self):
"""
Test dendrogenous.get_seqs() works correctly and writes a file of seqs
to the appropriate directory - integration test with _blast and _parse
Test it correctly identifies too few hits and moves file to insufficient hits dir
"""
#configure all the dependencies in a mock settings object
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'blastp': os.path.join(self.binary_path, "blastp")}
mock_settings.minimums = {'min_seqs': 10}
mock_settings.blast_settings = {'num_seqs': 2,
'evalue': 5}
mock_settings.genomes= [os.path.join(self.test_resources, "Escherichia_coli_O157_H7_str._Sakai.fas"),
os.path.join(self.test_resources, "Escherichia_coli_IAI39.fas"),
os.path.join(self.test_resources, "Nanoarchaeum_equitans_Kin4-M.fas")]
mock_settings.logger_name = "test"
# make output dir that is normally done by runner
os.mkdir('testdir')
os.mkdir(mock_settings.dir_paths['blast_hits'])
os.mkdir(mock_settings.dir_paths['blast_fail'])
# load db settings from secret pickle file
with open(os.path.join(self.test_resources, 'secret.pickle'), 'rb') as secret:
mock_settings.dbconfig = pickle.load(secret)
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
with self.assertRaises(dg.utils.GetSeqFail):
test_class.get_seqs()
expected_output_file = os.path.join(self.dir_paths['blast_fail'], 'YP_025292_1.insufficient_hits')
self.assertTrue(os.path.exists(expected_output_file))
with open(expected_output_file, 'r') as out_fh:
seqs = list(SeqIO.parse(out_fh, 'fasta'))
print(seqs)
self.assertEqual(len(seqs), 6)
def tearDown(self):
if os.path.exists('testdir'):
shutil.rmtree('testdir')
class TestPhylogenyPipe(BaseTestCase):
"""
Test remaining functions used in dg core class phylogeny pipe
"""
def setUp(self):
self.test_record = SeqRecord(\
Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEGGEEEEVAVF",
IUPAC.protein),
id="test")
self.test_dir = 'testdir2'
dir_paths = {"run_data": "0.run_data",
"blast_hits": "1.blast_hits",
"blast_fail": os.path.join("1.blast_hits", "insufficient_hits"),
"alignment": "2.alignment",
"mask": "3.mask",
"mask_fail": os.path.join("3.mask", "insufficient_sites"),
"tree": "4.phylogeny",
"named": "5.name"}
for key in dir_paths.keys():
dir_paths[key] = os.path.join(self.test_dir, dir_paths[key])
self.dir_paths = dir_paths
os.mkdir(self.test_dir)
def test_align(self):
"""
Check align runs correctly
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'kalign': os.path.join(self.binary_path, "kalign")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['blast_hits'])
os.mkdir(self.dir_paths['alignment'])
shutil.copy(os.path.join(self.test_resources, 'test.fas'), self.dir_paths['blast_hits'])
expected_file = os.path.join(self.dir_paths['alignment'], 'test.afa')
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
with mock.patch.object(test_class, 'get_seqs', return_value=None) as mock_method:
test_class.align()
self.assertEqual(self.file_hash(expected_file),
self.file_hash(os.path.join(self.test_resources, 'test_alignment.afa')))
self.assertFalse(mock_method.called)
def test_align_calls_seqs_if_seqs_missing(self):
"""
Check that align calls get_seqs if the seqs file is missing
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'kalign': os.path.join(self.binary_path, "kalign")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['blast_hits'])
os.mkdir(self.dir_paths['alignment'])
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
# Patch out get_seqs so it doesn't run, and patch the output check, which would
# otherwise fail because the patched get_seqs never writes any output.
with mock.patch.object(test_class, 'get_seqs', return_value=None) as mock_method:
with mock.patch.object(test_class, '_check_output', return_value=None) as mock_check:
test_class.align()
self.assertTrue(mock_method.called)
self.assertTrue(mock_check.called)
def test_mask_normal(self):
"""
Check mask runs correctly when alignment file exists and mask has enough sites
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.minimums = {'min_sites': 29}
mock_settings.binary_paths = {'trimal': os.path.join(self.binary_path, "trimal")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['alignment'])
os.mkdir(self.dir_paths['mask'])
shutil.copy(os.path.join(self.test_resources, 'test_alignment.afa'),
os.path.join(self.dir_paths['alignment'], 'test.afa'))
expected_file = os.path.join(self.dir_paths['mask'], 'test.mask')
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
with mock.patch.object(test_class, 'align', return_value=None) as mock_method:
test_class.mask()
self.assertEqual(self.file_hash(expected_file),
self.file_hash(os.path.join(self.test_resources, 'test_mask.mask')))
self.assertFalse(mock_method.called)
def test_mask_needs_automated_mask(self):
"""
Check mask correctly reruns trimal with automated if nogaps produces too small a mask
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.minimums = {'min_sites': 40}
mock_settings.binary_paths = {'trimal': os.path.join(self.binary_path, "trimal")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['alignment'])
os.mkdir(self.dir_paths['mask'])
shutil.copy(os.path.join(self.test_resources, 'test_alignment_auto_mask.afa'),
os.path.join(self.dir_paths['alignment'], 'test.afa'))
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
expected_file = os.path.join(self.dir_paths['mask'], 'test.mask')
with mock.patch.object(test_class, 'align', return_value=None) as mock_method:
test_class.mask()
self.assertTrue(os.path.exists(expected_file))
self.assertEqual(self.file_hash(expected_file),
self.file_hash(os.path.join(self.test_resources, 'test_mask_automated.mask')))
self.assertFalse(mock_method.called)
def test_mask_fails_correctly(self):
"""
Ensure mask fails correctly if automated and nogaps masking
still results in too short a file
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.logger_name = "test"
mock_settings.minimums = {'min_sites': 100}
mock_settings.binary_paths = {'trimal': os.path.join(self.binary_path, "trimal")}
os.mkdir(self.dir_paths['alignment'])
os.mkdir(self.dir_paths['mask'])
os.mkdir(self.dir_paths['mask_fail'])
shutil.copy(os.path.join(self.test_resources, 'test_alignment.afa'),
os.path.join(self.dir_paths['alignment'], 'test.afa'))
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
with self.assertRaises(dg.utils.MaskFail):
test_class.mask()
not_expected_file = os.path.join(self.dir_paths['mask'], 'test.mask')
expected_file = os.path.join(self.dir_paths['mask_fail'], 'test.mask_too_short')
self.assertFalse(os.path.exists(not_expected_file))
self.assertTrue(os.path.exists(expected_file))
def test_mask_calls_align_if_alignment_missing(self):
"""
Ensure dg.mask() calls dg.align() if it can't find the alignment file
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.minimums = {'min_sites': 29}
mock_settings.binary_paths = {'trimal': os.path.join(self.binary_path, "trimal")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['alignment'])
os.mkdir(self.dir_paths['mask'])
expected_file = os.path.join(self.dir_paths['mask'], 'test.mask')
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
# align is mocked, so mask will still fail (nothing is actually aligned);
# hence the try/except below.
with mock.patch.object(test_class, 'align', return_value=None) as mock_method:
try:
test_class.mask()
except Exception:
pass
self.assertTrue(mock_method.called)
# not sure how to mock masked_seqs to return a given value
#def test__mask_check(self):
# """
# ensure mask check returns correct sequence length for first seq in masked
# fasta
# """
# mock_settings = mock.Mock(dg.settings.Settings)
# mock_settings.dir_paths = self.dir_paths
# test_class = dg.core.Dendrogenous(self.test_record,
# mock_settings)
# test_file = os.path.join(self.test_resources, 'test_mask.mask')
# with mock.patch.object(test_class, 'masked_seqs', new_callable=mock.PropertyMock) as mock_attr:
# mock_attr.return_value = test_file
# mask_size = test_class._mask_check()
# self.assertEqual(mask_size, 52)
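# One possible approach (untested sketch): since masked_seqs looks like a plain
# instance attribute (see TestCoreInit.test_init_clean), it could probably be
# overridden directly instead of mocked, e.g.
#   test_class.masked_seqs = os.path.join(self.test_resources, 'test_mask.mask')
#   self.assertEqual(test_class._mask_check(), 52)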
def test_phylogeny_normal(self):
"""
Ensure phylogeny code correctly runs and generates a phylogeny in the right place
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'FastTree': os.path.join(self.binary_path, "FastTree")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['mask'])
os.mkdir(self.dir_paths['tree'])
shutil.copy(os.path.join(self.test_resources, 'test_mask.mask'),
os.path.join(self.dir_paths['mask'], 'test.mask'))
expected_file = os.path.join(self.dir_paths['tree'], 'test.tre')
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
with mock.patch.object(test_class, 'mask', return_value=None) as mock_method:
test_class.estimate_phylogeny()
self.assertEqual(self.file_hash(expected_file),
self.file_hash(os.path.join(self.test_resources, 'test_tree.tre')))
self.assertFalse(mock_method.called)
def test_phylogeny_runs_mask_if_mask_is_missing(self):
"""
Ensure phylogeny code correctly calls self.mask() if the mask is missing
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.binary_paths = {'FastTree': os.path.join(self.binary_path, "FastTree")}
mock_settings.logger_name = "test"
os.mkdir(self.dir_paths['mask'])
os.mkdir(self.dir_paths['tree'])
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
with mock.patch.object(test_class, 'mask', return_value=None) as mock_method:
with mock.patch.object(test_class, '_check_output', return_value=None) as mock_check:
test_class.estimate_phylogeny()
self.assertTrue(mock_method.called)
self.assertTrue(mock_check.called)
@pytest.mark.skipif("gethostname() != 'zorya'")
def test_phylogeny_rename(self):
"""
Ensure phylogeny rename works as expected
"""
mock_settings = mock.Mock(dg.settings.Settings)
mock_settings.dir_paths = self.dir_paths
mock_settings.logger_name = "test"
with open(os.path.join(self.test_resources, 'secret.pickle'), 'rb') as secret:
mock_settings.dbconfig = pickle.load(secret)
os.mkdir(self.dir_paths['tree'])
os.mkdir(self.dir_paths['named'])
shutil.copy(os.path.join(self.test_resources, 'name_test.tre'),
os.path.join(self.dir_paths['tree'], 'test.tre'))
test_class = dg.core.Dendrogenous(self.test_record,
mock_settings)
expected_file = os.path.join(self.dir_paths['named'], 'test.named_tre')
with mock.patch.object(test_class, 'estimate_phylogeny', return_value=None) as mock_method:
test_class.name_phylogeny()
self.assertFalse(mock_method.called)
self.assertTrue(os.path.exists(expected_file))
output_tree = Phylo.read(expected_file, 'newick')
# note: this parser only returns the last whitespace-separated label
terminal_labels = [leaf.name for leaf in output_tree.get_terminals()]
self.assertEqual(len(terminal_labels), 10)
self.assertIn("TAXA", terminal_labels)
self.assertIn("SEQUENCE", terminal_labels)
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
class TestBuildTraining(BaseTestCase):
def test_function(self):
training_dir = os.path.join(self.test_resources, "training")
settings = {"class_defs": {"host": ["Alveolata",
"Paramecium",
"Tetrahymena",
"Oxytricha"],
"endosymbiont": ["Arabidopsis",
"Chlamydomonas",
"Ostreococcus",
"Micromonas",
"Chlorella",
"Physcomitrella"],
"food": ["Bacteria",
"Bacillus",
"Escherichia",
"Salmonella",
"Chlamydophila",
"Chlorobium",
"Deinococcus",
"Caulobacter"],
"unknown":["Saccharomyces",
"Neurospora",
"Homo",
"Mus",
"Dictyostelium",
"Toxoplasma",
"Guillardia",
"Bigelowiella",
"Emiliania",
"Aureococcus",
"Ectocarpus",
"Schizosaccharomyces",
"Amycolatopsis",
"Aquifex",
"Sulfolobus",
"Nanoarchaeum",
"Haloferax",
"Methanococcus",
"Cenarchaeum"]},
"class_locs": {"host": os.path.join(training_dir, "host"),
"endosymbiont": os.path.join(training_dir, "endosymbiont"),
"food": os.path.join(training_dir, "food"),
"unknown": os.path.join(training_dir, "unknown")}}
label_def = settings['class_defs']
label_loc = settings['class_locs']
a = dg.core.BuildTraining(label_def, label_loc)
X, y, encoded_labels = a.build_training()
self.assertEqual(X.shape, (46,4))
self.assertEqual(y.shape, (46,1))
self.assertEqual(encoded_labels, {"endosymbiont":0, "food":1, "host":2, "unknown":3})
class TestBuildTest(BaseTestCase):
def test_functional(self):
training_dir = os.path.join(self.test_resources, "training")
settings = {"class_defs": {"host": ["Alveolata",
"Paramecium",
"Tetrahymena",
"Oxytricha"],
"endosymbiont": ["Arabidopsis",
"Chlamydomonas",
"Ostreococcus",
"Micromonas",
"Chlorella",
"Physcomitrella"],
"food": ["Bacteria",
"Bacillus",
"Escherichia",
"Salmonella",
"Chlamydophila",
"Chlorobium",
"Deinococcus",
"Caulobacter"],
"unknown":["Saccharomyces",
"Neurospora",
"Homo",
"Mus",
"Dictyostelium",
"Toxoplasma",
"Guillardia",
"Bigelowiella",
"Emiliania",
"Aureococcus",
"Ectocarpus",
"Schizosaccharomyces",
"Amycolatopsis",
"Aquifex",
"Sulfolobus",
"Nanoarchaeum",
"Haloferax",
"Methanococcus",
"Cenarchaeum"]},
"test_dir": os.path.join(training_dir, "host")}
label_def = settings['class_defs']
test_dir = settings['test_dir']
a = dg.core.BuildTest(label_def, test_dir)
X = a.build_test()
self.assertEqual(X.shape, (16,4))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import scoped_session
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import securitygroup as ext_sg
LOG = logging.getLogger(__name__)
IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP,
constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP,
constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP,
constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6}
class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron security group."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
class DefaultSecurityGroup(model_base.BASEV2):
__tablename__ = 'default_security_group'
tenant_id = sa.Column(sa.String(255), primary_key=True, nullable=False)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
security_group = orm.relationship(
SecurityGroup, lazy='joined',
backref=orm.backref('default_security_group', cascade='all,delete'),
primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id",
)
class SecurityGroupPortBinding(model_base.BASEV2):
"""Represents binding between neutron ports and security profiles."""
port_id = sa.Column(sa.String(36),
sa.ForeignKey("ports.id",
ondelete='CASCADE'),
primary_key=True)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id"),
primary_key=True)
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load security group bindings
ports = orm.relationship(
models_v2.Port,
backref=orm.backref("security_groups",
lazy='joined', cascade='delete'))
class SecurityGroupRule(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Represents a v2 neutron security group rule."""
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
remote_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=True)
direction = sa.Column(sa.Enum('ingress', 'egress',
name='securitygrouprules_direction'))
ethertype = sa.Column(sa.String(40))
protocol = sa.Column(sa.String(40))
port_range_min = sa.Column(sa.Integer)
port_range_max = sa.Column(sa.Integer)
remote_ip_prefix = sa.Column(sa.String(255))
security_group = orm.relationship(
SecurityGroup,
backref=orm.backref('rules', cascade='all,delete', lazy='joined'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
source_group = orm.relationship(
SecurityGroup,
backref=orm.backref('source_rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id")
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
"""Mixin class to add security group to db_base_plugin_v2."""
__native_bulk_support = True
def create_security_group_bulk(self, context, security_group_rule):
return self._create_bulk('security_group', context,
security_group_rule)
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
If default_sg is True, this is created as the default security group
for the given tenant if one does not already exist.
"""
s = security_group['security_group']
kwargs = {
'context': context,
'security_group': s,
'is_default': default_sg,
}
# NOTE(armax): a callback exception here will prevent the request
# from being processed. This is a hook point for backend's validation;
# we raise to propagate the reason for the failure.
try:
registry.notify(
resources.SECURITY_GROUP, events.BEFORE_CREATE, self,
**kwargs)
except exceptions.CallbackFailure as e:
raise ext_sg.SecurityGroupConflict(reason=e)
tenant_id = self._get_tenant_id_for_create(context, s)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
with db_api.autonested_transaction(context.session):
security_group_db = SecurityGroup(id=s.get('id') or (
uuidutils.generate_uuid()),
description=s['description'],
tenant_id=tenant_id,
name=s['name'])
context.session.add(security_group_db)
if default_sg:
context.session.add(DefaultSecurityGroup(
security_group=security_group_db,
tenant_id=security_group_db['tenant_id']))
for ethertype in ext_sg.sg_supported_ethertypes:
if default_sg:
# Allow intercommunication
ingress_rule = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='ingress',
ethertype=ethertype,
source_group=security_group_db)
context.session.add(ingress_rule)
egress_rule = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='egress',
ethertype=ethertype)
context.session.add(egress_rule)
secgroup_dict = self._make_security_group_dict(security_group_db)
kwargs['security_group'] = secgroup_dict
registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
**kwargs)
return secgroup_dict
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False, default_sg=False):
# If default_sg is True do not call _ensure_default_security_group()
# so this can be done recursively. Context.tenant_id is checked
# because all the unit tests do not explicitly set the context on
# GETS. TODO(arosen) context handling can probably be improved here.
if not default_sg and context.tenant_id:
tenant_id = filters.get('tenant_id')
if tenant_id:
tenant_id = tenant_id[0]
else:
tenant_id = context.tenant_id
self._ensure_default_security_group(context, tenant_id)
marker_obj = self._get_marker_obj(context, 'security_group', limit,
marker)
return self._get_collection(context,
SecurityGroup,
self._make_security_group_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit, marker_obj=marker_obj,
page_reverse=page_reverse)
def get_security_groups_count(self, context, filters=None):
return self._get_collection_count(context, SecurityGroup,
filters=filters)
def get_security_group(self, context, id, fields=None, tenant_id=None):
"""Tenant id is given to handle the case when creating a security
        group rule on behalf of another user.
"""
if tenant_id:
tmp_context_tenant_id = context.tenant_id
context.tenant_id = tenant_id
try:
with context.session.begin(subtransactions=True):
ret = self._make_security_group_dict(self._get_security_group(
context, id), fields)
ret['security_group_rules'] = self.get_security_group_rules(
context, {'security_group_id': [id]})
finally:
if tenant_id:
context.tenant_id = tmp_context_tenant_id
return ret
def _get_security_group(self, context, id):
try:
query = self._model_query(context, SecurityGroup)
sg = query.filter(SecurityGroup.id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupNotFound(id=id)
return sg
def delete_security_group(self, context, id):
filters = {'security_group_id': [id]}
ports = self._get_port_security_group_bindings(context, filters)
if ports:
raise ext_sg.SecurityGroupInUse(id=id)
# confirm security group exists
sg = self._get_security_group(context, id)
if sg['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
kwargs = {
'context': context,
'security_group_id': id,
'security_group': sg,
}
# NOTE(armax): a callback exception here will prevent the request
# from being processed. This is a hook point for backend's validation;
# we raise to propagate the reason for the failure.
try:
registry.notify(
resources.SECURITY_GROUP, events.BEFORE_DELETE, self,
**kwargs)
except exceptions.CallbackFailure as e:
reason = _('cannot be deleted due to %s') % e
raise ext_sg.SecurityGroupInUse(id=id, reason=reason)
with context.session.begin(subtransactions=True):
context.session.delete(sg)
kwargs.pop('security_group')
registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self,
**kwargs)
def update_security_group(self, context, id, security_group):
s = security_group['security_group']
kwargs = {
'context': context,
'security_group_id': id,
'security_group': s,
}
# NOTE(armax): a callback exception here will prevent the request
# from being processed. This is a hook point for backend's validation;
# we raise to propagate the reason for the failure.
try:
registry.notify(
resources.SECURITY_GROUP, events.BEFORE_UPDATE, self,
**kwargs)
except exceptions.CallbackFailure as e:
raise ext_sg.SecurityGroupConflict(reason=e)
with context.session.begin(subtransactions=True):
sg = self._get_security_group(context, id)
if sg['name'] == 'default' and 'name' in s:
raise ext_sg.SecurityGroupCannotUpdateDefault()
sg.update(s)
sg_dict = self._make_security_group_dict(sg)
kwargs['security_group'] = sg_dict
registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
**kwargs)
return sg_dict
def _make_security_group_dict(self, security_group, fields=None):
res = {'id': security_group['id'],
'name': security_group['name'],
'tenant_id': security_group['tenant_id'],
'description': security_group['description']}
res['security_group_rules'] = [self._make_security_group_rule_dict(r)
for r in security_group.rules]
return self._fields(res, fields)
def _make_security_group_binding_dict(self, security_group, fields=None):
res = {'port_id': security_group['port_id'],
'security_group_id': security_group['security_group_id']}
return self._fields(res, fields)
def _create_port_security_group_binding(self, context, port_id,
security_group_id):
with context.session.begin(subtransactions=True):
db = SecurityGroupPortBinding(port_id=port_id,
security_group_id=security_group_id)
context.session.add(db)
def _get_port_security_group_bindings(self, context,
filters=None, fields=None):
return self._get_collection(context,
SecurityGroupPortBinding,
self._make_security_group_binding_dict,
filters=filters, fields=fields)
def _delete_port_security_group_bindings(self, context, port_id):
query = self._model_query(context, SecurityGroupPortBinding)
bindings = query.filter(
SecurityGroupPortBinding.port_id == port_id)
with context.session.begin(subtransactions=True):
for binding in bindings:
context.session.delete(binding)
def create_security_group_rule_bulk(self, context, security_group_rules):
return self._create_bulk('security_group_rule', context,
security_group_rules)
def create_security_group_rule_bulk_native(self, context,
security_group_rules):
rules = security_group_rules['security_group_rules']
scoped_session(context.session)
security_group_id = self._validate_security_group_rules(
context, security_group_rules)
with context.session.begin(subtransactions=True):
if not self.get_security_group(context, security_group_id):
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
self._check_for_duplicate_rules(context, rules)
ret = []
for rule_dict in rules:
res_rule_dict = self._create_security_group_rule(
context, rule_dict, validate=False)
ret.append(res_rule_dict)
return ret
def create_security_group_rule(self, context, security_group_rule):
return self._create_security_group_rule(context, security_group_rule)
def _create_security_group_rule(self, context, security_group_rule,
validate=True):
if validate:
self._validate_security_group_rule(context, security_group_rule)
self._check_for_duplicate_rules_in_db(context, security_group_rule)
rule_dict = security_group_rule['security_group_rule']
kwargs = {
'context': context,
'security_group_rule': rule_dict
}
# NOTE(armax): a callback exception here will prevent the request
# from being processed. This is a hook point for backend's validation;
# we raise to propagate the reason for the failure.
try:
registry.notify(
resources.SECURITY_GROUP_RULE, events.BEFORE_CREATE, self,
**kwargs)
except exceptions.CallbackFailure as e:
raise ext_sg.SecurityGroupConflict(reason=e)
tenant_id = self._get_tenant_id_for_create(context, rule_dict)
with context.session.begin(subtransactions=True):
db = SecurityGroupRule(
id=(rule_dict.get('id') or uuidutils.generate_uuid()),
tenant_id=tenant_id,
security_group_id=rule_dict['security_group_id'],
direction=rule_dict['direction'],
remote_group_id=rule_dict.get('remote_group_id'),
ethertype=rule_dict['ethertype'],
protocol=rule_dict['protocol'],
port_range_min=rule_dict['port_range_min'],
port_range_max=rule_dict['port_range_max'],
remote_ip_prefix=rule_dict.get('remote_ip_prefix'))
context.session.add(db)
res_rule_dict = self._make_security_group_rule_dict(db)
kwargs['security_group_rule'] = res_rule_dict
registry.notify(
resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
**kwargs)
return res_rule_dict
def _get_ip_proto_number(self, protocol):
if protocol is None:
return
        # According to bug 1381379, protocol is always stored as a string to
        # avoid problems with comparing int and string in PostgreSQL. Here
        # the string is converted back to an int so it can be used as before.
return int(IP_PROTOCOL_MAP.get(protocol, protocol))
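    # For example, with the usual IP_PROTOCOL_MAP ('tcp' -> 6, 'udp' -> 17,
    # 'icmp' -> 1) this returns 6 for 'tcp', passes an already numeric string
    # such as '17' straight through as 17, and returns None for None.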
def _validate_port_range(self, rule):
"""Check that port_range is valid."""
if (rule['port_range_min'] is None and
rule['port_range_max'] is None):
return
if not rule['protocol']:
raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
ip_proto = self._get_ip_proto_number(rule['protocol'])
if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]:
if (rule['port_range_min'] is not None and
rule['port_range_max'] is not None and
rule['port_range_min'] <= rule['port_range_max']):
pass
else:
raise ext_sg.SecurityGroupInvalidPortRange()
elif ip_proto == constants.PROTO_NUM_ICMP:
for attr, field in [('port_range_min', 'type'),
('port_range_max', 'code')]:
if rule[attr] is not None and rule[attr] > 255:
raise ext_sg.SecurityGroupInvalidIcmpValue(
field=field, attr=attr, value=rule[attr])
if (rule['port_range_min'] is None and
rule['port_range_max'] is not None):
raise ext_sg.SecurityGroupMissingIcmpType(
value=rule['port_range_max'])
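    # Illustrative rule fragments (values are made up): for TCP/UDP the pair
    # is an ordinary port range, e.g.
    #     {'protocol': 'tcp', 'port_range_min': 80, 'port_range_max': 443}
    # is valid, while for ICMP the pair is reinterpreted as (type, code), each
    # capped at 255, e.g.
    #     {'protocol': 'icmp', 'port_range_min': 8, 'port_range_max': 0}
    # describes echo-request, and a code supplied without a type raises
    # SecurityGroupMissingIcmpType.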
def _validate_single_tenant_and_group(self, security_group_rules):
"""Check that all rules belong to the same security group and tenant
"""
sg_groups = set()
tenants = set()
for rule_dict in security_group_rules['security_group_rules']:
rule = rule_dict['security_group_rule']
sg_groups.add(rule['security_group_id'])
if len(sg_groups) > 1:
raise ext_sg.SecurityGroupNotSingleGroupRules()
tenants.add(rule['tenant_id'])
if len(tenants) > 1:
raise ext_sg.SecurityGroupRulesNotSingleTenant()
return sg_groups.pop()
def _validate_security_group_rule(self, context, security_group_rule):
rule = security_group_rule['security_group_rule']
self._validate_port_range(rule)
self._validate_ip_prefix(rule)
if rule['remote_ip_prefix'] and rule['remote_group_id']:
raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()
remote_group_id = rule['remote_group_id']
# Check that remote_group_id exists for tenant
if remote_group_id:
self.get_security_group(context, remote_group_id,
tenant_id=rule['tenant_id'])
security_group_id = rule['security_group_id']
# Confirm that the tenant has permission
# to add rules to this security group.
self.get_security_group(context, security_group_id,
tenant_id=rule['tenant_id'])
return security_group_id
def _validate_security_group_rules(self, context, security_group_rules):
sg_id = self._validate_single_tenant_and_group(security_group_rules)
for rule in security_group_rules['security_group_rules']:
self._validate_security_group_rule(context, rule)
return sg_id
def _make_security_group_rule_dict(self, security_group_rule, fields=None):
res = {'id': security_group_rule['id'],
'tenant_id': security_group_rule['tenant_id'],
'security_group_id': security_group_rule['security_group_id'],
'ethertype': security_group_rule['ethertype'],
'direction': security_group_rule['direction'],
'protocol': security_group_rule['protocol'],
'port_range_min': security_group_rule['port_range_min'],
'port_range_max': security_group_rule['port_range_max'],
'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
'remote_group_id': security_group_rule['remote_group_id']}
return self._fields(res, fields)
def _make_security_group_rule_filter_dict(self, security_group_rule):
sgr = security_group_rule['security_group_rule']
res = {'tenant_id': [sgr['tenant_id']],
'security_group_id': [sgr['security_group_id']],
'direction': [sgr['direction']]}
include_if_present = ['protocol', 'port_range_max', 'port_range_min',
'ethertype', 'remote_ip_prefix',
'remote_group_id']
for key in include_if_present:
value = sgr.get(key)
if value:
res[key] = [value]
return res
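    # The resulting filter wraps every present value in a single-element list
    # so it can be passed to get_security_group_rules(), e.g. (made-up values)
    #     {'tenant_id': ['t1'], 'security_group_id': ['sg1'],
    #      'direction': ['ingress'], 'protocol': ['tcp'], 'ethertype': ['IPv4']}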
def _check_for_duplicate_rules(self, context, security_group_rules):
for i in security_group_rules:
found_self = False
for j in security_group_rules:
if i['security_group_rule'] == j['security_group_rule']:
if found_self:
raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
found_self = True
self._check_for_duplicate_rules_in_db(context, i)
def _check_for_duplicate_rules_in_db(self, context, security_group_rule):
# Check in database if rule exists
filters = self._make_security_group_rule_filter_dict(
security_group_rule)
db_rules = self.get_security_group_rules(context, filters)
# Note(arosen): the call to get_security_group_rules wildcards
# values in the filter that have a value of [None]. For
# example, filters = {'remote_group_id': [None]} will return
# all security group rules regardless of their value of
        # remote_group_id. Therefore this duplicate check cannot be expressed
        # as a single query unless the behavior of _get_collection() is
        # changed, which is not an option because other methods already rely
        # on that behavior. Instead, we do the filtering below to catch these
        # corner cases.
for db_rule in db_rules:
# need to remove id from db_rule for matching
id = db_rule.pop('id')
if (security_group_rule['security_group_rule'] == db_rule):
raise ext_sg.SecurityGroupRuleExists(id=id)
def _validate_ip_prefix(self, rule):
"""Check that a valid cidr was specified as remote_ip_prefix
No need to check that it is in fact an IP address as this is already
validated by attribute validators.
Check that rule ethertype is consistent with remote_ip_prefix ip type.
Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
"""
input_prefix = rule['remote_ip_prefix']
if input_prefix:
addr = netaddr.IPNetwork(input_prefix)
# set input_prefix to always include the netmask:
rule['remote_ip_prefix'] = str(addr)
# check consistency of ethertype with addr version
if rule['ethertype'] != "IPv%d" % (addr.version):
raise ext_sg.SecurityGroupRuleParameterConflict(
ethertype=rule['ethertype'], cidr=input_prefix)
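    # For example, a bare address is normalized to a host CIDR:
    #     {'remote_ip_prefix': '192.168.1.10', 'ethertype': 'IPv4'}
    # becomes '192.168.1.10/32', while an IPv6 prefix combined with
    # ethertype 'IPv4' raises SecurityGroupRuleParameterConflict.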
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'security_group_rule',
limit, marker)
return self._get_collection(context,
SecurityGroupRule,
self._make_security_group_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit, marker_obj=marker_obj,
page_reverse=page_reverse)
def get_security_group_rules_count(self, context, filters=None):
return self._get_collection_count(context, SecurityGroupRule,
filters=filters)
def get_security_group_rule(self, context, id, fields=None):
security_group_rule = self._get_security_group_rule(context, id)
return self._make_security_group_rule_dict(security_group_rule, fields)
def _get_security_group_rule(self, context, id):
try:
query = self._model_query(context, SecurityGroupRule)
sgr = query.filter(SecurityGroupRule.id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupRuleNotFound(id=id)
return sgr
def delete_security_group_rule(self, context, id):
kwargs = {
'context': context,
'security_group_rule_id': id
}
# NOTE(armax): a callback exception here will prevent the request
# from being processed. This is a hook point for backend's validation;
# we raise to propagate the reason for the failure.
try:
registry.notify(
resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, self,
**kwargs)
except exceptions.CallbackFailure as e:
reason = _('cannot be deleted due to %s') % e
raise ext_sg.SecurityGroupRuleInUse(id=id, reason=reason)
with context.session.begin(subtransactions=True):
query = self._model_query(context, SecurityGroupRule)
if query.filter(SecurityGroupRule.id == id).delete() == 0:
raise ext_sg.SecurityGroupRuleNotFound(id=id)
registry.notify(
resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
**kwargs)
def _extend_port_dict_security_group(self, port_res, port_db):
# Security group bindings will be retrieved from the sqlalchemy
# model. As they're loaded eagerly with ports because of the
# joined load they will not cause an extra query.
security_group_ids = [sec_group_mapping['security_group_id'] for
sec_group_mapping in port_db.security_groups]
port_res[ext_sg.SECURITYGROUPS] = security_group_ids
return port_res
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_extend_port_dict_security_group'])
def _process_port_create_security_group(self, context, port,
security_group_ids):
if attributes.is_attr_set(security_group_ids):
for security_group_id in security_group_ids:
self._create_port_security_group_binding(context, port['id'],
security_group_id)
# Convert to list as a set might be passed here and
# this has to be serialized
port[ext_sg.SECURITYGROUPS] = (security_group_ids and
list(security_group_ids) or [])
def _ensure_default_security_group(self, context, tenant_id):
"""Create a default security group if one doesn't exist.
:returns: the default security group id for given tenant.
"""
# Make no more than two attempts
for attempts in (1, 2):
try:
query = self._model_query(context, DefaultSecurityGroup)
default_group = query.filter_by(tenant_id=tenant_id).one()
return default_group['security_group_id']
except exc.NoResultFound as ex:
if attempts > 1:
                    # the second iteration means that the attempt to add the
                    # default group failed with a duplicate error. Since we
                    # still do not see the group, we are most probably inside
                    # a transaction with REPEATABLE READ isolation level and
                    # need to restart the whole transaction.
raise db_exc.RetryRequest(ex)
security_group = {
'security_group':
{'name': 'default',
'tenant_id': tenant_id,
'description': _('Default security group')}
}
try:
security_group = self.create_security_group(
context, security_group, default_sg=True)
return security_group['id']
except db_exc.DBDuplicateEntry as ex:
# default security group was created concurrently
LOG.debug("Duplicate default security group %s was "
"not created", ex.value)
def _get_security_groups_on_port(self, context, port):
"""Check that all security groups on port belong to tenant.
:returns: all security groups IDs on port belonging to tenant.
"""
p = port['port']
if not attributes.is_attr_set(p.get(ext_sg.SECURITYGROUPS)):
return
if p.get('device_owner') and p['device_owner'].startswith('network:'):
return
port_sg = p.get(ext_sg.SECURITYGROUPS, [])
filters = {'id': port_sg}
tenant_id = p.get('tenant_id')
if tenant_id:
filters['tenant_id'] = [tenant_id]
valid_groups = set(g['id'] for g in
self.get_security_groups(context, fields=['id'],
filters=filters))
requested_groups = set(port_sg)
port_sg_missing = requested_groups - valid_groups
if port_sg_missing:
raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))
return requested_groups
def _ensure_default_security_group_on_port(self, context, port):
# we don't apply security groups for dhcp, router
if (port['port'].get('device_owner') and
port['port']['device_owner'].startswith('network:')):
return
tenant_id = self._get_tenant_id_for_create(context,
port['port'])
default_sg = self._ensure_default_security_group(context, tenant_id)
if not attributes.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
def _check_update_deletes_security_groups(self, port):
"""Return True if port has as a security group and it's value
is either [] or not is_attr_set, otherwise return False
"""
if (ext_sg.SECURITYGROUPS in port['port'] and
not (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS])
and port['port'][ext_sg.SECURITYGROUPS] != [])):
return True
return False
def _check_update_has_security_groups(self, port):
"""Return True if port has security_groups attribute set and
its not empty, or False otherwise.
This method is called both for port create and port update.
"""
if (ext_sg.SECURITYGROUPS in port['port'] and
(attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
port['port'][ext_sg.SECURITYGROUPS] != [])):
return True
return False
def update_security_group_on_port(self, context, id, port,
original_port, updated_port):
"""Update security groups on port.
This method returns a flag which indicates request notification
is required and does not perform notification itself.
It is because another changes for the port may require notification.
"""
need_notify = False
port_updates = port['port']
if (ext_sg.SECURITYGROUPS in port_updates and
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
port_updates[ext_sg.SECURITYGROUPS])):
            # delete the port bindings and recreate them with the new
            # security groups
port_updates[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._delete_port_security_group_bindings(context, id)
self._process_port_create_security_group(
context,
updated_port,
port_updates[ext_sg.SECURITYGROUPS])
need_notify = True
else:
updated_port[ext_sg.SECURITYGROUPS] = (
original_port[ext_sg.SECURITYGROUPS])
return need_notify
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Estimating subunits for entire population of a cell type using Jitter data.
Extending "almost convolutional" model for high resolution stimulus.
"""
from datetime import datetime
import time
import sys
import os.path
import collections
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
from tensorflow.contrib.slim.model_deploy import DeploymentConfig, deploy
from tensorflow.python.profiler.model_analyzer import PrintModelAnalysis
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
from scipy import ndimage
import retina.response_model.python.l1_projection_tf as l1_projection_tf
import retina.response_model.python.population_subunits.jitter.distributed.jitter_model as jitter_model
import retina.response_model.python.population_subunits.jitter.distributed.jitter_model_2 as jitter_model_2
import retina.response_model.python.population_subunits.jitter.distributed.get_data_mat_fast as get_data_mat
import random
FLAGS = flags.FLAGS
## Flags for data locations
flags.DEFINE_string('folder_name', 'experiment_jitter',
'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/distributed5/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/retina_data/Google datasets/2016-04-21-1/data006(2016-04-21-1_data006_data006)/',
'where to take data from?')
# flags for stochastic learning
flags.DEFINE_integer('batchsz', 240*4, 'batch size for training')
flags.DEFINE_integer('n_chunks',1793, 'number of data chunks') # should be 216
flags.DEFINE_integer('num_chunks_to_load', 2*6,
'number of chunks to load for 1 batch of data')
flags.DEFINE_integer('train_len', 216 - 21, 'how much training length to use?')
flags.DEFINE_float('step_sz', 20, 'step size for learning algorithm')
flags.DEFINE_integer('max_steps', 400000, 'maximum number of steps')
## Random number generators initialized
# removes unnecessary data variability when comparing algorithms
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
## Flags for model/loss specification
flags.DEFINE_string('model_id', 'relu_window_mother_sfm', 'which model to fit')
## list of models here, and quick explanation
flags.DEFINE_string('loss', 'poisson', 'which loss to use?')
# poisson, (conditional poisson - TODO), logistic or hinge
## Model specific terms
# useful for convolution-like models
flags.DEFINE_string('architecture','complex',
'the architecture of model to be learnt')
# options : 1 layer, complex (stimulus put to lower dimensions),
# 2 layer_delta (two layered architecture of delta weights)
# stimulus downsampling options - if architecture = '2 layer_stimulus',
# then downsample stimulus with these windows and strides.
flags.DEFINE_integer('stim_downsample_window', 4,
'How to down sample the stimulus')
flags.DEFINE_integer('stim_downsample_stride',4,
'stride to use to downsample stimulus')
# low resolution stimulus will now be maxpooled.
flags.DEFINE_integer('window_maxpool', 2,
'window for maxpooling of downsampled stimulus')
flags.DEFINE_integer('stride_maxpool', 2,
'stride for maxpooling of downsampled stimulus')
# weight windows on stimulus for subunits
flags.DEFINE_integer('window', 16,
'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 16,
'stride for relu_window')
flags.DEFINE_integer('su_channels', 3,
'number of color channels each subunit should take input from')
# some models need regularization of parameters
flags.DEFINE_float('lam_w', 0.000, 'sparsity regularization of w')
flags.DEFINE_float('lam_a', 0.000, 'sparsity regularization of a')
flags.DEFINE_float('rad_a', 1000000000000000000, 'L1 norm radius for constraint on a')
# how to parametrize a - should we use softmax version of a or not
flags.DEFINE_boolean('if_a_sfm', True,
'Should we use softmax of a as subunit to cell weights?')
# How to combine LL across different cells?
flags.DEFINE_boolean('if_weighted_LL', False,
                     'Whether the Poisson log-likelihood should be weighted by the firing rate of each cell')
## Dataset specific
flags.DEFINE_float('n_cells',1, 'number of cells in the dataset')
## Distributed TF specific flags
flags.DEFINE_string("master", "local",
"""BNS name of the TensorFlow master to use.""")
flags.DEFINE_integer("task", 0,
"""Task id of the replica running the training.""")
flags.DEFINE_integer("ps_tasks", 0,
"""Number of tasks in the ps job.
If 0 no ps job is used.""")
#flags.DEFINE_integer("is_eval", 0, """If this is eval worker""")
# specs for multi-gpu training
tf.app.flags.DEFINE_string('config_params', '',
"""Deployment config params.""")
# parameters used for synchronous updating of gradients from multiple workers
flags.DEFINE_boolean("sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update"
"is applied (For sync_replicas mode only; default: "
"num_workers)")
## Learn or analyze a model?
flags.DEFINE_integer("learn",1,"""If we learn the model or analyse it""")
FLAGS = flags.FLAGS
def main(argv):
RunComputation()
def get_filename():
# add parameters in filename specific to the architecture
if FLAGS.architecture == '2 layer_stimulus':
architecture_string = ('_architecture=' + str(FLAGS.architecture) +
'_stim_downsample_window=' +
str(FLAGS.stim_downsample_window) +
'_stim_downsample_stride=' +
str(FLAGS.stim_downsample_stride))
elif FLAGS.architecture == 'complex':
architecture_string = ('_architecture=' + str(FLAGS.architecture) +
'_stim_downsample_window=' +
str(FLAGS.stim_downsample_window) +
'_stim_downsample_stride=' +
str(FLAGS.stim_downsample_stride) +
'_window_mp=' + str(FLAGS.window_maxpool) +
'_stride_mp=' + str(FLAGS.stride_maxpool))
else:
architecture_string = ('_architecture=' + str(FLAGS.architecture))
short_filename = ('model=' + str(FLAGS.model_id) + '_loss='+
str(FLAGS.loss) + '_batch_sz='+ str(FLAGS.batchsz) +
'_lam_w=' + str(FLAGS.lam_w) + '_lam_a=' + str(FLAGS.lam_a) +
'_step_sz'+ str(FLAGS.step_sz) +
'_tlen=' + str(FLAGS.train_len) +
'_window='+str(FLAGS.window) +
'_stride='+str(FLAGS.stride) +
str(architecture_string) + '_jitter')
if not(FLAGS.if_a_sfm):
print('if_a_sfm false')
short_filename = (short_filename +
'not_a_sfm_l1_proj_rad=' + str(FLAGS.rad_a))
if FLAGS.if_weighted_LL:
short_filename = (short_filename + '_weightedLL')
return short_filename
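# With the default flags above, get_filename() should produce something close
# to (hand-expanded for illustration, not generated):
#   model=relu_window_mother_sfm_loss=poisson_batch_sz=960_lam_w=0.0_lam_a=0.0
#   _step_sz20.0_tlen=195_window=16_stride=16_architecture=complex
#   _stim_downsample_window=4_stim_downsample_stride=4_window_mp=2_stride_mp=2_jitter
# (one single string; it is wrapped here only for readability).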
def RunComputation():
# filename for saving files, derived from FLAGS.
short_filename=get_filename()
# make a folder with name derived from parameters of the algorithm
# it saves checkpoint files and summaries used in tensorboard
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
# make folder if it does not exist
if not gfile.IsDirectory(parent_folder):
gfile.MkDir(parent_folder)
FLAGS.save_location = parent_folder + short_filename + '/'
print('Does the file exist?', gfile.IsDirectory(FLAGS.save_location))
if not gfile.IsDirectory(FLAGS.save_location):
gfile.MkDir(FLAGS.save_location)
save_filename = FLAGS.save_location + short_filename
if FLAGS.learn ==0:
# for analysis, use smaller batch sizes, so that we can work with single GPU.
FLAGS.batchsz=600
#Set up tensorflow
with tf.Graph().as_default() as gra:
with tf.device(tf.ReplicaDeviceSetter(FLAGS.ps_tasks)):
print(FLAGS.config_params)
tf.logging.info(FLAGS.config_params)
# set up training dataset
# tc_mean = get_data_mat.init_chunks(FLAGS.n_chunks) <- use this with old get_data_mat
tc_mean = get_data_mat.init_chunks(FLAGS.batchsz)
#plt.plot(tc_mean)
#plt.show()
#plt.draw()
# Create computation graph.
#
# Graph should be fully constructed before you create supervisor.
      # An attempt to modify the graph after the supervisor is created will cause an error.
with tf.name_scope('model'):
if FLAGS.architecture == '1 layer':
# single GPU model
if False:
global_step = tf.contrib.framework.create_global_step()
model, stim, resp = jitter_model.approximate_conv_jitter(FLAGS.n_cells,
FLAGS.lam_w,
FLAGS.window,
FLAGS.stride,
FLAGS.step_sz,
tc_mean,
FLAGS.su_channels)
# multiGPU model
if True:
model, stim, resp, global_step = jitter_model.approximate_conv_jitter_multigpu(FLAGS.n_cells,
FLAGS.lam_w, FLAGS.window, FLAGS.stride, FLAGS.step_sz,
tc_mean, FLAGS.su_channels, FLAGS.config_params)
if FLAGS.architecture == '2 layer_stimulus':
          # stimulus is first smoothed to a lower resolution, then the same model is applied
print('First take a low resolution version of stimulus')
model, stim, resp, global_step, stim_tuple = (jitter_model.
approximate_conv_jitter_multigpu_stim_lr(
FLAGS.n_cells,
FLAGS.lam_w, FLAGS.window,
FLAGS.stride, FLAGS.step_sz,
tc_mean, FLAGS.su_channels,
FLAGS.config_params,
FLAGS.stim_downsample_window,
FLAGS.stim_downsample_stride))
if FLAGS.architecture == 'complex':
print(' Multiple modifications over 2 layered model above')
model, stim, resp, global_step = (jitter_model_2.
approximate_conv_jitter_multigpu_complex(
FLAGS.n_cells,
FLAGS.lam_w, FLAGS.window,
FLAGS.stride, FLAGS.step_sz,
tc_mean, FLAGS.su_channels,
FLAGS.config_params,
FLAGS.stim_downsample_window,
FLAGS.stim_downsample_stride))
# Print the number of variables in graph
print('Calculating model size') # Hope we do not exceed memory
PrintModelAnalysis(gra, max_depth=10)
# Builds our summary op.
summary_op = model.merged_summary
# Create a Supervisor. It will take care of initialization, summaries,
# checkpoints, and recovery.
#
# When multiple replicas of this program are running, the first one,
# identified by --task=0 is the 'chief' supervisor. It is the only one
      # that takes care of initialization, etc.
is_chief = (FLAGS.task == 0) # & (FLAGS.learn==1)
print(save_filename)
if FLAGS.learn==1:
        # use the supervisor only for learning; otherwise it messes up the
        # saved data by trying to store variables while you are doing analysis
sv = tf.train.Supervisor(logdir=save_filename,
is_chief=is_chief,
saver=tf.train.Saver(),
summary_op=None,
save_model_secs=100,
global_step=global_step,
recovery_wait_secs=5)
if (is_chief and FLAGS.learn==1):
# save graph only if task id =0 (is_chief) and learning the model
tf.train.write_graph(tf.get_default_graph().as_graph_def(),
save_filename, 'graph.pbtxt')
# Get an initialized, and possibly recovered session. Launch the
# services: Checkpointing, Summaries, step counting.
#
# When multiple replicas of this program are running the services are
# only launched by the 'chief' replica.
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = sv.PrepareSession(FLAGS.master, config=session_config)
# Finally, learn the parameters of the model
FitComputation(sv, sess, model, stim, resp, global_step, summary_op)
sv.Stop()
else:
# Analyse the model
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
with tf.Session(config=session_config) as sess:
# First, recover the model
saver_var = tf.train.Saver(tf.all_variables(),
keep_checkpoint_every_n_hours=float('inf'))
restore_file = tf.train.latest_checkpoint(save_filename)
print(restore_file)
start_iter = int(restore_file.split('/')[-1].split('-')[-1])
saver_var.restore(sess, restore_file)
# model specific analysis
if FLAGS.architecture == '2 layer_stimulus':
AnalyseModel_lr(sess, model)
elif FLAGS.architecture == 'complex':
AnalyseModel_complex(sess, model, stim, resp, save_filename)
else:
AnalyseModel(sv, sess, model)
def FitComputation(sv, sess, model, stim, resp, global_step, summary_op):
def Test():
# Just call this function to perform analysis on a chunk of test data,
# and compute average test log-likelihood
loss_batch = 0
n_test_chunks = 2 # should be 8 #len(get_data_mat.test_chunks)
for ichunk in range(n_test_chunks):
if get_data_mat.test_counter >=n_test_chunks:
get_data_mat.test_counter = 0
stim_test, resp_test, test_len = get_data_mat.get_stim_resp(data_type='test')
fd_test = {stim: np.array(stim_test,dtype='float32'),
resp: np.array(resp_test,dtype='float32')}
loss_batch += sess.run(model.loss_inter, feed_dict=fd_test)
print_loss = loss_batch / n_test_chunks
print('Test loss:%.3f' % print_loss)
return print_loss
# Do parameter updates and projections (if needed) in a loop
step = sess.run(global_step)
is_chief = (FLAGS.task == 0)
loss_avg = []
while not sv.ShouldStop():
# Get training data, log amount of time taken, and make training dictionary
start_time = time.time()
stim_train, resp_train, train_len = get_data_mat.get_stim_resp('train')
duration = time.time() - start_time
format_str = ('%s: get_data @ step %d, %.3f '
'sec/batch)')
tf.logging.info(format_str % (datetime.now(), step, duration))
print(format_str % (datetime.now(), step, duration))
fd_train = {stim: np.array(stim_train,dtype='float32'),
resp: np.array(resp_train,dtype='float32')}
# do projection of parameters
for proj_op in model.proj_ops:
sess.run(proj_op)
print('did projection')
probe_vals = sess.run(model.variables_lr.a)
probe_vals = np.ndarray.flatten(probe_vals)
    print('a statistics: positives %d, negatives %d, zeros %d'
          % (np.sum(probe_vals > 0), np.sum(probe_vals < 0), np.sum(probe_vals == 0)))
# Take a training step - gradient step and projections (if wanted)
start_time = time.time()
_, current_loss = sess.run([model.train_step, model.loss_inter], feed_dict=fd_train)
# Log and print the loss on last 10 iterations
loss_avg.append(current_loss)
duration = time.time() - start_time
format_str = ('%s: train @ step %d, %.3f '
'sec/batch) loss = %.3f')
tf.logging.info(format_str % (datetime.now(), step, duration, np.mean(np.array(loss_avg))))
print(format_str % (datetime.now(), step, duration, np.mean(np.array(loss_avg))))
if len(loss_avg) > 10:
loss_avg = loss_avg[1:]
#from IPython.terminal.embed import InteractiveShellEmbed
#ipshell = InteractiveShellEmbed()
#ipshell()
if step >= FLAGS.max_steps: # Break if more than particular steps
break
# The chief writes summary
if is_chief and step % 10 == 0:
# call summary op
mean_loss = np.mean(np.array(loss_avg))
start_time = time.time()
summary_str = sess.run(summary_op, feed_dict=fd_train)
sv.summary_computed(sess, summary_str) # tell supervisor to write summary
duration = time.time() - start_time
format_str = ('%s: summary @ step %d, %.3f '
'sec/batch), loss: %.3f')
#tf.logging.info(format_str % (datetime.now(), step, duration, loss_inter_summary))
#print(format_str % (datetime.now(), step, duration, mean_loss))
loss_avg = []
# Test data loss
'''
test_loss = Test()
test_summary = tf.Summary()
value = test_summary.value.add()
value.tag = 'Test loss'
value.simple_value = test_loss
print('Test loss %.3f' % value.simple_value)
sv.summary_computed(sess, test_summary)
#print('adding summary')
'''
step += 1
def AnalyseModel_complex(sess, model, stim, resp, save_filename):
# Analyse different parameters of a learnt "complex" model
print('Starting analysis')
tf.logging.info('Starting analysis')
# Plot the "mother" subunit weights
w_fit_mother = sess.run(model.variables_lr.w_mother)
print(np.shape(w_fit_mother))
for ichannel in range(1):
plt.subplot(1,1,ichannel+1)
print(np.squeeze(w_fit_mother[:,:,ichannel,0]))
plt.imshow(np.squeeze(w_fit_mother[:,:,ichannel,0]), cmap='gray',
interpolation='nearest')
plt.title('Mother subunit')
tf.logging.info('Mother subunit')
plt.draw()
plt.show()
# Plot w_stim_lr - to see how the stimulus is put to a lower resolution
w_fit_stim_lr = sess.run(model.variables_lr.w_stim_lr)
print(np.shape(w_fit_stim_lr))
for ichannel in range(3):
plt.subplot(1,3,ichannel+1)
print(np.squeeze(w_fit_stim_lr[:,:,ichannel,0]))
plt.imshow(np.squeeze(w_fit_stim_lr[:,:,ichannel,0]), cmap='gray',
interpolation='nearest')
plt.title('w_stimlr')
tf.logging.info('w_stimlr')
plt.draw()
plt.show()
'''
# Plot delta subunit for 'almost convolutional - model + delta models'
w_del_e = np.squeeze(sess.run(model.variables_lr.w_del))
w_mot = sess.run(model.variables_lr.w_mother)
dimx = model.dimensions.dimx
dimy = model.dimensions.dimy
print(dimx, dimy)
for icol in np.arange(1):
icnt=1
for idimx in np.arange(dimx):
print(idimx)
for idimy in np.arange(dimy):
w_del_flatteni = np.squeeze(w_del_e[idimx, idimy, :])
plt.subplot(dimx, dimy, icnt)
#plt.subplot(6,6,icnt)
wts = w_del_flatteni
wh = 2*FLAGS.window+1
wts = np.reshape(wts[wh*wh*icol:wh*wh*(icol+1)],(wh,wh))
plt.imshow(np.squeeze(wts + np.squeeze(w_mot[:,:,icol])),cmap='gray',
interpolation='nearest')
icnt=icnt+1
plt.suptitle('w mother + w delta')
print(icol)
plt.draw()
plt.show()
'''
# Plot strongly connected subunits for a chosen cell
w_del_e = np.squeeze(sess.run(model.variables_lr.w_del))
w_mot = sess.run(model.variables_lr.w_mother)
a_model = sess.run(model.variables_lr.a)
a_sfm_eval = a_model
icell = 30
icol = 0 # 1 for green
a_wts = a_sfm_eval[:,icell]
a_thr = np.percentile(a_wts, 99.9)
sus = np.arange(a_sfm_eval.shape[0])
chosen_su = sus[a_wts > a_thr]
wh = 2 * FLAGS.window + 1
dimx = model.dimensions.dimx
dimy = model.dimensions.dimy
icnt=-1
isu = 0
print(chosen_su)
for idimx in np.arange(dimx):
print(idimx)
for idimy in np.arange(dimy):
icnt=icnt+1
if(a_wts[icnt]>=a_thr):
good_sux=idimx
good_suy=idimy
print(icnt, idimx, idimy, a_wts[icnt])
# plot this subunit
# compute 2D subunit
w_del_flatteni = np.squeeze(w_del_e[idimx, idimy, :])
wts = w_del_flatteni
wts = np.reshape(wts[wh * wh * icol:wh * wh * (icol + 1)], (wh, wh))
isu = isu + 1
print(isu)
# plot w_mother + w_delta
ax=plt.subplot(len(chosen_su), 2, (isu - 1) * 2 + 1)
plt.imshow(np.squeeze(wts + np.squeeze(w_mot[:, :, icol])), cmap='gray',
interpolation='nearest')
#plt.title(str(a_wts[icnt]))
ax.set_xticklabels([])
ax.set_yticklabels([])
# plot w_delta
ax=plt.subplot(len(chosen_su), 2, (isu - 1) * 2 + 2)
plt.imshow(np.squeeze(wts),cmap='gray', interpolation='nearest')
#plt.title(str(a_wts[icnt]))
ax.set_xticklabels([])
ax.set_yticklabels([])
tf.logging.info('strongly connected SU')
plt.show()
plt.draw()
## analyse 'a'
a = model.variables_lr.a
if FLAGS.if_a_sfm:
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
else:
a_sfm = a
a_sfm_fit = sess.run(a_sfm)
plt.plot(np.sort(np.ndarray.flatten(a_sfm_fit)), '.')
plt.show()
plt.title('All of a')
tf.logging.info('All of a')
## plot location of top subunits for many the cells.
cell_ids_plot = np.arange(36)
for icnt,icell in enumerate(cell_ids_plot):
plt.subplot(6, 6, icnt+1)
su_wts = a_sfm_fit[:,icnt]
su_wts_sort = np.sort(su_wts)
thr = su_wts_sort[-5]
plt.imshow(np.reshape(su_wts>thr, (dimx, dimy)),
interpolation='nearest', cmap='gray')
plt.title(str(thr))
plt.show()
tf.logging.info('top SU for multiple cells')
## plot weights of subunits for many the cells.
cell_ids_plot = np.arange(36)
for icnt,icell in enumerate(cell_ids_plot):
plt.subplot(6, 6, icnt+1)
su_wts = a_sfm_fit[:,icnt]
su_wts_sort = np.sort(su_wts)
plt.imshow(np.reshape(su_wts, (dimx, dimy)),
interpolation='nearest', cmap='gray')
plt.title(str(thr))
plt.show()
tf.logging.info('all SU for multiple cells')
## plot weight histogram of subunits for many the cells.
cell_ids_plot = np.arange(36)
for icnt,icell in enumerate(cell_ids_plot):
plt.subplot(6, 6, icnt+1)
su_wts = a_sfm_fit[:,icnt]
su_wts_sort = np.sort(su_wts)
plt.plot(su_wts_sort[-100:],'.')
plt.show()
tf.logging.info('SU weights (sorted) for multiple cells')
from IPython.terminal.embed import InteractiveShellEmbed
ipshell = InteractiveShellEmbed()
ipshell()
print('Analysing functional properties of subunit weights')
w_stim_lr_fit = sess.run(model.variables_lr.w_stim_lr)
w_mother_fit = sess.run(model.variables_lr.w_mother)
a_fit = sess.run(model.variables_lr.a)
w_del_fit = sess.run(model.variables_lr.w_del)
bias_su_fit = sess.run(model.variables_lr.bias_su)
bias_cell_fit = sess.run(model.variables_lr.bias_cell)
tcmean_fit = sess.run(model.variables_lr.time_course)
sux = good_sux
plt.imshow(np.squeeze(np.sqrt(np.sum(w_del_fit**2,2))),
cmap='gray', interpolation='nearest')
plt.title('w_del map')
tf.logging.info('w_del map')
plt.show()
plt.draw()
# STA calculation for all cells / subunits
'''
stas_su, stas_cells = jitter_model_2.calculate_STA_su_cell(a_fit,
w_stim_lr_fit,
w_mother_fit,
w_del_fit,
bias_su_fit,
bias_cell_fit)
'''
#from IPython.terminal.embed import InteractiveShellEmbed
#ipshell = InteractiveShellEmbed()
#ipshell()
# STA calculation for a subunit, or a cell
print('Compute STA for some window weights')
tf.logging.info('Compute STA for some window weights')
# compute STA form small batches of data
'''
# Add STA node to tensorflow graph
dimx = model.dimensions.dimx
dimy = model.dimensions.dimy
stim_tf = model.probe_ops[-1][0]
su_act_tf = model.probe_ops[-1][1]
sta = tf.reshape(tf.matmul(tf.reshape(tf.transpose(stim_tf, [1,2,3,0]),[640*320*3, FLAGS.batchsz-29]),
tf.expand_dims(tf.squeeze(su_act_tf[:,good_sux,good_suy]),1)), [640, 320, 3])
#sta_su = tf.reshape(tf.matmul(tf.reshape(tf.transpose(stim_tf, [1,2,3,0]),[640*320*3, FLAGS.batchsz-29]),
tf.reshape(su_act_tf, [-1, dimx*dimy])), [640, 320, 3, dimx, dimy])
#sta_np = np.zeros((640, 320, 3, dimx, dimy))
sta_np = np.zeros((640, 320, 3))
n_batches= 1000
#plt.ion()
#fig = plt.figure()
for ibatch in np.arange(n_batches):
# generate random stimulus sample
start_time = time.time()
# generate jitter stimulus instead
stim_np, _, _ = get_data_mat.get_stim_resp()
duration = time.time() - start_time
format_str = ('%s: generate_random_samples @ step %d, %.3f '
'sec/batch)')
print(format_str % (datetime.now(), ibatch, duration))
tf.logging.info(format_str % (datetime.now(), ibatch, duration))
# Compute STA
start_time = time.time()
#sta_np = sta_np + sess.run(sta_su, feed_dict={stim: stim_np})
sta_np = sta_np + sess.run(sta, feed_dict={stim: stim_np})
#[stim_tf_np, su_act_tf_np] = sess.run([stim_tf, su_act_tf], feed_dict={stim: stim_np})
duration = time.time() - start_time
format_str = ('%s: compute STA @ step %d, %.3f '
'sec/batch)')
print(format_str % (datetime.now(), ibatch, duration))
tf.logging.info(format_str % (datetime.now(), ibatch, duration))
print(ibatch)
sta_np_save = sta_np / (ibatch+1)
#pickle.dump(sta_np_save, gfile.Open('/home/bhaishahster/tmp/stas.pkl', "w"))
#plt.cla()
#plt.imshow(sta_np_save[:,:,1],
# cmap='gray', interpolation='nearest')
#plt.title('batch: %d' % ibatch)
#plt.show()
#plt.draw()
#fig.canvas.draw()
print('subunit STA dumped')
tf.logging.info('subunit STA dumped')
'''
from IPython.terminal.embed import InteractiveShellEmbed
ipshell = InteractiveShellEmbed()
ipshell()
# find STA for a row
dimx = model.dimensions.dimx
dimy = model.dimensions.dimy
stim_tf = model.probe_ops[-1][0]
su_act_tf = model.probe_ops[-1][1]
sta_list = []
for isu_col in range(dimy):
sta_list += [tf.reshape(tf.matmul(tf.reshape(tf.transpose(stim_tf, [1,2,3,0]),[640*320*3, FLAGS.batchsz-29]),
tf.squeeze(su_act_tf[:,:, isu_col])), [640, 320, 3, dimx])]
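  # Shape bookkeeping for the ops above (as read from the reshapes): stim_tf
  # carries T = batchsz - 29 frames of the 640 x 320 x 3 stimulus, so the
  # transposed/reshaped stimulus is (640*320*3, T); su_act_tf is
  # (T, dimx, dimy), so each squeezed column slice is (T, dimx) and the matmul
  # yields one (640, 320, 3, dimx) stack of STAs per subunit column.
  # sta_np_list below keeps a running average of these stacks.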
sta_np_list = [[]]*dimy
for isu_col in range(dimy):
sta_np_list[isu_col] = np.array(np.zeros((640, 320, 3, dimx)), dtype='float32')
print('column %d initialized' % isu_col)
#sta_np = np.zeros((640, 320, 3, dimx, dimy))
sta_np = np.zeros((640, 320, 3))
n_batches= 1000
#plt.ion()
#fig = plt.figure()
for ibatch in np.arange(n_batches):
# generate random stimulus sample
start_time = time.time()
# generate jitter stimulus instead
stim_np, _, _ = get_data_mat.get_stim_resp()
duration = time.time() - start_time
format_str = ('%s: generate_random_samples @ step %d, %.3f '
'sec/batch)')
print(format_str % (datetime.now(), ibatch, duration))
tf.logging.info(format_str % (datetime.now(), ibatch, duration))
# Compute STA
start_time = time.time()
for ista_node in range(dimy):
      sta_np_list[ista_node] = (
          ibatch * sta_np_list[ista_node] +
          sess.run(sta_list[ista_node],
                   feed_dict={stim: stim_np})) / (ibatch + 1)
print(str(ista_node)+ ' done')
duration = time.time() - start_time
format_str = ('%s: compute STA @ step %d, %.3f '
'sec/batch)')
print(format_str % (datetime.now(), ibatch, duration))
tf.logging.info(format_str % (datetime.now(), ibatch, duration))
# save the STAs
if ibatch%10 == 9 :
print('Saving')
for isu_col in range(dimy):
pickle.dump(sta_np_list[isu_col], open('/home/bhaishahster/tmp/stas_column_%d.pkl' % isu_col,'wb'))
print('column %d dumped' % isu_col)
print(ibatch)
# sta_np_save = sta_np / (ibatch+1)
#pickle.dump(sta_np_save, gfile.Open('/home/bhaishahster/tmp/stas.pkl', "w"))
#plt.cla()
#plt.imshow(sta_np_save[:,:,1],
# cmap='gray', interpolation='nearest')
#plt.title('batch: %d' % ibatch)
#plt.show()
#plt.draw()
#fig.canvas.draw()
#print('subunit STA dumped')
# tf.logging.info('subunit STA dumped')
# how quickly does the correlation between different pixels go away?
stim_np, _, _ = get_data_mat.get_stim_resp() #np.array(100*(2 * np.random.randint(2, size=(FLAGS.batchsz ,640,320,3)) - 1), dtype='float32')
su_act_tf = model.probe_ops[-1][1]
su_act_np = sess.run(su_act_tf, feed_dict={stim: stim_np})
ix =40
for iy in np.arange(20-3, 20+3, 1): #np.arange(dimy):
plt.plot(su_act_np[:,ix, iy])
plt.hold(True)
plt.show()
plt.draw()
# plot correlation with distance
for xcenter in np.arange(30, 50):
centerpt = [xcenter,20]
corr_dist = []
corr_val = []
for icnt, iy in enumerate(np.arange(-10, 10)):
corr_dist += [iy]
cc = np.corrcoef(su_act_np[:,centerpt[0], centerpt[1]+iy],
su_act_np[:,centerpt[0], centerpt[1]])
corr_val += [cc[0,1]]
plt.plot(corr_dist, corr_val)
plt.hold(True)
plt.show()
plt.draw()
## Decode maximally activating stimulus for each subunit and each cell
# Not very useful analysis in hindsight.
# Start from a random white noise stimulus and
# do updates to increase subunit(or cell) activation,
# keeping the stimulus normalized
g = tf.Graph()
with g.as_default():
with tf.Session() as sess2:
for suy in [good_suy]:#np.arange(30):
# plot a
a = tf.constant(np.array(a_fit, dtype='float32'))
a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a)))
a_sfm_expanded = tf.expand_dims(a_sfm, 0)
a_sfm_expanded = tf.expand_dims(a_sfm_expanded, -1)
a_sfm_np = sess2.run(a_sfm_expanded)
plt.imshow(np.squeeze(a_sfm_np), cmap='gray', interpolation='nearest')
plt.show()
plt.draw()
# Maximize the activation for a particular subunit
vars_lst = jitter_model.variables_lr(w_mother_fit, w_del_fit, a_fit,
w_stim_lr_fit, bias_su_fit,
bias_cell_fit, tcmean_fit)
np.random.seed(11111)
stim4D = tf.Variable(np.array(np.random.randn(1,640,320,3),
dtype='float32'), name="decoded_stimulus")
decode_fcn = jitter_model_2.decode_op_complex(sess2, stim4D, sux, suy,
vars_lst, FLAGS.window,
FLAGS.stride,
FLAGS.stim_downsample_window,
FLAGS.stim_downsample_stride,
model.dimensions_stimlr.dimx_slr,
model.dimensions_stimlr.dimy_slr,
model.dimensions.dimx, model.dimensions.dimy,
FLAGS.n_cells)
stim_decode, max_val = decode_fcn()
print(np.shape(stim_decode))
icol =0
plt.subplot(1,2,1)
plt.imshow(np.squeeze(stim_decode[0,:,:,icol]), cmap='gray',
interpolation='nearest')
xx = np.squeeze(stim_decode[0,:,:,icol])
rc = np.nonzero(xx>0.8*np.max(np.ndarray.flatten(xx)))
xxy = xx[np.min(rc[0]):np.max(rc[0]), np.min(rc[1]):np.max(rc[1])]
plt.subplot(1,2,2)
plt.imshow(xxy, cmap='gray', interpolation='nearest')
plt.title('Max val: '+ str(max_val))
plt.show()
plt.draw()
# Maximize stimulus for a particular cell
for mcellid in [icell]:#np.arange(49): # which cell ID to plot
np.random.seed(11111)
stim4D = tf.Variable(np.array(np.random.randn(1,640,320,3),
dtype='float32'),
name="decoded_stimulus")
decode_fcn = jitter_model_2.decode_op_complex(sess2, stim4D, mcellid,
-1, vars_lst,
FLAGS.window,
FLAGS.stride,
FLAGS.stim_downsample_window,
FLAGS.stim_downsample_stride,
model.dimensions_stimlr.dimx_slr,
model.dimensions_stimlr.dimy_slr,
model.dimensions.dimx, model.dimensions.dimy,
FLAGS.n_cells, max_element='cell')
stim_decode, max_val = decode_fcn()
print(np.shape(stim_decode))
icol =1
#plt.subplot(7, 7, mcellid+1);
plt.imshow(np.squeeze(stim_decode[0, :, :, icol]),
cmap='gray', interpolation='nearest')
plt.show()
plt.draw()
#from IPython.terminal.embed import InteractiveShellEmbed
#ipshell = InteractiveShellEmbed()
#ipshell()
def analyse_cell_su(a_sfm_fit, dimx, dimy, cellID):
su_wts = a_sfm_fit[:, cellID]
su_wts_sort = np.sort(su_wts)
thr = su_wts_sort[-5]
x,y = np.where(np.reshape(su_wts>thr, (dimx, dimy)))
print([x,y])
num_win = x.shape[0]
for isu in np.arange(num_win):
print('Loading %d su @ %d, %d' %(isu, x[isu], y[isu]))
stas = pickle.load(open('/home/bhaishahster/tmp/stas_column_%d.pkl' % y[isu],'r'))
plt.subplot(int(np.sqrt(num_win)), int(np.sqrt(num_win+1)), isu+1)
plt.imshow(stas[ 250:380,1:100, 0, x[isu]], interpolation='nearest');
#plt.imshow(stas[ :, :, 2, x[isu]]);
plt.show()
if __name__ == '__main__':
app.run()
|
|
from __future__ import division
from ..Qt import QtGui, QtCore
import numpy as np
import collections
from .. import functions as fn
from .. import debug as debug
from .GraphicsObject import GraphicsObject
from ..Point import Point
from .. import getConfigOption
__all__ = ['ImageItem']
class ImageItem(GraphicsObject):
"""
**Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
    GraphicsObject displaying an image. Optimized for rapid update (i.e. video display).
This item displays either a 2D numpy array (height, width) or
a 3D array (height, width, RGBa). This array is optionally scaled (see
:func:`setLevels <pyqtgraph.ImageItem.setLevels>`) and/or colored
with a lookup table (see :func:`setLookupTable <pyqtgraph.ImageItem.setLookupTable>`)
before being displayed.
ImageItem is frequently used in conjunction with
:class:`HistogramLUTItem <pyqtgraph.HistogramLUTItem>` or
:class:`HistogramLUTWidget <pyqtgraph.HistogramLUTWidget>` to provide a GUI
for controlling the levels and lookup table used to display the image.
"""
sigImageChanged = QtCore.Signal()
sigRemoveRequested = QtCore.Signal(object) # self; emitted when 'remove' is selected from context menu
def __init__(self, image=None, **kargs):
"""
See :func:`setImage <pyqtgraph.ImageItem.setImage>` for all allowed initialization arguments.
"""
GraphicsObject.__init__(self)
self.menu = None
self.image = None ## original image data
self.qimage = None ## rendered image for display
self.paintMode = None
self.levels = None ## [min, max] or [[redMin, redMax], ...]
self.lut = None
self.autoDownsample = False
self.axisOrder = getConfigOption('imageAxisOrder')
# In some cases, we use a modified lookup table to handle both rescaling
# and LUT more efficiently
self._effectiveLut = None
self.drawKernel = None
self.border = None
self.removable = False
if image is not None:
self.setImage(image, **kargs)
else:
self.setOpts(**kargs)
def setCompositionMode(self, mode):
"""Change the composition mode of the item (see QPainter::CompositionMode
in the Qt documentation). This is useful when overlaying multiple ImageItems.
============================================ ============================================================
**Most common arguments:**
QtGui.QPainter.CompositionMode_SourceOver Default; image replaces the background if it
is opaque. Otherwise, it uses the alpha channel to blend
the image with the background.
QtGui.QPainter.CompositionMode_Overlay The image color is mixed with the background color to
reflect the lightness or darkness of the background.
QtGui.QPainter.CompositionMode_Plus Both the alpha and color of the image and background pixels
are added together.
QtGui.QPainter.CompositionMode_Multiply The output is the image color multiplied by the background.
============================================ ============================================================
"""
self.paintMode = mode
self.update()
def setBorder(self, b):
self.border = fn.mkPen(b)
self.update()
def width(self):
if self.image is None:
return None
axis = 0 if self.axisOrder == 'col-major' else 1
return self.image.shape[axis]
def height(self):
if self.image is None:
return None
axis = 1 if self.axisOrder == 'col-major' else 0
return self.image.shape[axis]
def channels(self):
if self.image is None:
return None
return self.image.shape[2] if self.image.ndim == 3 else 1
def boundingRect(self):
if self.image is None:
return QtCore.QRectF(0., 0., 0., 0.)
return QtCore.QRectF(0., 0., float(self.width()), float(self.height()))
def setLevels(self, levels, update=True):
"""
Set image scaling levels. Can be one of:
* [blackLevel, whiteLevel]
* [[minRed, maxRed], [minGreen, maxGreen], [minBlue, maxBlue]]
Only the first format is compatible with lookup tables. See :func:`makeARGB <pyqtgraph.makeARGB>`
for more details on how levels are applied.
"""
if levels is not None:
levels = np.asarray(levels)
if not fn.eq(levels, self.levels):
self.levels = levels
self._effectiveLut = None
if update:
self.updateImage()
def getLevels(self):
return self.levels
#return self.whiteLevel, self.blackLevel
def setLookupTable(self, lut, update=True):
"""
Set the lookup table (numpy array) to use for this image. (see
:func:`makeARGB <pyqtgraph.makeARGB>` for more information on how this is used).
Optionally, lut can be a callable that accepts the current image as an
argument and returns the lookup table to use.
Ordinarily, this table is supplied by a :class:`HistogramLUTItem <pyqtgraph.HistogramLUTItem>`
or :class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
"""
if lut is not self.lut:
self.lut = lut
self._effectiveLut = None
if update:
self.updateImage()
def setAutoDownsample(self, ads):
"""
Set the automatic downsampling mode for this ImageItem.
Added in version 0.9.9
"""
self.autoDownsample = ads
self.qimage = None
self.update()
def setOpts(self, update=True, **kargs):
if 'axisOrder' in kargs:
val = kargs['axisOrder']
if val not in ('row-major', 'col-major'):
raise ValueError('axisOrder must be either "row-major" or "col-major"')
self.axisOrder = val
if 'lut' in kargs:
self.setLookupTable(kargs['lut'], update=update)
if 'levels' in kargs:
self.setLevels(kargs['levels'], update=update)
#if 'clipLevel' in kargs:
#self.setClipLevel(kargs['clipLevel'])
if 'opacity' in kargs:
self.setOpacity(kargs['opacity'])
if 'compositionMode' in kargs:
self.setCompositionMode(kargs['compositionMode'])
if 'border' in kargs:
self.setBorder(kargs['border'])
if 'removable' in kargs:
self.removable = kargs['removable']
self.menu = None
if 'autoDownsample' in kargs:
self.setAutoDownsample(kargs['autoDownsample'])
if update:
self.update()
def setRect(self, rect):
"""Scale and translate the image to fit within rect (must be a QRect or QRectF)."""
self.resetTransform()
self.translate(rect.left(), rect.top())
self.scale(rect.width() / self.width(), rect.height() / self.height())
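    # For example (illustrative): setRect(QtCore.QRectF(0, 0, 10, 5)) stretches
    # the image so its full pixel extent covers a 10 x 5 region of the parent
    # coordinate system, whatever the underlying array shape is.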
def clear(self):
self.image = None
self.prepareGeometryChange()
self.informViewBoundsChanged()
self.update()
def setImage(self, image=None, autoLevels=None, **kargs):
"""
Update the image displayed by this item. For more information on how the image
is processed before displaying, see :func:`makeARGB <pyqtgraph.makeARGB>`
================= =========================================================================
**Arguments:**
image (numpy array) Specifies the image data. May be 2D (width, height) or
3D (width, height, RGBa). The array dtype must be integer or floating
point of any bit depth. For 3D arrays, the third dimension must
be of length 3 (RGB) or 4 (RGBA). See *notes* below.
autoLevels (bool) If True, this forces the image to automatically select
levels based on the maximum and minimum values in the data.
By default, this argument is true unless the levels argument is
given.
lut (numpy array) The color lookup table to use when displaying the image.
See :func:`setLookupTable <pyqtgraph.ImageItem.setLookupTable>`.
levels (min, max) The minimum and maximum values to use when rescaling the image
data. By default, this will be set to the minimum and maximum values
in the image. If the image array has dtype uint8, no rescaling is necessary.
opacity (float 0.0-1.0)
compositionMode See :func:`setCompositionMode <pyqtgraph.ImageItem.setCompositionMode>`
border Sets the pen used when drawing the image border. Default is None.
autoDownsample (bool) If True, the image is automatically downsampled to match the
screen resolution. This improves performance for large images and
reduces aliasing.
================= =========================================================================
**Notes:**
For backward compatibility, image data is assumed to be in column-major order (column, row).
However, most image data is stored in row-major order (row, column) and will need to be
transposed before calling setImage()::
imageitem.setImage(imagedata.T)
This requirement can be changed by calling ``imageitem.setOpts(axisOrder='row-major')`` or
by changing the ``imageAxisOrder`` :ref:`global configuration option <apiref_config>`.
"""
profile = debug.Profiler()
gotNewData = False
if image is None:
if self.image is None:
return
else:
gotNewData = True
shapeChanged = (self.image is None or image.shape != self.image.shape)
image = image.view(np.ndarray)
if self.image is None or image.dtype != self.image.dtype:
self._effectiveLut = None
self.image = image
if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:
if 'autoDownsample' not in kargs:
kargs['autoDownsample'] = True
if shapeChanged:
self.prepareGeometryChange()
self.informViewBoundsChanged()
profile()
if autoLevels is None:
if 'levels' in kargs:
autoLevels = False
else:
autoLevels = True
if autoLevels:
img = self.image
while img.size > 2**16:
img = img[::2, ::2]
mn, mx = img.min(), img.max()
if mn == mx:
mn = 0
mx = 255
kargs['levels'] = [mn,mx]
profile()
self.setOpts(update=False, **kargs)
profile()
self.qimage = None
self.update()
profile()
if gotNewData:
self.sigImageChanged.emit()
def dataTransform(self):
"""Return the transform that maps from this image's input array to its
local coordinate system.
This transform corrects for the transposition that occurs when image data
is interpreted in row-major order.
"""
# Might eventually need to account for downsampling / clipping here
tr = QtGui.QTransform()
if self.axisOrder == 'row-major':
# transpose
tr.scale(1, -1)
tr.rotate(-90)
return tr
def inverseDataTransform(self):
"""Return the transform that maps from this image's local coordinate
system to its input array.
See dataTransform() for more information.
"""
tr = QtGui.QTransform()
if self.axisOrder == 'row-major':
# transpose
tr.scale(1, -1)
tr.rotate(-90)
return tr
def mapToData(self, obj):
tr = self.inverseDataTransform()
return tr.map(obj)
def mapFromData(self, obj):
tr = self.dataTransform()
return tr.map(obj)
def quickMinMax(self, targetSize=1e6):
"""
Estimate the min/max values of the image data by subsampling.
"""
data = self.image
while data.size > targetSize:
ax = np.argmax(data.shape)
sl = [slice(None)] * data.ndim
sl[ax] = slice(None, None, 2)
data = data[tuple(sl)]
return nanmin(data), nanmax(data)
def updateImage(self, *args, **kargs):
## used for re-rendering qimage from self.image.
## can we make any assumptions here that speed things up?
## dtype, range, size are all the same?
defaults = {
'autoLevels': False,
}
defaults.update(kargs)
return self.setImage(*args, **defaults)
def render(self):
# Convert data to QImage for display.
profile = debug.Profiler()
if self.image is None or self.image.size == 0:
return
# Request a lookup table if this image has only one channel
if self.image.ndim == 2 or self.image.shape[2] == 1:
if callable(self.lut):
lut = self.lut(self.image)
else:
lut = self.lut
else:
lut = None
if self.autoDownsample:
# reduce dimensions of image based on screen resolution
o = self.mapToDevice(QtCore.QPointF(0,0))
x = self.mapToDevice(QtCore.QPointF(1,0))
y = self.mapToDevice(QtCore.QPointF(0,1))
w = Point(x-o).length()
h = Point(y-o).length()
if w == 0 or h == 0:
self.qimage = None
return
xds = max(1, int(1.0 / w))
yds = max(1, int(1.0 / h))
axes = [1, 0] if self.axisOrder == 'row-major' else [0, 1]
image = fn.downsample(self.image, xds, axis=axes[0])
image = fn.downsample(image, yds, axis=axes[1])
self._lastDownsample = (xds, yds)
else:
image = self.image
# if the image data is a small int, then we can combine levels + lut
# into a single lut for better performance
levels = self.levels
if levels is not None and levels.ndim == 1 and image.dtype in (np.ubyte, np.uint16):
if self._effectiveLut is None:
eflsize = 2**(image.itemsize*8)
ind = np.arange(eflsize)
minlev, maxlev = levels
levdiff = maxlev - minlev
levdiff = 1 if levdiff == 0 else levdiff # don't allow division by 0
if lut is None:
efflut = fn.rescaleData(ind, scale=255./levdiff,
offset=minlev, dtype=np.ubyte)
else:
lutdtype = np.min_scalar_type(lut.shape[0]-1)
efflut = fn.rescaleData(ind, scale=(lut.shape[0]-1)/levdiff,
offset=minlev, dtype=lutdtype, clip=(0, lut.shape[0]-1))
efflut = lut[efflut]
self._effectiveLut = efflut
lut = self._effectiveLut
levels = None
# Convert single-channel image to 2D array
if image.ndim == 3 and image.shape[-1] == 1:
image = image[..., 0]
# Assume images are in column-major order for backward compatibility
# (most images are in row-major order)
if self.axisOrder == 'col-major':
image = image.transpose((1, 0, 2)[:image.ndim])
argb, alpha = fn.makeARGB(image, lut=lut, levels=levels)
self.qimage = fn.makeQImage(argb, alpha, transpose=False)
def paint(self, p, *args):
profile = debug.Profiler()
if self.image is None:
return
if self.qimage is None:
self.render()
if self.qimage is None:
return
profile('render QImage')
if self.paintMode is not None:
p.setCompositionMode(self.paintMode)
profile('set comp mode')
shape = self.image.shape[:2] if self.axisOrder == 'col-major' else self.image.shape[:2][::-1]
p.drawImage(QtCore.QRectF(0,0,*shape), self.qimage)
profile('p.drawImage')
if self.border is not None:
p.setPen(self.border)
p.drawRect(self.boundingRect())
def save(self, fileName, *args):
"""Save this image to file. Note that this saves the visible image (after scale/color changes), not the original data."""
if self.qimage is None:
self.render()
self.qimage.save(fileName, *args)
def getHistogram(self, bins='auto', step='auto', perChannel=False, targetImageSize=200,
targetHistogramSize=500, **kwds):
"""Returns x and y arrays containing the histogram values for the current image.
For an explanation of the return format, see numpy.histogram().
The *step* argument causes pixels to be skipped when computing the histogram to save time.
If *step* is 'auto', then a step is chosen such that the analyzed data has
dimensions roughly *targetImageSize* for each axis.
The *bins* argument and any extra keyword arguments are passed to
np.histogram(). If *bins* is 'auto', then a bin number is automatically
chosen based on the image characteristics:
* Integer images will have approximately *targetHistogramSize* bins,
with each bin having an integer width.
* All other types will have *targetHistogramSize* bins.
If *perChannel* is True, then the histogram is computed once per channel
and the output is a list of the results.
This method is also used when automatically computing levels.
"""
if self.image is None:
return None,None
if step == 'auto':
step = (int(np.ceil(self.image.shape[0] / targetImageSize)),
int(np.ceil(self.image.shape[1] / targetImageSize)))
if np.isscalar(step):
step = (step, step)
stepData = self.image[::step[0], ::step[1]]
if bins == 'auto':
mn = stepData.min()
mx = stepData.max()
if stepData.dtype.kind in "ui":
# For integer data, we select the bins carefully to avoid aliasing
step = np.ceil((mx-mn) / 500.)
bins = np.arange(mn, mx+1.01*step, step, dtype=int)
else:
# for float data, let numpy select the bins.
bins = np.linspace(mn, mx, 500)
if len(bins) == 0:
bins = [mn, mx]
kwds['bins'] = bins
stepData = stepData[np.isfinite(stepData)]
if perChannel:
hist = []
for i in range(stepData.shape[-1]):
h = np.histogram(stepData[..., i], **kwds)
hist.append((h[1][:-1], h[0]))
return hist
else:
hist = np.histogram(stepData, **kwds)
return hist[1][:-1], hist[0]
def setPxMode(self, b):
"""
Set whether the item ignores transformations and draws directly to screen pixels.
If True, the item will not inherit any scale or rotation transformations from its
parent items, but its position will be transformed as usual.
(see GraphicsItem::ItemIgnoresTransformations in the Qt documentation)
"""
self.setFlag(self.ItemIgnoresTransformations, b)
def setScaledMode(self):
self.setPxMode(False)
def getPixmap(self):
if self.qimage is None:
self.render()
if self.qimage is None:
return None
return QtGui.QPixmap.fromImage(self.qimage)
def pixelSize(self):
"""return scene-size of a single pixel in the image"""
br = self.sceneBoundingRect()
if self.image is None:
return 1,1
return br.width()/self.width(), br.height()/self.height()
def viewTransformChanged(self):
if self.autoDownsample:
self.qimage = None
self.update()
def mouseDragEvent(self, ev):
if ev.button() != QtCore.Qt.LeftButton:
ev.ignore()
return
elif self.drawKernel is not None:
ev.accept()
self.drawAt(ev.pos(), ev)
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton:
if self.raiseContextMenu(ev):
ev.accept()
if self.drawKernel is not None and ev.button() == QtCore.Qt.LeftButton:
self.drawAt(ev.pos(), ev)
def raiseContextMenu(self, ev):
menu = self.getMenu()
if menu is None:
return False
menu = self.scene().addParentContextMenus(self, menu, ev)
pos = ev.screenPos()
menu.popup(QtCore.QPoint(pos.x(), pos.y()))
return True
def getMenu(self):
if self.menu is None:
if not self.removable:
return None
self.menu = QtGui.QMenu()
self.menu.setTitle("Image")
remAct = QtGui.QAction("Remove image", self.menu)
remAct.triggered.connect(self.removeClicked)
self.menu.addAction(remAct)
self.menu.remAct = remAct
return self.menu
def hoverEvent(self, ev):
if not ev.isExit() and self.drawKernel is not None and ev.acceptDrags(QtCore.Qt.LeftButton):
ev.acceptClicks(QtCore.Qt.LeftButton) ## we don't use the click, but we also don't want anyone else to use it.
ev.acceptClicks(QtCore.Qt.RightButton)
elif not ev.isExit() and self.removable:
ev.acceptClicks(QtCore.Qt.RightButton) ## accept context menu clicks
def tabletEvent(self, ev):
pass
#print(ev.device())
#print(ev.pointerType())
#print(ev.pressure())
def drawAt(self, pos, ev=None):
pos = [int(pos.x()), int(pos.y())]
dk = self.drawKernel
kc = self.drawKernelCenter
sx = [0,dk.shape[0]]
sy = [0,dk.shape[1]]
tx = [pos[0] - kc[0], pos[0] - kc[0]+ dk.shape[0]]
ty = [pos[1] - kc[1], pos[1] - kc[1]+ dk.shape[1]]
for i in [0,1]:
dx1 = -min(0, tx[i])
dx2 = min(0, self.image.shape[0]-tx[i])
tx[i] += dx1+dx2
sx[i] += dx1+dx2
dy1 = -min(0, ty[i])
dy2 = min(0, self.image.shape[1]-ty[i])
ty[i] += dy1+dy2
sy[i] += dy1+dy2
ts = (slice(tx[0],tx[1]), slice(ty[0],ty[1]))
ss = (slice(sx[0],sx[1]), slice(sy[0],sy[1]))
mask = self.drawMask
src = dk
if callable(self.drawMode):
self.drawMode(dk, self.image, mask, ss, ts, ev)
else:
src = src[ss]
if self.drawMode == 'set':
if mask is not None:
mask = mask[ss]
self.image[ts] = self.image[ts] * (1-mask) + src * mask
else:
self.image[ts] = src
elif self.drawMode == 'add':
self.image[ts] += src
else:
raise Exception("Unknown draw mode '%s'" % self.drawMode)
self.updateImage()
def setDrawKernel(self, kernel=None, mask=None, center=(0,0), mode='set'):
self.drawKernel = kernel
self.drawKernelCenter = center
self.drawMode = mode
self.drawMask = mask
def removeClicked(self):
## Send remove event only after we have exited the menu event handler
self.removeTimer = QtCore.QTimer()
self.removeTimer.timeout.connect(self.emitRemoveRequested)
self.removeTimer.start(0)
def emitRemoveRequested(self):
self.removeTimer.timeout.disconnect(self.emitRemoveRequested)
self.sigRemoveRequested.emit(self)
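# render() above folds the (min, max) level rescaling and the colour lookup table
# into a single per-value table for small integer dtypes, so each pixel needs only
# one indexing operation. A minimal standalone numpy sketch of that idea follows;
# it is illustrative only (the helper name and defaults are not part of ImageItem):
def _example_effective_lut(lut, levels, bits=8):
    """Fold an (N, 3) lut and (min, max) levels into one table with 2**bits rows."""
    import numpy as np
    minlev, maxlev = levels
    levdiff = maxlev - minlev or 1                 # avoid division by zero
    ind = np.arange(2 ** bits, dtype=float)
    # map raw pixel values onto lut indices, clipping to the valid range
    idx = np.clip((ind - minlev) * (lut.shape[0] - 1) / levdiff, 0, lut.shape[0] - 1)
    return lut[idx.astype(int)]                    # e.g. eff_lut[uint8_image] -> RGB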
|
|
#!/usr/bin/env python
import sys
from ruffus import *
import yaml
import time
import os
import helpers
from helpers import runCommand,isGzip
from os.path import (
join, expanduser, expandvars,
splitext,split, basename, dirname, exists
)
helpers.setup_shell_environment()
import tasks
import glob
#import pyprind
from termcolor import colored
import datetime
is_64bits = sys.maxsize > 2**32
#import graphviz
if not is_64bits:
print "Please upgrade your operating system to 64 bit, application such as diamond don't run on 32 bit"
sys.exit(0)
options = helpers.get_options()
#logger_proxy, logging_mutex = helpers.make_logger(options, __file__)
basedir = os.path.relpath('./')
projectDir = options.outdir # set output dir
proDir= os.path.basename(projectDir)
print proDir
inputDir = options.fastq_file
inputDir = os.path.abspath(inputDir)# get fastq.gz file
print inputDir
dir_name = basename(inputDir)
probe = os.path.abspath(options.probe_file)
probPath, probFile = os.path.split(probe)
print probe
#plateWall_bc = os.path.abspath(options.sample_sheet)
#pbpath,plateWallBC = os.path.split(plateWall_bc)
#print plateWall_bc
cpuNum = options.cpuNum
print cpuNum
logsDir = join(proDir, "logs")
resultDir = join(proDir, "result")
tempDir = join(resultDir, "tempFolder")
genomeDir = join(proDir, "result", "Genome")
print logsDir
print resultDir
# setup starting files
param = [
#[inputFile, join(proDir, "input", inFile)],
[probe, join(proDir, "result", probFile)]
]
@graphviz(height=1.8, width=2, label="Prepare\nanalysis")
@follows(mkdir(proDir,resultDir, logsDir,tempDir))
@files(param)
def prepare_analysis(input,output):
"""Copy the inputfiles to analysis dir
-`input`: input file to copy to the outdir
-`outfile`: output file name
"""
stage1= "Stage1: Copy", input, " to ", output
print colored(stage1, "green")
result = tasks.copyFileToAnaFolder(input, output)
return result
@graphviz(height=1.8, width=2, label="Prepare DB\nfiles")
@follows(prepare_analysis)
@transform(prepare_analysis, suffix("_manifest.csv"), ".fa")
def prepareDB_file(input_file, output_file):
"""docstring for prepareDB__file
-`input_file`: A manifest probe csv file that contain probe sequences
-`output_file`: A fasta output file that contain probe sequences
"""
print colored("Stage 2: Creating DB file from probe file ...", "green")
print input_file
result =tasks.db_file(input_file, output_file)
return result
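# The real conversion is implemented by tasks.db_file in the separate tasks module
# (not shown here). Purely to illustrate the idea referenced in the docstring above,
# a minimal sketch could look like the function below; the column names
# 'Probe_Name' and 'Probe_Sequence' are hypothetical placeholders, not the actual
# manifest layout.
def _example_manifest_to_fasta(manifest_csv, fasta_out):
    import csv
    with open(manifest_csv) as src, open(fasta_out, "w") as dst:
        for row in csv.DictReader(src):
            dst.write(">%s\n%s\n" % (row["Probe_Name"], row["Probe_Sequence"]))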
@graphviz(height=1.8, width=2, label="Create\ngtf file")
@follows(prepare_analysis)
@transform(prepare_analysis, suffix("_manifest.csv"), ".gtf")
def create_gtf_file(input_file, output_file):
"""Create pseudo gtf file for all probes sequences
`input_file`: A manifest csv file that contain probe information
`output_file`: A pseudo gtf file that serve for annotation
"""
print colored("Stage 3: Creating custom gtf file from manifest file ...", "green")
print input_file
result =tasks.create_gtf(input_file, output_file)
return result
@graphviz(height=1.8, width=2, label="Index\nDB")
@follows(prepareDB_file)
@follows(mkdir(genomeDir))
@transform(prepareDB_file, suffix(".fa"), "SAindex")
def indexGenomeFile(input, output):
"""Index STAR genome index file
`input`: Input probes fasta file
`output`: SAindex file to check the completion of STAR genome index
"""
#print input
#print output
base = splitext(input)[0]
base = base + ".gtf"
#print base
gtfFile = base
outputDir = proDir + "/result/Genome"
print colored("Stage 4: Creating genome index file from the probe fasta file ....", "green")
print input
#print cpuNum
result = tasks.index_db_file(input, outputDir, cpuNum, gtfFile)
return result
@graphviz(height=1.8, width=2, label="Map to\nprobes")
@follows(indexGenomeFile)
@follows(create_gtf_file)
@transform(join(inputDir, "*.fastq.gz"), suffix(".gz"), ".bam")
def map_to_probes(fastq, output):
"""Map the fastq file to the indexed probe sequences. The fastq must be in the gzipped with the following extension. (*.fastq.gz)
`fastq`: a dir that contains all *.fastq.gz file for the experment
`output`: output .bam files and '*fastqReadPrepGene.out.tab' count files
"""
outfile = basename(output)
outfile = join(tempDir, outfile)
suf = splitext(outfile)[0]
outPrefix = os.path.abspath(suf)
print tasks.comment()
print colored("Stage 5: Map sequence fastq file to the indexed genome file ... ", "green")
#print fastq
#print output
#print genomeDir
#print outPrefix
print tasks.comment()
result = tasks.map_seq_to_probes(fastq, genomeDir, cpuNum, outPrefix)
return result
@graphviz(height=1.8, width=2, label="Count Mapped\nprobes")
@follows(map_to_probes)
@transform(join(tempDir, "*fastqAligned.out.sam"), suffix(".sam"), ".count.txt")
def count_mapped_reads(bamFile, outfile):
"""Coun the mapped sequence to the genome featur5e
`bamFile`: A bam alignment file
`outfile`: Count txt file
"""
import re
p=re.match(r'(.*)_manifest.csv', probFile, re.M|re.I)
gtfF = p.group(1) + ".gtf"
gtfFile = join(resultDir,gtfF)
print tasks.comment()
print colored("Stage 6: Count Mapped file that overlap with genome feature ... ", "green")
#print bamFile
#print gtfFile
print tasks.comment()
result = tasks.count_mapped(bamFile, outfile, gtfFile)
return result
#@graphviz(height=1.8, width=2, label="Format\ncount data")
#@follows(map_to_probes)
#@transform(join(tempDir, "*fastqReadsPerGene.out.tab"), formatter(".tab"), ".txt")
#def format_count(input,output):
#"""Prepare the count file to merge to a single file
#`input`: Count file from previous stage (*fastqReadsPerGene.out.tab)
#`output`: Formatted *.txt file with the same file name
#"""
#outfile = basename(input)
#out_suffix = splitext(outfile)[0]
#out_file_name = out_suffix + output
#out_file_name = join(tempDir, out_file_name)
#print tasks.comment()
#print colored("Stage 6: Formatting count file ... ", "green")
#print input
#print out_file_name
#print tasks.comment()
#result = tasks.formatCount(input,out_file_name)
#return result
@graphviz(height=1.8, width=2, label="Combine\ncount data")
@follows(count_mapped_reads)
#@collate(join(tempDir, "*sortedByCoord.out.count.txt"), formatter(".txt"), resultDir + "DATA_COUNT_countcombined.csv")
@collate(join(tempDir, "*fastqAligned.out.count.txt"), formatter(".txt"), resultDir + "DATA_COUNT_countcombined.csv")
def combine_count_data(input, output):
"""Combine count files
`input`: Formatted *.out.txt count files
`output`: A single summary count csv file named 'DATA_COUNT_countcombined.csv' under the project dir
"""
print tasks.comment()
#print input
#print output
print colored("Stage 7: Combining count data ...", "green")
print tasks.comment()
result = tasks.combineCount(input, output)
return result
@graphviz(height=1.8, width=2, label="Format\ncount data")
@follows(combine_count_data)
@transform(combine_count_data, suffix(".csv"), "_formated.csv")
def format_count(input,output):
"""Format count csv file
`input`: csv file
`output`: Formatted *.csv file
"""
print tasks.comment()
print colored("Stage 8: Formatting count file ... ", "green")
#print input
#print output
print tasks.comment()
result = tasks.formatCount(input,output)
return result
@graphviz(height=1.8, width=2, label="Alignment\nSummary")
@follows(map_to_probes)
@transform(join(tempDir, "*fastqLog.final.out"), formatter(".out"), ".txt")
def alignment_summary(input, output):
"""Generate Alignment summary
`input`: *fastqLog.final.out files
`output`: A *.txt file per alignment log file containing the extracted summary data
"""
outfile = basename(input)
out_suffix = splitext(outfile)[0]
out_file_name = out_suffix + output
out_file_name = join(tempDir, out_file_name)
print tasks.comment()
print colored("Stage 8: Generate Alingmnet summary ....", "green")
#print input
#print output
print tasks.comment()
result = tasks.alignmentSummary(input, out_file_name)
return result
@graphviz(height=1.8, width=2, label="Combine Alignment\nSummary")
@follows(alignment_summary)
@collate(join(tempDir, "*fastqLog.final.txt"), formatter(".txt"), resultDir + "DATA_alignment_summary.csv")
def combine_alignment_summary(input, output):
"""Combine formatted alignment log files
`input`: Formatted alignment stat log files (*fastqLog.final.txt)
`output`: Combined alignment stat csv file named (DATA_alignment_summary.csv)
"""
print tasks.comment()
#print input
#print output
print colored("Stage 9: Aggrigate alignment summary ....", "green")
print tasks.comment()
result = tasks.combineAlignmentSummary(input, output)
return result
@graphviz(height=1.8, width=2, label="plot\nalignment stat")
@follows(combine_alignment_summary)
@transform(combine_alignment_summary, formatter(".csv"), resultDir + "DATA_alignment_summary.png")
def plot_alignment_summary(input, output):
"""Plot alignment summary
`input`: Alignment summary csv file
`output`: output png file bar plot
"""
print tasks.comment()
print colored("Stage 10: Plot alignment summary ...", "green")
print input
print output
print tasks.comment()
result = tasks.plotAlignmentStat(input, output)
return result
def convertPs(psfile):
"""Utility function to convert ps file to pdf
during test
"""
if os.path.isfile(psfile):
cmd = "ps2pdf %s" % (psfile)
runCommand(cmd, "T")
else:
pass
return
def commands(commandlist, index):
'''
Just return the correct commands from commandlist based on index given
:param list commandlist: [(commandpath, commandfunc), ...]
:param int index: Index of item in each tuple to extract
'''
return map(lambda x: x[index], commandlist)
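# Usage sketch for commands() above: given (path, function) pairs, index 0 pulls out
# the paths and index 1 the callables. The tuples below are made-up examples, not
# commands used by this pipeline.
def _example_commands_usage():
    cmdlist = [("/usr/bin/STAR", "map"), ("/usr/bin/featureCounts", "count")]
    paths = commands(cmdlist, 0)    # ["/usr/bin/STAR", "/usr/bin/featureCounts"]
    names = commands(cmdlist, 1)    # ["map", "count"]
    return paths, names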
def main():
t0 = time.time()
print (" Starting time ..... :") + str(t0)
tasks_torun = [prepare_analysis, prepareDB_file, create_gtf_file, indexGenomeFile,
map_to_probes, format_count, combine_count_data, alignment_summary,
combine_alignment_summary,plot_alignment_summary]
pipeline_printout_graph('summary_pipeline_stages_to_run.ps', 'ps', tasks_torun, user_colour_scheme={"colour_scheme_index": 6},
no_key_legend=False, pipeline_name="TempO-seq Analysis", size=(11, 8), dpi = 30,
forcedtorun_tasks = [indexGenomeFile, combine_count_data],draw_vertically=True, ignore_upstream_of_target=False)
pipeline_run(["prepare_analysis", "prepareDB_file",'create_gtf_file', 'indexGenomeFile', 'map_to_probes','count_mapped_reads', 'combine_count_data', 'format_count', 'alignment_summary','combine_alignment_summary'],verbose = 1, multiprocess = cpuNum)
print "....................." + resultDir
tasks.comment()
psfile = options.flowchart
#psfile = "./summary_pipeline_stages_to_run.ps"
convertPs(psfile)
tasks.comment()
elapsedTime = int((time.time()) - t0)
elapsedTime = str(datetime.timedelta(seconds=elapsedTime))
print("Time to complete the task ....." ) + colored (elapsedTime, "red")
|
|
# Copyright 2013-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external dependencies useful for
# development purposes, such as testing, debugging, etc.
import glob
import os
import re
import pathlib
import shutil
import typing as T
from .. import mesonlib, mlog
from ..mesonlib import version_compare, stringlistify, extract_as_list, MachineChoice
from ..environment import get_llvm_tool_names
from .base import (
DependencyException, DependencyMethods, ExternalDependency, PkgConfigDependency,
strip_system_libdirs, ConfigToolDependency, CMakeDependency, DependencyFactory,
)
from .misc import threads_factory
from ..compilers.c import AppleClangCCompiler
from ..compilers.cpp import AppleClangCPPCompiler
if T.TYPE_CHECKING:
from ..envconfig import MachineInfo
from .. environment import Environment
def get_shared_library_suffix(environment, for_machine: MachineChoice):
"""This is only guaranteed to work for languages that compile to machine
code, not for languages like C# that use a bytecode and always end in .dll
"""
m = environment.machines[for_machine]
if m.is_windows():
return '.dll'
elif m.is_darwin():
return '.dylib'
return '.so'
class GTestDependencySystem(ExternalDependency):
def __init__(self, name: str, environment, kwargs):
super().__init__(name, environment, kwargs, language='cpp')
self.main = kwargs.get('main', False)
self.src_dirs = ['/usr/src/gtest/src', '/usr/src/googletest/googletest/src']
if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
self.is_found = False
return
self.detect()
def detect(self):
gtest_detect = self.clib_compiler.find_library("gtest", self.env, [])
gtest_main_detect = self.clib_compiler.find_library("gtest_main", self.env, [])
if gtest_detect and (not self.main or gtest_main_detect):
self.is_found = True
self.compile_args = []
self.link_args = gtest_detect
if self.main:
self.link_args += gtest_main_detect
self.sources = []
self.prebuilt = True
elif self.detect_srcdir():
self.is_found = True
self.compile_args = ['-I' + d for d in self.src_include_dirs]
self.link_args = []
if self.main:
self.sources = [self.all_src, self.main_src]
else:
self.sources = [self.all_src]
self.prebuilt = False
else:
self.is_found = False
def detect_srcdir(self):
for s in self.src_dirs:
if os.path.exists(s):
self.src_dir = s
self.all_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest-all.cc'))
self.main_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest_main.cc'))
self.src_include_dirs = [os.path.normpath(os.path.join(self.src_dir, '..')),
os.path.normpath(os.path.join(self.src_dir, '../include')),
]
return True
return False
def log_info(self):
if self.prebuilt:
return 'prebuilt'
else:
return 'building self'
def log_tried(self):
return 'system'
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM]
class GTestDependencyPC(PkgConfigDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
assert name == 'gtest'
if kwargs.get('main'):
name = 'gtest_main'
super().__init__(name, environment, kwargs)
class GMockDependencySystem(ExternalDependency):
def __init__(self, name: str, environment, kwargs):
super().__init__(name, environment, kwargs, language='cpp')
self.main = kwargs.get('main', False)
if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
self.is_found = False
return
# If we are getting main() from GMock, we definitely
# want to avoid linking in main() from GTest
gtest_kwargs = kwargs.copy()
if self.main:
gtest_kwargs['main'] = False
# GMock without GTest is pretty much useless
# this also mimics the structure given in WrapDB,
# where GMock always pulls in GTest
found = self._add_sub_dependency(gtest_factory(environment, self.for_machine, gtest_kwargs))
if not found:
self.is_found = False
return
# GMock may be a library or just source.
# Work with both.
gmock_detect = self.clib_compiler.find_library("gmock", self.env, [])
gmock_main_detect = self.clib_compiler.find_library("gmock_main", self.env, [])
if gmock_detect and (not self.main or gmock_main_detect):
self.is_found = True
self.link_args += gmock_detect
if self.main:
self.link_args += gmock_main_detect
self.prebuilt = True
return
for d in ['/usr/src/googletest/googlemock/src', '/usr/src/gmock/src', '/usr/src/gmock']:
if os.path.exists(d):
self.is_found = True
# Yes, we need both because there are multiple
# versions of gmock that do different things.
d2 = os.path.normpath(os.path.join(d, '..'))
self.compile_args += ['-I' + d, '-I' + d2, '-I' + os.path.join(d2, 'include')]
all_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock-all.cc'))
main_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock_main.cc'))
if self.main:
self.sources += [all_src, main_src]
else:
self.sources += [all_src]
self.prebuilt = False
return
self.is_found = False
def log_info(self):
if self.prebuilt:
return 'prebuilt'
else:
return 'building self'
def log_tried(self):
return 'system'
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM]
class GMockDependencyPC(PkgConfigDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
assert name == 'gmock'
if kwargs.get('main'):
name = 'gmock_main'
super().__init__(name, environment, kwargs)
class LLVMDependencyConfigTool(ConfigToolDependency):
"""
LLVM uses a special tool, llvm-config, which has arguments for getting
c args, cxx args, and ldargs as well as version.
"""
tool_name = 'llvm-config'
__cpp_blacklist = {'-DNDEBUG'}
def __init__(self, name: str, environment, kwargs):
self.tools = get_llvm_tool_names('llvm-config')
# Fedora starting with Fedora 30 adds a suffix of the number
# of bits in the isa that llvm targets, for example, on x86_64
# and aarch64 the name will be llvm-config-64, on x86 and arm
# it will be llvm-config-32.
if environment.machines[self.get_for_machine_from_kwargs(kwargs)].is_64_bit:
self.tools.append('llvm-config-64')
else:
self.tools.append('llvm-config-32')
# It's necessary for LLVM <= 3.8 to use the C++ linker. For 3.9 and 4.0
# the C linker works fine if only using the C API.
super().__init__(name, environment, kwargs, language='cpp')
self.provided_modules = []
self.required_modules = set()
self.module_details = []
if not self.is_found:
return
self.provided_modules = self.get_config_value(['--components'], 'modules')
modules = stringlistify(extract_as_list(kwargs, 'modules'))
self.check_components(modules)
opt_modules = stringlistify(extract_as_list(kwargs, 'optional_modules'))
self.check_components(opt_modules, required=False)
cargs = set(self.get_config_value(['--cppflags'], 'compile_args'))
self.compile_args = list(cargs.difference(self.__cpp_blacklist))
if version_compare(self.version, '>= 3.9'):
self._set_new_link_args(environment)
else:
self._set_old_link_args()
self.link_args = strip_system_libdirs(environment, self.for_machine, self.link_args)
self.link_args = self.__fix_bogus_link_args(self.link_args)
if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
self.is_found = False
return
def __fix_bogus_link_args(self, args):
"""This function attempts to fix bogus link arguments that llvm-config
generates.
Currently it works around the following:
- FreeBSD: when statically linking -l/usr/lib/libexecinfo.so will
be generated, strip the -l in cases like this.
- Windows: We may get -LIBPATH:... which is later interpreted as
"-L IBPATH:...", if we're using an msvc like compilers convert
that to "/LIBPATH", otherwise to "-L ..."
"""
cpp = self.env.coredata.compilers[self.for_machine]['cpp']
new_args = []
for arg in args:
if arg.startswith('-l') and arg.endswith('.so'):
new_args.append(arg.lstrip('-l'))
elif arg.startswith('-LIBPATH:'):
new_args.extend(cpp.get_linker_search_args(arg.lstrip('-LIBPATH:')))
else:
new_args.append(arg)
return new_args
def __check_libfiles(self, shared):
"""Use llvm-config's --libfiles to check if libraries exist."""
mode = '--link-shared' if shared else '--link-static'
# Set self.required to true to force an exception in get_config_value
# if the returncode != 0
restore = self.required
self.required = True
try:
# It doesn't matter what the stage is, the caller needs to catch
# the exception anyway.
self.link_args = self.get_config_value(['--libfiles', mode], '')
finally:
self.required = restore
def _set_new_link_args(self, environment):
"""How to set linker args for LLVM versions >= 3.9"""
try:
mode = self.get_config_value(['--shared-mode'], 'link_args')[0]
except IndexError:
mlog.debug('llvm-config --shared-mode returned an error')
self.is_found = False
return
if not self.static and mode == 'static':
# If llvm is configured with LLVM_BUILD_LLVM_DYLIB but not with
# LLVM_LINK_LLVM_DYLIB and not LLVM_BUILD_SHARED_LIBS (which
# upstream doesn't recommend using), then llvm-config will lie to
you about how to do shared-linking. It wants to link to a bunch
of individual shared libs (which don't exist because llvm wasn't
built with LLVM_BUILD_SHARED_LIBS).
#
# Therefore, we'll try to get the libfiles, if the return code is 0
# or we get an empty list, then we'll try to build a working
# configuration by hand.
try:
self.__check_libfiles(True)
except DependencyException:
lib_ext = get_shared_library_suffix(environment, self.for_machine)
libdir = self.get_config_value(['--libdir'], 'link_args')[0]
# Sort for reproducibility
matches = sorted(glob.iglob(os.path.join(libdir, f'libLLVM*{lib_ext}')))
if not matches:
if self.required:
raise
self.is_found = False
return
self.link_args = self.get_config_value(['--ldflags'], 'link_args')
libname = os.path.basename(matches[0]).rstrip(lib_ext).lstrip('lib')
self.link_args.append(f'-l{libname}')
return
elif self.static and mode == 'shared':
# If, however, LLVM_BUILD_SHARED_LIBS is true (*cough* gentoo *cough*)
# then this is correct. Building with LLVM_BUILD_SHARED_LIBS has a side
# effect, it stops the generation of static archives. Therefore we need
# to check for that and error out on static if this is the case
try:
self.__check_libfiles(False)
except DependencyException:
if self.required:
raise
self.is_found = False
return
link_args = ['--link-static', '--system-libs'] if self.static else ['--link-shared']
self.link_args = self.get_config_value(
['--libs', '--ldflags'] + link_args + list(self.required_modules),
'link_args')
def _set_old_link_args(self):
"""Setting linker args for older versions of llvm.
Old versions of LLVM bring an extra level of insanity with them.
llvm-config will provide the correct arguments for static linking, but
not for shared-linking; we have to figure those out ourselves, because
of course we do.
"""
if self.static:
self.link_args = self.get_config_value(
['--libs', '--ldflags', '--system-libs'] + list(self.required_modules),
'link_args')
else:
# llvm-config will provide arguments for static linking, so we get
# to figure out for ourselves what to link with. We'll do that by
# checking in the directory provided by --libdir for a library
# called libLLVM-<ver>.(so|dylib|dll)
libdir = self.get_config_value(['--libdir'], 'link_args')[0]
expected_name = f'libLLVM-{self.version}'
re_name = re.compile(fr'{expected_name}.(so|dll|dylib)$')
for file_ in os.listdir(libdir):
if re_name.match(file_):
self.link_args = [f'-L{libdir}',
'-l{}'.format(os.path.splitext(file_.lstrip('lib'))[0])]
break
else:
raise DependencyException(
'Could not find a dynamically linkable library for LLVM.')
def check_components(self, modules, required=True):
"""Check for llvm components (modules in meson terms).
The required option is whether the module is required, not whether LLVM
is required.
"""
for mod in sorted(set(modules)):
status = ''
if mod not in self.provided_modules:
if required:
self.is_found = False
if self.required:
raise DependencyException(
f'Could not find required LLVM Component: {mod}')
status = '(missing)'
else:
status = '(missing but optional)'
else:
self.required_modules.add(mod)
self.module_details.append(mod + status)
def log_details(self):
if self.module_details:
return 'modules: ' + ', '.join(self.module_details)
return ''
class LLVMDependencyCMake(CMakeDependency):
def __init__(self, name: str, env, kwargs):
self.llvm_modules = stringlistify(extract_as_list(kwargs, 'modules'))
self.llvm_opt_modules = stringlistify(extract_as_list(kwargs, 'optional_modules'))
super().__init__(name, env, kwargs, language='cpp')
# Cmake will always create a statically linked binary, so don't use
# cmake if dynamic is required
if not self.static:
self.is_found = False
mlog.warning('Ignoring LLVM CMake dependency because dynamic was requested')
return
if self.traceparser is None:
return
# Extract extra include directories and definitions
inc_dirs = self.traceparser.get_cmake_var('PACKAGE_INCLUDE_DIRS')
defs = self.traceparser.get_cmake_var('PACKAGE_DEFINITIONS')
# LLVM explicitly uses space-separated variables rather than semicolon lists
if len(defs) == 1:
defs = defs[0].split(' ')
temp = ['-I' + x for x in inc_dirs] + defs
self.compile_args += [x for x in temp if x not in self.compile_args]
if not self._add_sub_dependency(threads_factory(env, self.for_machine, {})):
self.is_found = False
return
def _main_cmake_file(self) -> str:
# Use a custom CMakeLists.txt for LLVM
return 'CMakeListsLLVM.txt'
def _extra_cmake_opts(self) -> T.List[str]:
return ['-DLLVM_MESON_MODULES={}'.format(';'.join(self.llvm_modules + self.llvm_opt_modules))]
def _map_module_list(self, modules: T.List[T.Tuple[str, bool]], components: T.List[T.Tuple[str, bool]]) -> T.List[T.Tuple[str, bool]]:
res = []
for mod, required in modules:
cm_targets = self.traceparser.get_cmake_var(f'MESON_LLVM_TARGETS_{mod}')
if not cm_targets:
if required:
raise self._gen_exception(f'LLVM module {mod} was not found')
else:
mlog.warning('Optional LLVM module', mlog.bold(mod), 'was not found')
continue
for i in cm_targets:
res += [(i, required)]
return res
def _original_module_name(self, module: str) -> str:
orig_name = self.traceparser.get_cmake_var(f'MESON_TARGET_TO_LLVM_{module}')
if orig_name:
return orig_name[0]
return module
class ValgrindDependency(PkgConfigDependency):
'''
Consumers of Valgrind usually only need the compile args and do not want to
link to its (static) libraries.
'''
def __init__(self, env, kwargs):
super().__init__('valgrind', env, kwargs)
def get_link_args(self, **kwargs):
return []
class ZlibSystemDependency(ExternalDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
m = self.env.machines[self.for_machine]
# I'm not sure this is entirely correct. What if we're cross compiling
# from something to macOS?
if ((m.is_darwin() and isinstance(self.clib_compiler, (AppleClangCCompiler, AppleClangCPPCompiler))) or
m.is_freebsd() or m.is_dragonflybsd()):
self.is_found = True
self.link_args = ['-lz']
# No need to set includes,
# on macos xcode/clang will do that for us.
# on freebsd zlib.h is in /usr/include
elif m.is_windows():
if self.clib_compiler.get_argument_syntax() == 'msvc':
libs = ['zlib1', 'zlib']
else:
libs = ['z']
for lib in libs:
l = self.clib_compiler.find_library(lib, environment, [])
h = self.clib_compiler.has_header('zlib.h', '', environment, dependencies=[self])
if l and h[0]:
self.is_found = True
self.link_args = l
break
else:
return
else:
mlog.debug(f'Unsupported OS {m.system}')
return
v, _ = self.clib_compiler.get_define('ZLIB_VERSION', '#include <zlib.h>', self.env, [], [self])
self.version = v.strip('"')
@staticmethod
def get_methods():
return [DependencyMethods.SYSTEM]
class JDKSystemDependency(ExternalDependency):
def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__('jdk', environment, kwargs)
m = self.env.machines[self.for_machine]
if 'java' not in environment.coredata.compilers[self.for_machine]:
environment.detect_compiler_for('java', self.for_machine)
self.javac = environment.coredata.compilers[self.for_machine]['java']
self.version = self.javac.version
if 'version' in kwargs and not version_compare(self.version, kwargs['version']):
mlog.error(f'Incorrect JDK version found ({self.version}), wanted {kwargs["version"]}')
self.is_found = False
return
self.java_home = environment.properties[self.for_machine].get_java_home()
if not self.java_home:
self.java_home = pathlib.Path(shutil.which(self.javac.exelist[0])).resolve().parents[1]
platform_include_dir = self.__machine_info_to_platform_include_dir(m)
if platform_include_dir is None:
mlog.error("Could not find a JDK platform include directory for your OS, please open an issue or provide a pull request.")
self.is_found = False
return
java_home_include = self.java_home / 'include'
self.compile_args.append(f'-I{java_home_include}')
self.compile_args.append(f'-I{java_home_include / platform_include_dir}')
self.is_found = True
@staticmethod
def get_methods() -> T.List[DependencyMethods]:
return [DependencyMethods.SYSTEM]
@staticmethod
def __machine_info_to_platform_include_dir(m: 'MachineInfo') -> T.Optional[str]:
"""Translates the machine information to the platform-dependent include directory
When inspecting a JDK release tarball or $JAVA_HOME, inside the `include/` directory is a
platform dependent folder that must be on the target's include path in addition to the
parent `include/` directory.
"""
if m.is_linux():
return 'linux'
elif m.is_windows():
return 'win32'
elif m.is_darwin():
return 'darwin'
return None
llvm_factory = DependencyFactory(
'LLVM',
[DependencyMethods.CMAKE, DependencyMethods.CONFIG_TOOL],
cmake_class=LLVMDependencyCMake,
configtool_class=LLVMDependencyConfigTool,
)
gtest_factory = DependencyFactory(
'gtest',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
pkgconfig_class=GTestDependencyPC,
system_class=GTestDependencySystem,
)
gmock_factory = DependencyFactory(
'gmock',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
pkgconfig_class=GMockDependencyPC,
system_class=GMockDependencySystem,
)
zlib_factory = DependencyFactory(
'zlib',
[DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE, DependencyMethods.SYSTEM],
cmake_name='ZLIB',
system_class=ZlibSystemDependency,
)
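# LLVMDependencyConfigTool.__fix_bogus_link_args above describes two llvm-config
# quirks: '-l/absolute/path/libfoo.so' on FreeBSD and '-LIBPATH:...' on Windows.
# Below is a standalone sketch of the same clean-up, without Meson's compiler
# objects; the msvc handling is deliberately simplified compared to the real method.
def _example_fix_link_args(args, msvc=False):
    fixed = []
    for arg in args:
        if arg.startswith('-l/') and arg.endswith('.so'):
            fixed.append(arg[2:])                       # drop the bogus '-l' prefix
        elif arg.startswith('-LIBPATH:'):
            path = arg[len('-LIBPATH:'):]
            fixed.append('/LIBPATH:' + path if msvc else '-L' + path)
        else:
            fixed.append(arg)
    return fixed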
|
|
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""LPAR, the EntryWrapper for LogicalPartition."""
from oslo_log import log as logging
import pypowervm.const as pc
from pypowervm.i18n import _
import pypowervm.util as u
import pypowervm.wrappers.base_partition as bp
import pypowervm.wrappers.entry_wrapper as ewrap
LOG = logging.getLogger(__name__)
_LPAR_MIG_STG_VIOS_DATA_STATUS = 'MigrationStorageViosDataStatus'
_LPAR_MIG_STG_VIOS_DATA_TIME = 'MigrationStorageViosDataTimestamp'
_LPAR_RR = 'RemoteRestartCapable'
_LPAR_SRR = 'SimplifiedRemoteRestartCapable'
_LPAR_HAS_DED_PROCS_FOR_MIG = 'HasDedicatedProcessorsForMigration'
_LPAR_SUSPEND_CAP = 'SuspendCapable'
_LPAR_MIG_DISABLE = 'MigrationDisable'
_LPAR_MIG_STATE = 'MigrationState'
_LPAR_RR_STATE = 'RemoteRestartState'
_LPAR_PRI_PGING_SVC_PART = 'PrimaryPagingServicePartition'
_LPAR_POWER_MGT_MODE = 'PowerManagementMode'
_LPAR_SEC_PGING_SVC_PART = 'SecondaryPagingServicePartition'
_LPAR_USES_HSL_OPTICONN = 'UsesHighSpeedLinkOpticonnect'
_LPAR_USES_VIRT_OPTICONN = 'UsesVirtualOpticonnect'
_LPAR_VFC_CLIENT_ADPTS = 'VirtualFibreChannelClientAdapters'
_LPAR_VSCSI_CLIENT_ADPTS = 'VirtualSCSIClientAdapters'
_LPAR_RESTRICTED_IO = 'IsRestrictedIOPartition'
_LPAR_STG_DEV_UDID = 'StorageDeviceUniqueDeviceID'
_LPAR_DES_IPL_SRC = 'DesignatedIPLSource'
_LPAR_DED_VNICS = 'DedicatedVirtualNICs'
_LPAR_BOOTLIST_INFO = 'BootListInformation'
_LPAR_EL_ORDER = bp.BP_EL_ORDER + (
_LPAR_MIG_STG_VIOS_DATA_STATUS, _LPAR_MIG_STG_VIOS_DATA_TIME, _LPAR_RR,
_LPAR_SRR, _LPAR_HAS_DED_PROCS_FOR_MIG, _LPAR_SUSPEND_CAP,
_LPAR_MIG_DISABLE, _LPAR_MIG_STATE, _LPAR_RR_STATE,
_LPAR_PRI_PGING_SVC_PART, _LPAR_POWER_MGT_MODE, _LPAR_SEC_PGING_SVC_PART,
_LPAR_USES_HSL_OPTICONN, _LPAR_USES_VIRT_OPTICONN, _LPAR_VFC_CLIENT_ADPTS,
_LPAR_VSCSI_CLIENT_ADPTS, _LPAR_RESTRICTED_IO, _LPAR_STG_DEV_UDID,
_LPAR_DES_IPL_SRC, _LPAR_DED_VNICS, _LPAR_BOOTLIST_INFO)
class IPLSrc(object):
"""Mirror of IPLSource.Enum (relevant to IBMi partitions only).
Valid values for:
- LPAR.desig_ipl_src
- 'iIPLsource' param in pypowervm.power.power_on.
Example usage:
- ilpar.desig_ipl_src = IPLSrc.C
ilpar.update()
- power_on(..., add_parms={IPLSrc.KEY: IPLSrc.A, ...})
"""
KEY = 'iIPLsource'
A = 'a'
B = 'b'
C = 'c'
D = 'd'
UNKNOWN = 'Unknown'
ALL_VALUES = (A, B, C, D, UNKNOWN)
class RRState(object):
"""Remote Restart states - mirror of PartitionRemoteRestart.Enum."""
INVALID = "Invalid"
RR_ABLE = "Remote_Restartable"
SRC_RRING = "Source_Remote_Restarting"
DEST_RRING = "Destination_Remote_Restarting"
REM_RESTARTED = "Remote_Restarted"
PROF_RESTORED = "Profile_Restored"
RES_STG_DEV_UPD_FAIL = "Reserved_Storage_Device_Update_Failed"
FORCED_SRC_RESTART = "Forced_Source_Side_Restart"
SRC_CLEANUP_FAIL = "Source_Side_Cleanup_Failed"
RES_STG_DEV_UPD_FAIL_W_OVRD = ("Reserved_Storage_Device_Update_Failed_With"
"_Override")
RR_ABLE_SUSPENDED = "Remote_Restartable_Suspended"
LOC_UPD_FAIL = "Local_Update_Failed"
PART_UPD = "Partial_Update"
STALE_DATA = "Stale_Data"
LOC_DATA_VALID = "Local_Data_Valid"
OUT_OF_SPACE = "Out_Of_Space"
LOC_DATA_INVALID = "Local_Data_Invalid"
DEST_RR_ED = "Destination_Remote_Restarted"
SRC_RRING_SUSPENDED = "Source_Remote_Restarting_Suspended"
LOC_STG_UPD_FAIL = "Local_Storage_Update_Failed"
PG_DEV_UPD_OVRD = "Page_Device_Update_Override"
class BootStorageType(object):
"""Enumeration of possible storage connection methods for devices."""
VSCSI = 'vscsi'
VFC = 'npiv'
UNKNOWN = 'Unknown'
ALL_VALUES = (VSCSI, VFC, UNKNOWN)
@ewrap.EntryWrapper.pvm_type('LogicalPartition',
child_order=_LPAR_EL_ORDER)
class LPAR(bp.BasePartition, ewrap.WrapperSetUUIDMixin):
@classmethod
def bld(cls, adapter, name, mem_cfg, proc_cfg, env=bp.LPARType.AIXLINUX,
io_cfg=None):
"""Creates an LPAR wrapper.
Thin wrapper around BasePartition._bld_base, defaulting env.
"""
return super(LPAR, cls)._bld_base(adapter, name, mem_cfg, proc_cfg,
env, io_cfg)
def _can_modify(self, dlpar_cap, cap_desc):
"""Checks to determine if the LPAR can be modified.
:param dlpar_cap: The appropriate DLPAR attribute to validate. Only
used if system is active.
:param cap_desc: A translated string indicating the DLPAR capability.
:return capable: True if HW can be added/removed. False otherwise.
:return reason: A translated message that will indicate why it was not
capable of modification. If capable is True, the
reason will be None.
"""
# If we are in the LPAR, we have access to the operating system type.
# If it is an OS400 type, then we can add/remove HW no matter what.
if self.env == bp.LPARType.OS400:
return True, None
return super(LPAR, self)._can_modify(dlpar_cap, cap_desc)
def can_lpm(self, host_w, migr_data=None):
"""Determines if a LPAR is ready for Live Partition Migration.
This check validates that the target system is capable of
handling the LPAR if the LPAR is an IBMi. It simply validates that
the LPAR has the essential capabilities in place for a LPM operation.
:param host_w: The host wrapper for the system.
:param migr_data: The dictionary of migration data for the target host.
If parameters are not passed in, the check is skipped
and the lower levels will surface any related error.
The supported key today is:
- ibmi_lpar_mobility_capable: Boolean
TODO(IBM): add more destination checks here. Ex.
migrate an AIX or IBMi VM to a Linux only host.
:return capable: True if the LPAR is LPM capable. False otherwise.
:return reason: A translated message that will indicate why it was not
capable of LPM. If capable is True, the reason will
be None.
"""
# First check is the not activated state
if self.state != bp.LPARState.RUNNING:
return False, _("LPAR is not in an active state.")
if self.env == bp.LPARType.OS400:
# IBM i does not require RMC, but does need to check for target
# host and source host are capable for IBMi mobility and
# restricted I/O.
if migr_data is not None:
c = migr_data.get('ibmi_lpar_mobility_capable')
if c is not None and not c:
return False, _('Target system does not have the IBM i'
' LPAR Mobility Capability.')
if not self.restrictedio:
return False, _('IBM i LPAR does not have restricted I/O.')
if not host_w.get_capability('ibmi_lpar_mobility_capable'):
return False, _('Source system does not have the IBM i'
' LPAR Mobility Capability.')
elif self.rmc_state != bp.RMCState.ACTIVE:
return False, _('LPAR does not have an active RMC connection.')
if self.is_mgmt_partition:
return False, _('LPAR is the management partition')
c = self.capabilities
if not (c.mem_dlpar and c.proc_dlpar):
return False, _('LPAR is not available for LPM due to missing '
'DLPAR capabilities.')
return True, None
@property
def migration_state(self):
"""See PartitionMigrationStateEnum.
e.g. 'Not_Migrating', 'Migration_Starting', 'Migration_Failed', etc.
Defaults to 'Not_Migrating'
"""
return self._get_val_str(_LPAR_MIG_STATE, 'Not_Migrating')
@property
def rr_enabled(self):
"""Deprecated (n/a for NovaLink) - use srr_enabled instead."""
import warnings
warnings.warn(_("This is not the property you are looking for. Use "
"srr_enabled in a NovaLink environment."),
DeprecationWarning)
return None
@rr_enabled.setter
def rr_enabled(self, value):
"""Deprecated (n/a for NovaLink) - use srr_enabled instead."""
import warnings
warnings.warn(_("This is not the property you are looking for. Use "
"srr_enabled in a NovaLink environment."),
DeprecationWarning)
@property
def rr_state(self):
"""Deprecated (n/a for NovaLink) - use srr_enabled instead."""
import warnings
warnings.warn(_("This is not the property you are looking for. Use "
"srr_enabled in a NovaLink environment."),
DeprecationWarning)
return None
@property
def srr_enabled(self):
"""Simplied remote restart.
:returns: Returns SRR config boolean
"""
return self._get_val_bool(_LPAR_SRR, False)
@srr_enabled.setter
def srr_enabled(self, value):
self.set_parm_value(_LPAR_SRR, u.sanitize_bool_for_api(value),
attrib=pc.ATTR_KSV120)
@property
def restrictedio(self):
return self._get_val_bool(_LPAR_RESTRICTED_IO, False)
@restrictedio.setter
def restrictedio(self, value):
self.set_parm_value(_LPAR_RESTRICTED_IO,
u.sanitize_bool_for_api(value))
@property
def desig_ipl_src(self):
"""Designated IPL Source - see IPLSrc enumeration."""
return self._get_val_str(_LPAR_DES_IPL_SRC)
@desig_ipl_src.setter
def desig_ipl_src(self, value):
"""Designated IPL Source - see IPLSrc enumeration."""
if value not in IPLSrc.ALL_VALUES:
raise ValueError(_("Invalid IPLSrc '%s'.") % value)
self.set_parm_value(_LPAR_DES_IPL_SRC, value)
def set_uuid(self, value):
# LPAR uuids must be uppercase.
up_uuid = str(value).upper()
super(LPAR, self).set_uuid(up_uuid)
self.set_parm_value(bp._BP_UUID, up_uuid)
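# Usage sketch for LPAR.can_lpm(): callers typically gate a Live Partition Migration
# on the (capable, reason) pair it returns. 'lpar_w' and 'host_w' stand for wrappers
# already retrieved via the REST API; the migr_data key shown is the one documented
# in the method above.
def _example_lpm_precheck(lpar_w, host_w):
    capable, reason = lpar_w.can_lpm(
        host_w, migr_data={'ibmi_lpar_mobility_capable': True})
    if not capable:
        LOG.warning("Skipping LPM for %s: %s", lpar_w.name, reason)
    return capable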
|
|
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.utils """
from __future__ import with_statement
from test.unit import temptree
import logging
import mimetools
import os
import errno
import socket
import sys
import time
import unittest
from getpass import getuser
from shutil import rmtree
from StringIO import StringIO
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile
from eventlet import sleep
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout
from swift.common import utils
class MockOs():
def __init__(self, pass_funcs=[], called_funcs=[], raise_funcs=[]):
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockSys():
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
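# The normalize_timestamp cases in TestUtils below all reduce to "zero-pad the
# integer seconds to 10 digits and keep exactly 5 decimal places". A reference
# sketch consistent with those expected values (not necessarily the actual
# swift.common.utils implementation):
def _example_normalize_timestamp(timestamp):
    return "%016.05f" % float(timestamp)   # e.g. 253327593.48 -> '0253327593.48000'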
class TestUtils(unittest.TestCase):
""" Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
def test_normalize_timestamp(self):
""" Test swift.common.utils.normalize_timestamp """
self.assertEquals(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEquals(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEquals(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEquals(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEquals(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEquals(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEquals(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEquals(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_mkdirs(self):
testroot = os.path.join(os.path.dirname(__file__), 'mkdirs')
try:
os.unlink(testroot)
except Exception:
pass
rmtree(testroot, ignore_errors=1)
self.assert_(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assert_(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assert_(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assert_(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assert_(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assert_(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assert_(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
def test_split_path(self):
""" Test swift.common.utils.split_account_path """
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEquals(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEquals(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEquals(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEquals(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEquals(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEquals(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEquals(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEquals(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError, err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError, err:
self.assertEquals(str(err), 'Invalid path: o%0An%20e')
def test_NullLogger(self):
""" Test swift.common.utils.NullLogger """
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEquals(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo = utils.LoggerFileObject(logger)
print 'test1'
self.assertEquals(sio.getvalue(), '')
sys.stdout = lfo
print 'test2'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo
print >> sys.stderr, 'test4'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
sys.stdout = orig_stdout
print 'test5'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
print >> sys.stderr, 'test6'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
sys.stderr = orig_stderr
print 'test8'
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\n')
lfo.writelines(['a', 'b', 'c'])
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\n')
lfo.close()
lfo.write('d')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo.flush()
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assert_(got_exc)
got_exc = False
try:
for line in lfo.xreadlines():
pass
except Exception:
got_exc = True
self.assert_(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_parse_options(self):
        # use NamedTemporaryFile (mkstemp under the hood) to get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEquals(conf, conf_file)
# assert defaults
self.assertEquals(options['verbose'], False)
self.assert_('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEquals(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEquals(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEquals(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEquals(options['verbose'], True)
self.assertEquals(options['once'], True)
self.assertEquals(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assert_('missing config file' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assert_('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warn('test1')
self.assertEquals(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEquals(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all the
        # way to syslog, but it does exercise the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warn('test4')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEquals(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEquals(strip_value(sio), '')
logger.info('test')
self.assertEquals(strip_value(sio), 'test\n')
self.assertEquals(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEquals(strip_value(sio), 'test\ntest\n')
self.assertEquals(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assert_('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('errno.ECONNREFUSED message test' not in log_msg)
self.assert_('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my error message' not in log_msg)
self.assert_('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('my error message' not in log_msg)
self.assert_('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' in log_msg)
self.assert_('my error message' in log_msg)
# test eventlet.Timeout
log_exception(ConnectionTimeout(42, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('ConnectionTimeout' in log_msg)
self.assert_('(42s)' in log_msg)
self.assert_('my error message' not in log_msg)
log_exception(MessageTimeout(42, 'my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' not in log_msg)
self.assert_('MessageTimeout' in log_msg)
self.assert_('(42s)' in log_msg)
self.assert_('my error message' in log_msg)
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assert_('Traceback' in log_msg)
self.assert_('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
reset_loggers()
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assert_('my error message' in log_msg)
self.assert_('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assert_('txn' in log_msg)
self.assert_('12345' in log_msg)
# test no txn on info message
self.assertEquals(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assert_('txn' not in log_msg)
self.assert_('12345' not in log_msg)
# test txn already in message
self.assertEquals(logger.txn_id, '12345')
logger.warn('test 12345 test')
self.assertEquals(strip_value(sio), 'test 12345 test\n')
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assert_('my error message' in log_msg)
self.assert_('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assert_('client_ip' in log_msg)
self.assert_('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEquals(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assert_('client_ip' not in log_msg)
self.assert_('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEquals(logger.client_ip, '1.2.3.4')
logger.warn('test 1.2.3.4 test 12345')
self.assertEquals(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
reset_loggers()
def test_storage_directory(self):
self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_whataremyips(self):
myips = utils.whataremyips()
self.assert_(len(myips) > 1)
self.assert_('127.0.0.1' in myips)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it.
self.assertEquals(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEquals(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEquals(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
def test_load_libc_function(self):
self.assert_(callable(
utils.load_libc_function('printf')))
self.assert_(callable(
utils.load_libc_function('some_not_real_function')))
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
with open('/tmp/test', 'wb') as f:
f.write(conf)
make_filename = lambda: '/tmp/test'
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEquals(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEquals(result, expected)
self.assertRaises(SystemExit, utils.readconf, '/tmp/test', 'section3')
os.unlink('/tmp/test')
self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
with open('/tmp/test', 'wb') as f:
f.write(conf)
make_filename = lambda: '/tmp/test'
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEquals(result, expected)
os.unlink('/tmp/test')
self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgid', 'setuid', 'setsid', 'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assert_(utils.os.called_funcs[func])
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assert_(utils.os.called_funcs[func])
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assert_(utils.sys.excepthook is not None)
self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assert_(utils.sys.excepthook is not None)
self.assertEquals(utils.os.closed_fds, [])
self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assert_(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
reset_loggers()
finally:
utils.sys = _orig_sys
utils.os = _orig_os
def test_get_logger_console(self):
reset_loggers()
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assert_(console_handlers)
# make sure you can't have two console handlers
self.assertEquals(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEquals(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEquals(new_handler, old_handler)
reset_loggers()
def test_ratelimit_sleep(self):
running_time = 0
start = time.time()
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.assertTrue(abs((time.time() - start) * 100) < 1)
running_time = 0
start = time.time()
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
        # make sure it's accurate to a tenth of a second
self.assertTrue(abs(25 - (time.time() - start) * 100) < 10)
def test_ratelimit_sleep_with_incr(self):
running_time = 0
start = time.time()
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 250 (with no -1)
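        # Descriptive note: at a maximum rate of 500 increments/sec, 250
        # increments should take roughly 0.5s; the assertion below checks the
        # elapsed time to within a tenth of a second.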
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEquals(parsed.scheme, 'http')
self.assertEquals(parsed.hostname, '127.0.0.1')
self.assertEquals(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEquals(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEquals(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEquals(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEquals(parsed.hostname, '::1')
self.assertEquals(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEquals(parsed.hostname, '')
def test_ratelimit_sleep_with_sleep(self):
running_time = 0
start = time.time()
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
        # make sure it's accurate to a tenth of a second
self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEquals(len(asdf), 1)
self.assertEquals(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEquals(len(app_bins), 2)
self.assertEquals(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEquals(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEquals(len(sub_ini), 1)
self.assertEquals(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEquals(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assert_(f in folder_texts)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEquals(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEquals(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEquals(os.path.exists(file_name), False)
self.assertEquals(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assert_(os.path.exists(file_name))
self.assertEquals(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
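        # Descriptive note (inferred from the assertions below): human_readable
        # rounds to the nearest binary-prefixed unit, so 1535 (~1.4994 Ki)
        # rounds down to '1Ki', 1536 (1.5 Ki) rounds up to '2Ki', and
        # 1048064 (1023.5 Ki) rolls over to '1Mi'.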
self.assertEquals(utils.human_readable(0), '0')
self.assertEquals(utils.human_readable(1), '1')
self.assertEquals(utils.human_readable(10), '10')
self.assertEquals(utils.human_readable(100), '100')
self.assertEquals(utils.human_readable(999), '999')
self.assertEquals(utils.human_readable(1024), '1Ki')
self.assertEquals(utils.human_readable(1535), '1Ki')
self.assertEquals(utils.human_readable(1536), '2Ki')
self.assertEquals(utils.human_readable(1047552), '1023Ki')
self.assertEquals(utils.human_readable(1048063), '1023Ki')
self.assertEquals(utils.human_readable(1048064), '1Mi')
self.assertEquals(utils.human_readable(1048576), '1Mi')
self.assertEquals(utils.human_readable(1073741824), '1Gi')
self.assertEquals(utils.human_readable(1099511627776), '1Ti')
self.assertEquals(utils.human_readable(1125899906842624), '1Pi')
self.assertEquals(utils.human_readable(1152921504606846976), '1Ei')
self.assertEquals(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEquals(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEquals(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
for goodurl in ('http://1.1.1.1/v1/a/c/o',
'http://1.1.1.1:8080/a/c/o',
'http://2.2.2.2/a/c/o',
'https://1.1.1.1/v1/a/c/o'):
self.assertEquals(utils.validate_sync_to(goodurl,
['1.1.1.1', '2.2.2.2']),
None)
for badurl in ('http://1.1.1.1',
'httpq://1.1.1.1/v1/a/c/o',
'http://1.1.1.1/v1/a/c/o?query',
'http://1.1.1.1/v1/a/c/o#frag',
'http://1.1.1.1/v1/a/c/o?query#frag',
'http://1.1.1.1/v1/a/c/o?query=param',
'http://1.1.1.1/v1/a/c/o?query=param#frag',
'http://1.1.1.2/v1/a/c/o'):
self.assertNotEquals(utils.validate_sync_to(badurl,
['1.1.1.1', '2.2.2.2']),
None)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEquals(v, v.lower())
if __name__ == '__main__':
unittest.main()
|
|
import warnings
from itertools import chain
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from sklearn.utils import check_random_state
from sklearn.utils import column_or_1d
from sklearn.utils import deprecated
from sklearn.utils import gen_even_slices
from sklearn.utils import resample
from sklearn.utils import safe_indexing
from sklearn.utils import safe_mask
from sklearn.utils import shuffle
from sklearn.utils.arpack import eigsh
from sklearn.utils.extmath import pinvh
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
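    # Illustrative sketch (not part of the original test): for n=10 and
    # n_packs=3, gen_even_slices is expected to yield slices covering
    # 0:4, 4:7 and 7:10, so chaining them reconstructs the full range.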
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
|
|
from typing import Any, Dict, List, Set, Tuple, Union
from collections import defaultdict
import datetime
import logging
import pytz
from django.conf import settings
from django.utils.timezone import now as timezone_now
from confirmation.models import one_click_unsubscribe_link
from zerver.lib.email_notifications import build_message_list
from zerver.lib.send_email import send_future_email, FromAddress
from zerver.lib.url_encoding import encode_stream
from zerver.models import UserProfile, Recipient, Subscription, UserActivity, \
get_active_streams, get_user_profile_by_id, Realm, Message, RealmAuditLog
from zerver.context_processors import common_context
from zerver.lib.queue import queue_json_publish
from zerver.lib.logging_util import log_to_file
logger = logging.getLogger(__name__)
log_to_file(logger, settings.DIGEST_LOG_PATH)
DIGEST_CUTOFF = 5
# Digests accumulate 2 types of interesting traffic for a user:
# 1. New streams
# 2. Interesting stream traffic, as determined by the longest and most
#    diversely commented-upon topics.
def inactive_since(user_profile: UserProfile, cutoff: datetime.datetime) -> bool:
# Hasn't used the app in the last DIGEST_CUTOFF (5) days.
most_recent_visit = [row.last_visit for row in
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def should_process_digest(realm_str: str) -> bool:
if realm_str in settings.SYSTEM_ONLY_REALMS:
# Don't try to send emails to system-only realms
return False
return True
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile: UserProfile, cutoff: datetime.datetime) -> None:
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event)
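# Illustrative sketch (not part of the original code): for a user with id 42
# and a cutoff of 2020-01-01 00:00:00 UTC, the queued event would look roughly
# like
#     {"user_profile_id": 42, "cutoff": "1577836800"}
# since strftime('%s') serializes the cutoff as epoch seconds in string form
# (the exact value depends on the platform's handling of %s).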
def enqueue_emails(cutoff: datetime.datetime) -> None:
if not settings.SEND_DIGEST_EMAILS:
return
weekday = timezone_now().weekday()
for realm in Realm.objects.filter(deactivated=False, digest_emails_enabled=True, digest_weekday=weekday):
if not should_process_digest(realm.string_id):
continue
user_profiles = UserProfile.objects.filter(
realm=realm, is_active=True, is_bot=False, enable_digest_emails=True)
for user_profile in user_profiles:
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
logger.info("%s is inactive, queuing for potential digest" % (
user_profile.email,))
def gather_hot_conversations(user_profile: UserProfile, messages: List[Message]) -> List[Dict[str, Any]]:
# Gather stream conversations of 2 types:
# 1. long conversations
# 2. conversations where many different people participated
#
# Returns a list of dictionaries containing the templating
# information for each hot conversation.
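    # Illustrative shape of one returned entry (an assumption drawn from the
    # teaser_data construction below, not from upstream docs):
    #     {"participants": ["Alice", "Bob"],
    #      "count": 7,   # messages beyond the teaser
    #      "first_few_messages": <output of build_message_list(...)>}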
conversation_length = defaultdict(int) # type: Dict[Tuple[int, str], int]
conversation_messages = defaultdict(list) # type: Dict[Tuple[int, str], List[Message]]
conversation_diversity = defaultdict(set) # type: Dict[Tuple[int, str], Set[str]]
for message in messages:
key = (message.recipient.type_id,
message.topic_name())
conversation_messages[key].append(message)
if not message.sent_by_human():
# Don't include automated messages in the count.
continue
conversation_diversity[key].add(
message.sender.full_name)
conversation_length[key] += 1
diversity_list = list(conversation_diversity.items())
diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)
length_list = list(conversation_length.items())
length_list.sort(key=lambda entry: entry[1], reverse=True)
# Get up to the 4 best conversations from the diversity list
# and length list, filtering out overlapping conversations.
hot_conversations = [elt[0] for elt in diversity_list[:2]]
for candidate, _ in length_list:
if candidate not in hot_conversations:
hot_conversations.append(candidate)
if len(hot_conversations) >= 4:
break
# There was so much overlap between the diversity and length lists that we
# still have < 4 conversations. Try to use remaining diversity items to pad
# out the hot conversations.
num_convos = len(hot_conversations)
if num_convos < 4:
hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]])
hot_conversation_render_payloads = []
for h in hot_conversations:
users = list(conversation_diversity[h])
count = conversation_length[h]
messages = conversation_messages[h]
# We'll display up to 2 messages from the conversation.
first_few_messages = messages[:2]
teaser_data = {"participants": users,
"count": count - len(first_few_messages),
"first_few_messages": build_message_list(
user_profile, first_few_messages)}
hot_conversation_render_payloads.append(teaser_data)
return hot_conversation_render_payloads
def gather_new_streams(user_profile: UserProfile,
threshold: datetime.datetime) -> Tuple[int, Dict[str, List[str]]]:
if user_profile.can_access_public_streams():
new_streams = list(get_active_streams(user_profile.realm).filter(
invite_only=False, date_created__gt=threshold))
else:
new_streams = []
base_url = "%s/#narrow/stream/" % (user_profile.realm.uri,)
streams_html = []
streams_plain = []
for stream in new_streams:
narrow_url = base_url + encode_stream(stream.id, stream.name)
stream_link = "<a href='%s'>%s</a>" % (narrow_url, stream.name)
streams_html.append(stream_link)
streams_plain.append(stream.name)
return len(new_streams), {"html": streams_html, "plain": streams_plain}
def enough_traffic(hot_conversations: List[Dict[str, Any]], new_streams: int) -> bool:
return bool(hot_conversations or new_streams)
def handle_digest_email(user_profile_id: int, cutoff: float,
render_to_web: bool = False) -> Union[None, Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
# Convert from epoch seconds to a datetime object.
cutoff_date = datetime.datetime.fromtimestamp(int(cutoff), tz=pytz.utc)
context = common_context(user_profile)
# Start building email template data.
context.update({
'unsubscribe_link': one_click_unsubscribe_link(user_profile, "digest")
})
home_view_streams = Subscription.objects.filter(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
active=True,
is_muted=False).values_list('recipient__type_id', flat=True)
if not user_profile.long_term_idle:
stream_ids = home_view_streams
else:
stream_ids = exclude_subscription_modified_streams(user_profile, home_view_streams, cutoff_date)
# Fetch list of all messages sent after cutoff_date where the user is subscribed
messages = Message.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id__in=stream_ids,
pub_date__gt=cutoff_date).select_related('recipient', 'sender', 'sending_client')
# Gather hot conversations.
context["hot_conversations"] = gather_hot_conversations(
user_profile, messages)
# Gather new streams.
new_streams_count, new_streams = gather_new_streams(
user_profile, cutoff_date)
context["new_streams"] = new_streams
context["new_streams_count"] = new_streams_count
if render_to_web:
return context
# We don't want to send emails containing almost no information.
if enough_traffic(context["hot_conversations"], new_streams_count):
logger.info("Sending digest email for %s" % (user_profile.email,))
# Send now, as a ScheduledEmail
send_future_email('zerver/emails/digest', user_profile.realm, to_user_ids=[user_profile.id],
from_name="Zulip Digest", from_address=FromAddress.NOREPLY, context=context)
return None
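# Hedged usage sketch (not part of the original module): the digest worker is
# expected to call something like
#     handle_digest_email(user_profile_id=42, cutoff=time.time() - 5 * 86400)
# where `cutoff` is epoch seconds; passing render_to_web=True returns the
# template context instead of scheduling an email.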
def exclude_subscription_modified_streams(user_profile: UserProfile,
stream_ids: List[int],
cutoff_date: datetime.datetime) -> List[int]:
"""Exclude streams from given list where users' subscription was modified."""
events = [
RealmAuditLog.SUBSCRIPTION_CREATED,
RealmAuditLog.SUBSCRIPTION_ACTIVATED,
RealmAuditLog.SUBSCRIPTION_DEACTIVATED
]
# Streams where the user's subscription was changed
modified_streams = RealmAuditLog.objects.filter(
realm=user_profile.realm,
modified_user=user_profile,
event_time__gt=cutoff_date,
event_type__in=events).values_list('modified_stream_id', flat=True)
return list(set(stream_ids) - set(modified_streams))
|
|
# Application Scope Bucket
from datetime import datetime
import json
from kii import exceptions as exc, results as rs
from kii.data.clauses import (
Clause,
AllClause,
AndClause,
)
from kii.helpers import BucketsHelper
# Manage Buckets
class ManageBuckets(BucketsHelper):
def __init__(self, scope, bucket_id):
super().__init__(scope)
self.bucket_id = bucket_id
@property
def bucket_id(self):
return self._bucket_id
@bucket_id.setter
def bucket_id(self, bucket_id):
self._bucket_id = bucket_id
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}'.format(
appID=self.api.app_id,
bucketID=self.bucket_id)
@property
def headers(self):
headers = super().headers
if self.access_token:
headers['Authorization'] = self.authorization
return headers
class RetrieveABucket(ManageBuckets):
method = 'GET'
result_container = rs.BucketResult
class DeleteABucket(ManageBuckets):
method = 'DELETE'
result_container = rs.BaseResult
# Manage Objects
class CreateAnObject(BucketsHelper):
method = 'POST'
result_container = rs.CreateResult
def __init__(self, scope, data):
super().__init__(scope)
self.data = data
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects'.format(
appID=self.api.app_id,
bucketID=self.bucket_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = 'application/json'
if self.access_token:
headers['Authorization'] = self.authorization
return headers
def request(self):
return super().request(json=self.data)
class RetrieveAnObject(BucketsHelper):
method = 'GET'
result_container = rs.ObjectResult
def __init__(self, scope, object_id):
super().__init__(scope)
self.object_id = object_id
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = 'application/json'
return headers
class FullyUpdateAnObject(BucketsHelper):
method = 'PUT'
result_container = rs.UpdateResult
def __init__(self, scope, object_id, data, *, if_match=None, if_none_match=None):
super().__init__(scope)
self.object_id = object_id
self.data = data
self.if_match = if_match
self.if_none_match = if_none_match
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = 'application/json'
if self.access_token:
headers['Authorization'] = self.authorization
if self.if_match:
headers['If-Match'] = self.if_match
if self.if_none_match:
headers['If-None-Match'] = self.if_none_match
return headers
def request(self):
return super().request(json=self.data)
class CreateANewObjectWithAnID(FullyUpdateAnObject):
"""
synonym of FullyUpdateAnObject
"""
class PartiallyUpdateAnObject(FullyUpdateAnObject):
method = 'POST'
@property
def headers(self):
headers = super().headers
headers['X-HTTP-Method-Override'] = 'PATCH'
headers['Content-Type'] = 'application/json'
if self.access_token:
headers['Authorization'] = self.authorization
if self.if_match:
headers['If-Match'] = self.if_match
if self.if_none_match:
headers['If-None-Match'] = self.if_none_match
return headers
class DeleteAnObject(BucketsHelper):
method = 'DELETE'
result_container = rs.DeleteResult
def __init__(self, scope, object_id, *, if_match=None, if_none_match=None):
super().__init__(scope)
self.object_id = object_id
self.if_match = if_match
self.if_none_match = if_none_match
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = 'application/json'
if self.access_token:
headers['Authorization'] = self.authorization
if self.if_match:
headers['If-Match'] = self.if_match
if self.if_none_match:
headers['If-None-Match'] = self.if_none_match
return headers
class QueryForObjects(BucketsHelper):
method = 'POST'
result_container = rs.QueryResult
def __init__(self, scope,
clause=None,
*,
order_by=None,
descending=None,
pagination_key=None,
best_effort_limit=None,
limit=None):
super().__init__(scope)
self.internal = False
if clause is None:
clause = AllClause()
self.clause = clause
self._order_by = order_by
self._descending = descending
self._pagination_key = pagination_key
self._best_effort_limit = best_effort_limit
self._limit = limit
self._offset = 0
self._aggregations = []
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/query'.format(
appID=self.api.app_id,
bucketID=self.bucket_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = 'application/vnd.kii.QueryRequest+json'
return headers
@property
def clause(self):
return self._clause
@clause.setter
def clause(self, clause):
if not isinstance(clause, Clause):
raise exc.KiiInvalidClauseError
self._clause = clause
def clone(self):
instance = self.__class__(self.scope, self.clause)
instance._order_by = self._order_by
instance._descending = self._descending
instance._pagination_key = self._pagination_key
instance._best_effort_limit = self._best_effort_limit
instance._limit = self._limit
instance._offset = self._offset
return instance
def filter(self, *clauses):
instance = self.clone()
instance.clause = AndClause(instance.clause, *clauses)
return instance
def request(self):
return super().request(json=self._assemble())
def bucket_query(self):
query = {}
query['clause'] = self.clause.query()
if self._order_by is not None:
query['orderBy'] = self._order_by
if self._descending is not None:
query['descending'] = self._descending
if self._aggregations:
query['aggregations'] = self._aggregations
return query
def _assemble(self):
params = {}
query = self.bucket_query()
if query:
params['bucketQuery'] = query
if self._pagination_key:
params['paginationKey'] = self._pagination_key
if self._limit and self._best_effort_limit is None:
self._best_effort_limit = self._limit
if self._best_effort_limit:
params['bestEffortLimit'] = self._best_effort_limit + self._offset
return params
def all(self):
return self.request()
def count(self):
self.result_container = rs.QueryCountResult
self._aggregations = [
{
"type": "COUNT",
"putAggregationInto": "count_field"
}
]
result = self.request()
return result.count
def first(self):
results = self.request()
try:
return results[0]
except IndexError:
return None
def one(self):
results = self.request()
if len(results) > 1:
raise exc.KiiMultipleResultsFoundError
try:
return results[0]
except IndexError as e:
raise exc.KiiObjectNotFoundError from e
def offset(self, offset):
self._offset = offset
return self
def step(self, step):
self._step = step
return self
def best_effort_limit(self, best_effort_limit):
self._best_effort_limit = best_effort_limit
return self
def limit(self, limit):
self._limit = limit
return self
def order_by(self, key, descending=True):
self._order_by = key
self._descending = descending
return self
def pagination_key(self, pagination_key):
self._pagination_key = pagination_key
return self
def __str__(self):
headers = json.dumps(self.headers, ensure_ascii=False, indent=4, sort_keys=True)
query = json.dumps(self._assemble(), ensure_ascii=False, indent=4, sort_keys=True)
return '''\
[{method}] {url}
Headers:
{headers}
Query Request:
{query}'''.format(method=self.method,
url=self.url,
headers=headers,
query=query)
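# Hedged usage sketch (the `scope` object and the field name 'someField' are
# placeholders, not verified against this SDK's public docs): QueryForObjects
# builds a Kii query request by chaining, e.g.
#     QueryForObjects(scope).order_by('someField', descending=True).limit(10).all()
# which POSTs the payload assembled by _assemble() to
# /apps/{appID}/buckets/{bucketID}/query.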
class RetrieveAnObjectBody(BucketsHelper):
method = 'GET'
result_container = rs.BodyResult
def __init__(self, scope, object_id, *,
if_match=None,
range=None):
super().__init__(scope)
self.object_id = object_id
self.if_match = if_match
        # range is a tuple or list, e.g. [begin, end]
if range is not None and not isinstance(range, (list, tuple)):
raise exc.KiiInvalidTypeError
self.range = range
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
headers['Accept'] = '*/*'
if self.access_token:
headers['Authorization'] = self.authorization
if self.if_match:
headers['If-Match'] = self.if_match
if self.range:
headers['Range'] = 'bytes={0}-{1}'.format(*self.range)
return headers
class AddOrReplaceAnObjectBody(BucketsHelper):
method = 'PUT'
result_container = rs.BaseResult
def __init__(self, scope, object_id, body, content_type, *,
if_match=None, if_none_match=None):
super().__init__(scope)
self.object_id = object_id
self.body = body
self.content_type = content_type
self.if_match = if_match
self.if_none_match = if_none_match
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = self.content_type
if self.access_token:
headers['Authorization'] = self.authorization
if self.if_match:
headers['If-Match'] = self.if_match
if self.if_none_match:
headers['If-None-Match'] = self.if_none_match
return headers
def request(self):
return super().request(data=self.body)
class VerifyTheObjectBodyExistence(BucketsHelper):
method = 'HEAD'
result_container = rs.ObjectResult
def __init__(self, scope, object_id):
super().__init__(scope)
self.object_id = object_id
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
if self.access_token:
headers['Authorization'] = self.authorization
return headers
class DeleteAnObjectBody(BucketsHelper):
method = 'DELETE'
result_container = rs.ObjectResult
def __init__(self, scope, object_id):
super().__init__(scope)
self.object_id = object_id
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
if self.access_token:
headers['Authorization'] = self.authorization
return headers
class PublishAnObjectBody(BucketsHelper):
method = 'POST'
result_container = rs.PublishBodyResult
def __init__(self, scope, object_id, *,
expires_at=None, expires_in=None):
"""
        expires_at: the datetime when the publication URL should expire
                    (sent to the API as Unix epoch milliseconds)
        expires_in: the period in seconds the publication URL remains
                    available, after which it expires
"""
super().__init__(scope)
self.object_id = object_id
self.expires_at = expires_at
self.expires_in = expires_in
@property
def api_path(self):
return '/apps/{appID}/buckets/{bucketID}/objects/{objectID}/body/publish'.format(
appID=self.api.app_id,
bucketID=self.bucket_id,
objectID=self.object_id)
@property
def headers(self):
headers = super().headers
headers['Content-Type'] = 'application/vnd.kii.ObjectBodyPublicationRequest+json'
if self.access_token:
headers['Authorization'] = self.authorization
return headers
def request(self):
data = {}
if self.expires_at is not None:
if not isinstance(self.expires_at, datetime):
raise exc.KiiInvalidExpirationError
expire = int(self.expires_at.timestamp() * 1000)
data['expiresAt'] = expire
if self.expires_in is not None:
data['expiresIn'] = self.expires_in
return super().request(json=data)
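# Illustrative note (based on the request() body above): passing
# expires_in=3600 publishes a URL valid for one hour, while expires_at must be
# a datetime and is serialized as Unix epoch milliseconds via
# int(expires_at.timestamp() * 1000).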
from .startuploadinganobjectbody import StartUploadingAnObjectBody # NOQA
from .gettheuploadmetadata import GetTheUploadMetadata # NOQA
from .uploadthegivenobjectdata import UploadTheGivenObjectData # NOQA
from .settheobjectbodyuploadstatustocommitted import SetTheObjectBodyUploadStatusToCommitted # NOQA
from .settheobjectbodyuploadstatustocancelled import SetTheObjectBodyUploadStatusToCancelled # NOQA
|
|
from KMeansBase import KMeansBase
from KMeansPP import KMeansPP
from ScalableKMeansPP import ScalableKMeansPP
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas
def samplesize_initcost():
k = 10
sampleSizes = np.linspace(100000, 1000000, 10)
# sampleSizes = [1000, 2000, 3000, 4000, 5000, 6000]
kmeans_perf = []
kmeanspp_perf = []
kmeansscalble_perf = []
for n in sampleSizes:
data = np.random.randn(n, 2)
print('Sample Size:', n)
print('KMeans')
kmeans = KMeansBase(data, k)
kmeans_perf.append(kmeans.initCost() * 100)
print('KMeans++')
kmeans_pp = KMeansPP(data, k)
kmeanspp_perf.append(kmeans_pp.initCost() * 100)
print('Scalable KMeans++')
kmeans_scalable = ScalableKMeansPP(data, k, 4, 3)
kmeansscalble_perf.append(kmeans_scalable.initCost() * 100)
# plot
plt.figure(figsize=(10, 10))
plt.plot(sampleSizes, kmeans_perf, '-o', lw=3, markersize=10)
plt.plot(sampleSizes, kmeanspp_perf, '-o', lw=3, markersize=10)
plt.plot(sampleSizes, kmeansscalble_perf, '-o', lw=3, markersize=10)
plt.legend(('KMeans', 'KMeans++', 'Scalable KMeans++'), prop={'size': 18}, loc=0)
plt.xlabel('Sample Size', fontsize=18)
plt.ylabel('Initialization Cost (ms)', fontsize=18)
ax = plt.gca()
ax.xaxis.set_tick_params(labelsize=16)
ax.xaxis.get_major_formatter().set_powerlimits((0, 0))
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('samples-initcost.png')
plt.close()
def clusters_initcost():
n = 500000
clusters = np.linspace(10, 50, 5)
# sampleSizes = [1000, 2000, 3000, 4000, 5000, 6000]
kmeans_perf = []
kmeanspp_perf = []
kmeansscalble_perf = []
for k1 in clusters:
k = int(k1)
data = np.random.randn(n, 2)
print('k:', k)
print('KMeans')
kmeans = KMeansBase(data, k)
kmeans_perf.append(kmeans.initCost() * 100)
print('KMeans++')
kmeans_pp = KMeansPP(data, k)
kmeanspp_perf.append(kmeans_pp.initCost() * 100)
print('Scalable KMeans++')
kmeans_scalable = ScalableKMeansPP(data, k, 11, 5)
kmeansscalble_perf.append(kmeans_scalable.initCost() * 100)
# plot
plt.figure(figsize=(10, 10))
plt.plot(clusters, kmeans_perf, '-o', lw=3, markersize=10)
plt.plot(clusters, kmeanspp_perf, '-o', lw=3, markersize=10)
plt.plot(clusters, kmeansscalble_perf, '-o', lw=3, markersize=10)
plt.legend(('KMeans', 'KMeans++', 'Scalable KMeans++'), prop={'size': 18}, loc=0)
plt.xlabel('Number of Clusters (K)', fontsize=18)
plt.ylabel('Initialization Cost (ms)', fontsize=18)
ax = plt.gca()
ax.xaxis.set_tick_params(labelsize=16)
#ax.xaxis.get_major_formatter().set_powerlimits((0, 0))
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('k-initcost.png')
plt.close()
def no_of_iterations(n):
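    # Descriptive note: despite its name, this helper builds a labeled
    # synthetic dataset of four 3-D Gaussian clusters (n points each);
    # column 3 holds the cluster label (1-4) and the rows are shuffled
    # before returning.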
mean = [0, 1, 2]
cov = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
data0 = np.random.multivariate_normal(mean, cov, n)
data0 = np.hstack((data0, np.ones((data0.shape[0],1))))
mean1 = [6, 8, 9]
cov1 = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
data1 = np.random.multivariate_normal(mean1, cov1, n)
data1 = np.hstack((data1, np.ones((data1.shape[0],1)) * 2))
mean2 = [15, 18, 19]
cov2 = [[1, 0.5,0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
data2 = np.random.multivariate_normal(mean2, cov2, n)
data2 = np.hstack((data2, np.ones((data2.shape[0],1)) * 3))
mean3 = [25, 26, 27]
cov3 = [[1, 0.5,0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
data3 = np.random.multivariate_normal(mean3, cov3, n)
data3 = np.hstack((data3, np.ones((data3.shape[0],1)) * 4))
data = np.vstack((data0, data1, data2, data3))
np.random.shuffle(data)
print (data.shape)
return data
def itr_vs_sampels():
k = 4
r = 3
n_from_each_cluster = np.linspace(10000, 100000, 10)
kmeans_mean = []
kmeanspp_mean = []
kmeansscalable_mean1 = []
kmeansscalable_mean2 = []
kmeansscalable_mean3 = []
for e in n_from_each_cluster:
n = int(e)
kmeans_itr = []
kmeanspp_itr = []
kmeansscalable_itr1 = []
kmeansscalable_itr2 = []
kmeansscalable_itr3 = []
for i in range(20):
print ("n: ",n, ' i:', i)
data = no_of_iterations(n)
kmeans = KMeansBase(data[:,:4], k)
iterations, j_values, centroids, min_location = kmeans.cluster()
kmeans_itr.append(iterations)
kmeans = KMeansPP(data, k)
iterations, j_values, centroids, min_location = kmeans.cluster()
kmeanspp_itr.append(iterations)
kmeans = ScalableKMeansPP(data, k, 2, r)
iterations, j_values, centroids, min_location = kmeans.cluster()
kmeansscalable_itr1.append(iterations)
kmeans = ScalableKMeansPP(data, k, 4, r)
iterations, j_values, centroids, min_location = kmeans.cluster()
kmeansscalable_itr2.append(iterations)
kmeans = ScalableKMeansPP(data, k, 8, r)
iterations, j_values, centroids, min_location = kmeans.cluster()
kmeansscalable_itr3.append(iterations)
kmeans_mean.append(np.mean(kmeans_itr))
kmeanspp_mean.append(np.mean(kmeanspp_itr))
kmeansscalable_mean1.append(np.mean(kmeansscalable_itr1))
kmeansscalable_mean2.append(np.mean(kmeansscalable_itr2))
kmeansscalable_mean3.append(np.mean(kmeansscalable_itr3))
# plot
plt.figure(figsize=(10, 10))
plt.plot(n_from_each_cluster * 4, kmeans_mean, '-o', lw=3, markersize=10)
plt.plot(n_from_each_cluster * 4, kmeanspp_mean, '-o', lw=3, markersize=10)
plt.plot(n_from_each_cluster * 4, kmeansscalable_mean1, '-o', lw=3, markersize=10)
plt.plot(n_from_each_cluster * 4, kmeansscalable_mean2, '-o', lw=3, markersize=10)
plt.plot(n_from_each_cluster * 4, kmeansscalable_mean3, '-o', lw=3, markersize=10)
plt.legend(('KMeans', 'KMeans++', 'Scalable KMeans++ (l = 0.5k)', 'Scalable KMeans++ (l = 1k)', 'Scalable KMeans++ (l = 2k)'), prop={'size': 18}, loc=0)
plt.xlabel('Sample Size', fontsize=18)
plt.ylabel('Number of iterations', fontsize=18)
ax = plt.gca()
ax.xaxis.set_tick_params(labelsize=16)
#ax.xaxis.get_major_formatter().set_powerlimits((0, 0))
ax.yaxis.set_tick_params(labelsize=16)
plt.savefig('itr-samples.png')
plt.close()
def accuracy_1():
d = pandas.read_csv('../data/kddcup.data_10_percent_corrected')
d_clean = d[d.isnull().any(axis=1)==False]
data_full = d_clean.iloc[:,:].values
for col in [1,2,3,41]:
unique_labels = np.unique(data_full[:,col])
for label in range(len(unique_labels)):
data_full[np.where(data_full[:,col]==unique_labels[label])[0], col] = label
k = 23
r_count = 20000
data = data_full[:r_count,:41]
kmeans_ppv = []
kmeanspp_ppv = []
kmeansppscalable_ppv = []
kmeansppscalable1_ppv = []
kmeansppscalable2_ppv = []
for i in range (1):
print ('iteration: ', i)
kmeans = KMeansBase(data, k)
kmeans_ppv.append(gather_ppv(kmeans, data_full[:r_count,41]))
kmeans = KMeansPP(data, k)
kmeanspp_ppv.append(gather_ppv(kmeans, data_full[:r_count,41]))
kmeans = ScalableKMeansPP(data, k, 12, 3)
kmeansppscalable_ppv.append(gather_ppv(kmeans, data_full[:r_count,41]))
kmeans = ScalableKMeansPP(data, k, 23, 3)
kmeansppscalable1_ppv.append(gather_ppv(kmeans, data_full[:r_count,41]))
kmeans = ScalableKMeansPP(data, k, 46, 3)
kmeansppscalable2_ppv.append(gather_ppv(kmeans, data_full[:r_count,41]))
ppv = np.array((np.mean(kmeans_ppv), np.mean(kmeanspp_ppv),
np.mean(kmeansppscalable_ppv), np.mean(kmeansppscalable1_ppv), np.mean(kmeansppscalable2_ppv)))
std = np.array((np.std(kmeans_ppv), np.std(kmeanspp_ppv),
np.std(kmeansppscalable_ppv), np.std(kmeansppscalable1_ppv), np.std(kmeansppscalable2_ppv)))
ind = np.arange(len(ppv))
width = 0.35
plt.bar(ind, ppv, width=width, yerr=std, color='y')
plt.ylabel('Mean PPV')
plt.xlabel('Algorithm')
plt.xticks(ind+width/2., ('KMeans', 'KMeans++', 'Sc. KMeans++\n(l=0.5)', 'Sc. KMeans++\n(l=1)', 'Sc. KMeans++\n(l=2)'))
plt.savefig('ppv-exp.png')
plt.close()
def accuracy_spam():
d = pandas.read_csv('../data/spambase.data')
d_clean = d[d.isnull().any(axis=1)==False]
data_full = d_clean.iloc[:,:].values
k = 2
data = data_full[:,:57]
kmeans_ppv = []
kmeanspp_ppv = []
kmeansppscalable_ppv = []
kmeansppscalable1_ppv = []
kmeansppscalable2_ppv = []
for i in range (100):
print ('iteration: ', i)
kmeans = KMeansBase(data, k)
kmeans_ppv.append(gather_ppv(kmeans, data_full[:,57]))
kmeans = KMeansPP(data, k)
kmeanspp_ppv.append(gather_ppv(kmeans, data_full[:,57]))
kmeans = ScalableKMeansPP(data, k, 1, 3)
kmeansppscalable_ppv.append(gather_ppv(kmeans, data_full[:,57]))
kmeans = ScalableKMeansPP(data, k, 2, 3)
kmeansppscalable1_ppv.append(gather_ppv(kmeans, data_full[:,57]))
kmeans = ScalableKMeansPP(data, k, 4, 3)
kmeansppscalable2_ppv.append(gather_ppv(kmeans, data_full[:,57]))
ppv = np.array((np.mean(kmeans_ppv), np.mean(kmeanspp_ppv),
np.mean(kmeansppscalable_ppv), np.mean(kmeansppscalable1_ppv), np.mean(kmeansppscalable2_ppv)))
std = np.array((np.std(kmeans_ppv), np.std(kmeanspp_ppv),
np.std(kmeansppscalable_ppv), np.std(kmeansppscalable1_ppv), np.std(kmeansppscalable2_ppv)))
ind = np.arange(len(ppv))
width = 0.35
plt.bar(ind, ppv, width=width, yerr=std, color='y')
plt.ylabel('Mean PPV')
plt.xticks(ind+width/2., ('KMeans', 'KMeans++', 'Sc. KMeans++\n(l=0.5)', 'Sc. KMeans++\n(l=1)', 'Sc. KMeans++\n(l=2)'))
plt.savefig('ppv-exp.png')
plt.close()
def calc_ppv(cluster_assignment, initial_cluster_assignment):
cluster_index = []
for i in np.unique(initial_cluster_assignment):
cluster_index.append(np.where(initial_cluster_assignment == i))
assigned_cluster_index = []
for i in np.unique(cluster_assignment):
assigned_cluster_index.append(np.where(cluster_assignment == i))
correspondance = []
for index in cluster_index:
overlap = []
for assigned_i in assigned_cluster_index:
overlap.append(np.intersect1d(index, assigned_i).shape[0])
correspondance.append(np.argmax(overlap))
# now calculate the PPV
# get the true positives
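    # Descriptive note: PPV (positive predictive value, i.e. precision) is
    # TP / (TP + FP), computed after matching each true cluster to the
    # assigned cluster it overlaps with most.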
ttp = 0
tfp = 0
for i in range(len(cluster_index)):
tp = np.intersect1d(cluster_index[i], assigned_cluster_index[correspondance[i]]).shape[0]
fp = len(cluster_index[i][0]) - tp
print ('**********************', tp, fp)
ttp += tp
tfp += fp
return ttp/float(ttp + tfp)
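# A small sanity sketch for the PPV computation above (illustrative only,
# never called from this script): with a perfect clustering -- the same
# partition, even with permuted cluster ids -- the PPV is exactly 1.0.
def _ppv_sanity_sketch():
    truth = np.array([0, 0, 1, 1])
    assigned = np.array([1, 1, 0, 0])   # same partition, relabelled clusters
    assert calc_ppv(assigned, truth) == 1.0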
def gather_ppv(kmeans, initial_cluster_assignment):
iterations, j_values, centroids, min_location = kmeans.cluster()
cluster_assignment = np.argmax(min_location, axis=1)
return calc_ppv(cluster_assignment, initial_cluster_assignment)
def accuracy_synthetic():
k = 4
kmeans_ppv = []
kmeanspp_ppv = []
kmeansppscalable_ppv = []
kmeansppscalable1_ppv = []
kmeansppscalable2_ppv = []
for i in range (20):
data = no_of_iterations(100000)
kmeans = KMeansBase(data[:,:3], k)
kmeans_ppv.append(gather_ppv(kmeans, data[:,3]))
kmeans = KMeansPP(data[:,:3], k)
kmeanspp_ppv.append(gather_ppv(kmeans, data[:,3]))
kmeans = ScalableKMeansPP(data[:,:3], k, 2, 3)
kmeansppscalable_ppv.append(gather_ppv(kmeans, data[:,3]))
kmeans = ScalableKMeansPP(data[:,:3], k, 4, 3)
kmeansppscalable1_ppv.append(gather_ppv(kmeans, data[:,3]))
kmeans = ScalableKMeansPP(data[:,:3], k, 8, 3)
kmeansppscalable2_ppv.append(gather_ppv(kmeans, data[:,3]))
ppv = np.array((np.mean(kmeans_ppv), np.mean(kmeanspp_ppv),
np.mean(kmeansppscalable_ppv), np.mean(kmeansppscalable1_ppv), np.mean(kmeansppscalable2_ppv)))
std = np.array((np.std(kmeans_ppv), np.std(kmeanspp_ppv),
np.std(kmeansppscalable_ppv), np.std(kmeansppscalable1_ppv), np.std(kmeansppscalable2_ppv)))
ind = np.arange(len(ppv))
width = 0.35
plt.bar(ind, ppv, width=width, yerr=std, color='y')
plt.ylabel('Mean PPV')
plt.xticks(ind+width/2., ('KMeans', 'KMeans++', 'Sc. KMeans++\n(l=0.5)', 'Sc. KMeans++\n(l=1)', 'Sc. KMeans++\n(l=2)'))
plt.savefig('ppv.png')
plt.close()
if __name__ == '__main__':
#print(np.where(data[:,3] == (np.argmax(min_location, axis=1) + 1))[0].shape[0])
#samplesize_initcost()
#clusters_initcost()
#itr_vs_sampels()
#accuracy_synthetic()
accuracy_spam()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image FeatureConnectors."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
class Image(tfds.features.Image):
"""Image `FeatureConnector`.
Unlike tfds.features.Image this class has the following advantages:
  1. Supports tf.uint16 images.
  2. Stores the image size, channels and format in addition.
  3. Supports rank-2 image tensors of shape (H, W).
Example:
* In the DatasetInfo object:
features=features.FeaturesDict({
'image': features.Image(shape=(None, None, 3), dtype=tf.uint8),
})
* Internally stored as:
{
'image/encoded': 'encoded image string',
'image/width': image width,
'image/height': image height,
'image/channels': image channels,
'image/format': 'format string'
}
* During generation:
yield {
'image': np.ones(shape=(480, 640, 3), dtype=np.uint8),
}
* Decoding will return as dictionary of tensorflow tensors:
{
'image': tf.Tensor(shape=(480, 640, 3), dtype=tf.uint8)
}
"""
def __init__(self, shape=None, encoding_format='png', dtype=tf.uint8):
self._shape = tuple(shape) if shape is not None else (None, None, 3)
self._channels = self._shape[-1] if len(self._shape) > 2 else 0
self._dtype = dtype
encode_fn_map = {
'png': tf.image.encode_png,
'jpeg': tf.image.encode_jpeg,
}
supported = encode_fn_map.keys()
if encoding_format not in supported:
raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format
self._encoding_fn = encode_fn_map[encoding_format]
def get_serialized_info(self):
return {
'encoded':
tfds.features.TensorInfo(shape=(), dtype=tf.string),
'height':
tfds.features.TensorInfo(
shape=(), dtype=tf.int64, default_value=-1),
'width':
tfds.features.TensorInfo(
shape=(), dtype=tf.int64, default_value=-1),
'channels':
tfds.features.TensorInfo(
shape=(), dtype=tf.int64, default_value=-1),
'format':
tfds.features.TensorInfo(
shape=(), dtype=tf.string, default_value='png'),
}
def encode_example(self, image_np):
encoded_image = self._encode_image(image_np)
return {
'encoded': encoded_image,
'height': image_np.shape[0],
'width': image_np.shape[1],
'channels': image_np.shape[2] if image_np.ndim == 3 else 0,
'format': self._encoding_format
}
def decode_example(self, example):
image = tf.image.decode_image(
example['encoded'], channels=None, dtype=self._dtype)
if self._channels == 0:
image = tf.squeeze(image, axis=-1)
image.set_shape(self._shape)
return image
def _encode_image(self, image_np):
"""Returns image_np encoded as jpeg or png."""
tfds.core.utils.assert_shape_match(image_np.shape, self._shape)
image_tf = tf.convert_to_tensor(image_np)
if image_np.ndim == 2:
image_tf = tf.expand_dims(image_tf, axis=2)
return self._encoding_fn(image_tf).numpy()
class Depth(Image):
"""Depth Image `FeatureConnector` for storing depth maps.
Given a floating point depth image, the encoder internally stores the depth
map as a uint16/uint8 PNG image (after scaling with a provided shift value).
  During decoding the shift value is divided out again to return a floating
  point image. This process is hidden from the user, but the depth map will
  lose some accuracy because of the quantization.
Example:
* In the DatasetInfo object:
features=features.FeaturesDict({
'depth': features.Depth(shift=1000.0, dtype=tf.float32),
})
* Internally stored as:
{
'depth/encoded': 'encoded depth string',
'depth/width': image width,
'depth/height': image height,
'depth/channels': image channels,
'depth/format': 'format string'
'depth/shift': depth shift value.
}
* During generation:
yield {
'depth': np.random.uniform(high=5.0, size=(480, 640)).astype('f'),
}
* Decoding will return as dictionary of tensorflow tensors:
{
'depth': tf.Tensor(shape=(480, 640), dtype=tf.float32)
}
"""
def __init__(self,
height=None,
width=None,
shift=1000.0,
dtype=tf.float32,
encoding_dtype=tf.uint16):
if not dtype.is_floating:
raise ValueError('Requires floating point type but got %s.' % dtype)
super(Depth, self).__init__(
shape=(height, width), encoding_format='png', dtype=encoding_dtype)
self._shift = shift
self._target_dtype = dtype
self._encoding_dtype = encoding_dtype.as_numpy_dtype
def get_serialized_info(self):
serialized_info = super(Depth, self).get_serialized_info()
serialized_info.update({
'shift':
tfds.features.TensorInfo(
shape=(), dtype=tf.float32, default_value=1000.0)
})
return serialized_info
def encode_example(self, depth_np):
shifted_depth = (depth_np * self._shift).astype(self._encoding_dtype)
encoded = super(Depth, self).encode_example(shifted_depth)
encoded.update({'shift': self._shift})
return encoded
def decode_example(self, example):
shifted_depth = super(Depth, self).decode_example(example)
scale = tf.cast(1.0 / example['shift'], self._target_dtype)
return tf.cast(shifted_depth, self._target_dtype) * scale
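# A small eager-mode sanity sketch (illustrative only, never called by the
# library code in this file): encode a random float32 depth map with Depth and
# check that decoding recovers it up to the 1/shift quantization step. Assumes
# TF2 eager execution.
def _depth_roundtrip_sketch():
  depth_feature = Depth(shift=1000.0, dtype=tf.float32)
  depth_np = np.random.uniform(high=5.0, size=(4, 6)).astype(np.float32)
  example = depth_feature.encode_example(depth_np)  # uint16 PNG + shift value
  decoded = depth_feature.decode_example(example)   # float32 tensor, (4, 6)
  np.testing.assert_allclose(decoded.numpy(), depth_np, atol=2.0 / 1000.0)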
class Normal(Image):
"""Normal Image `FeatureConnector` for storing normal maps.
Given a floating point normal image, the encoder internally stores the normal
image as a 3 channel uint16/uint8 PNG image. The dtype of the encoded image is
determined by the encoding_dtype argument.
Example:
* In the DatasetInfo object:
features=features.FeaturesDict({
'normal': features.Normal(dtype=tf.float32),
})
* Internally stored as:
{
'normal/encoded': 'encoded normal string',
'normal/width': image width,
'normal/height': image height,
'normal/channels': image channels,
'normal/format': 'format string'
}
* During generation:
yield {
'normal': np.random.uniform(high=1.0, size=(480,640, 3)).astype('f'),
}
* Decoding will return as dictionary of tensorflow tensors:
{
'normal': tf.Tensor(shape=(480, 640, 3), dtype=tf.float32)
}
"""
def __init__(self,
height=None,
width=None,
dtype=tf.float32,
encoding_dtype=tf.uint16):
if not dtype.is_floating:
raise ValueError('Requires floating point type but got %s.' % dtype)
super(Normal, self).__init__(
shape=(height, width, 3), encoding_format='png', dtype=encoding_dtype)
self._target_dtype = dtype
self._encoding_dtype = encoding_dtype.as_numpy_dtype
self._scale = np.iinfo(self._encoding_dtype).max / 2.0
def encode_example(self, normal_np):
normal_discrete = ((normal_np + 1.0) * self._scale).astype(
self._encoding_dtype)
return super(Normal, self).encode_example(normal_discrete)
def decode_example(self, example):
normal_discrete = super(Normal, self).decode_example(example)
normal = (tf.cast(normal_discrete, self._target_dtype) / self._scale) - 1.0
return normal
class Unary(Image):
"""Unary `FeatureConnector` for storing multiclass image unary maps.
This FeatureConnector is used to store multi-class probability maps (e.g.
image unary for semantic segmentation). The data is stored internally as a set
of PNG16 images.
Given a dense, per-pixel, multi-class unary (probability) map as a tensor of
  shape (H, W, C), the encoder internally stores the unary as per-channel uint16
  PNG images (after scaling the values from [0, 1] to [0, 65535]).
Example:
* In the DatasetInfo object:
features=features.FeaturesDict({
'unary': Unary(dtype=tf.float32),
})
* Internally stored as:
{
'unary/encoded': ['class0 PNG string', 'class1 PNG string', ...]
'unary/width': unary width,
'unary/height': unary height,
'unary/channels': unary channels,
'unary/format': 'format string'
}
* During generation:
yield {
'unary': softmax(np.random.rand(480, 640, 10).astype('f'), axis=2),
}
* Decoding will return as dictionary of tensorflow tensors:
{
      'unary': tf.Tensor(shape=(480, 640, 10), dtype=tf.float32)
}
"""
def __init__(self, shape=(None, None, None), dtype=tf.float32):
if not dtype.is_floating:
raise ValueError('Requires floating point type but got %s.' % dtype)
super(Unary, self).__init__(
shape=shape, encoding_format='png', dtype=tf.uint16)
self._target_dtype = dtype
def get_serialized_info(self):
serialized_info = super(Unary, self).get_serialized_info()
serialized_info['encoded'] = tfds.features.TensorInfo(
shape=(None,), dtype=tf.string)
return serialized_info
def encode_example(self, unary_prob):
scale = np.iinfo(np.uint16).max
unary_scaled = (unary_prob * scale).astype(np.uint16)
channels = unary_prob.shape[2]
encoded = [self._encode_image(x) for x in np.dsplit(unary_scaled, channels)]
return {
'encoded': encoded,
'height': unary_prob.shape[0],
'width': unary_prob.shape[1],
'channels': unary_prob.shape[2],
'format': self._encoding_format
}
def decode_example(self, example):
    encoded = example['encoded']
    unary_slices = [
        tf.squeeze(tf.image.decode_image(x, dtype=self._dtype), axis=2)
        for x in encoded
    ]
    # Stack the per-channel slices back into a single (H, W, C) tensor.
    unary = tf.stack(unary_slices, axis=2)
scale = tf.cast(1.0 / np.iinfo(np.uint16).max, self._target_dtype)
unary = tf.cast(unary, self._target_dtype) * scale
unary.set_shape(self._shape)
return unary
|
|
#!/usr/bin/env python
r"""
Parallel assembling and solving of a Poisson's equation, using commands for
interactive use.
Find :math:`u` such that:
.. math::
\int_{\Omega} \nabla v \cdot \nabla u
= \int_{\Omega} v f
    \;, \quad \forall v \;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have ``_i`` suffix.
- This example does not use a nonlinear solver.
- This example can serve as a template for solving a linear single-field scalar
problem - just replace the equations in :func:`create_local_problem()`.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/diffusion/poisson_parallel_interactive.py -h
See PETSc options::
$ python examples/diffusion/poisson_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/diffusion/poisson_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 5 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --verify --metis -ksp_monitor -ksp_converged_reason
View the results using::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import csv
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.timing import Timer
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.evaluate import apply_ebc_to_matrix
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
import sfepy.parallel.parallel as pl
import sfepy.parallel.plot_parallel_dofs as ppd
def create_local_problem(omega_gi, order):
"""
Local problem definition using a domain corresponding to the global region
`omega_gi`.
"""
mesh = omega_gi.domain.mesh
# All tasks have the whole mesh.
bbox = mesh.get_bounding_box()
min_x, max_x = bbox[:, 0]
eps_x = 1e-8 * (max_x - min_x)
mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
domain_i = FEDomain('domain_i', mesh_i)
omega_i = domain_i.create_region('Omega', 'all')
gamma1_i = domain_i.create_region('Gamma1',
'vertices in (x < %.10f)'
% (min_x + eps_x),
'facet', allow_empty=True)
gamma2_i = domain_i.create_region('Gamma2',
'vertices in (x > %.10f)'
% (max_x - eps_x),
'facet', allow_empty=True)
field_i = Field.from_args('fu', nm.float64, 1, omega_i,
approx_order=order)
output('number of local field DOFs:', field_i.n_nod)
u_i = FieldVariable('u_i', 'unknown', field_i)
v_i = FieldVariable('v_i', 'test', field_i, primary_var_name='u_i')
integral = Integral('i', order=2*order)
mat = Material('m', lam=10, mu=5)
t1 = Term.new('dw_laplace(m.lam, v_i, u_i)',
integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
def _get_load(coors):
val = nm.ones_like(coors[:, 0])
for coor in coors.T:
val *= nm.sin(4 * nm.pi * coor)
return val
def get_load(ts, coors, mode=None, **kwargs):
if mode == 'qp':
return {'val' : _get_load(coors).reshape(coors.shape[0], 1, 1)}
load = Material('load', function=Function('get_load', get_load))
t2 = Term.new('dw_volume_lvf(load.val, v_i)',
integral, omega_i, load=load, v_i=v_i)
eq = Equation('balance', t1 - 100 * t2)
eqs = Equations([eq])
ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.all' : 0.1})
pb = Problem('problem_i', equations=eqs, active_only=False)
pb.time_update(ebcs=Conditions([ebc1, ebc2]))
pb.update_materials()
return pb
def verify_save_dof_maps(field, cell_tasks, dof_maps, id_map, options,
verbose=False):
vec = pl.verify_task_dof_maps(dof_maps, id_map, field, verbose=verbose)
order = options.order
mesh = field.domain.mesh
sfield = Field.from_args('aux', nm.float64, 'scalar', field.region,
approx_order=order)
aux = FieldVariable('aux', 'parameter', sfield,
primary_var_name='(set-to-None)')
out = aux.create_output(vec,
linearization=Struct(kind='adaptive',
min_level=order-1,
max_level=order-1,
eps=1e-8))
filename = os.path.join(options.output_dir,
'para-domains-dofs.h5')
if field.is_higher_order():
out['aux'].mesh.write(filename, out=out)
else:
mesh.write(filename, out=out)
out = Struct(name='cells', mode='cell',
data=cell_tasks[:, None, None, None])
filename = os.path.join(options.output_dir,
'para-domains-cells.h5')
mesh.write(filename, out={'cells' : out})
def solve_problem(mesh_filename, options, comm):
order = options.order
rank, size = comm.Get_rank(), comm.Get_size()
output('rank', rank, 'of', size)
stats = Struct()
timer = Timer('solve_timer')
timer.start()
mesh = Mesh.from_file(mesh_filename)
stats.t_read_mesh = timer.stop()
timer.start()
if rank == 0:
cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
verbose=True)
else:
cell_tasks = None
stats.t_partition_mesh = timer.stop()
output('creating global domain and field...')
timer.start()
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order)
stats.t_create_global_fields = timer.stop()
output('...done in', timer.dt)
output('distributing field %s...' % field.name)
timer.start()
distribute = pl.distribute_fields_dofs
lfds, gfds = distribute([field], cell_tasks,
is_overlap=True,
save_inter_regions=options.save_inter_regions,
output_dir=options.output_dir,
comm=comm, verbose=True)
lfd = lfds[0]
stats.t_distribute_fields_dofs = timer.stop()
output('...done in', timer.dt)
if rank == 0:
dof_maps = gfds[0].dof_maps
id_map = gfds[0].id_map
if options.verify:
verify_save_dof_maps(field, cell_tasks,
dof_maps, id_map, options, verbose=True)
if options.plot:
ppd.plot_partitioning([None, None], field, cell_tasks, gfds[0],
options.output_dir, size)
output('creating local problem...')
timer.start()
omega_gi = Region.from_cells(lfd.cells, field.domain)
omega_gi.finalize()
omega_gi.update_shape()
pb = create_local_problem(omega_gi, order)
variables = pb.get_initial_state()
eqs = pb.equations
u_i = variables['u_i']
field_i = u_i.field
stats.t_create_local_problem = timer.stop()
output('...done in', timer.dt)
if options.plot:
ppd.plot_local_dofs([None, None], field, field_i, omega_gi,
options.output_dir, rank)
output('allocating global system...')
timer.start()
sizes, drange = pl.get_sizes(lfd.petsc_dofs_range, field.n_nod, 1)
output('sizes:', sizes)
output('drange:', drange)
pdofs = pl.get_local_ordering(field_i, lfd.petsc_dofs_conn)
output('pdofs:', pdofs)
pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange,
is_overlap=True, comm=comm,
verbose=True)
stats.t_allocate_global_system = timer.stop()
output('...done in', timer.dt)
output('evaluating local problem...')
timer.start()
variables.fill_state(0.0)
variables.apply_ebc()
rhs_i = eqs.eval_residuals(variables())
# This must be after pl.create_petsc_system() call!
mtx_i = eqs.eval_tangent_matrices(variables(), pb.mtx_a)
stats.t_evaluate_local_problem = timer.stop()
output('...done in', timer.dt)
output('assembling global system...')
timer.start()
apply_ebc_to_matrix(mtx_i, u_i.eq_map.eq_ebc)
pl.assemble_rhs_to_petsc(prhs, rhs_i, pdofs, drange, is_overlap=True,
comm=comm, verbose=True)
pl.assemble_mtx_to_petsc(pmtx, mtx_i, pdofs, drange, is_overlap=True,
comm=comm, verbose=True)
stats.t_assemble_global_system = timer.stop()
output('...done in', timer.dt)
output('creating solver...')
timer.start()
conf = Struct(method='cg', precond='gamg', sub_precond='none',
i_max=10000, eps_a=1e-50, eps_r=1e-5, eps_d=1e4, verbose=True)
status = {}
ls = PETScKrylovSolver(conf, comm=comm, mtx=pmtx, status=status)
stats.t_create_solver = timer.stop()
output('...done in', timer.dt)
output('solving...')
timer.start()
psol = ls(prhs, psol)
psol_i = pl.create_local_petsc_vector(pdofs)
gather, scatter = pl.create_gather_scatter(pdofs, psol_i, psol, comm=comm)
scatter(psol_i, psol)
sol0_i = variables() - psol_i[...]
psol_i[...] = sol0_i
gather(psol, psol_i)
stats.t_solve = timer.stop()
output('...done in', timer.dt)
output('saving solution...')
timer.start()
variables.set_state(sol0_i)
out = u_i.create_output()
filename = os.path.join(options.output_dir, 'sol_%02d.h5' % comm.rank)
pb.domain.mesh.write(filename, io='auto', out=out)
gather_to_zero = pl.create_gather_to_zero(psol)
psol_full = gather_to_zero(psol)
if comm.rank == 0:
sol = psol_full[...].copy()[id_map]
u = FieldVariable('u', 'parameter', field,
primary_var_name='(set-to-None)')
filename = os.path.join(options.output_dir, 'sol.h5')
if (order == 1) or (options.linearization == 'strip'):
out = u.create_output(sol)
mesh.write(filename, io='auto', out=out)
else:
out = u.create_output(sol, linearization=Struct(kind='adaptive',
min_level=0,
max_level=order,
eps=1e-3))
out['u'].mesh.write(filename, io='auto', out=out)
stats.t_save_solution = timer.stop()
output('...done in', timer.dt)
stats.t_total = timer.total
stats.n_dof = sizes[1]
stats.n_dof_local = sizes[0]
stats.n_cell = omega.shape.n_cell
stats.n_cell_local = omega_gi.shape.n_cell
if options.show:
plt.show()
return stats
def save_stats(filename, pars, stats, overwrite, rank, comm=None):
out = stats.to_dict()
names = sorted(out.keys())
shape_dict = {'n%d' % ii : pars.shape[ii] for ii in range(pars.dim)}
keys = ['size', 'rank', 'dim'] + list(shape_dict.keys()) + ['order'] + names
out['size'] = comm.size
out['rank'] = rank
out['dim'] = pars.dim
out.update(shape_dict)
out['order'] = pars.order
if rank == 0 and overwrite:
with open(filename, 'w') as fd:
writer = csv.DictWriter(fd, fieldnames=keys)
writer.writeheader()
writer.writerow(out)
else:
with open(filename, 'a') as fd:
writer = csv.DictWriter(fd, fieldnames=keys)
writer.writerow(out)
helps = {
'output_dir' :
'output directory',
'dims' :
'dimensions of the block [default: %(default)s]',
'shape' :
'shape (counts of nodes in x, y, z) of the block [default: %(default)s]',
'centre' :
'centre of the block [default: %(default)s]',
'2d' :
'generate a 2D rectangle, the third components of the above'
' options are ignored',
'order' :
'field approximation order',
'linearization' :
'linearization used for storing the results with approximation order > 1'
' [default: %(default)s]',
'metis' :
'use metis for domain partitioning',
'verify' :
'verify domain partitioning, save cells and DOFs of tasks'
' for visualization',
'plot' :
'make partitioning plots',
'save_inter_regions' :
'save inter-task regions for debugging partitioning problems',
'show' :
'show partitioning plots (implies --plot)',
'stats_filename' :
'name of the stats file for storing elapsed time statistics',
'new_stats' :
'create a new stats file with a header line (overwrites existing!)',
'silent' : 'do not print messages to screen',
'clear' :
'clear old solution files from output directory'
' (DANGEROUS - use with care!)',
}
def main():
parser = ArgumentParser(description=__doc__.rstrip(),
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('output_dir', help=helps['output_dir'])
parser.add_argument('--dims', metavar='dims',
action='store', dest='dims',
default='1.0,1.0,1.0', help=helps['dims'])
parser.add_argument('--shape', metavar='shape',
action='store', dest='shape',
default='11,11,11', help=helps['shape'])
parser.add_argument('--centre', metavar='centre',
action='store', dest='centre',
default='0.0,0.0,0.0', help=helps['centre'])
parser.add_argument('-2', '--2d',
action='store_true', dest='is_2d',
default=False, help=helps['2d'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_argument('--linearization', choices=['strip', 'adaptive'],
action='store', dest='linearization',
default='strip', help=helps['linearization'])
parser.add_argument('--metis',
action='store_true', dest='metis',
default=False, help=helps['metis'])
parser.add_argument('--verify',
action='store_true', dest='verify',
default=False, help=helps['verify'])
parser.add_argument('--plot',
action='store_true', dest='plot',
default=False, help=helps['plot'])
parser.add_argument('--show',
action='store_true', dest='show',
default=False, help=helps['show'])
parser.add_argument('--save-inter-regions',
action='store_true', dest='save_inter_regions',
default=False, help=helps['save_inter_regions'])
parser.add_argument('--stats', metavar='filename',
action='store', dest='stats_filename',
default=None, help=helps['stats_filename'])
parser.add_argument('--new-stats',
action='store_true', dest='new_stats',
default=False, help=helps['new_stats'])
parser.add_argument('--silent',
action='store_true', dest='silent',
default=False, help=helps['silent'])
parser.add_argument('--clear',
action='store_true', dest='clear',
default=False, help=helps['clear'])
options, petsc_opts = parser.parse_known_args()
if options.show:
options.plot = True
comm = pl.PETSc.COMM_WORLD
output_dir = options.output_dir
filename = os.path.join(output_dir, 'output_log_%02d.txt' % comm.rank)
if comm.rank == 0:
ensure_path(filename)
comm.barrier()
output.prefix = 'sfepy_%02d:' % comm.rank
output.set_output(filename=filename, combined=options.silent == False)
output('petsc options:', petsc_opts)
mesh_filename = os.path.join(options.output_dir, 'para.h5')
dim = 2 if options.is_2d else 3
dims = nm.array(eval(options.dims), dtype=nm.float64)[:dim]
shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]
centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]
output('dimensions:', dims)
output('shape: ', shape)
output('centre: ', centre)
if comm.rank == 0:
from sfepy.mesh.mesh_generators import gen_block_mesh
if options.clear:
remove_files_patterns(output_dir,
['*.h5', '*.mesh', '*.txt', '*.png'],
ignores=['output_log_%02d.txt' % ii
for ii in range(comm.size)],
verbose=True)
save_options(os.path.join(output_dir, 'options.txt'),
[('options', vars(options))])
mesh = gen_block_mesh(dims, shape, centre, name='block-fem',
verbose=True)
mesh.write(mesh_filename, io='auto')
comm.barrier()
output('field order:', options.order)
stats = solve_problem(mesh_filename, options, comm)
output(stats)
if options.stats_filename:
if comm.rank == 0:
ensure_path(options.stats_filename)
comm.barrier()
pars = Struct(dim=dim, shape=shape, order=options.order)
pl.call_in_rank_order(
lambda rank, comm:
save_stats(options.stats_filename, pars, stats, options.new_stats,
rank, comm),
comm
)
if __name__ == '__main__':
main()
|
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class DnsPacket(KaitaiStruct):
"""(No support for Auth-Name + Add-Name for simplicity)
"""
class ClassType(Enum):
in_class = 1
cs = 2
ch = 3
hs = 4
class TypeType(Enum):
a = 1
ns = 2
md = 3
mf = 4
cname = 5
        soa = 6
mb = 7
mg = 8
mr = 9
null = 10
wks = 11
ptr = 12
hinfo = 13
minfo = 14
mx = 15
txt = 16
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.transaction_id = self._io.read_u2be()
self.flags = self._root.PacketFlags(self._io, self, self._root)
self.qdcount = self._io.read_u2be()
self.ancount = self._io.read_u2be()
self.nscount = self._io.read_u2be()
self.arcount = self._io.read_u2be()
self.queries = [None] * (self.qdcount)
for i in range(self.qdcount):
self.queries[i] = self._root.Query(self._io, self, self._root)
self.answers = [None] * (self.ancount)
for i in range(self.ancount):
self.answers[i] = self._root.Answer(self._io, self, self._root)
class PointerStruct(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.value = self._io.read_u1()
@property
def contents(self):
if hasattr(self, '_m_contents'):
return self._m_contents if hasattr(self, '_m_contents') else None
io = self._root._io
_pos = io.pos()
io.seek(self.value)
self._m_contents = self._root.DomainName(io, self, self._root)
io.seek(_pos)
return self._m_contents if hasattr(self, '_m_contents') else None
class Label(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.length = self._io.read_u1()
if self.is_pointer:
self.pointer = self._root.PointerStruct(self._io, self, self._root)
if not (self.is_pointer):
self.name = (self._io.read_bytes(self.length)).decode(u"ASCII")
@property
def is_pointer(self):
if hasattr(self, '_m_is_pointer'):
return self._m_is_pointer if hasattr(self, '_m_is_pointer') else None
self._m_is_pointer = self.length == 192
return self._m_is_pointer if hasattr(self, '_m_is_pointer') else None
class Query(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.name = self._root.DomainName(self._io, self, self._root)
self.type = self._root.TypeType(self._io.read_u2be())
self.query_class = self._root.ClassType(self._io.read_u2be())
class DomainName(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.name = []
i = 0
while True:
_ = self._root.Label(self._io, self, self._root)
self.name.append(_)
if ((_.length == 0) or (_.length == 192)) :
break
i += 1
class Address(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ip = [None] * (4)
for i in range(4):
self.ip[i] = self._io.read_u1()
class Answer(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.name = self._root.DomainName(self._io, self, self._root)
self.type = self._root.TypeType(self._io.read_u2be())
self.answer_class = self._root.ClassType(self._io.read_u2be())
self.ttl = self._io.read_s4be()
self.rdlength = self._io.read_u2be()
if self.type == self._root.TypeType.ptr:
self.ptrdname = self._root.DomainName(self._io, self, self._root)
if self.type == self._root.TypeType.a:
self.address = self._root.Address(self._io, self, self._root)
class PacketFlags(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.flag = self._io.read_u2be()
@property
def qr(self):
if hasattr(self, '_m_qr'):
return self._m_qr if hasattr(self, '_m_qr') else None
self._m_qr = ((self.flag & 32768) >> 15)
return self._m_qr if hasattr(self, '_m_qr') else None
@property
def ra(self):
if hasattr(self, '_m_ra'):
return self._m_ra if hasattr(self, '_m_ra') else None
self._m_ra = ((self.flag & 128) >> 7)
return self._m_ra if hasattr(self, '_m_ra') else None
@property
def tc(self):
if hasattr(self, '_m_tc'):
return self._m_tc if hasattr(self, '_m_tc') else None
self._m_tc = ((self.flag & 512) >> 9)
return self._m_tc if hasattr(self, '_m_tc') else None
@property
def rcode(self):
if hasattr(self, '_m_rcode'):
return self._m_rcode if hasattr(self, '_m_rcode') else None
self._m_rcode = ((self.flag & 15) >> 0)
return self._m_rcode if hasattr(self, '_m_rcode') else None
@property
def opcode(self):
if hasattr(self, '_m_opcode'):
return self._m_opcode if hasattr(self, '_m_opcode') else None
self._m_opcode = ((self.flag & 30720) >> 11)
return self._m_opcode if hasattr(self, '_m_opcode') else None
@property
def aa(self):
if hasattr(self, '_m_aa'):
return self._m_aa if hasattr(self, '_m_aa') else None
self._m_aa = ((self.flag & 1024) >> 10)
return self._m_aa if hasattr(self, '_m_aa') else None
@property
def z(self):
if hasattr(self, '_m_z'):
return self._m_z if hasattr(self, '_m_z') else None
self._m_z = ((self.flag & 64) >> 6)
return self._m_z if hasattr(self, '_m_z') else None
@property
def rd(self):
if hasattr(self, '_m_rd'):
return self._m_rd if hasattr(self, '_m_rd') else None
self._m_rd = ((self.flag & 256) >> 8)
return self._m_rd if hasattr(self, '_m_rd') else None
@property
def cd(self):
if hasattr(self, '_m_cd'):
return self._m_cd if hasattr(self, '_m_cd') else None
self._m_cd = ((self.flag & 16) >> 4)
return self._m_cd if hasattr(self, '_m_cd') else None
@property
def ad(self):
if hasattr(self, '_m_ad'):
return self._m_ad if hasattr(self, '_m_ad') else None
self._m_ad = ((self.flag & 32) >> 5)
return self._m_ad if hasattr(self, '_m_ad') else None
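# A hedged usage sketch (the bytes below are a hand-built DNS query for the
# hypothetical name "a.b"): parse a packet with the classes above and read a
# few header fields and flag bits. Only runs when this file is executed
# directly, never on import.
if __name__ == '__main__':
    raw = (b'\x12\x34'                      # transaction id
           b'\x01\x00'                      # flags: recursion desired
           b'\x00\x01'                      # qdcount = 1
           b'\x00\x00\x00\x00\x00\x00'      # ancount, nscount, arcount = 0
           b'\x01a\x01b\x00'                # qname: labels "a", "b", root
           b'\x00\x01'                      # qtype = A
           b'\x00\x01')                     # qclass = IN
    pkt = DnsPacket(KaitaiStream(BytesIO(raw)))
    print(pkt.transaction_id)               # 4660 == 0x1234
    print(pkt.flags.rd, pkt.flags.qr)       # 1 0 (printed as a tuple under Python 2)
    print(pkt.queries[0].type)              # TypeType.a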
|
|
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Gabor Dozsa
# System components used by the bigLITTLE.py configuration script
import m5
from m5.objects import *
m5.util.addToPath('../../')
from common.Caches import *
from common import CpuConfig
class L1I(L1_ICache):
tag_latency = 1
data_latency = 1
response_latency = 1
mshrs = 4
tgts_per_mshr = 8
size = '48kB'
assoc = 3
class L1D(L1_DCache):
tag_latency = 2
data_latency = 2
response_latency = 1
mshrs = 16
tgts_per_mshr = 16
size = '32kB'
assoc = 2
write_buffers = 16
class WalkCache(PageTableWalkerCache):
tag_latency = 4
data_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
class L2(L2Cache):
tag_latency = 12
data_latency = 12
response_latency = 5
mshrs = 32
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
clusivity='mostly_excl'
class L3(Cache):
size = '16MB'
assoc = 16
tag_latency = 20
data_latency = 20
response_latency = 20
mshrs = 20
tgts_per_mshr = 12
clusivity='mostly_excl'
class MemBus(SystemXBar):
badaddr_responder = BadAddr(warn_access="warn")
default = Self.badaddr_responder.pio
class CpuCluster(SubSystem):
def __init__(self, system, num_cpus, cpu_clock, cpu_voltage,
cpu_type, l1i_type, l1d_type, wcache_type, l2_type):
super(CpuCluster, self).__init__()
self._cpu_type = cpu_type
self._l1i_type = l1i_type
self._l1d_type = l1d_type
self._wcache_type = wcache_type
self._l2_type = l2_type
assert num_cpus > 0
self.voltage_domain = VoltageDomain(voltage=cpu_voltage)
self.clk_domain = SrcClockDomain(clock=cpu_clock,
voltage_domain=self.voltage_domain)
self.cpus = [ self._cpu_type(cpu_id=system.numCpus() + idx,
clk_domain=self.clk_domain)
for idx in range(num_cpus) ]
for cpu in self.cpus:
cpu.createThreads()
cpu.createInterruptController()
cpu.socket_id = system.numCpuClusters()
system.addCpuCluster(self, num_cpus)
def requireCaches(self):
return self._cpu_type.require_caches()
def memoryMode(self):
return self._cpu_type.memory_mode()
def addL1(self):
for cpu in self.cpus:
l1i = None if self._l1i_type is None else self._l1i_type()
l1d = None if self._l1d_type is None else self._l1d_type()
iwc = None if self._wcache_type is None else self._wcache_type()
dwc = None if self._wcache_type is None else self._wcache_type()
cpu.addPrivateSplitL1Caches(l1i, l1d, iwc, dwc)
def addL2(self, clk_domain):
if self._l2_type is None:
return
self.toL2Bus = L2XBar(width=64, clk_domain=clk_domain)
self.l2 = self._l2_type()
for cpu in self.cpus:
cpu.connectAllPorts(self.toL2Bus)
self.toL2Bus.master = self.l2.cpu_side
def connectMemSide(self, bus):
bus.slave
try:
self.l2.mem_side = bus.slave
except AttributeError:
for cpu in self.cpus:
cpu.connectAllPorts(bus)
class AtomicCluster(CpuCluster):
def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
cpu_config = [ CpuConfig.get("atomic"), None, None, None, None ]
super(AtomicCluster, self).__init__(system, num_cpus, cpu_clock,
cpu_voltage, *cpu_config)
def addL1(self):
pass
class SimpleSystem(LinuxArmSystem):
cache_line_size = 64
def __init__(self, caches, mem_size, **kwargs):
super(SimpleSystem, self).__init__(**kwargs)
self.voltage_domain = VoltageDomain(voltage="1.0V")
self.clk_domain = SrcClockDomain(clock="1GHz",
voltage_domain=Parent.voltage_domain)
self.realview = VExpress_GEM5_V1()
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.flags_addr = self.realview.realview_io.pio_addr + 0x30
self.membus = MemBus()
self.intrctrl = IntrControl()
self.terminal = Terminal()
self.vncserver = VncServer()
self.iobus = IOXBar()
# CPUs->PIO
self.iobridge = Bridge(delay='50ns')
# Device DMA -> MEM
mem_range = self.realview._mem_regions[0]
mem_range_size = long(mem_range[1]) - long(mem_range[0])
assert mem_range_size >= long(Addr(mem_size))
self._mem_range = AddrRange(start=mem_range[0], size=mem_size)
self._caches = caches
if self._caches:
self.iocache = IOCache(addr_ranges=[self._mem_range])
else:
self.dmabridge = Bridge(delay='50ns',
ranges=[self._mem_range])
self._pci_devices = 0
self._clusters = []
self._num_cpus = 0
def attach_pci(self, dev):
dev.pci_bus, dev.pci_dev, dev.pci_func = (0, self._pci_devices + 1, 0)
self._pci_devices += 1
self.realview.attachPciDevice(dev, self.iobus)
def connect(self):
self.iobridge.master = self.iobus.slave
self.iobridge.slave = self.membus.master
if self._caches:
self.iocache.mem_side = self.membus.slave
self.iocache.cpu_side = self.iobus.master
else:
self.dmabridge.master = self.membus.slave
self.dmabridge.slave = self.iobus.master
self.gic_cpu_addr = self.realview.gic.cpu_addr
self.realview.attachOnChipIO(self.membus, self.iobridge)
self.realview.attachIO(self.iobus)
self.system_port = self.membus.slave
def numCpuClusters(self):
return len(self._clusters)
def addCpuCluster(self, cpu_cluster, num_cpus):
assert cpu_cluster not in self._clusters
assert num_cpus > 0
self._clusters.append(cpu_cluster)
self._num_cpus += num_cpus
def numCpus(self):
return self._num_cpus
def addCaches(self, need_caches, last_cache_level):
if not need_caches:
# connect each cluster to the memory hierarchy
for cluster in self._clusters:
cluster.connectMemSide(self.membus)
return
cluster_mem_bus = self.membus
assert last_cache_level >= 1 and last_cache_level <= 3
for cluster in self._clusters:
cluster.addL1()
if last_cache_level > 1:
for cluster in self._clusters:
cluster.addL2(cluster.clk_domain)
if last_cache_level > 2:
max_clock_cluster = max(self._clusters,
key=lambda c: c.clk_domain.clock[0])
self.l3 = L3(clk_domain=max_clock_cluster.clk_domain)
self.toL3Bus = L2XBar(width=64)
self.toL3Bus.master = self.l3.cpu_side
self.l3.mem_side = self.membus.slave
cluster_mem_bus = self.toL3Bus
# connect each cluster to the memory hierarchy
for cluster in self._clusters:
cluster.connectMemSide(cluster_mem_bus)
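# A hedged sketch (hypothetical, never called from this file) of how the
# classes above are combined by a configuration script such as bigLITTLE.py:
# build a SimpleSystem, attach one atomic CPU cluster, then wire the caches
# and the memory bus. A real simulation additionally needs memory controllers,
# kernel/disk images and the usual m5 instantiate/simulate loop.
def _example_build_system():
    system = SimpleSystem(caches=False, mem_size='2GB')
    system.cpu_cluster = AtomicCluster(system, num_cpus=2, cpu_clock='1GHz')
    system.addCaches(need_caches=False, last_cache_level=1)
    system.connect()
    return system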
|
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import numpy
# Enthought library imports.
from traits.api import Instance, Bool, Array, Button, Str
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
import tvtk.common as tvtk_common
# Local imports.
from mayavi.core.filter import Filter
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `ImageDataProbe` class.
################################################################################
class ImageDataProbe(Filter):
"""
A filter that can be used to probe any dataset using a Structured
Points dataset. The filter also allows one to convert the scalar
data to an unsigned short array so that the scalars can be used for
volume visualization.
"""
# The image data onto which the data is probed.
probe_data = Instance(tvtk.ImageData, args=())
# The probe filter.
filter = Instance(tvtk.ProbeFilter, args=())
rescale_scalars = Bool(False, desc='if the input scalars are '\
'rescaled to an unsigned short '\
'array')
# Specifies if we can change the spacing/dimensions -- not allowed
# for imagedata/structured points data.
allow_changes = Bool(True)
# Spacing of points in the image data.
spacing = Array(value=(0.0, 0.0, 0.0),
shape=(3,),
cols=1,
dtype=float,
enter_set=True,
auto_set=False,
labels=['sx', 'sy', 'sz'],
desc='the spacing of points')
# Dimensions of the image data.
dimensions = Array(value=(0,0,0),
shape=(3,),
cols=1,
dtype=int,
enter_set=True,
auto_set=False,
labels=['nx', 'ny', 'nz'],
desc='the dimensions of the image data')
# Reset settings to defaults.
reset_defaults = Button(desc='if probe data is reset to defaults')
# Name of rescaled scalar to generate.
rescaled_scalar_name = Str('probe_us_array')
input_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
########################################
# Private traits.
# A trait to prevent static handlers from firing unnecessarily.
_event_handled = Bool(False)
########################################
# View related traits.
view = View(Group(Item(name='dimensions',
enabled_when='allow_changes'
),
Item(name='spacing',
enabled_when='allow_changes'),
Item(name='rescale_scalars'),
Item(name='reset_defaults',
show_label=False),
),
resizable=True
)
######################################################################
# `Filter` interface.
######################################################################
def setup_pipeline(self):
"""Creates the pipeline."""
self.configure_input_data(self.filter, self.probe_data)
def update_pipeline(self):
"""Connect and update the pipeline."""
inputs = self.inputs
if len(inputs) == 0:
return
fil = self.filter
self.configure_source_data(fil, inputs[0].outputs[0])
reset = False
if self.dimensions.sum() == 0:
reset = True
self._setup_probe_data(reset)
fil.update()
self._rescale_scalars_changed(self.rescale_scalars)
self._set_outputs([fil.output])
######################################################################
# Non-public interface.
######################################################################
def _setup_probe_data(self, reset=False):
pd = self.probe_data
input = self.inputs[0].outputs[0]
if input.is_a('vtkImageData'):
self.allow_changes = False
self.set(spacing=input.spacing,
dimensions=input.dimensions)
pd.set(origin=input.origin,
dimensions=input.dimensions,
spacing=input.spacing)
pd.update()
elif reset:
self.allow_changes = True
b = numpy.array(input.bounds)
pd.origin = b[::2]
l = b[1::2] - b[::2]
tot_len = sum(l)
npnt = pow(input.number_of_points, 1./3.) + 0.5
fac = 3.0*npnt/tot_len
dims = (l*fac).astype(int) + 1
extent = (0, dims[0] -1, 0, dims[1] -1, 0, dims[2] -1)
if tvtk_common.is_old_pipeline():
pd.set(extent=extent,
update_extent=extent,
whole_extent=extent,
dimensions=dims)
else:
pd.set(extent=extent,
dimensions=dims)
max_dim = dims.max()
dims = (dims-1).clip(min=1, max=max_dim+1)
l = l.clip(min=1e-3, max=l.max()+1.0)
pd.spacing = l/dims
self._event_handled = True
self.set(spacing = pd.spacing,
dimensions=pd.dimensions)
self._event_handled = False
def _rescale_scalars_changed(self, value):
out = self.filter.output
pd = out.point_data
sc = pd.scalars
if sc is None:
# no input scalars
return
if not value:
orig_sc = self.inputs[0].outputs[0].point_data.scalars
if sc.is_a('vtkUnsignedShortArray') and \
sc.name == self.rescaled_scalar_name:
pd.set_active_scalars(orig_sc.name)
pd.update()
self.pipeline_changed = True
self.render()
return
s_min, s_max = sc.range
# checking to see if input array is constant.
avg = (s_max + s_min)*0.5
diff = 1
if (s_max > avg) and (avg > s_min):
diff = s_max - s_min
arr = (sc.to_array() - s_min)*65535.0/diff
uc = tvtk.UnsignedShortArray(name=self.rescaled_scalar_name)
uc.from_array(arr)
pd.add_array(uc)
pd.set_active_scalars(self.rescaled_scalar_name)
pd.update()
self.pipeline_changed = True
self.render()
def _dimensions_changed(self, value):
if not self.allow_changes or self._event_handled:
return
max_d = value.max()
dims = (value-1).clip(min=1, max=max_d)
b = numpy.array(self.inputs[0].outputs[0].bounds)
l = b[1::2] - b[::2]
self.spacing = l/dims
self._update_probe()
def _spacing_changed(self, value):
if not self.allow_changes or self._event_handled:
return
b = numpy.array(self.inputs[0].outputs[0].bounds)
l = b[1::2] - b[::2]
dims = (l/value + 0.5).astype(int) + 1
# Recalculate space because of rounding.
maxd = dims.max()
dims1 = (dims -1).clip(min=1, max=maxd)
sp = l/dims1
self._event_handled = True
self.set(spacing = sp, dimensions=dims)
self._event_handled = False
self._update_probe ()
def _update_probe(self):
pd = self.probe_data
dims = self.dimensions
spacing = self.spacing
extent = (0, dims[0] -1, 0, dims[1] -1, 0, dims[2] -1)
if tvtk_common.is_old_pipeline():
pd.set(extent=extent,
update_extent=extent,
whole_extent=extent,
dimensions=dims,
spacing=spacing)
else:
pd.set(extent=extent,
dimensions=dims,
spacing=spacing)
pd.modified()
fil = self.filter
w = fil.global_warning_display
fil.global_warning_display = False
fil.remove_all_inputs()
self.configure_input_data(fil, pd)
fil.update_whole_extent()
fil.update()
self._rescale_scalars_changed(self.rescale_scalars)
fil.global_warning_display = w
self.data_changed = True
def _reset_defaults_fired(self):
self._setup_probe_data(reset=True)
self._rescale_scalars_changed(self.rescale_scalars)
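# A hedged sketch (illustrative only, never called from this module) of how
# the filter is typically attached to a data source through the standard
# engine API; the input file name below is hypothetical.
def _example_pipeline():
    from mayavi import mlab
    src = mlab.pipeline.open('some_dataset.vtk')    # hypothetical input file
    probe = ImageDataProbe()
    mlab.get_engine().add_filter(probe, src)
    probe.rescale_scalars = True     # expose an unsigned short scalar array
    mlab.pipeline.volume(probe)      # e.g. volume-render the probed image data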
|
|
#!/usr/bin/env python2.7
#
# This script makes it easy to do two things:
#
# 1. Automatically release a new version of pants to the java repo, including patching in our
# custom patches.
#
# 2. Run pants in development mode along with those same patches (avoiding the need to manually
# merge branches).
#
# This script only works if the pants development working directory is clean. It attempts to find
# that directory automatically (assuming it's named something like ~/src/pants), but you can also
# set the PANTS_SRC environment variable.
#
from __future__ import print_function, with_statement
import argparse
import logging
import os
import sys
from contextlib import contextmanager
from datetime import date
from textwrap import dedent
from binary_utils import BinaryUtils, Command, PantsGit
logger = logging.getLogger(__name__)
SQUARE_REMOTE = 'https://github.com/square/pants'
SQUARE_RELEASE_BRANCH = 'square/release'
SQUARE_RELEASE_FORMAT = 'square-%Y%m%d-01'
SQUARE_RELEASE_WIKI = 'https://wiki.corp.squareup.com/display/ATLS2/Pants+Release+Procedure'
# List of tuples in the form :
# (patch_url, description of patch)
# or
# (patch branch name, description of patch)
#
# These are applied in order, aborting if any patch fails to apply.
# TODO(gmalmquist) Maybe it would be good to load these in from a .json or something?
PANTS_PATCHES = [
]
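# Example entries (hypothetical, shown only to illustrate the two accepted
# forms described in the comment above; real patches go inside PANTS_PATCHES):
#
#   ('https://github.com/square/pants/commit/abc1234.patch',
#    'Cherry-pick of an upstream fix'),
#   ('square/some-patch-branch',
#    'Square-specific patch kept on a branch'),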
class RunError(Exception):
"""Error running patchy pants."""
class PatchyPants(object):
@classmethod
def run_pants(cls, options, args, patches):
"""Run PANTS_DEV=1 ./pants with the given arguments, after applying the given list of patches.
"""
git = PantsGit()
with git.apply_patches(patches, commit=options.commit_patches):
BinaryUtils.run_dev_pants(args)
@classmethod
def square_pants_run(cls, options, pants_args):
"""Runs pants in development mode with the global list of PANTS_PATCHES."""
cls.run_pants(options, pants_args, PANTS_PATCHES)
@classmethod
def square_pants_release(cls, options, release_args):
"""Runs a pants release with the given arguments to the release script."""
known_args = {'--no-push', '--overwrite', '--dirty'}
unknown = [arg for arg in release_args if arg not in known_args]
if unknown:
logger.error('Got unknown arguments for --release: {}'.format(unknown))
cls.usage()
return
releaser = Releaser(release_args, options.dirty)
releaser.release()
@classmethod
def usage(cls):
print(dedent('''
Usage:
{script} <arguments to pants>
{script} --release [--no-push] [--overwrite] [--dirty]
'''.format(script='pants_with_patches')))
@classmethod
def main(cls, args):
logging.basicConfig(format='%(message)s')
if not args:
cls.usage()
return
executors = {
'pants-run': cls.square_pants_run,
'release': cls.square_pants_release,
}
parser = argparse.ArgumentParser('Apply patches to pants, for development runs or releases.')
# Global options.
parser.add_argument('--action', default='pants-run', choices=executors.keys(),
help=argparse.SUPPRESS) # Just used as storage.
parser.add_argument('--release', dest='action', action='store_const', const='release',
help='Automatically patch and release pants to the java repo.')
parser.add_argument('-l', '--log-level', default='info',
help='Set the log level.')
parser.add_argument('--dirty', dest='dirty', action='store_true',
help='Use the current state of the pants repo instead of pulling.')
parser.add_argument('--no-dirty', dest='dirty', action='store_false',
help='Update the pants repo to the latest version first.')
parser.add_argument('--commit-patches', default=False, action='store_true',
help='Commit patches after applying them. This happens by default for a '
'release, but not when just running pants in development mode. In '
'development mode, the commits will be kept on '
'temp/temporary-patching-branch until the next time this command is '
'run.')
parser.set_defaults(dirty=False)
options, action_args = parser.parse_known_args(args)
if action_args and args[-len(action_args):] != action_args:
mixed_in = [a for a in args[-len(action_args):] if a not in action_args]
logger.error('Error: arguments to --{} have to be last.'.format(options.action))
if mixed_in:
logger.error(' Options {} were mixed in with the '.format(mixed_in))
logger.error(' args: {}'.format(action_args))
return
logging.getLogger().level = getattr(logging, options.log_level.upper(), logging.INFO)
runner = executors.get(options.action)
try:
logger.info("Executing {}('{}')".format(runner.__name__, ' '.join(action_args)))
runner(options, action_args)
except RunError as rp:
logger.critical('\n{}: {}\n'.format(type(rp).__name__, rp))
except KeyboardInterrupt:
logger.error('Aborted.')
class Releaser(object):
"""Automates most of the work of updating the version of pants in our java repo."""
def __init__(self, release_script_args, use_dirty):
self.release_script_args = release_script_args
self.use_dirty = use_dirty
def _get_java_dir(self):
"""Returns the current working directory if it is the java repo, otherwise raises an error."""
java_dir = BinaryUtils.find_java_dir()
if not java_dir:
raise RunError('Not in java repo.')
return java_dir
def _assert_square_exists(self, git, try_add=True):
"""Checks to see if the 'square' remote repo exists.
Raises an exception if it 'square' isn't present and can't be added.
:param Git git: the pants repo git command.
:param bool try_add: whether to attempt 'git remote add ...' automatically.
"""
remotes = git.remotes()
if 'square' not in remotes:
if try_add:
# Have to run with pipe=False to allow user to enter github credentials.
if git('remote', 'add', 'square', SQUARE_REMOTE, pipe=False):
self._assert_square_exists(git, try_add=False)
return
raise RunError('Square remote was not found. Please run:\n'
' git remote add square {}'.format(SQUARE_REMOTE))
def _get_upstream_remote(self, git):
"""Determines the name of the pants upstream repository.
    If present, prefer the remote 'upstream'; otherwise, choose 'origin'.
:param git: the Git command object for the pants repo.
"""
remotes = git.remotes()
if 'upstream' in remotes:
return 'upstream'
if 'origin' in remotes:
return 'origin'
raise RunError('Could not find upstream or origin remotes.')
@contextmanager
def _setup_pants_repo(self):
"""Cleans the pants repo and applies patches, yielding the Git command for the repo."""
git = PantsGit()
if self.use_dirty:
yield git
raise StopIteration
if not git.is_clean():
raise RunError('Pants source not clean: please stash or commit changes in {}.'
.format(git.cwd))
self._assert_square_exists(git)
pants_upstream = self._get_upstream_remote(git)
git('checkout', 'master')
git('fetch', pants_upstream)
git('reset', '--hard', '{}/master'.format(pants_upstream))
git('clean', '-fdx')
with git.apply_patches(PANTS_PATCHES, on_branch=SQUARE_RELEASE_BRANCH, commit=True):
git('push', '-f', 'square')
BinaryUtils.pause('Patches applied. It is recommended that you run either:"\n'
' full CI: {cwd}/build-support/bin/ci.sh\n'
' or just the unit tests: cd {cwd} ; ./pants test tests/python/pants_test:all\n'
'before continuing.'.format(cwd=git.cwd))
yield git
def _run_release_script(self, java_dir):
"""Invokes pants_release.sh."""
default_release_name=date.today().strftime(SQUARE_RELEASE_FORMAT)
release_name = raw_input('Release name (default is {}): '.format(default_release_name))
release_name = release_name.strip() or default_release_name
releaser = Command(BinaryUtils.squarepants_binary('pants_release.sh'), cwd=java_dir)
if not releaser(*(self.release_script_args+[release_name]), pipe=False):
raise RunError('{} failed.'.format(releaser.name))
return release_name
def _test_exemplar(self, pants_git, java_dir, release_name):
logger.info('\nTesting on exemplar:\n')
env = os.environ.copy()
env['SQPANTS_VERSION'] = release_name
if 'PANTS_DEV' in env:
env.pop('PANTS_DEV')
env['PANTS_SRC'] = pants_git.cwd
java_pants = Command('./pants', cwd=java_dir, env=env)
success = True
if not java_pants('binary', 'service/exemplar', pipe=False):
BinaryUtils.pause('Building service/exemplar failed.')
success = False
elif not java_pants('test', 'service/exemplar', pipe=False):
BinaryUtils.pause('Testing service/exemplar failed.')
success = False
return success
def _print_closing_info(self, release_name):
print('\nYou should edit squarepants/bin/pants_bootstrap.sh to update the version number ({}).\n'.format(release_name))
print(dedent('''
If you want to verify that things are working as expected in the java repo, you can run
pants-check-compile job: squarepants/bin/check.sh compile | tee ~/check-compile.txt # takes on the order of 1.5 hours on Jenkins
pants-check-test job: squarepants/bin/check.sh test | tee ~/check-test.txt # takes on the order of 15 hours on Jenkins
These are tracked at go/pants-success.
Update squarepants/CHANGELOG.md
    Make a PR in square/java containing the change to pants_bootstrap.sh (so the new version is downloaded), the updated CHANGELOG.md, and any other changes needed to stay compatible.
In your commit message, record the sha of the square/stable branch you built pants.pex from similar to the following:
Built from github square/pants commit fbcea7ec27fa8789df6919263fa3c638ca09ec26
This should allow us to investigate bugs in the future.
'''))
def release(self):
java_dir = self._get_java_dir() # Run this first to fail-fast if we're not in the java repo.
print('\nAdapted from manual release procedure:\n{}\n'.format(SQUARE_RELEASE_WIKI))
with self._setup_pants_repo() as pants_git:
release_name = self._run_release_script(java_dir)
BinaryUtils.pause('You should check to see if BUILD.tools or pants.ini need updating now.')
self._test_exemplar(pants_git, java_dir, release_name)
self._print_closing_info(release_name)
if __name__ == '__main__':
PatchyPants.main(sys.argv[1:])
|
|
# This module is, as much as possible, a clone of the pygame
# mixer api.
import android._android_sound as sound
import time
import threading
import os
condition = threading.Condition()
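# `condition` is notified whenever playback state changes (see play() and
# queue() below). A driver thread is presumably expected to wait on it and to
# call periodic() regularly so that looping and queued sounds keep playing.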
def periodic():
for i in range(0, num_channels):
if i in channels:
channels[i].periodic()
num_channels = 8
reserved_channels = 0
def init(frequency=22050, size=-16, channels=2, buffer=4096):
return None
def pre_init(frequency=22050, size=-16, channels=2, buffersize=4096):
return None
def quit():
stop()
return None
def stop():
for i in range(0, num_channels):
sound.stop(i)
def pause():
for i in range(0, num_channels):
sound.pause(i)
def unpause():
for i in range(0, num_channels):
sound.unpause(i)
def get_busy():
for i in range(0, num_channels):
if sound.busy(i):
return True
return False
def fadeout(time):
# Fadeout doesn't work - it just immediately stops playback.
stop()
# A map from channel number to Channel object.
channels = {}
def set_num_channels(count):
global num_channels
num_channels = count
def get_num_channels():
return num_channels
def set_reserved(count):
global reserved_channels
reserved_channels = count
def find_channel(force=False):
busy = []
for i in range(reserved_channels, num_channels):
c = Channel(i)
if not c.get_busy():
return c
busy.append(c)
if not force:
return None
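# force=True: steal the channel that has been playing the longest.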
busy.sort(key=lambda x: x.play_time)
return busy[0]
class ChannelImpl(object):
def __init__(self, id):
self.id = id
self.loop = None
self.queued = None
self.play_time = time.time()
def periodic(self):
qd = sound.queue_depth(self.id)
if qd < 2:
self.queued = None
if self.loop is not None and sound.queue_depth(self.id) < 2:
sound.queue(self.id, self.loop.file, self.loop.serial)
def play(self, s, loops=0, maxtime=0, fade_ms=0):
if loops:
self.loop = s
sound.play(self.id, s.file, s.serial)
self.play_time = time.time()
with condition:
condition.notify()
def seek(self, position):
sound.seek(self.id, position)
def stop(self):
self.loop = None
sound.stop(self.id)
def pause(self):
sound.pause(self.id)
def unpause(self):
sound.unpause(self.id)
def fadeout(self, time):
# No fadeout
self.stop()
def set_volume(self, left, right=None):
sound.set_volume(self.id, left)
def get_volume(self):
return sound.get_volume(self.id)
def get_busy(self):
return sound.busy(self.id)
def get_sound(self):
is_busy = sound.busy(self.id)
if not is_busy:
return
serial = sound.playing_name(self.id)
if not serial:
return
return sounds.get(serial, None)
def queue(self, s):
self.loop = None
self.queued = s
sound.queue(self.id, s.file, s.serial)
with condition:
condition.notify()
def get_queue(self):
return self.queued
def get_pos(self):
return sound.get_pos(self.id)/1000.
def get_length(self):
return sound.get_length(self.id)/1000.
def Channel(n):
"""
Gets the channel with the given number.
"""
rv = channels.get(n, None)
if rv is None:
rv = ChannelImpl(n)
channels[n] = rv
return rv
sound_serial = 0
sounds = {}
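# Registry of live Sound objects keyed by serial, so a channel can map the
# serial reported by the native layer back to the Sound being played.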
class Sound(object):
def __init__(self, what):
# Doesn't support buffers.
global sound_serial
self._channel = None
self._volume = 1.
self.serial = str(sound_serial)
sound_serial += 1
if isinstance(what, file): # noqa F821
self.file = what
else:
self.file = file(os.path.abspath(what), "rb") # noqa F821
sounds[self.serial] = self
def play(self, loops=0, maxtime=0, fade_ms=0):
# avoid new play if the sound is already playing
# -> same behavior as standard pygame.
if self._channel is not None:
if self._channel.get_sound() is self:
return
self._channel = channel = find_channel(True)
channel.set_volume(self._volume)
channel.play(self, loops=loops)
return channel
def stop(self):
for i in range(0, num_channels):
if Channel(i).get_sound() is self:
Channel(i).stop()
def fadeout(self, time):
self.stop()
def set_volume(self, left, right=None):
self._volume = left
if self._channel:
if self._channel.get_sound() is self:
self._channel.set_volume(self._volume)
def get_volume(self):
return self._volume
def get_num_channels(self):
rv = 0
for i in range(0, num_channels):
if Channel(i).get_sound() is self:
rv += 1
return rv
def get_length(self):
return 1.0
music_channel = Channel(256)
music_sound = None
class music(object):
@staticmethod
def load(filename):
music_channel.stop()
global music_sound
music_sound = Sound(filename)
@staticmethod
def play(loops=0, start=0.0):
# No start.
music_channel.play(music_sound, loops=loops)
@staticmethod
def rewind():
music_channel.play(music_sound)
@staticmethod
def seek(position):
music_channel.seek(position)
@staticmethod
def stop():
music_channel.stop()
@staticmethod
def pause():
music_channel.pause()
@staticmethod
def unpause():
music_channel.unpause()
@staticmethod
def fadeout(time):
music_channel.fadeout(time)
@staticmethod
def set_volume(value):
music_channel.set_volume(value)
@staticmethod
def get_volume():
return music_channel.get_volume()
@staticmethod
def get_busy():
return music_channel.get_busy()
@staticmethod
def get_pos():
return music_channel.get_pos()
@staticmethod
def queue(filename):
return music_channel.queue(Sound(filename))
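# A minimal usage sketch of this mixer clone, mirroring the pygame.mixer API.
# The file paths below are hypothetical; Sound() accepts a path (or an open
# file object) that the underlying android._android_sound module can play.
def _example_usage():
    # One-shot sound effect on an automatically chosen channel.
    effect = Sound('/sdcard/example_effect.ogg')   # hypothetical path
    channel = effect.play()
    channel.set_volume(0.5)
    # Background music on the dedicated music channel.
    music.load('/sdcard/example_music.ogg')        # hypothetical path
    music.play()
    # Wait for the effect to finish, then stop the music and all channels.
    while channel.get_busy():
        time.sleep(0.1)
    music.stop()
    stop()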
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage 7-mode library
"""
import ddt
from lxml import etree
import mock
from cinder import exception
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.client.fakes \
as client_fakes
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
"""Test case for NetApp's 7-Mode iSCSI library."""
def setUp(self):
super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_7mode()}
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.library.perf_library = mock.Mock()
self.library.vfiler = mock.Mock()
# Deprecated option
self.library.configuration.netapp_volume_list = None
def tearDown(self):
super(NetAppBlockStorage7modeLibraryTestCase, self).tearDown()
def get_config_7mode(self):
config = na_fakes.create_configuration_7mode()
config.netapp_storage_protocol = 'iscsi'
config.netapp_login = 'admin'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@mock.patch.object(perf_7mode, 'Performance7modeLibrary', mock.Mock())
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_get_root_volume_name')
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_do_partner_setup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_do_partner_setup,
mock_get_root_volume_name):
self.mock_object(client_base.Client, '_init_ssh_client')
mock_get_root_volume_name.return_value = 'vol0'
context = mock.Mock()
self.library.do_setup(context)
super_do_setup.assert_called_once_with(context)
mock_do_partner_setup.assert_called_once_with()
mock_get_root_volume_name.assert_called_once_with()
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
def test_do_partner_setup(self):
self.mock_object(client_base.Client, '_init_ssh_client')
self.library.configuration.netapp_partner_backend_name = 'partner'
self.library._do_partner_setup()
self.assertIsNotNone(self.library.partner_zapi_client)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
def test_do_partner_setup_no_partner(self):
self.mock_object(client_base.Client, '_init_ssh_client')
self.library._do_partner_setup()
self.assertFalse(hasattr(self.library, 'partner_zapi_client'))
@mock.patch.object(
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
def test_check_for_setup_error(self, super_check_for_setup_error):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = ['open1', 'open2']
self.library.check_for_setup_error()
super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_no_filtered_pools(self):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.mock_object(self.library, '_refresh_volume_info')
self.library.volume_list = []
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
def test_check_for_setup_error_too_old(self):
self.zapi_client.get_ontapi_version.return_value = (1, 8)
self.assertRaises(exception.VolumeBackendAPIException,
self.library.check_for_setup_error)
def test_find_mapped_lun_igroup(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>%(initiator-group-name)s</initiator-group-name>
<initiator-group-type>%(initiator-group-type)s</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c2</initiator-name>
<initiator-alias-info>
<initiator-alias>Centos</initiator-alias>
</initiator-alias-info>
</initiator-info>
</initiators>
<lun-id>2</lun-id>
</initiator-group-info>
</initiator-groups>
</results>""" % fake.IGROUP1))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertEqual(fake.IGROUP1_NAME, igroup)
self.assertEqual('2', lun_id)
def test_find_mapped_lun_igroup_initiator_mismatch(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups>
<initiator-group-info>
<initiator-group-name>openstack-igroup1</initiator-group-name>
<initiator-group-type>fcp</initiator-group-type>
<initiator-group-uuid>1477ee47-0e1f-4b35-a82c-dcca0b76fc44
</initiator-group-uuid>
<initiator-group-os-type>linux</initiator-group-os-type>
<initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
<initiator-group-throttle-borrow>false
</initiator-group-throttle-borrow>
<initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
<initiator-group-alua-enabled>true</initiator-group-alua-enabled>
<initiator-group-report-scsi-name-enabled>true
</initiator-group-report-scsi-name-enabled>
<initiator-group-use-partner>true</initiator-group-use-partner>
<initiators>
<initiator-info>
<initiator-name>21:00:00:24:ff:40:6c:c3</initiator-name>
</initiator-info>
</initiators>
<lun-id>2</lun-id>
</initiator-group-info>
</initiator-groups>
</results>"""))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_no_igroups(self):
response = netapp_api.NaElement(etree.XML("""
<results status="passed">
<initiator-groups />
</results>"""))
initiators = fake.FC_FORMATTED_INITIATORS
self.zapi_client.get_lun_map.return_value = response
(igroup, lun_id) = self.library._find_mapped_lun_igroup('path',
initiators)
self.assertIsNone(igroup)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_raises(self):
self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError
initiators = fake.FC_FORMATTED_INITIATORS
self.assertRaises(netapp_api.NaApiError,
self.library._find_mapped_lun_igroup,
'path',
initiators)
def test_has_luns_mapped_to_initiators_local_map(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = True
self.library.partner_zapi_client = mock.Mock()
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertTrue(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.assertEqual(0, self.library.partner_zapi_client.
has_luns_mapped_to_initiators.call_count)
def test_has_luns_mapped_to_initiators_partner_map(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = True
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertTrue(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
assert_called_with(initiator_list)
def test_has_luns_mapped_to_initiators_no_maps(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = False
result = self.library._has_luns_mapped_to_initiators(initiator_list)
self.assertFalse(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
assert_called_with(initiator_list)
def test_has_luns_mapped_to_initiators_no_partner(self):
initiator_list = fake.FC_FORMATTED_INITIATORS
self.zapi_client.has_luns_mapped_to_initiators.return_value = False
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.has_luns_mapped_to_initiators.\
return_value = True
result = self.library._has_luns_mapped_to_initiators(
initiator_list, include_partner=False)
self.assertFalse(result)
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
initiator_list)
self.assertEqual(0, self.library.partner_zapi_client.
has_luns_mapped_to_initiators.call_count)
@ddt.data(True, False)
def test_clone_lun_zero_block_count(self, is_snapshot):
"""Test for when clone lun is not passed a block count."""
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false',
is_snapshot=is_snapshot)
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=0, dest_block=0,
source_snapshot=None, src_block=0)
def test_clone_lun_blocks(self):
"""Test for when clone lun is passed block information."""
block_count = 10
src_block = 10
dest_block = 30
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false',
block_count=block_count, src_block=src_block,
dest_block=dest_block)
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=block_count,
dest_block=dest_block, src_block=src_block,
source_snapshot=None)
def test_clone_lun_no_space_reservation(self):
"""Test for when space_reservation is not passed."""
self.library._get_lun_attr = mock.Mock(return_value={
'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'})
self.library.lun_space_reservation = 'false'
self.library.zapi_client = mock.Mock()
self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN]
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN')
self.library.zapi_client.clone_lun.assert_called_once_with(
'/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN',
'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0,
source_snapshot=None)
def test_clone_lun_qos_supplied(self):
"""Test for qos supplied in clone lun invocation."""
self.assertRaises(exception.VolumeDriverException,
self.library._clone_lun,
'fakeLUN',
'newFakeLUN',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
def test_get_fc_target_wwpns(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
fake.FC_FORMATTED_TARGET_WWPNS[3]]
self.zapi_client.get_fc_target_wwpns.return_value = ports1
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
ports2
result = self.library._get_fc_target_wwpns()
self.assertSetEqual(set(fake.FC_FORMATTED_TARGET_WWPNS), set(result))
def test_get_fc_target_wwpns_no_partner(self):
ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0],
fake.FC_FORMATTED_TARGET_WWPNS[1]]
ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2],
fake.FC_FORMATTED_TARGET_WWPNS[3]]
self.zapi_client.get_fc_target_wwpns.return_value = ports1
self.library.partner_zapi_client = mock.Mock()
self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \
ports2
result = self.library._get_fc_target_wwpns(include_partner=False)
self.assertSetEqual(set(ports1), set(result))
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_refresh_volume_info', mock.Mock())
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_get_pool_stats', mock.Mock())
def test_vol_stats_calls_provide_ems(self):
self.library.zapi_client.provide_ems = mock.Mock()
self.library.get_volume_stats(refresh=True)
self.assertEqual(1, self.library.zapi_client.provide_ems.call_count)
def test_create_lun(self):
self.library.vol_refresh_voluntary = False
self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.library.zapi_client.create_lun.assert_called_once_with(
fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
None)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_create_lun_with_qos_policy_group(self):
self.assertRaises(exception.VolumeDriverException,
self.library._create_lun, fake.VOLUME_ID,
fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA,
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
def test_check_volume_type_for_lun_legacy_qos_not_supported(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS)
self.assertEqual(0, mock_get_volume_type.call_count)
def test_check_volume_type_for_lun_no_volume_type(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
mock_get_volume_type.return_value = None
mock_get_backend_spec = self.mock_object(
na_utils, 'get_backend_qos_spec_from_volume_type')
self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None)
self.assertEqual(0, mock_get_backend_spec.call_count)
def test_check_volume_type_for_lun_qos_spec_not_supported(self):
mock_get_volume_type = self.mock_object(na_utils,
'get_volume_type_from_volume')
mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE
mock_get_backend_spec = self.mock_object(
na_utils, 'get_backend_qos_spec_from_volume_type')
mock_get_backend_spec.return_value = na_fakes.QOS_SPEC
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.library._check_volume_type_for_lun,
na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS)
def test_get_preferred_target_from_list(self):
result = self.library._get_preferred_target_from_list(
fake.ISCSI_TARGET_DETAILS_LIST)
self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result)
def test_mark_qos_policy_group_for_deletion(self):
result = self.library._mark_qos_policy_group_for_deletion(
fake.QOS_POLICY_GROUP_INFO)
self.assertIsNone(result)
def test_setup_qos_for_volume(self):
result = self.library._setup_qos_for_volume(fake.VOLUME,
fake.EXTRA_SPECS)
self.assertIsNone(result)
def test_manage_existing_lun_same_name(self):
mock_lun = block_base.NetAppLun('handle', 'name', '1',
{'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.assertEqual(0, self.zapi_client.move_lun.call_count)
def test_manage_existing_lun_new_path(self):
mock_lun = block_base.NetAppLun(
'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'})
self.library._get_existing_vol_with_manage_ref = mock.Mock(
return_value=mock_lun)
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.library._check_volume_type_for_lun = mock.Mock()
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
self.assertEqual(
2, self.library._get_existing_vol_with_manage_ref.call_count)
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.zapi_client.move_lun.assert_called_once_with(
'/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume')
def test_get_pool_stats_no_volumes(self):
self.library.vols = []
result = self.library._get_pool_stats()
self.assertListEqual([], result)
@ddt.data({'netapp_lun_space_reservation': 'enabled'},
{'netapp_lun_space_reservation': 'disabled'})
@ddt.unpack
def test_get_pool_stats(self, netapp_lun_space_reservation):
self.library.volume_list = ['vol0', 'vol1', 'vol2']
self.library.root_volume_name = 'vol0'
self.library.reserved_percentage = 5
self.library.max_over_subscription_ratio = 10.0
self.library.configuration.netapp_lun_space_reservation = (
netapp_lun_space_reservation)
self.library.vols = netapp_api.NaElement(
client_fakes.VOLUME_LIST_INFO_RESPONSE).get_child_by_name(
'volumes').get_children()
self.library.perf_library.get_node_utilization = (
mock.Mock(return_value=30.0))
thick = netapp_lun_space_reservation == 'enabled'
result = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{
'pool_name': 'vol1',
'consistencygroup_support': True,
'QoS_support': False,
'thin_provisioning_support': not thick,
'thick_provisioning_support': thick,
'provisioned_capacity_gb': 2.94,
'free_capacity_gb': 1339.27,
'total_capacity_gb': 1342.21,
'reserved_percentage': 5,
'max_over_subscription_ratio': 10.0,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness',
}]
self.assertEqual(expected, result)
def test_get_filtered_pools_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid."""
self.library.configuration.netapp_pool_name_search_pattern = '(.+'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_filtered_pools)
@ddt.data('.*?3$|mix.+', '(.+?[0-9]+) ', '^.+3$', '^[a-z].*?[^4]$')
def test_get_filtered_pools_match_select_pools(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'),
filtered_pools[0]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'),
filtered_pools[1]
)
@ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed3, open1234', '.+')
def test_get_filtered_pools_match_all_pools(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'),
filtered_pools[0]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'),
filtered_pools[1]
)
self.assertEqual(
fake.FAKE_7MODE_VOLUME['all'][2].get_child_content('name'),
filtered_pools[2]
)
@ddt.data('abc|stackopen|openstack|abc.*', 'abc',
'stackopen, openstack, open', '^$')
def test_get_filtered_pools_non_matching_patterns(self, patterns):
self.library.vols = fake.FAKE_7MODE_VOLUME['all']
self.library.configuration.netapp_pool_name_search_pattern = patterns
filtered_pools = self.library._get_filtered_pools()
self.assertListEqual([], filtered_pools)
def test_get_pool_stats_no_ssc_vols(self):
self.library.vols = {}
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
def test_get_pool_stats_with_filtered_pools(self):
self.library.vols = fake.FAKE_7MODE_VOL1
self.library.volume_list = [
fake.FAKE_7MODE_VOL1[0].get_child_content('name')
]
self.library.root_volume_name = ''
self.library.perf_library.get_node_utilization = (
mock.Mock(return_value=30.0))
pools = self.library._get_pool_stats(filter_function='filter',
goodness_function='goodness')
self.assertListEqual(fake.FAKE_7MODE_POOLS, pools)
def test_get_pool_stats_no_filtered_pools(self):
self.library.vols = fake.FAKE_7MODE_VOL1
self.library.volume_list = ['open1', 'open2']
self.library.root_volume_name = ''
pools = self.library._get_pool_stats()
self.assertListEqual([], pools)
def test_delete_volume(self):
self.library.vol_refresh_voluntary = False
mock_super_delete_volume = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'delete_volume')
self.library.delete_volume(fake.VOLUME)
mock_super_delete_volume.assert_called_once_with(fake.VOLUME)
self.assertTrue(self.library.vol_refresh_voluntary)
def test_delete_snapshot(self):
self.library.vol_refresh_voluntary = False
mock_super_delete_snapshot = self.mock_object(
block_base.NetAppBlockStorageLibrary, 'delete_snapshot')
self.library.delete_snapshot(fake.SNAPSHOT)
mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT)
self.assertTrue(self.library.vol_refresh_voluntary)
|
|
"""
MassOpenCloud / Hardware Isolation Layer (MOC/HIL)
HIL Client Interface
August 2017, Tim Donahue tdonahue@mit.edu
"""
import urllib
import time
from hil.client.client import Client, RequestsHTTPClient
from hil.client.base import FailedAPICallException
from hil_slurm_logging import log_info, log_debug, log_error
from hil_slurm_settings import HIL_ENDPOINT, HIL_USER, HIL_PW
# timeout ensures that networking actions are completed in a reasonable time.
HIL_TIMEOUT = 20
DEBUG = False
class HILClientFailure(Exception):
"""Exception indicating that the HIL client failed"""
class ProjectMismatchError(Exception):
"""Raised when projects don't match"""
def _hil_client_connect(endpoint_ip, name, pw):
'''
Connect to the HIL server and return a HIL Client instance.
Note: this call will succeed if the API server is running, even if the
network server is down.
'''
hil_http_client = RequestsHTTPClient()
if not hil_http_client:
log_error('Unable to create HIL HTTP Client')
return None
hil_http_client.auth = (name, pw)
c = Client(endpoint_ip, hil_http_client)
if not c:
log_error('Unable to create HIL client')
return c
def hil_init():
return _hil_client_connect(HIL_ENDPOINT, HIL_USER, HIL_PW)
def check_hil_interface():
hil_client = hil_init()
def hil_reserve_nodes(nodelist, from_project, hil_client=None):
'''
Cause HIL nodes to move from the 'from' project to the HIL free pool.
Typically, the 'from' project is the Slurm loaner project.
This methods first powers off the nodes, then disconnects all networks,
then moves the node from the 'from' project to the free pool.
We power off the nodes before removing the networks because the IPMI
network is also controlled by HIL. If we removed all networks, then we will
not be able to perform any IPMI operations on nodes.
'''
if not hil_client:
hil_client = hil_init()
# Get information from node and ensure that the node is actually connected
# to <from_project> before proceeding.
# iterate over a copy of nodelist, otherwise we can't modify it.
for node in nodelist[:]:
node_info = show_node(hil_client, node)
project = node_info['project']
# if node already in the free pool, skip any processing.
if project is None:
log_info('HIL release: Node `%s` already in the free pool, skipping' % node)
nodelist.remove(node)
elif (project != from_project):
log_error('HIL reservation failure: Node `%s` (in project `%s`) not in `%s` project' % (node, project, from_project))
raise ProjectMismatchError()
# Power off all nodes.
for node in nodelist:
power_off_node(hil_client, node)
# Remove all networks from nodes.
for node in nodelist:
try:
_remove_all_networks(hil_client, node)
except Exception:
log_error('Failed to remove networks from node %s' % node)
continue
# Finally, remove node from project.
for node in nodelist:
try:
_ensure_no_networks(hil_client, node)
except Exception:
log_error('Failed to ensure node %s is disconnected from all networks' % node)
continue
# tries 10 times to detach the project because there might be a pending
# networking action setup by revert port in the previous step.
counter = 10
while counter:
try:
hil_client.project.detach(from_project, node)
log_info('Node `%s` removed from project `%s`' % (node, from_project))
break
except FailedAPICallException as ex:
if ex.message == 'Node has pending network actions':
counter -= 1
time.sleep(0.5)
else:
log_error('HIL reservation failure: Unable to detach node `%s` from project `%s`' % (node, from_project))
raise HILClientFailure(ex.message)
if counter == 0:
log_error('HIL reservation failure: Unable to detach node `%s` from project `%s`' % (node, from_project))
raise HILClientFailure()
def hil_free_nodes(nodelist, to_project, hil_client=None):
'''
Cause HIL nodes to move from the HIL free pool to the 'to' project.
Typically, the 'to' project is the Slurm loaner project.
Nodes that are already in the 'to' project are skipped; the remaining nodes
in the list are connected to the 'to' project.
'''
if not hil_client:
hil_client = hil_init()
# Get information from each node and skip any node that is already connected
# to <to_project>.
# iterate over a copy of nodelist, otherwise we can't modify it.
for node in nodelist[:]:
node_info = show_node(hil_client, node)
# If the node is in the Slurm project now, skip further processing, but don't indicate
# failure.
project = node_info['project']
if (project == to_project):
log_info('HIL release: Node `%s` already in `%s` project, skipping' % (node, to_project))
nodelist.remove(node)
# Finally, connect node to <to_project>
for node in nodelist:
try:
hil_client.project.connect(to_project, node)
log_info('Node `%s` connected to project `%s`' % (node, to_project))
except (FailedAPICallException, ConnectionError):
log_error('HIL reservation failure: Unable to connect node `%s` to project `%s`' % (node, to_project))
raise HILClientFailure()
def _remove_all_networks(hil_client, node):
'''
Disconnect all networks from all of the node's NICs
'''
node_info = show_node(hil_client, node)
# get node information and then iterate on the nics
for nic in node_info['nics']:
# get the port and switch to which the nics are connected to
port = nic['port']
switch = nic['switch']
if port and switch:
try:
hil_client.port.port_revert(switch, port)
log_info('Removed all networks from node `%s`' % node)
except (FailedAPICallException, ConnectionError):
log_error('Failed to revert port `%s` on node `%s` switch `%s`' % (port, node, switch))
raise HILClientFailure()
def _ensure_no_networks(hil_client, node):
"""Polls on the output of show node to check if networks have been removed.
It will timeout and raise an exception if it's taking too long.
"""
connected_to_network = True
end_time = time.time() + HIL_TIMEOUT
while connected_to_network:
if time.time() > end_time:
raise HILClientFailure('Networks not removed from node in reasonable time')
node_info = show_node(hil_client, node)
for nic in node_info['nics']:
if nic['networks']:
connected_to_network = True
break
else:
connected_to_network = False
# don't tight loop.
time.sleep(0.5)
return
def show_node(hil_client, node):
"""Returns node information and takes care of handling exceptions"""
try:
node_info = hil_client.node.show(node)
return node_info
except (FailedAPICallException, ConnectionError):
# log a note for the admins, and the exact exception before raising
# an error.
log_error('HIL reservation failure: HIL node info unavailable, node `%s`' % node)
raise HILClientFailure()
def power_off_node(hil_client, node):
try:
hil_client.node.power_off(node)
log_info('Node `%s` successfully powered off' % node)
except (FailedAPICallException, ConnectionError):
log_error('HIL reservation failure: Unable to power off node `%s`' % node)
raise HILClientFailure()
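# A minimal sketch of how the helpers above are intended to be driven. The
# node and project names are hypothetical; the helpers raise HILClientFailure
# or ProjectMismatchError on failure, which callers are expected to handle.
def _example_reserve_and_free():
    hil_client = hil_init()
    nodes = ['node-01', 'node-02']   # hypothetical node names
    # Pull loaned nodes out of the Slurm project into the HIL free pool...
    hil_reserve_nodes(nodes, 'slurm', hil_client)
    # ...and later hand them back to the Slurm loaner project.
    hil_free_nodes(nodes, 'slurm', hil_client)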
|
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from tests import testlib
import logging
import splunklib.client as client
class Tests(testlib.SDKTestCase):
def setUp(self):
self.service = client.connect(**self.opts.kwargs)
self.storage_passwords = self.service.storage_passwords
def tearDown(self):
# Delete all passwords created by SDK tests
for sp in self.storage_passwords:
if "delete-me" in sp.username or "delete-me" in sp.realm:
sp.delete()
def test_create(self):
start_count = len(self.storage_passwords)
realm = testlib.tmpname()
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_backslashes(self):
start_count = len(self.storage_passwords)
realm = "\\" + testlib.tmpname()
username = "\\" + testlib.tmpname()
# Prepends one escaped slash
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
# Prepends one escaped slash
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
# Checks for 2 escaped slashes (Splunk encodes the single slash)
self.assertEqual(p.name, "\\" + realm + ":\\" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_slashes(self):
start_count = len(self.storage_passwords)
realm = "/" + testlib.tmpname()
username = "/" + testlib.tmpname()
# Realm and username each begin with a forward slash
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
# The leading forward slash is preserved in the username
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
# Forward slashes are not escaped in the password name
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_norealm(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, None)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
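# The two tests below exercise Splunk's password-name encoding: the entity
# name is "<realm>:<username>:", and any literal ':' inside the realm or
# username is escaped as '\:' so the field separators stay unambiguous.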
def test_create_with_colons(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
realm = testlib.tmpname()
p = self.storage_passwords.create("changeme", username + ":end",
":start" + realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, ":start" + realm)
self.assertEqual(p.username, username + ":end")
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
"\\:start" + realm + ":" + username + "\\:end:")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
prefix = testlib.tmpname()
realm = prefix + ":r:e:a:l:m:"
user = ":u:s:e:r:"
p = self.storage_passwords.create("changeme", user, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, user)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
prefix + "\\:r\\:e\\:a\\:l\\:m\\::\\:u\\:s\\:e\\:r\\::")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_crazy(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
realm = testlib.tmpname()
p = self.storage_passwords.create("changeme",
username + ":end!@#$%^&*()_+{}:|<>?",
":start::!@#$%^&*()_+{}:|<>?" + realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, ":start::!@#$%^&*()_+{}:|<>?" + realm)
self.assertEqual(p.username, username + ":end!@#$%^&*()_+{}:|<>?")
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
"\\:start\\:\\:!@#$%^&*()_+{}\\:|<>?" + realm + ":" + username + "\\:end!@#$%^&*()_+{}\\:|<>?:")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_read(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username)
self.assertEqual(start_count + 1, len(self.storage_passwords))
for sp in self.storage_passwords:
self.assertTrue(p.name in self.storage_passwords)
# Name works with or without a trailing colon
self.assertTrue((":" + username + ":") in self.storage_passwords)
self.assertTrue((":" + username) in self.storage_passwords)
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_update(self):
start_count = len(self.storage_passwords)
realm = testlib.tmpname()
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, realm + ":" + username + ":")
p.update(password="Splunkeroo!")
self.assertEqual(p.clear_password, "changeme")
p.refresh()
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "Splunkeroo!")
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_delete(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, "myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, "myrealm")
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, "myrealm:" + username + ":")
self.storage_passwords.delete(username, "myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
self.storage_passwords.create("changeme", username, "myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.storage_passwords.delete("myrealm:" + username + ":")
self.assertEqual(start_count, len(self.storage_passwords))
# Test named parameters
self.storage_passwords.create(password="changeme", username=username,
realm="myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.storage_passwords.delete(username, "myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
self.storage_passwords.create(password="changeme", username=username + "/foo",
realm="/myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.storage_passwords.delete(username + "/foo", "/myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
|
|
import asyncio
from unittest import mock
try:
import aionotify
except OSError:
aionotify = None # type: ignore
import pytest
import typeguard
from opentrons import types
from opentrons import hardware_control as hc
from opentrons.hardware_control.types import Axis
from opentrons.hardware_control.dev_types import PipetteDict
LEFT_PIPETTE_PREFIX = 'p10_single'
LEFT_PIPETTE_MODEL = '{}_v1'.format(LEFT_PIPETTE_PREFIX)
LEFT_PIPETTE_ID = 'testy'
@pytest.fixture
def dummy_instruments():
dummy_instruments_attached = {
types.Mount.LEFT: {
'model': LEFT_PIPETTE_MODEL,
'id': LEFT_PIPETTE_ID,
'name': LEFT_PIPETTE_PREFIX,
},
types.Mount.RIGHT: {
'model': None,
'id': None,
'name': None,
}
}
return dummy_instruments_attached
@pytest.fixture
def dummy_backwards_compatibility():
dummy_instruments_attached = {
types.Mount.LEFT: {
'model': 'p20_single_v2.0',
'id': LEFT_PIPETTE_ID,
'name': 'p20_single_gen2'
},
types.Mount.RIGHT: {
'model': 'p300_single_v2.0',
'id': LEFT_PIPETTE_ID + '2',
'name': 'p300_single_gen2',
}
}
return dummy_instruments_attached
async def test_cache_instruments(dummy_instruments, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments,
loop=loop)
await hw_api.cache_instruments()
attached = hw_api.attached_instruments
typeguard.check_type(
'left mount dict', attached[types.Mount.LEFT],
PipetteDict)
async def test_mismatch_fails(dummy_instruments, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments,
loop=loop)
requested_instr = {
types.Mount.LEFT: 'p20_single_gen2', types.Mount.RIGHT: 'p300_single'}
with pytest.raises(RuntimeError):
await hw_api.cache_instruments(requested_instr)
async def test_backwards_compatibility(dummy_backwards_compatibility, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_backwards_compatibility,
loop=loop)
requested_instr = {
types.Mount.LEFT: 'p10_single',
types.Mount.RIGHT: 'p300_single'}
volumes = {
types.Mount.LEFT: {'min': 1, 'max': 10},
types.Mount.RIGHT: {'min': 30, 'max': 300}
}
await hw_api.cache_instruments(requested_instr)
attached = hw_api.attached_instruments
for mount, name in requested_instr.items():
assert attached[mount]['name']\
== dummy_backwards_compatibility[mount]['name']
assert attached[mount]['min_volume'] == volumes[mount]['min']
assert attached[mount]['max_volume'] == volumes[mount]['max']
@pytest.mark.skipif(aionotify is None,
reason='inotify not available')
async def test_cache_instruments_hc(monkeypatch, dummy_instruments,
hardware_controller_lockfile,
running_on_pi, cntrlr_mock_connect, loop):
hw_api_cntrlr = await hc.API.build_hardware_controller(loop=loop)
def mock_driver_model(mount):
attached_pipette = {'left': LEFT_PIPETTE_MODEL, 'right': None}
return attached_pipette[mount]
def mock_driver_id(mount):
attached_pipette = {'left': LEFT_PIPETTE_ID, 'right': None}
return attached_pipette[mount]
monkeypatch.setattr(hw_api_cntrlr._backend._smoothie_driver,
'read_pipette_model', mock_driver_model)
monkeypatch.setattr(hw_api_cntrlr._backend._smoothie_driver,
'read_pipette_id', mock_driver_id)
await hw_api_cntrlr.cache_instruments()
attached = hw_api_cntrlr.attached_instruments
typeguard.check_type('left mount dict default',
attached[types.Mount.LEFT],
PipetteDict)
# If we pass a conflicting expectation we should get an error
with pytest.raises(RuntimeError):
await hw_api_cntrlr.cache_instruments({types.Mount.LEFT: 'p300_multi'})
# If we pass a matching expects it should work
await hw_api_cntrlr.cache_instruments(
{types.Mount.LEFT: LEFT_PIPETTE_PREFIX})
attached = hw_api_cntrlr.attached_instruments
typeguard.check_type('left mount dict after expects',
attached[types.Mount.LEFT],
PipetteDict)
async def test_cache_instruments_sim(loop, dummy_instruments):
def fake_func1(value):
return value
def fake_func2(mount, value):
return mount, value
sim = await hc.API.build_hardware_simulator(loop=loop)
# With nothing specified at init or expected, we should have nothing
# afterwards and nothing should have been reconfigured
sim._backend._smoothie_driver.update_steps_per_mm = mock.Mock(fake_func1)
sim._backend._smoothie_driver.update_pipette_config = mock.Mock(fake_func2)
sim._backend._smoothie_driver.set_dwelling_current = mock.Mock(fake_func1)
await sim.cache_instruments()
attached = sim.attached_instruments
assert attached == {
types.Mount.LEFT: {}, types.Mount.RIGHT: {}}
sim._backend._smoothie_driver.update_steps_per_mm.assert_not_called()
sim._backend._smoothie_driver.update_pipette_config.assert_not_called()
sim._backend._smoothie_driver.set_dwelling_current.assert_not_called()
sim._backend._smoothie_driver.update_steps_per_mm.reset_mock()
sim._backend._smoothie_driver.update_pipette_config.reset_mock()
# When we expect instruments, we should get what we expect since nothing
# was specified at init time
await sim.cache_instruments(
{types.Mount.LEFT: 'p10_single',
types.Mount.RIGHT: 'p300_single_gen2'})
attached = sim.attached_instruments
assert attached[types.Mount.LEFT]['model']\
== 'p10_single_v1'
assert attached[types.Mount.LEFT]['name']\
== 'p10_single'
steps_mm_calls = [mock.call({'B': 768}), mock.call({'C': 3200})]
pip_config_calls = [
mock.call('Z', {'home': 220}),
mock.call('A', {'home': 172.15}),
mock.call('B', {'max_travel': 30}),
mock.call('C', {'max_travel': 60})]
current_calls = [mock.call({'B': 0.05}), mock.call({'C': 0.05})]
sim._backend._smoothie_driver.update_steps_per_mm.assert_has_calls(
steps_mm_calls, any_order=True)
sim._backend._smoothie_driver.update_pipette_config.assert_has_calls(
pip_config_calls, any_order=True)
await sim.cache_instruments(
{types.Mount.LEFT: 'p10_single',
types.Mount.RIGHT: 'p300_multi_gen2'})
current_calls = [mock.call({'B': 0.05}), mock.call({'C': 0.3})]
sim._backend._smoothie_driver.set_dwelling_current.assert_has_calls(
current_calls, any_order=True)
# If we use prefixes, that should work too
await sim.cache_instruments({types.Mount.RIGHT: 'p300_single'})
attached = sim.attached_instruments
assert attached[types.Mount.RIGHT]['model']\
== 'p300_single_v1'
assert attached[types.Mount.RIGHT]['name']\
== 'p300_single'
# If we specify instruments at init time, we should get them without
# passing an expectation
sim = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments)
await sim.cache_instruments()
attached = sim.attached_instruments
typeguard.check_type(
'after config',
attached[types.Mount.LEFT],
PipetteDict
)
# If we specify conflicting expectations and init arguments we should
# get a RuntimeError
with pytest.raises(RuntimeError):
await sim.cache_instruments({types.Mount.LEFT: 'p300_multi'})
# Unless we specifically told the simulator to not strictly enforce
# correspondence between expectations and preconfiguration
sim = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments,
loop=loop, strict_attached_instruments=False)
await sim.cache_instruments({types.Mount.LEFT: 'p300_multi'})
with pytest.raises(RuntimeError):
# If you pass something that isn't a pipette name it absolutely
# should not work
await sim.cache_instruments({types.Mount.LEFT: 'p10_sing'})
async def test_prep_aspirate(dummy_instruments, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
await hw_api.home()
await hw_api.cache_instruments()
mount = types.Mount.LEFT
await hw_api.pick_up_tip(mount, 20.0)
# If we're empty and haven't prepared, we should get an error
with pytest.raises(RuntimeError):
await hw_api.aspirate(mount, 1, 1.0)
# If we're empty and have prepared, we should be fine
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(mount, 1)
# If we're not empty, we should be fine
await hw_api.aspirate(mount, 1)
async def test_aspirate_new(dummy_instruments, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
await hw_api.home()
await hw_api.cache_instruments()
mount = types.Mount.LEFT
await hw_api.pick_up_tip(mount, 20.0)
aspirate_ul = 3.0
aspirate_rate = 2
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(mount, aspirate_ul, aspirate_rate)
new_plunger_pos = 6.05285
pos = await hw_api.current_position(mount)
assert pos[Axis.B] == new_plunger_pos
async def test_aspirate_old(dummy_instruments, loop, old_aspiration):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
await hw_api.home()
await hw_api.cache_instruments()
mount = types.Mount.LEFT
await hw_api.pick_up_tip(mount, 20.0)
aspirate_ul = 3.0
aspirate_rate = 2
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(mount, aspirate_ul, aspirate_rate)
new_plunger_pos = 5.660769
pos = await hw_api.current_position(mount)
assert pos[Axis.B] == new_plunger_pos
async def test_dispense(dummy_instruments, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
await hw_api.home()
await hw_api.cache_instruments()
mount = types.Mount.LEFT
await hw_api.pick_up_tip(mount, 20.0)
aspirate_ul = 10.0
aspirate_rate = 2
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(mount, aspirate_ul, aspirate_rate)
dispense_1 = 3.0
await hw_api.dispense(mount, dispense_1)
plunger_pos_1 = 10.810573
assert (await hw_api.current_position(mount))[Axis.B]\
== plunger_pos_1
await hw_api.dispense(mount, rate=2)
plunger_pos_2 = 2
assert (await hw_api.current_position(mount))[Axis.B]\
== plunger_pos_2
async def test_no_pipette(dummy_instruments, loop):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
await hw_api.cache_instruments()
aspirate_ul = 3.0
aspirate_rate = 2
with pytest.raises(types.PipetteNotAttachedError):
await hw_api.aspirate(types.Mount.RIGHT, aspirate_ul, aspirate_rate)
assert not hw_api._current_volume[types.Mount.RIGHT]
async def test_pick_up_tip(dummy_instruments, loop, is_robot):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
mount = types.Mount.LEFT
await hw_api.home()
await hw_api.cache_instruments()
tip_position = types.Point(12.13, 9, 150)
target_position = {Axis.X: 46.13, # Left mount offset
Axis.Y: 9,
Axis.Z: 218, # Z retracts after pick_up
Axis.A: 218,
Axis.B: 2,
Axis.C: 19}
await hw_api.move_to(mount, tip_position)
# Note: pick_up_tip without a tip_length argument requires the pipette on
# the associated mount to have an associated tip rack from which to infer
# the tip length. That behavior is not tested here.
tip_length = 25.0
await hw_api.pick_up_tip(mount, tip_length)
assert hw_api._attached_instruments[mount].has_tip
assert hw_api._attached_instruments[mount].current_volume == 0
assert hw_api._current_position == target_position
async def test_aspirate_flow_rate(dummy_instruments, loop, monkeypatch):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
mount = types.Mount.LEFT
await hw_api.home()
await hw_api.cache_instruments()
await hw_api.pick_up_tip(mount, 20.0)
mock_move_plunger = mock.Mock()
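# _move_plunger is awaited by the hardware API, so the mock's side effect has
# to return an already-completed awaitable (an empty, resolved Future).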
def instant_future(mount, distance, speed):
fut = asyncio.Future()
fut.set_result(None)
return fut
mock_move_plunger.side_effect = instant_future
monkeypatch.setattr(hw_api, '_move_plunger', mock_move_plunger)
pip = hw_api._attached_instruments[mount]
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(types.Mount.LEFT, 2)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 2, 'aspirate'),
speed=hw_api._plunger_speed(
pip, pip.config.aspirate_flow_rate, 'aspirate')
)
mock_move_plunger.reset_mock()
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(types.Mount.LEFT, 2, rate=0.5)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 4, 'aspirate'),
speed=hw_api._plunger_speed(
pip, pip.config.aspirate_flow_rate * 0.5, 'aspirate')
)
mock_move_plunger.reset_mock()
hw_api.set_flow_rate(mount, aspirate=1)
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(types.Mount.LEFT, 2)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 6, 'aspirate'),
speed=hw_api._plunger_speed(pip, 2, 'aspirate')
)
mock_move_plunger.reset_mock()
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(types.Mount.LEFT, 2, rate=0.5)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 8, 'aspirate'),
speed=hw_api._plunger_speed(pip, 1, 'aspirate')
)
mock_move_plunger.reset_mock()
hw_api.set_pipette_speed(mount, aspirate=10)
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(types.Mount.LEFT, 1)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 8, 'aspirate'),
speed=10
)
mock_move_plunger.reset_mock()
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(types.Mount.LEFT, 1, rate=0.5)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 8, 'aspirate'),
speed=5
)
async def test_dispense_flow_rate(dummy_instruments, loop, monkeypatch):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
mount = types.Mount.LEFT
await hw_api.home()
await hw_api.cache_instruments()
await hw_api.pick_up_tip(mount, 20.0)
await hw_api.prepare_for_aspirate(types.Mount.LEFT)
await hw_api.aspirate(mount, 10)
mock_move_plunger = mock.Mock()
def instant_future(mount, distance, speed):
fut = asyncio.Future()
fut.set_result(None)
return fut
mock_move_plunger.side_effect = instant_future
monkeypatch.setattr(hw_api, '_move_plunger', mock_move_plunger)
pip = hw_api._attached_instruments[mount]
await hw_api.dispense(types.Mount.LEFT, 2)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 8, 'dispense'),
speed=hw_api._plunger_speed(
pip, pip.config.dispense_flow_rate, 'dispense')
)
mock_move_plunger.reset_mock()
await hw_api.dispense(types.Mount.LEFT, 2, rate=0.5)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 6, 'dispense'),
speed=hw_api._plunger_speed(
pip, pip.config.dispense_flow_rate * 0.5, 'dispense')
)
mock_move_plunger.reset_mock()
hw_api.set_flow_rate(mount, dispense=3)
await hw_api.dispense(types.Mount.LEFT, 2)
mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 4, 'dispense'),
speed=hw_api._plunger_speed(pip, 3, 'dispense')
)
mock_move_plunger.reset_mock()
await hw_api.dispense(types.Mount.LEFT, 2, rate=0.5)
    mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 2, 'dispense'),
speed=hw_api._plunger_speed(pip, 1.5, 'dispense')
)
mock_move_plunger.reset_mock()
hw_api.set_pipette_speed(mount, dispense=10)
await hw_api.dispense(types.Mount.LEFT, 1)
    mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 1, 'dispense'),
speed=10
)
mock_move_plunger.reset_mock()
await hw_api.dispense(types.Mount.LEFT, 1, rate=0.5)
    mock_move_plunger.assert_called_with(
mount,
hw_api._plunger_position(pip, 0, 'dispense'),
speed=5
)
async def test_blowout_flow_rate(dummy_instruments, loop, monkeypatch):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
mount = types.Mount.LEFT
await hw_api.home()
await hw_api.cache_instruments()
await hw_api.pick_up_tip(mount, 20.0)
mock_move_plunger = mock.Mock()
def instant_future(mount, distance, speed):
fut = asyncio.Future()
fut.set_result(None)
return fut
mock_move_plunger.side_effect = instant_future
monkeypatch.setattr(hw_api, '_move_plunger', mock_move_plunger)
pip = hw_api._attached_instruments[mount]
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(mount, 10)
mock_move_plunger.reset_mock()
await hw_api.blow_out(mount)
    mock_move_plunger.assert_called_with(
mount,
pip.config.blow_out,
speed=hw_api._plunger_speed(
pip, pip.config.blow_out_flow_rate, 'dispense')
)
mock_move_plunger.reset_mock()
hw_api.set_flow_rate(mount, blow_out=2)
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(mount, 10)
mock_move_plunger.reset_mock()
await hw_api.blow_out(types.Mount.LEFT)
    mock_move_plunger.assert_called_with(
mount,
pip.config.blow_out,
speed=hw_api._plunger_speed(pip, 2, 'dispense')
)
mock_move_plunger.reset_mock()
hw_api.set_pipette_speed(mount, blow_out=15)
await hw_api.prepare_for_aspirate(mount)
await hw_api.aspirate(types.Mount.LEFT, 10)
mock_move_plunger.reset_mock()
await hw_api.blow_out(types.Mount.LEFT)
    mock_move_plunger.assert_called_with(
mount,
pip.config.blow_out,
speed=15
)
async def test_reset_instruments(dummy_instruments, loop, monkeypatch):
hw_api = await hc.API.build_hardware_simulator(
attached_instruments=dummy_instruments, loop=loop)
hw_api.set_flow_rate(types.Mount.LEFT, 20)
# gut check
assert hw_api.attached_instruments[types.Mount.LEFT]['aspirate_flow_rate']\
== 20
old_l = hw_api._attached_instruments[types.Mount.LEFT]
old_r = hw_api._attached_instruments[types.Mount.RIGHT]
hw_api.reset_instrument(types.Mount.LEFT)
# left should have been reset, right should not
assert not (old_l is hw_api._attached_instruments[types.Mount.LEFT])
assert old_r is hw_api._attached_instruments[types.Mount.RIGHT]
# after the reset, the left should be more or less the same
assert old_l.pipette_id\
== hw_api._attached_instruments[types.Mount.LEFT].pipette_id
# but non-default configs should be changed
assert hw_api.attached_instruments[types.Mount.LEFT]['aspirate_flow_rate']\
!= 20
old_l = hw_api._attached_instruments[types.Mount.LEFT]
old_r = hw_api._attached_instruments[types.Mount.RIGHT]
hw_api.reset_instrument()
assert not (old_l is hw_api._attached_instruments[types.Mount.LEFT])
    assert not (old_r is hw_api._attached_instruments[types.Mount.RIGHT])
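# A minimal, stdlib-only sketch of the mock assertion pattern used in the
# flow-rate tests above. `Mock.called_with(...)` is not an assertion (it just
# auto-creates a child mock), so the tests rely on `assert_called_with(...)`,
# which raises AssertionError on a mismatch. The mount/distance/speed values
# here are illustrative placeholders.
def _example_assert_called_with():
    from unittest import mock

    move_plunger = mock.Mock()
    # Simulate the hardware controller driving the plunger once.
    move_plunger('left', 12.5, speed=7.5)
    # Verifies both the positional arguments and the `speed` keyword.
    move_plunger.assert_called_with('left', 12.5, speed=7.5)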
|
|
from flask import Blueprint, render_template, redirect, url_for, jsonify, abort, flash, current_app
from flask_login import login_user, current_user, login_required
from sqlalchemy.exc import IntegrityError
from .. import db
from ..models import Category, Sensor, Subview, View, ChartConfig, User
from ..queries import query_get_category_by_id, query_get_sensor_by_id, query_get_subview_by_id, query_get_view_by_id, \
query_get_user_by_id, query_get_user_by_name, query_get_chartconfig_by_id
from ..forms import CategoryForm, SensorForm, SubviewForm, ViewForm, UserForm, LoginForm
from ..utils import send_email
from ..decorators import check_confirmed
forms = Blueprint('forms', __name__)
@forms.route('/forms/login', methods=['GET', 'POST'])
def login_form():
form = LoginForm()
if form.validate_on_submit():
user = query_get_user_by_name(form.username.data)
if user is None:
abort(400)
if not user.confirmed:
flash('Please activate your account!', 'warning')
return redirect(url_for('main.index'))
if user.verify_password(form.password.data):
login_user(user)
return redirect(url_for('main.userpage', user_slug=user.user_slug))
return render_template('form.html.j2', title='Login', url=url_for('forms.login_form'), form=form, cta='Login')
@forms.route('/forms/users/add', methods=['GET', 'POST'])
def add_user_form():
form = UserForm()
if form.validate_on_submit():
user = User()
form.populate_obj(user)
db.session.add(user)
try:
db.session.commit()
token = user.generate_confirmation_token()
url = url_for('main.confirm_email', token=token, _external=True)
send_email(to=user.email, subject='Confirm Your Account', body=render_template('activate.html.j2', url=url))
except IntegrityError:
db.session.rollback()
login_user(user)
return redirect(url_for('main.index'))
return render_template('form.html.j2', title='Sign Up', url=url_for('forms.add_user_form'), form=form, cta='Sign Up')
@forms.route('/forms/users/edit', methods=['GET', 'POST'])
@login_required
@check_confirmed
def edit_user_form():
user = query_get_user_by_id(current_user.id)
form = UserForm(obj=user)
if form.validate_on_submit():
form.populate_obj(user)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=user.user_slug))
form.password.data = None
form.repeat_password.data = None
    return render_template('form.html.j2', title='Edit profile', url=url_for('forms.edit_user_form'), form=form, cta='Save')
@forms.route('/forms/categories/add/<user_id>', methods=['GET', 'POST'])
@login_required
@check_confirmed
def add_category_form(user_id):
form = CategoryForm()
if form.validate_on_submit():
category = Category()
form.populate_obj(category)
db.session.add(category)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
form.user_id.data = user_id
return render_template('form.html.j2', title='Add category', url=url_for('forms.add_category_form', user_id=user_id), form=form, cta='Add')
@forms.route('/forms/categories/edit/<id>', methods=['GET', 'POST'])
@login_required
@check_confirmed
def edit_category_form(id):
category = query_get_category_by_id(id, current_user.id)
form = CategoryForm(obj=category)
if form.validate_on_submit():
form.populate_obj(category)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
return render_template('form.html.j2', title='Edit category', url=url_for('forms.edit_category_form', id=id), form=form, cta='Edit')
@forms.route('/forms/sensors/add', methods=['GET', 'POST'])
@login_required
@check_confirmed
def add_sensor_form():
form = SensorForm()
if form.validate_on_submit():
sen = Sensor()
form.populate_obj(sen)
db.session.add(sen)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
return render_template('form.html.j2', title='Add sensor', url=url_for('forms.add_sensor_form'), form=form, cta='Add')
@forms.route('/forms/sensors/edit/<id>', methods=['GET', 'POST'])
@login_required
@check_confirmed
def edit_sensor_form(id):
sensor = query_get_sensor_by_id(id, current_user.id)
form = SensorForm(obj=sensor)
if form.validate_on_submit():
form.populate_obj(sensor)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
category = query_get_category_by_id(sensor.category_id, current_user.id)
form.category_name.data = category.name
return render_template('form.html.j2', title='Edit sensor', url=url_for('forms.edit_sensor_form', id=id), form=form, cta='Edit')
@forms.route('/forms/subviews/add/<view_id>', methods=['GET', 'POST'])
@login_required
@check_confirmed
def add_subview_form(view_id):
form = SubviewForm()
if form.validate_on_submit():
subview = Subview()
form.populate_obj(subview)
db.session.add(subview)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
form.view_id.data = view_id
return render_template('form.html.j2', title='Add subview', url=url_for('forms.add_subview_form', view_id=view_id), form=form, cta='Add')
@forms.route('/forms/subviews/edit/<id>', methods=['GET', 'POST'])
@login_required
@check_confirmed
def edit_subview_form(id):
subview = query_get_subview_by_id(id)
form = SubviewForm(obj=subview)
if form.validate_on_submit():
form.populate_obj(subview)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
form.sensor_name.data = query_get_sensor_by_id(subview.sensor_id, current_user.id)
form.chartconfig_type.data = query_get_chartconfig_by_id(subview.chartconfig_id)
return render_template('form.html.j2', title='Edit subview', url=url_for('forms.edit_subview_form', id=id), form=form, cta='Edit')
@forms.route('/forms/views/add', methods=['GET', 'POST'])
@login_required
@check_confirmed
def add_view_form():
form = ViewForm()
if form.validate_on_submit():
view = View()
form.populate_obj(view)
db.session.add(view)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
return render_template('form.html.j2', title='Add view', url=url_for('forms.add_view_form'), form=form, cta='Add')
@forms.route('/forms/views/edit/<id>', methods=['GET', 'POST'])
@login_required
@check_confirmed
def edit_view_form(id):
view = query_get_view_by_id(id, current_user.id)
form = ViewForm(obj=view)
if form.validate_on_submit():
form.populate_obj(view)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return redirect(url_for('main.userpage', user_slug=current_user.user_slug))
return render_template('form.html.j2', title='Edit view', url=url_for('forms.edit_view_form', id=id), form=form, cta='Edit')
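# A minimal sketch of wiring this blueprint into an application. The setup
# below is illustrative only; a real app also needs the SQLAlchemy `db`, the
# login manager, and the `main` blueprint that the redirects above target.
def _example_register_forms_blueprint():
    from flask import Flask

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'  # required for WTForms CSRF tokens
    app.register_blueprint(forms)
    return app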
|
|
"""
Add simple, flexible caching layer.
Uses [dogpile caching](http://dogpilecache.readthedocs.org/en/latest/index.html).
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '9/26/15'
## Imports
# System
import hashlib
import logging
import os
import time
import uuid
# Third-party
from dogpile.cache import make_region
import redis
# Local
from doekbase.data_api.util import PerfCollector, get_logger
_log = get_logger(__name__)
## Functions and Classes
class Cache(object):
def __init__(self):
self.region = None
def __getattr__(self, item):
return getattr(self.region, item)
class RedisCache(Cache):
def __init__(self, **kwargs):
super(self.__class__, self).__init__()
self.region = get_redis_region(**kwargs)
class DBMCache(Cache):
def __init__(self, **kwargs):
super(self.__class__, self).__init__()
self.region = get_dbm_region(**kwargs)
class NullCache(Cache):
def __init__(self, **kwargs):
super(self.__class__, self).__init__()
self.region = get_null_region()
def get_redis_region(redis_host='localhost', redis_port=6379):
"""Get a new redis cache 'region' object.
Args:
redis_host (str): Hostname or IP for Redis server
redis_port (int): Redis server listening port
Returns:
An object, of type CacheRegion
"""
region = make_region().configure(
'dogpile.cache.redis',
arguments={
'host': redis_host,
'port': redis_port,
'db': 0,
'redis_expiration_time': 60 * 60 * 2, # 2 hours
'distributed_lock': True
}
)
return region
def get_dbm_region(path='/tmp', name=''):
"""Get a new anydbm (DBM) cache 'region' object.
Args:
path (str): Path to directory with cache file
name (str): Name of cache file. if empty a random name
will be generated.
Returns:
An object, of type CacheRegion
"""
if not name:
name = str(uuid.uuid1())
filename = os.path.join(path, name)
region = make_region().configure(
'dogpile.cache.dbm',
arguments={
'filename': filename
}
)
return region
def get_null_region():
"""Region for a "NULL" cache that doesn't really cache at all.
Returns:
(CacheRegion) object
"""
return make_region().configure('dogpile.cache.null')
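# A small usage sketch of the CacheRegion interface shared by the factory
# functions above. The key and the `expensive_lookup` creator are
# illustrative; `get`, `set`, and `get_or_create` are standard dogpile.cache
# region methods.
def _example_region_usage():
    region = get_null_region()  # or get_dbm_region() / get_redis_region()

    def expensive_lookup():
        return {'answer': 42}

    # Returns the cached value if present; otherwise calls the creator and
    # caches its result (a no-op for the null backend).
    value = region.get_or_create('demo:key', expensive_lookup)
    region.set('demo:key', value)
    return region.get('demo:key')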
class ObjectCache(object):
"""Caching for ObjectAPI.
This class provides some basic performance information
for each of its operations.
"""
# Maximum timeout to fetch a cached value, in seconds
MAX_FETCH_TIMEOUT = 5
# You can set these at the class level, and they will
# be used for parameters of the same name to the constructor
# if those parameters are empty (None).
cache_class = NullCache #: Class for cache backend
cache_params = {} #: Constructor parameters for cache backend
def __init__(self, ref, stats=None, cache_class=None, cache_params=None, is_public=True):
"""Constructor.
Args:
ref (str): Key for caching
stats (PerfCollector): Shared statistics object
cache_class (class): Subclass of `Cache`
cache_params (dict): Parameters for cache constructor
"""
self._key = ref
self._public = is_public
# init performance statistics
self._stats = stats or PerfCollector(self.__class__.__name__)
self._stats.start_event('cache.init', self._key)
# init cache
cc = cache_class or self.cache_class
cp = cache_params or self.cache_params
self._cache = cc(**cp) # workers of the world unite!
self._stats.end_event('cache.init', self._key)
_log.debug('ObjectCache.init.end cache_class={}'.format(
cc.__name__))
def get_derived_data(self, parent_method, name):
key = self._key + '::' + name # store separately from 'raw' data
self._stats.start_event('cache.get_derived_data', key)
data = self._cache.get_or_create(key, parent_method,
should_cache_fn=self._should_cache)
self._stats.end_event('cache.get_derived_data', key)
return data
def get_data(self, parent_method):
"""Get data from cache or the callee's method.
"""
self._stats.start_event('cache.get_data', self._key)
data = self._cache.get_or_create(self._key, parent_method)
self._stats.end_event('cache.get_data', self._key)
return data
def get_data_subset(self, parent_method, path_list=None):
"""Get data subset from cache or the callee's method.
"""
self._stats.start_event('cache.get_data_subset', self._key)
# save a little time for a no-op
if path_list is None:
self._stats.end_event('cache.get_data_subset', self._key,
msg='empty-path-list')
return {}
# create unique key for object + path
key = '{}:{}'.format(self._key, self.path_hash(path_list))
# creator function, currying path_list arg.
creator = lambda : parent_method(path_list=path_list)
# get from cache, or create
data = self.cache_get_or_create(key, creator)
self._stats.end_event('cache.get_data_subset', self._key)
return data
def cache_get_or_create(self, key, creator):
"""Get from cache, or create, with extra logic to handle
a Redis server that is not yet fully up and running.
Args:
key (str): Cache item key
creator (function): Called to create the item if not found
Return:
            (object) The cached or newly created value, returned unless
                MAX_FETCH_TIMEOUT seconds is exceeded
Raises:
RuntimeError: on timeout
"""
kw = dict(should_cache_fn=self._should_cache)
data, total_sleep = None, 0
while data is None and total_sleep < self.MAX_FETCH_TIMEOUT:
try:
data = self._cache.get_or_create(key, creator, **kw)
except redis.BusyLoadingError:
                _log.warning('Redis is busy, sleep for 0.1s and try again')
time.sleep(0.1)
total_sleep += 0.1
if data is None and total_sleep >= self.MAX_FETCH_TIMEOUT:
raise RuntimeError('Timeout while fetching {} from cache'
.format(key))
return data
def _should_cache(self, data):
"""Whether this data should be cached, or fetched new every time.
Args:
data (dict): Result from ``get_data`` or ``get_data_subset`` in
:class:`doekbase.data_api.core.ObjectAPI`.
Return:
(bool) True if this object should be cached, False otherwise.
"""
result = self._public
_log.debug("should_cache result={:d}".format(int(result)))
return result
@staticmethod
def path_hash(plist):
        return hashlib.sha1(';'.join(plist).encode('utf-8')).hexdigest()
@property
def stats(self):
return self._stats
# @staticmethod
# def extract_paths(data, path_list):
# """Extract all matching paths from `path_list` that
# are found in `data`.
#
# Note: Not used right now, since all retrievals of data by
# subset use the full path as the key (thus caching the exact
# subset of data, and not needing to subset the object manually)
#
# Args:
# data (dict): Source data
# path_list (list): List of path strings, which use a '/'
# separator between items of the path.
# Return:
# (dict) All data subsets matching the paths
# """
# result = {}
# # Extract data for each path in path_list
# for p in path_list:
# extracted = {} # create extracted path
# cur_ex = extracted # current position in extracted path
# path = p.split('/') # split path into its parts
# # Traverse nodes matching path in `data`, building the
# # nested dict in `extracted` as we go. Stop on missing nodes.
# cur_data, had_path = data, True
# # Loop over all (internal) nodes in the path
# for node in path[:-1]:
# # If the current node is not found, or it is a leaf,
# # then this path is not in the data: stop.
# if not node in cur_data or \
# not isinstance(cur_data[node], dict):
# had_path = False
# break
# cur_ex[node] = {} # create nested dict
# cur_ex = cur_ex[node] # descend in extracted path
# cur_data = cur_data[node] # descend in data
# # Done with nodes, now let's look for the leaf
# leaf = path[-1]
# # If the full path was not in data, go to next path
# if not had_path or not leaf in cur_data:
# continue
# cur_ex[leaf] = cur_data[leaf] # copy leaf to extracted path
# result.update(extracted) # add extracted path to result
# print("@@ update result with {}: NEW VALUE = {}".format(extracted,
# result))
# # Repeat this process with the next path
# print("@@ return result: {}".format(result))
# return result
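# An illustrative usage sketch for ObjectCache based only on the class above.
# The object reference string, cache file name, and `fetch_object` callable
# are placeholder assumptions standing in for the real ObjectAPI plumbing.
def _example_object_cache_usage():
    # Class-level configuration: choose a backend and its constructor params.
    ObjectCache.cache_class = DBMCache
    ObjectCache.cache_params = {'path': '/tmp', 'name': 'data_api_demo.dbm'}

    def fetch_object():
        # Stand-in for the (expensive) data retrieval done by ObjectAPI.
        return {'data': [1, 2, 3]}

    oc = ObjectCache('1019/4/1', is_public=True)
    first = oc.get_data(fetch_object)   # computed, then stored in the cache
    second = oc.get_data(fetch_object)  # served from the DBM cache file
    return first == second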
|
|
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
from lxml import builder
from lxml import etree as ET
from oslo_concurrency import processutils
from oslo_log import log
import six
from manila.common import constants as const
from manila import exception
from manila.i18n import _, _LI, _LW
from manila.share.drivers.emc.plugins.vnx import connector
from manila.share.drivers.emc.plugins.vnx import constants
from manila.share.drivers.emc.plugins.vnx import utils as vnx_utils
from manila.share.drivers.emc.plugins.vnx import xml_api_parser as parser
from manila import utils
LOG = log.getLogger(__name__)
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class StorageObjectManager(object):
def __init__(self, configuration):
self.context = dict()
self.connectors = dict()
self.connectors['XML'] = connector.XMLAPIConnector(configuration)
self.connectors['SSH'] = connector.SSHConnector(configuration)
elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE})
xml_parser = parser.XMLAPIParser()
obj_types = StorageObject.__subclasses__() # pylint: disable=E1101
        for item in obj_types:
            key = item.__name__
            # Instantiate each StorageObject subclass with the shared
            # connectors, element maker, and parser.
            self.context[key] = item(self.connectors,
                                     elt_maker,
                                     xml_parser,
                                     self)
def getStorageContext(self, type):
if type in self.context:
return self.context[type]
else:
message = (_("Invalid storage object type %s.") % type)
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
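# An illustrative sketch of how the manager hands out storage contexts.
# `configuration` stands for the driver's VNX configuration object (an
# assumption here); the context names and the FileSystem.create signature
# come from the classes defined below.
def _example_storage_contexts(configuration):
    manager = StorageObjectManager(configuration)

    filesystem = manager.getStorageContext('FileSystem')
    pool = manager.getStorageContext('StoragePool')

    # Create a file system on a VDM, then query it back. The size value is
    # passed through to the backend unchanged.
    filesystem.create(name='fs_demo', size=2048,
                      pool_name='Pool_demo', mover_name='vdm_demo')
    status, info = filesystem.get('fs_demo')
    return status, info, pool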
class StorageObject(object):
def __init__(self, conn, elt_maker, xml_parser, manager):
self.conn = conn
self.elt_maker = elt_maker
self.xml_parser = xml_parser
self.manager = manager
self.xml_retry = False
self.ssh_retry_patterns = [
(
constants.SSH_DEFAULT_RETRY_PATTERN,
exception.EMCVnxLockRequiredException()
),
]
def _translate_response(self, response):
"""Translate different status to ok/error status."""
if (constants.STATUS_OK == response['maxSeverity'] or
constants.STATUS_ERROR == response['maxSeverity']):
return
old_Severity = response['maxSeverity']
if response['maxSeverity'] in (constants.STATUS_DEBUG,
constants.STATUS_INFO):
response['maxSeverity'] = constants.STATUS_OK
LOG.warning(_LW("Translated status from %(old)s to %(new)s. "
"Message: %(info)s."),
{'old': old_Severity,
'new': response['maxSeverity'],
'info': response})
def _response_validation(self, response, error_code):
"""Validates whether a response includes a certain error code."""
msg_codes = self._get_problem_message_codes(response['problems'])
for code in msg_codes:
if code == error_code:
return True
return False
def _get_problem_message_codes(self, problems):
message_codes = []
for problem in problems:
if 'messageCode' in problem:
message_codes.append(problem['messageCode'])
return message_codes
def _get_problem_messages(self, problems):
messages = []
for problem in problems:
if 'message' in problem:
messages.append(problem['message'])
return messages
def _get_problem_diags(self, problems):
diags = []
for problem in problems:
if 'Diagnostics' in problem:
diags.append(problem['Diagnostics'])
return diags
def _build_query_package(self, body):
return self.elt_maker.RequestPacket(
self.elt_maker.Request(
self.elt_maker.Query(body)
)
)
def _build_task_package(self, body):
return self.elt_maker.RequestPacket(
self.elt_maker.Request(
self.elt_maker.StartTask(body, timeout='300')
)
)
@utils.retry(exception.EMCVnxLockRequiredException)
def _send_request(self, req):
req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8')
rsp_xml = self.conn['XML'].request(str(req_xml))
response = self.xml_parser.parse(rsp_xml)
self._translate_response(response)
if (response['maxSeverity'] != constants.STATUS_OK and
self._response_validation(response,
constants.MSG_CODE_RETRY)):
raise exception.EMCVnxLockRequiredException
return response
@utils.retry(exception.EMCVnxLockRequiredException)
def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False):
"""Execute NAS command via SSH.
        :param retry_patterns: list of tuples, where each tuple contains a
            regular expression and an exception to raise when it matches.
:param check_exit_code: Boolean. Raise
processutils.ProcessExecutionError if the command failed to
execute and this parameter is set to True.
"""
if retry_patterns is None:
retry_patterns = self.ssh_retry_patterns
try:
out, err = self.conn['SSH'].run_ssh(cmd, check_exit_code)
except processutils.ProcessExecutionError as e:
for pattern in retry_patterns:
if re.search(pattern[0], e.stdout):
raise pattern[1]
raise e
return out, err
def _copy_properties(self, source, target, property_map, deep_copy=True):
for property in property_map:
if isinstance(property, tuple):
target_key, src_key = property
else:
target_key = src_key = property
if src_key in source:
if deep_copy and isinstance(source[src_key], list):
target[target_key] = copy.deepcopy(source[src_key])
else:
target[target_key] = source[src_key]
else:
target[target_key] = None
def _get_mover_id(self, mover_name, is_vdm):
if is_vdm:
return self.get_context('VDM').get_id(mover_name)
else:
return self.get_context('Mover').get_id(mover_name,
self.xml_retry)
def get_context(self, type):
return self.manager.getStorageContext(type)
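# A condensed sketch of the "invalid mover ID" retry pattern that the
# subclasses below all follow. It is deliberately left as a comment: a real
# module-level subclass would be picked up by StorageObjectManager through
# StorageObject.__subclasses__(). `NewThing` is an illustrative element name.
#
#     @utils.retry(exception.EMCVnxInvalidMoverID)
#     def create(self, name, mover_name, is_vdm=True):
#         mover_id = self._get_mover_id(mover_name, is_vdm)
#         if self.xml_retry:
#             self.xml_retry = False
#         request = self._build_task_package(
#             self.elt_maker.NewThing(mover=mover_id, name=name))
#         response = self._send_request(request)
#         if (self._response_validation(response,
#                                       constants.MSG_INVALID_MOVER_ID) and
#                 not self.xml_retry):
#             # Stale mover ID: force a fresh lookup on the retry.
#             self.xml_retry = True
#             raise exception.EMCVnxInvalidMoverID(id=mover_id)
#         elif constants.STATUS_OK != response['maxSeverity']:
#             raise exception.EMCVnxXMLAPIError(err=response['problems'])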
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class FileSystem(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager)
self.filesystem_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, name, size, pool_name, mover_name, is_vdm=True):
pool_id = self.get_context('StoragePool').get_id(pool_name)
mover_id = self._get_mover_id(mover_name, is_vdm)
if is_vdm:
mover = self.elt_maker.Vdm(vdm=mover_id)
else:
mover = self.elt_maker.Mover(mover=mover_id)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.NewFileSystem(
mover,
self.elt_maker.StoragePool(
pool=pool_id,
size=six.text_type(size),
mayContainSlices='true'
),
name=name
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation(
response, constants.MSG_FILESYSTEM_EXIST):
LOG.warning(_LW("File system %s already exists. "
"Skip the creation."), name)
return
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create file system %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get(self, name):
if name not in self.filesystem_map:
request = self._build_query_package(
self.elt_maker.FileSystemQueryParams(
self.elt_maker.AspectSelection(
fileSystems='true',
fileSystemCapacityInfos='true'
),
self.elt_maker.Alias(name=name)
)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
if self._is_filesystem_nonexistent(response):
return constants.STATUS_NOT_FOUND, response['problems']
else:
return response['maxSeverity'], response['problems']
if not response['objects']:
return constants.STATUS_NOT_FOUND, response['problems']
src = response['objects'][0]
filesystem = {}
property_map = (
'name',
('pools_id', 'storagePools'),
('volume_id', 'volume'),
('size', 'volumeSize'),
('id', 'fileSystem'),
'type',
'dataServicePolicies',
)
self._copy_properties(src, filesystem, property_map)
self.filesystem_map[name] = filesystem
return constants.STATUS_OK, self.filesystem_map[name]
def delete(self, name):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("File system %s not found. Skip the deletion."),
name)
return
elif constants.STATUS_OK != status:
message = (_("Failed to get file system by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
id = self.filesystem_map[name]['id']
request = self._build_task_package(
self.elt_maker.DeleteFileSystem(fileSystem=id)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete file system %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
self.filesystem_map.pop(name)
def extend(self, name, pool_name, new_size):
status, out = self.get(name)
if constants.STATUS_OK != status:
message = (_("Failed to get file system by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
id = out['id']
size = int(out['size'])
if new_size < size:
message = (_("Failed to extend file system %(name)s because new "
"size %(new_size)d is smaller than old size "
"%(size)d.") %
{'name': name, 'new_size': new_size, 'size': size})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
elif new_size == size:
return
pool_id = self.get_context('StoragePool').get_id(pool_name)
request = self._build_task_package(
self.elt_maker.ExtendFileSystem(
self.elt_maker.StoragePool(
pool=pool_id,
size=six.text_type(new_size - size)
),
fileSystem=id,
)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to extend file system %(name)s to new size "
"%(new_size)d. Reason: %(err)s.") %
{'name': name,
'new_size': new_size,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get_id(self, name):
status, out = self.get(name)
if constants.STATUS_OK != status:
message = (_("Failed to get file system by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
return self.filesystem_map[name]['id']
def _is_filesystem_nonexistent(self, response):
"""Translate different status to ok/error status."""
msg_codes = self._get_problem_message_codes(response['problems'])
diags = self._get_problem_diags(response['problems'])
for code, diagnose in zip(msg_codes, diags):
if (code == constants.MSG_FILESYSTEM_NOT_FOUND and
diagnose.find('File system not found.') != -1):
return True
return False
def create_from_snapshot(self, name, snap_name, source_fs_name, pool_name,
mover_name, connect_id):
create_fs_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_fs',
'-name', name,
'-type', 'uxfs',
'-create',
'samesize=' + source_fs_name,
'pool=%s' % pool_name,
'storage=SINGLE',
'worm=off',
'-thin', 'no',
'-option', 'slice=y',
]
self._execute_cmd(create_fs_cmd)
ro_mount_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name,
'-option', 'ro',
name,
'/%s' % name,
]
self._execute_cmd(ro_mount_cmd)
session_name = name + ':' + snap_name
copy_ckpt_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_copy',
'-name', session_name[0:63],
'-source', '-ckpt', snap_name,
'-destination', '-fs', name,
'-interconnect',
'id=%s' % connect_id,
'-overwrite_destination',
'-full_copy',
]
try:
self._execute_cmd(copy_ckpt_cmd, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
message = (_("Failed to copy content from snapshot %(snap)s to "
"file system %(filesystem)s. Reason: %(err)s.") %
{'snap': snap_name,
'filesystem': name,
'err': six.text_type(expt)})
LOG.error(message)
# When an error happens during nas_copy, we need to continue
# deleting the checkpoint of the target file system if it exists.
query_fs_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_fs',
'-info', name,
]
out, err = self._execute_cmd(query_fs_cmd)
re_ckpts = r'ckpts\s*=\s*(.*)\s*'
m = re.search(re_ckpts, out)
if m is not None:
ckpts = m.group(1)
for ckpt in re.split(',', ckpts):
umount_ckpt_cmd = [
'env', 'NAS_DB=/nas',
'/nas/bin/server_umount', mover_name,
'-perm', ckpt,
]
self._execute_cmd(umount_ckpt_cmd)
delete_ckpt_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_fs',
'-delete', ckpt,
'-Force',
]
self._execute_cmd(delete_ckpt_cmd)
rw_mount_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name,
'-option', 'rw',
name,
'/%s' % name,
]
self._execute_cmd(rw_mount_cmd)
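# An illustrative sketch of the (status, payload) convention returned by
# FileSystem.get and the other query helpers in this module. `fs_ctx` is
# assumed to be the 'FileSystem' context obtained from StorageObjectManager.
def _example_filesystem_get(fs_ctx):
    status, out = fs_ctx.get('fs_demo')
    if status == constants.STATUS_OK:
        return out['id'], out['size']
    elif status == constants.STATUS_NOT_FOUND:
        return None
    # Any other severity carries the problem list from the XML API response.
    raise exception.EMCVnxXMLAPIError(err=out)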
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class StoragePool(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(StoragePool, self).__init__(conn, elt_maker, xml_parser, manager)
self.pool_map = dict()
def get(self, name, force=False):
if name not in self.pool_map or force:
status, out = self.get_all()
if constants.STATUS_OK != status:
return status, out
if name not in self.pool_map:
return constants.STATUS_NOT_FOUND, None
return constants.STATUS_OK, self.pool_map[name]
def get_all(self):
self.pool_map.clear()
request = self._build_query_package(
self.elt_maker.StoragePoolQueryParams()
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
return response['maxSeverity'], response['problems']
if not response['objects']:
return constants.STATUS_NOT_FOUND, response['problems']
for item in response['objects']:
pool = {}
property_map = (
'name',
('movers_id', 'movers'),
('total_size', 'autoSize'),
('used_size', 'usedSize'),
'diskType',
'dataServicePolicies',
('id', 'pool'),
)
self._copy_properties(item, pool, property_map)
self.pool_map[item['name']] = pool
return constants.STATUS_OK, self.pool_map
def get_id(self, name):
status, out = self.get(name)
if constants.STATUS_OK != status:
message = (_("Failed to get storage pool by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
return out['id']
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class MountPoint(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, mount_path, fs_name, mover_name, is_vdm=True):
fs_id = self.get_context('FileSystem').get_id(fs_name)
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.NewMount(
self.elt_maker.MoverOrVdm(
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false',
),
fileSystem=fs_id,
path=mount_path
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._is_mount_point_already_existent(response):
LOG.warning(_LW("Mount Point %(mount)s already exists. "
"Skip the creation."), {'mount': mount_path})
return
elif constants.STATUS_OK != response['maxSeverity']:
message = (_('Failed to create Mount Point %(mount)s for '
'file system %(fs_name)s. Reason: %(err)s.') %
{'mount': mount_path,
'fs_name': fs_name,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
def get(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
request = self._build_query_package(
self.elt_maker.MountQueryParams(
self.elt_maker.MoverOrVdm(
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false'
)
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']:
return response['maxSeverity'], response['objects']
if not response['objects']:
return constants.STATUS_NOT_FOUND, None
else:
return constants.STATUS_OK, response['objects']
@utils.retry(exception.EMCVnxInvalidMoverID)
def delete(self, mount_path, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.DeleteMount(
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false',
path=mount_path
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._is_mount_point_nonexistent(response):
LOG.warning(_LW('Mount point %(mount)s on mover %(mover_name)s '
'not found.'),
{'mount': mount_path, 'mover_name': mover_name})
return
elif constants.STATUS_OK != response['maxSeverity']:
message = (_('Failed to delete mount point %(mount)s on mover '
'%(mover_name)s. Reason: %(err)s.') %
{'mount': mount_path,
'mover_name': mover_name,
'err': response})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def _is_mount_point_nonexistent(self, response):
"""Translate different status to ok/error status."""
msg_codes = self._get_problem_message_codes(response['problems'])
message = self._get_problem_messages(response['problems'])
for code, msg in zip(msg_codes, message):
if ((code == constants.MSG_GENERAL_ERROR and msg.find(
'No such path or invalid operation') != -1) or
code == constants.MSG_INVALID_VDM_ID or
code == constants.MSG_INVALID_MOVER_ID):
return True
return False
def _is_mount_point_already_existent(self, response):
"""Translate different status to ok/error status."""
msg_codes = self._get_problem_message_codes(response['problems'])
message = self._get_problem_messages(response['problems'])
for code, msg in zip(msg_codes, message):
if ((code == constants.MSG_GENERAL_ERROR and msg.find(
'Mount already exists') != -1)):
return True
return False
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class Mover(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(Mover, self).__init__(conn, elt_maker, xml_parser, manager)
self.mover_map = dict()
self.mover_ref_map = dict()
def get_ref(self, name, force=False):
if name not in self.mover_ref_map or force:
self.mover_ref_map.clear()
request = self._build_query_package(
self.elt_maker.MoverQueryParams(
self.elt_maker.AspectSelection(movers='true')
)
)
response = self._send_request(request)
if constants.STATUS_ERROR == response['maxSeverity']:
return response['maxSeverity'], response['problems']
for item in response['objects']:
mover = {}
property_map = ('name', ('id', 'mover'))
self._copy_properties(item, mover, property_map)
if mover:
self.mover_ref_map[mover['name']] = mover
if (name not in self.mover_ref_map or
self.mover_ref_map[name]['id'] == ''):
return constants.STATUS_NOT_FOUND, None
return constants.STATUS_OK, self.mover_ref_map[name]
def get(self, name, force=False):
if name not in self.mover_map or force:
if name in self.mover_ref_map and not force:
mover_id = self.mover_ref_map[name]['id']
else:
mover_id = self.get_id(name, force)
if name in self.mover_map:
self.mover_map.pop(name)
request = self._build_query_package(
self.elt_maker.MoverQueryParams(
self.elt_maker.AspectSelection(
moverDeduplicationSettings='true',
moverDnsDomains='true',
moverInterfaces='true',
moverNetworkDevices='true',
moverNisDomains='true',
moverRoutes='true',
movers='true',
moverStatuses='true'
),
mover=mover_id
)
)
response = self._send_request(request)
if constants.STATUS_ERROR == response['maxSeverity']:
return response['maxSeverity'], response['problems']
if not response['objects']:
return constants.STATUS_NOT_FOUND, response['problems']
mover = {}
src = response['objects'][0]
property_map = (
'name',
('id', 'mover'),
('Status', 'maxSeverity'),
'version',
'uptime',
'role',
('interfaces', 'MoverInterface'),
('devices', 'LogicalNetworkDevice'),
('dns_domain', 'MoverDnsDomain'),
)
self._copy_properties(src, mover, property_map)
internal_devices = []
if mover['interfaces']:
for interface in mover['interfaces']:
if self._is_internal_device(interface['device']):
internal_devices.append(interface)
mover['interfaces'] = [var for var in mover['interfaces'] if
var not in internal_devices]
self.mover_map[name] = mover
return constants.STATUS_OK, self.mover_map[name]
def get_id(self, name, force=False):
status, mover_ref = self.get_ref(name, force)
if constants.STATUS_OK != status:
message = (_("Failed to get mover by name %(name)s.") %
{'name': name})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
return mover_ref['id']
def _is_internal_device(self, device):
for device_type in ('mge', 'fxg', 'tks', 'fsn'):
if device.find(device_type) == 0:
return True
return False
def get_interconnect_id(self, source, destination):
header = [
'id',
'name',
'source_server',
'destination_system',
'destination_server',
]
conn_id = None
command_nas_cel = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_cel',
'-interconnect', '-l',
]
out, err = self._execute_cmd(command_nas_cel)
lines = out.strip().split('\n')
for line in lines:
if line.strip().split() == header:
LOG.info(_LI('Found the header of the command '
'/nas/bin/nas_cel -interconnect -l.'))
else:
interconn = line.strip().split()
if interconn[2] == source and interconn[4] == destination:
conn_id = interconn[0]
return conn_id
def get_physical_devices(self, mover_name):
physical_network_devices = []
cmd_sysconfig = [
'env', 'NAS_DB=/nas', '/nas/bin/server_sysconfig', mover_name,
'-pci'
]
out, err = self._execute_cmd(cmd_sysconfig)
re_pattern = ('0:\s*(?P<name>\S+)\s*IRQ:\s*(?P<irq>\d+)\n'
'.*\n'
'\s*Link:\s*(?P<link>[A-Za-z]+)')
for device in re.finditer(re_pattern, out):
if 'Up' in device.group('link'):
physical_network_devices.append(device.group('name'))
return physical_network_devices
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class VDM(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(VDM, self).__init__(conn, elt_maker, xml_parser, manager)
self.vdm_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, name, mover_name):
mover_id = self._get_mover_id(mover_name, False)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.NewVdm(mover=mover_id, name=name)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation(response, constants.MSG_VDM_EXIST):
LOG.warning(_LW("VDM %(name)s already exists. Skip the creation."),
{'name': name})
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create VDM %(name)s on mover "
"%(mover_name)s. Reason: %(err)s.") %
{'name': name,
'mover_name': mover_name,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get(self, name):
if name not in self.vdm_map:
request = self._build_query_package(
self.elt_maker.VdmQueryParams()
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
return response['maxSeverity'], response['problems']
elif not response['objects']:
return constants.STATUS_NOT_FOUND, response['problems']
for item in response['objects']:
vdm = {}
property_map = (
'name',
('id', 'vdm'),
'state',
('host_mover_id', 'mover'),
('interfaces', 'Interfaces'),
)
self._copy_properties(item, vdm, property_map)
self.vdm_map[item['name']] = vdm
if name not in self.vdm_map:
return constants.STATUS_NOT_FOUND, None
return constants.STATUS_OK, self.vdm_map[name]
def delete(self, name):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("VDM %s not found. Skip the deletion."),
name)
return
elif constants.STATUS_OK != status:
message = (_("Failed to get VDM by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
vdm_id = self.vdm_map[name]['id']
request = self._build_task_package(
self.elt_maker.DeleteVdm(vdm=vdm_id)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete VDM %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
self.vdm_map.pop(name)
def get_id(self, name):
status, vdm = self.get(name)
if constants.STATUS_OK != status:
message = (_("Failed to get VDM by name %(name)s.") %
{'name': name})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
return vdm['id']
def attach_nfs_interface(self, vdm_name, if_name):
command_attach_nfs_interface = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_server',
'-vdm', vdm_name,
'-attach', if_name,
]
self._execute_cmd(command_attach_nfs_interface)
def detach_nfs_interface(self, vdm_name, if_name):
command_detach_nfs_interface = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_server',
'-vdm', vdm_name,
'-detach', if_name,
]
try:
self._execute_cmd(command_detach_nfs_interface,
check_exit_code=True)
except processutils.ProcessExecutionError:
interfaces = self.get_interfaces(vdm_name)
if if_name not in interfaces['nfs']:
LOG.debug("Failed to detach interface %(interface)s "
"from mover %(mover_name)s.",
{'interface': if_name, 'mover_name': vdm_name})
else:
message = (_("Failed to detach interface %(interface)s "
"from mover %(mover_name)s.") %
{'interface': if_name, 'mover_name': vdm_name})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get_interfaces(self, vdm_name):
interfaces = {
'cifs': [],
'nfs': [],
}
re_pattern = ('Interfaces to services mapping:'
'\s*(?P<interfaces>(\s*interface=.*)*)')
command_get_interfaces = [
'env', 'NAS_DB=/nas', '/nas/bin/nas_server',
'-i',
'-vdm', vdm_name,
]
out, err = self._execute_cmd(command_get_interfaces)
m = re.search(re_pattern, out)
if m:
if_list = m.group('interfaces').split('\n')
for i in if_list:
m_if = re.search('\s*interface=(?P<if>.*)\s*:'
'\s*(?P<type>.*)\s*', i)
if m_if:
if_name = m_if.group('if').strip()
if 'cifs' == m_if.group('type') and if_name != '':
interfaces['cifs'].append(if_name)
elif 'vdm' == m_if.group('type') and if_name != '':
interfaces['nfs'].append(if_name)
return interfaces
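# An illustrative check of the interface-mapping parser above. The sample
# `nas_server -i -vdm` output is synthetic, shaped only to match the regular
# expressions used in VDM.get_interfaces; real output may contain more fields.
def _example_parse_vdm_interfaces():
    sample_out = (
        'Interfaces to services mapping:\n'
        ' interface=10-0-0-1 :cifs\n'
        ' interface=10-0-0-2 :vdm\n'
    )
    m = re.search('Interfaces to services mapping:'
                  '\s*(?P<interfaces>(\s*interface=.*)*)', sample_out)
    if_list = m.group('interfaces').split('\n') if m else []
    parsed = {'cifs': [], 'nfs': []}
    for line in if_list:
        m_if = re.search('\s*interface=(?P<if>.*)\s*:'
                         '\s*(?P<type>.*)\s*', line)
        if m_if:
            name = m_if.group('if').strip()
            if m_if.group('type') == 'cifs' and name:
                parsed['cifs'].append(name)
            elif m_if.group('type') == 'vdm' and name:
                parsed['nfs'].append(name)
    return parsed  # {'cifs': ['10-0-0-1'], 'nfs': ['10-0-0-2']}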
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class Snapshot(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(Snapshot, self).__init__(conn, elt_maker, xml_parser, manager)
self.snap_map = dict()
def create(self, name, fs_name, pool_id, ckpt_size=None):
fs_id = self.get_context('FileSystem').get_id(fs_name)
if ckpt_size:
elt_pool = self.elt_maker.StoragePool(
pool=pool_id,
size=six.text_type(ckpt_size)
)
else:
elt_pool = self.elt_maker.StoragePool(pool=pool_id)
new_ckpt = self.elt_maker.NewCheckpoint(
self.elt_maker.SpaceAllocationMethod(
elt_pool
),
checkpointOf=fs_id,
name=name
)
request = self._build_task_package(new_ckpt)
response = self._send_request(request)
if self._response_validation(response, constants.MSG_SNAP_EXIST):
LOG.warning(_LW("Snapshot %(name)s already exists. "
"Skip the creation."),
{'name': name})
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create snapshot %(name)s on "
"filesystem %(fs_name)s. Reason: %(err)s.") %
{'name': name,
'fs_name': fs_name,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get(self, name):
if name not in self.snap_map:
request = self._build_query_package(
self.elt_maker.CheckpointQueryParams(
self.elt_maker.Alias(name=name)
)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
return response['maxSeverity'], response['problems']
if not response['objects']:
return constants.STATUS_NOT_FOUND, response['problems']
src = response['objects'][0]
snap = {}
property_map = (
'name',
('id', 'checkpoint'),
'checkpointOf',
'state',
)
self._copy_properties(src, snap, property_map)
self.snap_map[name] = snap
return constants.STATUS_OK, self.snap_map[name]
def delete(self, name):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("Snapshot %s not found. Skip the deletion."),
name)
return
elif constants.STATUS_OK != status:
message = (_("Failed to get snapshot by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
chpt_id = self.snap_map[name]['id']
request = self._build_task_package(
self.elt_maker.DeleteCheckpoint(checkpoint=chpt_id)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete snapshot %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
self.snap_map.pop(name)
def get_id(self, name):
status, out = self.get(name)
if constants.STATUS_OK != status:
message = (_("Failed to get snapshot by %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
return self.snap_map[name]['id']
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class MoverInterface(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(MoverInterface, self).__init__(conn, elt_maker, xml_parser,
manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, interface):
# Maximum of 32 characters for mover interface name
name = interface['name']
if len(name) > 32:
name = name[0:31]
device_name = interface['device_name']
ip_addr = interface['ip']
mover_name = interface['mover_name']
net_mask = interface['net_mask']
vlan_id = interface['vlan_id'] if interface['vlan_id'] else -1
mover_id = self._get_mover_id(mover_name, False)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.NewMoverInterface(
device=device_name,
ipAddress=six.text_type(ip_addr),
mover=mover_id,
name=name,
netMask=net_mask,
vlanid=six.text_type(vlan_id)
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation(
response, constants.MSG_INTERFACE_NAME_EXIST):
LOG.warning(_LW("Mover interface name %s already exists. "
"Skip the creation."), name)
return
elif self._response_validation(
response, constants.MSG_INTERFACE_EXIST):
LOG.warning(_LW("Mover interface IP %s already exists. "
"Skip the creation."), ip_addr)
return
elif self._response_validation(
response, constants.MSG_INTERFACE_INVALID_VLAN_ID):
            # When creating a mover interface with the specified vlan id
            # fails, VNX leaves an interface with vlan id 0 in the
# backend. So we should explicitly remove the interface.
try:
self.delete(six.text_type(ip_addr), mover_name)
except exception.EMCVnxXMLAPIError:
pass
message = (_("Invalid vlan id %s. Other interfaces on this "
"subnet are in a different vlan.") % vlan_id)
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create mover interface %(interface)s. "
"Reason: %(err)s.") %
{'interface': interface,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get(self, name, mover_name):
# Maximum of 32 characters for mover interface name
if len(name) > 32:
name = name[0:31]
status, mover = self.manager.getStorageContext('Mover').get(
mover_name, True)
if constants.STATUS_OK == status:
for interface in mover['interfaces']:
if name == interface['name']:
return constants.STATUS_OK, interface
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCVnxInvalidMoverID)
def delete(self, ip_addr, mover_name):
mover_id = self._get_mover_id(mover_name, False)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.DeleteMoverInterface(
ipAddress=six.text_type(ip_addr),
mover=mover_id
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._response_validation(
response, constants.MSG_INTERFACE_NON_EXISTENT):
LOG.warning(_LW("Mover interface %s not found. "
"Skip the deletion."), ip_addr)
return
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete mover interface %(ip)s on mover "
"%(mover)s. Reason: %(err)s.") %
{'ip': ip_addr,
'mover': mover_name,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class DNSDomain(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager)
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, mover_name, name, servers, protocol='udp'):
mover_id = self._get_mover_id(mover_name, False)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.NewMoverDnsDomain(
mover=mover_id,
name=name,
servers=servers,
protocol=protocol
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create DNS domain %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
def delete(self, mover_name, name):
mover_id = self._get_mover_id(mover_name, False)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.DeleteMoverDnsDomain(
mover=mover_id,
name=name
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']:
LOG.warning(_LW("Failed to delete DNS domain %(name)s. "
"Reason: %(err)s."),
{'name': name, 'err': response['problems']})
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class CIFSServer(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_server_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, server_args):
compName = server_args['name']
# Maximum of 14 characters for netBIOS name
name = server_args['name'][-14:]
# Maximum of 12 characters for alias name
alias_name = server_args['name'][-12:]
interfaces = server_args['interface_ip']
domain_name = server_args['domain_name']
user_name = server_args['user_name']
password = server_args['password']
mover_name = server_args['mover_name']
is_vdm = server_args['is_vdm']
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
alias_name_list = [self.elt_maker.li(alias_name)]
request = self._build_task_package(
self.elt_maker.NewW2KCifsServer(
self.elt_maker.MoverOrVdm(
mover=mover_id,
moverIdIsVdm='true' if server_args['is_vdm'] else 'false'
),
self.elt_maker.Aliases(*alias_name_list),
self.elt_maker.JoinDomain(userName=user_name,
password=password),
compName=compName,
domain=domain_name,
interfaces=interfaces,
name=name
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
if constants.STATUS_OK != response['maxSeverity']:
status, out = self.get(compName, mover_name, is_vdm)
if constants.STATUS_OK == status and out['domainJoined'] == 'true':
return
else:
message = (_("Failed to create CIFS server %(name)s. "
"Reason: %(err)s.") %
{'name': name,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@utils.retry(exception.EMCVnxInvalidMoverID)
def get_all(self, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
request = self._build_query_package(
self.elt_maker.CifsServerQueryParams(
self.elt_maker.MoverOrVdm(
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false'
)
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']:
return response['maxSeverity'], response['objects']
if mover_name in self.cifs_server_map:
self.cifs_server_map.pop(mover_name)
self.cifs_server_map[mover_name] = dict()
for item in response['objects']:
self.cifs_server_map[mover_name][item['compName'].lower()] = item
return constants.STATUS_OK, self.cifs_server_map[mover_name]
def get(self, name, mover_name, is_vdm=True, force=False):
# name is compName
name = name.lower()
if (mover_name in self.cifs_server_map and
name in self.cifs_server_map[mover_name]) and not force:
return constants.STATUS_OK, self.cifs_server_map[mover_name][name]
self.get_all(mover_name, is_vdm)
if mover_name in self.cifs_server_map:
for compName, server in self.cifs_server_map[mover_name].items():
if name == compName:
return constants.STATUS_OK, server
return constants.STATUS_NOT_FOUND, None
@utils.retry(exception.EMCVnxInvalidMoverID)
def modify(self, server_args):
"""Make CIFS server join or un-join the domain.
:param server_args: Dictionary for CIFS server modification
name: CIFS server name instead of compName
join_domain: True for joining the domain, false for un-joining
user_name: User name under which the domain is joined
password: Password associated with the user name
mover_name: mover or VDM name
is_vdm: Boolean to indicate mover or VDM
:raises exception.EMCVnxXMLAPIError: if modification fails.
"""
name = server_args['name']
join_domain = server_args['join_domain']
user_name = server_args['user_name']
password = server_args['password']
mover_name = server_args['mover_name']
if 'is_vdm' in server_args.keys():
is_vdm = server_args['is_vdm']
else:
is_vdm = True
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
request = self._build_task_package(
self.elt_maker.ModifyW2KCifsServer(
self.elt_maker.DomainSetting(
joinDomain='true' if join_domain else 'false',
password=password,
userName=user_name,
),
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false',
name=name
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif self._ignore_modification_error(response, join_domain):
return
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to modify CIFS server %(name)s. "
"Reason: %(err)s.") %
{'name': name,
'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def _ignore_modification_error(self, response, join_domain):
if self._response_validation(response, constants.MSG_JOIN_DOMAIN):
return join_domain
elif self._response_validation(response, constants.MSG_UNJOIN_DOMAIN):
return not join_domain
return False
def delete(self, computer_name, mover_name, is_vdm=True):
try:
status, out = self.get(
computer_name.lower(), mover_name, is_vdm, self.xml_retry)
if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s "
"not found. Skip the deletion."),
{'name': computer_name, 'mover_name': mover_name})
return
except exception.EMCVnxXMLAPIError:
LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s "
"not found. Skip the deletion."),
{'name': computer_name, 'mover_name': mover_name})
return
server_name = out['name']
mover_id = self._get_mover_id(mover_name, is_vdm)
request = self._build_task_package(
self.elt_maker.DeleteCifsServer(
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false',
name=server_name
)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete CIFS server %(name)s. "
"Reason: %(err)s.") %
{'name': computer_name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
self.cifs_server_map[mover_name].pop(computer_name)
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class CIFSShare(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager)
self.cifs_share_map = dict()
@utils.retry(exception.EMCVnxInvalidMoverID)
def create(self, name, server_name, mover_name, is_vdm=True):
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
share_path = '/' + name
request = self._build_task_package(
self.elt_maker.NewCifsShare(
self.elt_maker.MoverOrVdm(
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false'
),
self.elt_maker.CifsServers(self.elt_maker.li(server_name)),
name=name,
path=share_path
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to create file share %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def get(self, name):
if name not in self.cifs_share_map:
request = self._build_query_package(
self.elt_maker.CifsShareQueryParams(name=name)
)
response = self._send_request(request)
if constants.STATUS_OK != response['maxSeverity']:
return response['maxSeverity'], response['problems']
if not response['objects']:
return constants.STATUS_NOT_FOUND, None
self.cifs_share_map[name] = response['objects'][0]
return constants.STATUS_OK, self.cifs_share_map[name]
@utils.retry(exception.EMCVnxInvalidMoverID)
def delete(self, name, mover_name, is_vdm=True):
status, out = self.get(name)
if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("CIFS share %s not found. Skip the deletion."),
name)
return
elif constants.STATUS_OK != status:
message = (_("Failed to get CIFS share by name %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': out})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
mover_id = self._get_mover_id(mover_name, is_vdm)
if self.xml_retry:
self.xml_retry = False
netbios_names = self.cifs_share_map[name]['CifsServers']
request = self._build_task_package(
self.elt_maker.DeleteCifsShare(
self.elt_maker.CifsServers(*map(lambda a: self.elt_maker.li(a),
netbios_names)),
mover=mover_id,
moverIdIsVdm='true' if is_vdm else 'false',
name=name
)
)
response = self._send_request(request)
if (self._response_validation(response,
constants.MSG_INVALID_MOVER_ID) and
not self.xml_retry):
self.xml_retry = True
raise exception.EMCVnxInvalidMoverID(id=mover_id)
elif constants.STATUS_OK != response['maxSeverity']:
message = (_("Failed to delete file system %(name)s. "
"Reason: %(err)s.") %
{'name': name, 'err': response['problems']})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
self.cifs_share_map.pop(name)
def disable_share_access(self, share_name, mover_name):
cmd_str = 'sharesd %s set noaccess' % share_name
disable_access = [
'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name,
'-v', "%s" % cmd_str,
]
try:
self._execute_cmd(disable_access, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
message = (_('Failed to disable the access to CIFS share '
'%(name)s. Reason: %(err)s.') %
{'name': share_name, 'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def allow_share_access(self, mover_name, share_name, user_name, domain,
access=constants.CIFS_ACL_FULLCONTROL):
account = user_name + "@" + domain
allow_str = ('sharesd %(share_name)s grant %(account)s=%(access)s'
% {'share_name': share_name,
'account': account,
'access': access})
allow_access = [
'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name,
'-v', "%s" % allow_str,
]
try:
self._execute_cmd(allow_access, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' %
{'domain': domain, 'user': user_name}, re.I)
if re.search(dup_msg, expt.stdout):
LOG.warning(_LW("Duplicate access control entry, "
"skipping allow..."))
else:
message = (_('Failed to allow the access %(access)s to '
'CIFS share %(name)s. Reason: %(err)s.') %
{'access': access, 'name': share_name, 'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def deny_share_access(self, mover_name, share_name, user_name, domain,
access=constants.CIFS_ACL_FULLCONTROL):
account = user_name + "@" + domain
revoke_str = ('sharesd %(share_name)s revoke %(account)s=%(access)s'
% {'share_name': share_name,
'account': account,
'access': access})
allow_access = [
'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name,
'-v', "%s" % revoke_str,
]
try:
self._execute_cmd(allow_access, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
not_found_msg = re.compile(
r'No ACE found for %(domain)s\\%(user)s'
% {'domain': domain, 'user': user_name}, re.I)
user_err_msg = re.compile(
r'Cannot get mapping for %(domain)s\\%(user)s'
% {'domain': domain, 'user': user_name}, re.I)
if re.search(not_found_msg, expt.stdout):
LOG.warning(_LW("No access control entry found, "
"skipping deny..."))
elif re.search(user_err_msg, expt.stdout):
LOG.warning(_LW("User not found on domain, skipping deny..."))
else:
message = (_('Failed to deny the access %(access)s to '
'CIFS share %(name)s. Reason: %(err)s.') %
{'access': access, 'name': share_name, 'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
@vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit,
debug_only=True)
class NFSShare(StorageObject):
def __init__(self, conn, elt_maker, xml_parser, manager):
super(NFSShare, self).__init__(conn, elt_maker, xml_parser, manager)
self.nfs_share_map = {}
def create(self, name, mover_name):
share_path = '/' + name
create_nfs_share_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name,
'-option', 'access=-0.0.0.0/0.0.0.0',
share_path,
]
try:
self._execute_cmd(create_nfs_share_cmd, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
message = (_('Failed to create NFS share %(name)s on mover '
'%(mover_name)s. Reason: %(err)s.') %
{'name': name, 'mover_name': mover_name, 'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
def delete(self, name, mover_name):
path = '/' + name
status, out = self.get(name, mover_name)
if constants.STATUS_NOT_FOUND == status:
LOG.warning(_LW("NFS share %s not found. Skip the deletion."),
path)
return
delete_nfs_share_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name,
'-unexport',
'-perm',
path,
]
try:
self._execute_cmd(delete_nfs_share_cmd, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
message = (_('Failed to delete NFS share %(name)s on '
'%(mover_name)s. Reason: %(err)s.') %
{'name': name, 'mover_name': mover_name, 'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
self.nfs_share_map.pop(name)
def get(self, name, mover_name, force=False, check_exit_code=False):
if name in self.nfs_share_map and not force:
return constants.STATUS_OK, self.nfs_share_map[name]
path = '/' + name
nfs_share = {
"mover_name": '',
"path": '',
'AccessHosts': [],
'RwHosts': [],
'RoHosts': [],
'RootHosts': [],
'readOnly': '',
}
nfs_query_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name,
'-P', 'nfs',
'-list', path,
]
try:
out, err = self._execute_cmd(nfs_query_cmd,
check_exit_code=check_exit_code)
except processutils.ProcessExecutionError as expt:
dup_msg = (r'%(mover_name)s : No such file or directory' %
{'mover_name': mover_name})
if re.search(dup_msg, expt.stdout):
LOG.warning(_LW("NFS share %s not found."), name)
return constants.STATUS_NOT_FOUND, None
else:
message = (_('Failed to list NFS share %(name)s on '
'%(mover_name)s. Reason: %(err)s.') %
{'name': name,
'mover_name': mover_name,
'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
        re_exports = r'%s\s*:\s*\nexport\s*(.*)\n' % mover_name
m = re.search(re_exports, out)
if m is not None:
nfs_share['path'] = path
nfs_share['mover_name'] = mover_name
export = m.group(1)
fields = export.split(" ")
for field in fields:
field = field.strip()
if field.startswith('rw='):
nfs_share['RwHosts'] = field[3:].split(":")
elif field.startswith('access='):
nfs_share['AccessHosts'] = field[7:].split(":")
elif field.startswith('root='):
nfs_share['RootHosts'] = field[5:].split(":")
elif field.startswith('ro='):
nfs_share['RoHosts'] = field[3:].split(":")
self.nfs_share_map[name] = nfs_share
else:
return constants.STATUS_NOT_FOUND, None
return constants.STATUS_OK, self.nfs_share_map[name]
def allow_share_access(self, share_name, host_ip, mover_name,
access_level=const.ACCESS_LEVEL_RW):
@utils.synchronized('emc-shareaccess-' + share_name)
def do_allow_access(share_name, host_ip, mover_name, access_level):
status, share = self.get(share_name, mover_name)
if constants.STATUS_NOT_FOUND == status:
message = (_('NFS share %s not found.') % share_name)
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
changed = False
rwhosts = share['RwHosts']
rohosts = share['RoHosts']
if access_level == const.ACCESS_LEVEL_RW:
if host_ip not in rwhosts:
rwhosts.append(host_ip)
changed = True
if host_ip in rohosts:
rohosts.remove(host_ip)
changed = True
if access_level == const.ACCESS_LEVEL_RO:
if host_ip not in rohosts:
rohosts.append(host_ip)
changed = True
if host_ip in rwhosts:
rwhosts.remove(host_ip)
changed = True
roothosts = share['RootHosts']
if host_ip not in roothosts:
roothosts.append(host_ip)
changed = True
accesshosts = share['AccessHosts']
if host_ip not in accesshosts:
accesshosts.append(host_ip)
changed = True
if not changed:
LOG.debug("%(host)s is already in access list of share "
"%(name)s.", {'host': host_ip, 'name': share_name})
else:
path = '/' + share_name
self._set_share_access(path,
mover_name,
rwhosts,
rohosts,
roothosts,
accesshosts)
# Update self.nfs_share_map
self.get(share_name, mover_name, force=True,
check_exit_code=True)
do_allow_access(share_name, host_ip, mover_name, access_level)
def deny_share_access(self, share_name, host_ip, mover_name):
@utils.synchronized('emc-shareaccess-' + share_name)
def do_deny_access(share_name, host_ip, mover_name):
status, share = self.get(share_name, mover_name)
if constants.STATUS_OK != status:
message = (_('Query nfs share %(path)s failed. '
'Reason %(err)s.') %
{'path': share_name, 'err': share})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
changed = False
rwhosts = set(share['RwHosts'])
if host_ip in rwhosts:
rwhosts.remove(host_ip)
changed = True
roothosts = set(share['RootHosts'])
if host_ip in roothosts:
roothosts.remove(host_ip)
changed = True
accesshosts = set(share['AccessHosts'])
if host_ip in accesshosts:
accesshosts.remove(host_ip)
changed = True
rohosts = set(share['RoHosts'])
if host_ip in rohosts:
rohosts.remove(host_ip)
changed = True
if not changed:
LOG.debug("%(host)s is already in access list of share "
"%(name)s.", {'host': host_ip, 'name': share_name})
else:
path = '/' + share_name
self._set_share_access(path,
mover_name,
rwhosts,
rohosts,
roothosts,
accesshosts)
# Update self.nfs_share_map
self.get(share_name, mover_name, force=True,
check_exit_code=True)
do_deny_access(share_name, host_ip, mover_name)
def _set_share_access(self, path, mover_name, rw_hosts, ro_hosts,
root_hosts, access_hosts):
access_str = ('access=%(access)s'
% {'access': ':'.join(access_hosts)})
if root_hosts:
access_str += (',root=%(root)s' % {'root': ':'.join(root_hosts)})
if rw_hosts:
access_str += ',rw=%(rw)s' % {'rw': ':'.join(rw_hosts)}
if ro_hosts:
access_str += ',ro=%(ro)s' % {'ro': ':'.join(ro_hosts)}
set_nfs_share_access_cmd = [
'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name,
'-ignore',
'-option', access_str,
path,
]
try:
self._execute_cmd(set_nfs_share_access_cmd, check_exit_code=True)
except processutils.ProcessExecutionError as expt:
message = (_('Failed to set NFS share %(name)s access on '
'%(mover_name)s. Reason: %(err)s.') %
{'name': path[1:],
'mover_name': mover_name,
'err': expt})
LOG.error(message)
raise exception.EMCVnxXMLAPIError(err=message)
|
|
# repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.exc import (
InvalidGitRepositoryError,
NoSuchPathError,
GitCommandError
)
from git.cmd import (
Git,
handle_process_output
)
from git.refs import (
HEAD,
Head,
Reference,
TagReference,
)
from git.objects import (
Submodule,
RootModule,
Commit
)
from git.util import (
Actor,
finalize_process
)
from git.index import IndexFile
from git.config import GitConfigParser
from git.remote import (
Remote,
add_progress
)
from git.db import (
GitCmdObjectDB,
GitDB
)
from gitdb.util import (
join,
isfile,
hex_to_bin
)
from .fun import (
rev_parse,
is_git_dir,
find_git_dir,
touch,
)
from git.compat import (
text_type,
defenc
)
import os
import sys
import re
DefaultDBType = GitDB
if sys.version_info[:2] < (2, 5):  # python 2.4 compatibility
DefaultDBType = GitCmdObjectDB
# END handle python 2.4
__all__ = ('Repo', )
def _expand_path(p):
return os.path.abspath(os.path.expandvars(os.path.expanduser(p)))
class Repo(object):
"""Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories query
the log.
The following attributes are worth using:
'working_dir' is the working directory of the git command, which is the working tree
directory if available or the .git directory in case of bare repositories
'working_tree_dir' is the working tree directory, but will raise AssertionError
if we are a bare repository.
'git_dir' is the .git repository directory, which is always set."""
DAEMON_EXPORT_FILE = 'git-daemon-export-ok'
__slots__ = ("working_dir", "_working_tree_dir", "git_dir", "_bare", "git", "odb")
# precompiled regex
re_whitespace = re.compile(r'\s+')
re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
re_author_committer_start = re.compile(r'^(author|committer)')
re_tab_full_line = re.compile(r'^\t(.*)$')
# invariants
# represents the configuration level of a configuration file
config_level = ("system", "user", "global", "repository")
# Subclass configuration
# Subclasses may easily bring in their own custom types by placing a constructor or type here
GitCommandWrapperType = Git
def __init__(self, path=None, odbt=DefaultDBType, search_parent_directories=False):
"""Create a new Repo instance
:param path:
the path to either the root git directory or the bare git repo::
repo = Repo("/Users/mtrier/Development/git-python")
repo = Repo("/Users/mtrier/Development/git-python.git")
repo = Repo("~/Development/git-python.git")
repo = Repo("$REPOSITORIES/Development/git-python.git")
:param odbt:
Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects. It will
be used to access all object data
:raise InvalidGitRepositoryError:
:raise NoSuchPathError:
:return: git.Repo """
epath = _expand_path(path or os.getcwd())
self.git = None # should be set for __del__ not to fail in case we raise
if not os.path.exists(epath):
raise NoSuchPathError(epath)
self.working_dir = None
self._working_tree_dir = None
self.git_dir = None
curpath = epath
# walk up the path to find the .git dir
while curpath:
# ABOUT os.path.NORMPATH
# It's important to normalize the paths, as submodules will otherwise initialize their
# repo instances with paths that depend on path-portions that will not exist after being
# removed. It's just cleaner.
if is_git_dir(curpath):
self.git_dir = os.path.normpath(curpath)
self._working_tree_dir = os.path.dirname(self.git_dir)
break
gitpath = find_git_dir(join(curpath, '.git'))
if gitpath is not None:
self.git_dir = os.path.normpath(gitpath)
self._working_tree_dir = curpath
break
if not search_parent_directories:
break
curpath, dummy = os.path.split(curpath)
if not dummy:
break
# END while curpath
if self.git_dir is None:
raise InvalidGitRepositoryError(epath)
self._bare = False
try:
self._bare = self.config_reader("repository").getboolean('core', 'bare')
except Exception:
# lets not assume the option exists, although it should
pass
# adjust the wd in case we are actually bare - we didn't know that
# in the first place
if self._bare:
self._working_tree_dir = None
# END working dir handling
self.working_dir = self._working_tree_dir or self.git_dir
self.git = self.GitCommandWrapperType(self.working_dir)
# special handling, in special times
args = [join(self.git_dir, 'objects')]
if issubclass(odbt, GitCmdObjectDB):
args.append(self.git)
self.odb = odbt(*args)
def __del__(self):
if self.git:
self.git.clear_cache()
def __eq__(self, rhs):
if isinstance(rhs, Repo):
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
return hash(self.git_dir)
# Description property
def _get_description(self):
filename = join(self.git_dir, 'description')
return open(filename, 'rb').read().rstrip().decode(defenc)
def _set_description(self, descr):
filename = join(self.git_dir, 'description')
open(filename, 'wb').write((descr + '\n').encode(defenc))
description = property(_get_description, _set_description,
doc="the project's description")
del _get_description
del _set_description
@property
def working_tree_dir(self):
""":return: The working tree directory of our git repository. If this is a bare repository, None is returned.
"""
return self._working_tree_dir
@property
def bare(self):
""":return: True if the repository is bare"""
return self._bare
@property
def heads(self):
"""A list of ``Head`` objects representing the branch heads in
this repo
:return: ``git.IterableList(Head, ...)``"""
return Head.list_items(self)
@property
def references(self):
"""A list of Reference objects representing tags, heads and remote references.
:return: IterableList(Reference, ...)"""
return Reference.list_items(self)
# alias for references
refs = references
# alias for heads
branches = heads
@property
def index(self):
""":return: IndexFile representing this repository's index."""
return IndexFile(self)
@property
def head(self):
""":return: HEAD Object pointing to the current head reference"""
return HEAD(self, 'HEAD')
@property
def remotes(self):
"""A list of Remote objects allowing to access and manipulate remotes
:return: ``git.IterableList(Remote, ...)``"""
return Remote.list_items(self)
def remote(self, name='origin'):
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
r = Remote(self, name)
if not r.exists():
raise ValueError("Remote named '%s' didn't exist" % name)
return r
#{ Submodules
@property
def submodules(self):
"""
:return: git.IterableList(Submodule, ...) of direct submodules
available from the current head"""
return Submodule.list_items(self)
def submodule(self, name):
""" :return: Submodule with the given name
:raise ValueError: If no such submodule exists"""
try:
return self.submodules[name]
except IndexError:
raise ValueError("Didn't find submodule named %r" % name)
# END exception handling
def create_submodule(self, *args, **kwargs):
"""Create a new submodule
:note: See the documentation of Submodule.add for a description of the
applicable parameters
:return: created submodules"""
return Submodule.add(self, *args, **kwargs)
def iter_submodules(self, *args, **kwargs):
"""An iterator yielding Submodule instances, see Traversable interface
for a description of args and kwargs
:return: Iterator"""
return RootModule(self).traverse(*args, **kwargs)
def submodule_update(self, *args, **kwargs):
"""Update the submodules, keeping the repository consistent as it will
take the previous state into consideration. For more information, please
see the documentation of RootModule.update"""
return RootModule(self).update(*args, **kwargs)
#}END submodules
@property
def tags(self):
"""A list of ``Tag`` objects that are available in this repo
:return: ``git.IterableList(TagReference, ...)`` """
return TagReference.list_items(self)
def tag(self, path):
""":return: TagReference Object, reference pointing to a Commit or Tag
:param path: path to the tag reference, i.e. 0.1.5 or tags/0.1.5 """
return TagReference(self, path)
def create_head(self, path, commit='HEAD', force=False, logmsg=None):
"""Create a new head within the repository.
For more documentation, please see the Head.create method.
:return: newly created Head Reference"""
return Head.create(self, path, commit, force, logmsg)
def delete_head(self, *heads, **kwargs):
"""Delete the given heads
:param kwargs: Additional keyword arguments to be passed to git-branch"""
return Head.delete(self, *heads, **kwargs)
def create_tag(self, path, ref='HEAD', message=None, force=False, **kwargs):
"""Create a new tag reference.
For more documentation, please see the TagReference.create method.
:return: TagReference object """
return TagReference.create(self, path, ref, message, force, **kwargs)
def delete_tag(self, *tags):
"""Delete the given tag references"""
return TagReference.delete(self, *tags)
def create_remote(self, name, url, **kwargs):
"""Create a new remote.
For more information, please see the documentation of the Remote.create
methods
:return: Remote reference"""
return Remote.create(self, name, url, **kwargs)
def delete_remote(self, remote):
"""Delete the given remote."""
return Remote.remove(self, remote)
def _get_config_path(self, config_level):
        # we do not support an absolute path of the gitconfig on windows,
# use the global config instead
if sys.platform == "win32" and config_level == "system":
config_level = "global"
if config_level == "system":
return "/etc/gitconfig"
elif config_level == "user":
config_home = os.environ.get("XDG_CONFIG_HOME") or os.path.join(os.environ.get("HOME", '~'), ".config")
return os.path.normpath(os.path.expanduser(join(config_home, "git", "config")))
elif config_level == "global":
return os.path.normpath(os.path.expanduser("~/.gitconfig"))
elif config_level == "repository":
return os.path.normpath(join(self.git_dir, "config"))
raise ValueError("Invalid configuration level: %r" % config_level)
def config_reader(self, config_level=None):
"""
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
            If None, all applicable levels will be used. Specify a level in case
            you know which exact file you wish to read; this avoids reading
            multiple files unnecessarily.
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used."""
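        # Usage sketch (illustrative only; the section/option names are ordinary
        # git config keys):
        #   email = Repo('.').config_reader().get_value('user', 'email')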
files = None
if config_level is None:
files = [self._get_config_path(f) for f in self.config_level]
else:
files = [self._get_config_path(config_level)]
return GitConfigParser(files, read_only=True)
def config_writer(self, config_level="repository"):
"""
:return:
GitConfigParser allowing to write values of the specified configuration file level.
            Config writers should be retrieved, used to change the configuration, and written
            right away, as they will lock the configuration file in question and prevent others
            from writing to it.
        :param config_level:
            One of the following values
            system = system wide configuration file
            global = user level configuration file
            repository = configuration file for this repository only"""
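        # Usage sketch (illustrative key/value); release() drops the lock on the
        # underlying file once the change has been written:
        #   cw = Repo('.').config_writer()
        #   cw.set_value('user', 'email', 'someone@example.com')
        #   cw.release()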
return GitConfigParser(self._get_config_path(config_level), read_only=False)
def commit(self, rev=None):
"""The Commit object for the specified revision
:param rev: revision specifier, see git-rev-parse for viable options.
:return: ``git.Commit``"""
if rev is None:
return self.head.commit
else:
return self.rev_parse(text_type(rev) + "^0")
def iter_trees(self, *args, **kwargs):
""":return: Iterator yielding Tree objects
:note: Takes all arguments known to iter_commits method"""
return (c.tree for c in self.iter_commits(*args, **kwargs))
def tree(self, rev=None):
"""The Tree object for the given treeish revision
Examples::
repo.tree(repo.heads[0])
:param rev: is a revision pointing to a Treeish ( being a commit or tree )
:return: ``git.Tree``
:note:
If you need a non-root level tree, find it by iterating the root tree. Otherwise
it cannot know about its path relative to the repository root and subsequent
operations might have unexpected results."""
if rev is None:
return self.head.commit.tree
else:
return self.rev_parse(text_type(rev) + "^{tree}")
def iter_commits(self, rev=None, paths='', **kwargs):
"""A list of Commit objects representing the history of a given ref/commit
        :param rev:
            revision specifier, see git-rev-parse for viable options.
            If None, the active branch will be used.
        :param paths:
            is an optional path or a list of paths to limit the returned commits to.
            Commits that do not touch any of the given paths will not be returned.
        :param kwargs:
            Arguments to be passed to git-rev-list - common ones are
            max_count and skip
        :note: to receive only commits between two named revisions, use the
            "revA..revB" revision specifier
        :return: ``git.Commit[]``"""
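        # Usage sketch (assuming ``repo`` is a Repo instance; branch name and
        # count are illustrative):
        #   last_ten = list(repo.iter_commits('master', max_count=10))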
if rev is None:
rev = self.head.commit
return Commit.iter_items(self, rev, paths, **kwargs)
def merge_base(self, *rev, **kwargs):
"""Find the closest common ancestor for the given revision (e.g. Commits, Tags, References, etc)
:param rev: At least two revs to find the common ancestor for.
:param kwargs: Additional arguments to be passed to the repo.git.merge_base() command which does all the work.
:return: A list of Commit objects. If --all was not specified as kwarg, the list will have at max one Commit,
or is empty if no common merge base exists.
:raises ValueError: If not at least two revs are provided
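        Example (illustrative revision names; any two refs known to the repository work)::
            bases = repo.merge_base('master', 'feature/topic')
            ancestor = bases[0] if bases else None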
"""
if len(rev) < 2:
raise ValueError("Please specify at least two revs, got only %i" % len(rev))
# end handle input
res = list()
try:
lines = self.git.merge_base(*rev, **kwargs).splitlines()
except GitCommandError as err:
if err.status == 128:
raise
# end handle invalid rev
# Status code 1 is returned if there is no merge-base
# (see https://github.com/git/git/blob/master/builtin/merge-base.c#L16)
return res
# end exception handling
for line in lines:
res.append(self.commit(line))
# end for each merge-base
return res
def _get_daemon_export(self):
filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
return os.path.exists(filename)
def _set_daemon_export(self, value):
filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
fileexists = os.path.exists(filename)
if value and not fileexists:
touch(filename)
elif not value and fileexists:
os.unlink(filename)
daemon_export = property(_get_daemon_export, _set_daemon_export,
doc="If True, git-daemon may export this repository")
del _get_daemon_export
del _set_daemon_export
def _get_alternates(self):
"""The list of alternates for this repo from which objects can be retrieved
:return: list of strings being pathnames of alternates"""
alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
if os.path.exists(alternates_path):
try:
f = open(alternates_path, 'rb')
alts = f.read().decode(defenc)
finally:
f.close()
return alts.strip().splitlines()
else:
return list()
def _set_alternates(self, alts):
"""Sets the alternates
        :param alts:
is the array of string paths representing the alternates at which
git should look for objects, i.e. /home/user/repo/.git/objects
:raise NoSuchPathError:
:note:
            The method does not check for the existence of the paths in alts
as the caller is responsible."""
alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
if not alts:
if isfile(alternates_path):
os.remove(alternates_path)
else:
try:
f = open(alternates_path, 'wb')
f.write("\n".join(alts).encode(defenc))
finally:
f.close()
# END file handling
# END alts handling
alternates = property(_get_alternates, _set_alternates,
doc="Retrieve a list of alternates paths or set a list paths to be used as alternates")
def is_dirty(self, index=True, working_tree=True, untracked_files=False):
"""
:return:
            ``True`` if the repository is considered dirty. By default it will react
            like a git-status without untracked files, hence it is dirty if the
            index or the working copy have changes."""
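        # Usage sketch: is_dirty(untracked_files=True) additionally reports new,
        # unstaged files (illustrative; the flags mirror the parameters above).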
if self._bare:
# Bare repositories with no associated working directory are
            # always considered to be clean.
return False
# start from the one which is fastest to evaluate
default_args = ('--abbrev=40', '--full-index', '--raw')
if index:
# diff index against HEAD
if isfile(self.index.path) and \
len(self.git.diff('--cached', *default_args)):
return True
# END index handling
if working_tree:
# diff index against working tree
if len(self.git.diff(*default_args)):
return True
# END working tree handling
if untracked_files:
if len(self.untracked_files):
return True
# END untracked files
return False
@property
def untracked_files(self):
"""
:return:
list(str,...)
Files currently untracked as they have not been staged yet. Paths
are relative to the current working directory of the git command.
:note:
ignored files will not appear here, i.e. files mentioned in .gitignore"""
        # make sure we get all files, not only untracked directories
proc = self.git.status(porcelain=True,
untracked_files=True,
as_process=True)
        # Untracked files prefix in porcelain mode
prefix = "?? "
untracked_files = list()
for line in proc.stdout:
line = line.decode(defenc)
if not line.startswith(prefix):
continue
filename = line[len(prefix):].rstrip('\n')
# Special characters are escaped
if filename[0] == filename[-1] == '"':
filename = filename[1:-1].decode('string_escape')
untracked_files.append(filename)
finalize_process(proc)
return untracked_files
@property
def active_branch(self):
"""The name of the currently active branch.
:return: Head to the active branch"""
return self.head.reference
def blame(self, rev, file):
"""The blame information for the given file at the given revision.
        :param rev: revision specifier, see git-rev-parse for viable options.
:return:
list: [git.Commit, list: [<line>]]
A list of tuples associating a Commit object with a list of lines that
changed within the given commit. The Commit objects will be given in order
of appearance."""
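        # Usage sketch (assuming ``repo`` is a Repo instance; the file path is
        # illustrative). Each entry pairs a Commit with the lines blamed on it:
        #   for commit, lines in repo.blame('HEAD', 'setup.py'):
        #       print("%s: %d lines" % (commit.hexsha, len(lines)))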
data = self.git.blame(rev, '--', file, p=True, stdout_as_string=False)
commits = dict()
blames = list()
info = None
keepends = True
for line in data.splitlines(keepends):
try:
line = line.rstrip().decode(defenc)
except UnicodeDecodeError:
firstpart = ''
is_binary = True
else:
                # We don't know where the binary data ends, as it could contain multiple newlines.
                # So we rely on being able to decode the line to tell us what it is.
                # This can absolutely fail even on text files, but even if it does, we should be fine treating it
                # as binary instead
parts = self.re_whitespace.split(line, 1)
firstpart = parts[0]
is_binary = False
# end handle decode of line
if self.re_hexsha_only.search(firstpart):
# handles
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates
# another line of blame with the same data
digits = parts[-1].split(" ")
if len(digits) == 3:
info = {'id': firstpart}
blames.append([None, []])
elif info['id'] != firstpart:
info = {'id': firstpart}
blames.append([commits.get(firstpart), []])
# END blame data initialization
else:
m = self.re_author_committer_start.search(firstpart)
if m:
# handles:
# author Tom Preston-Werner
# author-mail <tom@mojombo.com>
# author-time 1192271832
# author-tz -0700
# committer Tom Preston-Werner
# committer-mail <tom@mojombo.com>
# committer-time 1192271832
# committer-tz -0700 - IGNORED BY US
role = m.group(0)
if firstpart.endswith('-mail'):
info["%s_email" % role] = parts[-1]
elif firstpart.endswith('-time'):
info["%s_date" % role] = int(parts[-1])
elif role == firstpart:
info[role] = parts[-1]
# END distinguish mail,time,name
else:
# handle
# filename lib/grit.rb
# summary add Blob
# <and rest>
if firstpart.startswith('filename'):
info['filename'] = parts[-1]
elif firstpart.startswith('summary'):
info['summary'] = parts[-1]
elif firstpart == '':
if info:
sha = info['id']
c = commits.get(sha)
if c is None:
c = Commit(self, hex_to_bin(sha),
author=Actor._from_string(info['author'] + ' ' + info['author_email']),
authored_date=info['author_date'],
committer=Actor._from_string(
info['committer'] + ' ' + info['committer_email']),
committed_date=info['committer_date'],
message=info['summary'])
commits[sha] = c
# END if commit objects needs initial creation
if not is_binary:
if line and line[0] == '\t':
line = line[1:]
else:
# NOTE: We are actually parsing lines out of binary data, which can lead to the
# binary being split up along the newline separator. We will append this to the blame
# we are currently looking at, even though it should be concatenated with the last line
# we have seen.
pass
# end handle line contents
blames[-1][0] = c
blames[-1][1].append(line)
info = {'id': sha}
# END if we collected commit info
# END distinguish filename,summary,rest
# END distinguish author|committer vs filename,summary,rest
# END distinguish hexsha vs other information
return blames
@classmethod
def init(cls, path=None, mkdir=True, **kwargs):
"""Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
        :param mkdir:
            if specified will create the repository directory if it doesn't
            already exist. Creates the directory with a mode=0755.
            Only effective if a path is explicitly given
        :param kwargs:
keyword arguments serving as additional options to the git-init command
:return: ``git.Repo`` (the newly created repo)"""
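        # Usage sketch (the path is a placeholder):
        #   repo = Repo.init('/tmp/example.git', bare=True)
        #   assert repo.bare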
if path:
path = _expand_path(path)
if mkdir and path and not os.path.exists(path):
os.makedirs(path, 0o755)
# git command automatically chdir into the directory
git = Git(path)
git.init(**kwargs)
return cls(path)
@classmethod
def _clone(cls, git, url, path, odb_default_type, progress, **kwargs):
# special handling for windows for path at which the clone should be
# created.
        # tilde '~' will be expanded to the HOME no matter where the ~ occurs. Hence
# we at least give a proper error instead of letting git fail
prev_cwd = None
prev_path = None
odbt = kwargs.pop('odbt', odb_default_type)
if os.name == 'nt':
if '~' in path:
raise OSError("Git cannot handle the ~ character in path %r correctly" % path)
# on windows, git will think paths like c: are relative and prepend the
# current working dir ( before it fails ). We temporarily adjust the working
# dir to make this actually work
match = re.match("(\w:[/\\\])(.*)", path)
if match:
prev_cwd = os.getcwd()
prev_path = path
drive, rest_of_path = match.groups()
os.chdir(drive)
path = rest_of_path
kwargs['with_keep_cwd'] = True
# END cwd preparation
# END windows handling
try:
proc = git.clone(url, path, with_extended_output=True, as_process=True,
v=True, **add_progress(kwargs, git, progress))
if progress:
handle_process_output(proc, None, progress.new_message_handler(), finalize_process)
else:
finalize_process(proc)
# end handle progress
finally:
if prev_cwd is not None:
os.chdir(prev_cwd)
path = prev_path
# END reset previous working dir
# END bad windows handling
# our git command could have a different working dir than our actual
# environment, hence we prepend its working dir if required
if not os.path.isabs(path) and git.working_dir:
path = join(git._working_dir, path)
        # adjust remotes - there may be operating systems which use backslashes,
        # these might be given as initial paths, but when handling the config file
        # that contains the remote from which we were cloned, git stops liking it
        # as it will escape the backslashes. Hence we undo the escaping just to be
        # sure
repo = cls(os.path.abspath(path), odbt=odbt)
if repo.remotes:
writer = repo.remotes[0].config_writer
writer.set_value('url', repo.remotes[0].url.replace("\\\\", "\\").replace("\\", "/"))
# PY3: be sure cleanup is performed and lock is released
writer.release()
# END handle remote repo
return repo
def clone(self, path, progress=None, **kwargs):
"""Create a clone from this repository.
:param path: is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param kwargs:
* odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
* All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo)"""
return self._clone(self.git, self.git_dir, path, type(self.odb), progress, **kwargs)
@classmethod
def clone_from(cls, url, to_path, progress=None, **kwargs):
"""Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress: See 'git.remote.Remote.push'.
:param kwargs: see the ``clone`` method
:return: Repo instance pointing to the cloned directory"""
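        # Usage sketch (URL and target directory are placeholders):
        #   repo = Repo.clone_from('https://example.com/some/repo.git', '/tmp/repo')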
return cls._clone(Git(os.getcwd()), url, to_path, GitCmdObjectDB, progress, **kwargs)
def archive(self, ostream, treeish=None, prefix=None, **kwargs):
"""Archive the tree at the given revision.
        :param ostream: file compatible stream object to which the archive will be written as bytes
        :param treeish: is the treeish name/id, defaults to active branch
        :param prefix: is the optional prefix to prepend to each filename in the archive
        :param kwargs: Additional arguments passed to git-archive
            * Use the 'format' argument to define the kind of format. Use
              specialized ostreams to write any format supported by python.
            * You may specify the special **path** keyword, which may either be a repository-relative
              path to a directory or file to place into the archive, or a list or tuple of multiple paths.
:raise GitCommandError: in case something went wrong
:return: self"""
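        # Usage sketch (assuming ``repo`` is a Repo instance; output path and
        # format are illustrative):
        #   with open('/tmp/repo.tar', 'wb') as fp:
        #       repo.archive(fp, format='tar')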
if treeish is None:
treeish = self.head.commit
if prefix and 'prefix' not in kwargs:
kwargs['prefix'] = prefix
kwargs['output_stream'] = ostream
path = kwargs.pop('path', list())
if not isinstance(path, (tuple, list)):
path = [path]
# end assure paths is list
self.git.archive(treeish, *path, **kwargs)
return self
def has_separate_working_tree(self):
"""
        :return: True if our git_dir is not at the root of our working_tree_dir, but a .git file with a
            platform agnostic symbolic link. Our git_dir will be wherever the .git file points to
:note: bare repositories will always return False here
"""
if self.bare:
return False
return os.path.isfile(os.path.join(self.working_tree_dir, '.git'))
rev_parse = rev_parse
def __repr__(self):
return '<git.Repo "%s">' % self.git_dir
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import json
import time
from collections import defaultdict, OrderedDict
import requests
import demjson
LINUX_PRICING_URLS = [
# Deprecated instances (JSON format)
'https://aws.amazon.com/ec2/pricing/json/linux-od.json',
# Previous generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js',
# New generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js'
]
EC2_REGIONS = [
'us-east-1',
'us-west-1',
'us-west-2',
'eu-west-1',
'eu-central-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'ap-northeast-2',
'sa-east-1'
]
EC2_INSTANCE_TYPES = [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.4xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
# Maps EC2 region name to region name used in the pricing file
REGION_NAME_MAP = {
'us-east': 'ec2_us_east',
'us-east-1': 'ec2_us_east',
'us-west': 'ec2_us_west',
'us-west-1': 'ec2_us_west',
'us-west-2': 'ec2_us_west_oregon',
'eu-west-1': 'ec2_eu_west',
'eu-ireland': 'ec2_eu_west',
'eu-central-1': 'ec2_eu_central',
'apac-sin': 'ec2_ap_southeast',
'ap-southeast-1': 'ec2_ap_southeast',
'apac-syd': 'ec2_ap_southeast_2',
'ap-southeast-2': 'ec2_ap_southeast_2',
'apac-tokyo': 'ec2_ap_northeast',
'ap-northeast-1': 'ec2_ap_northeast',
'ap-northeast-2': 'ec2_ap_northeast',
'sa-east-1': 'ec2_sa_east',
'us-gov-west-1': 'ec2_us_govwest'
}
INSTANCE_SIZES = [
'micro',
'small',
'medium',
'large',
'xlarge',
'x-large',
'extra-large'
]
RE_NUMERIC_OTHER = re.compile(r'(?:([0-9]+)|([-A-Z_a-z]+)|([^-0-9A-Z_a-z]+))')
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def scrape_ec2_pricing():
result = defaultdict(OrderedDict)
for url in LINUX_PRICING_URLS:
response = requests.get(url)
        if re.match(r'.*?\.json$', url):
            data = response.json()
        elif re.match(r'.*?\.js$', url):
            data = response.content
            match = re.match(r'^.*callback\((.*?)\);?$', data,
                             re.MULTILINE | re.DOTALL)
data = match.group(1)
# demjson supports non-strict mode and can parse unquoted objects
data = demjson.decode(data)
regions = data['config']['regions']
for region_data in regions:
region_name = region_data['region']
libcloud_region_name = REGION_NAME_MAP[region_name]
instance_types = region_data['instanceTypes']
for instance_type in instance_types:
sizes = instance_type['sizes']
for size in sizes:
price = size['valueColumns'][0]['prices']['USD']
if str(price).lower() == 'n/a':
# Price not available
continue
result[libcloud_region_name][size['size']] = float(price)
return result
def update_pricing_file(pricing_file_path, pricing_data):
with open(pricing_file_path, 'r') as fp:
content = fp.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute'].update(pricing_data)
# Always sort the pricing info
data = sort_nested_dict(data)
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(pricing_file_path, 'w') as fp:
fp.write(content)
def sort_nested_dict(value):
"""
Recursively sort a nested dict.
"""
result = OrderedDict()
for key, value in sorted(value.items(), key=sort_key_by_numeric_other):
if isinstance(value, (dict, OrderedDict)):
result[key] = sort_nested_dict(value)
else:
result[key] = value
return result
def sort_key_by_numeric_other(key_value):
"""
Split key into numeric, alpha and other part and sort accordingly.
"""
return tuple((
int(numeric) if numeric else None,
INSTANCE_SIZES.index(alpha) if alpha in INSTANCE_SIZES else alpha,
other
) for (numeric, alpha, other) in RE_NUMERIC_OTHER.findall(key_value[0]))
def main():
print('Scraping EC2 pricing data')
pricing_data = scrape_ec2_pricing()
update_pricing_file(pricing_file_path=PRICING_FILE_PATH,
pricing_data=pricing_data)
print('Pricing data updated')
if __name__ == '__main__':
main()
|
|
import datetime
import httplib
import os
import uuid
import markupsafe
from flask import make_response
from flask import redirect
from flask import request
import furl
import jwe
import jwt
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from framework import sentry
from framework.auth import Auth
from framework.auth import cas
from framework.auth import oauth_scopes
from framework.auth.decorators import collect_auth, must_be_logged_in, must_be_signed
from framework.exceptions import HTTPError
from framework.routing import json_renderer
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.transactions.handlers import no_auto_transaction
from website import mails
from website import settings
from website.addons.base import StorageAddonBase
from website.addons.base import exceptions
from website.addons.base import signals as file_signals
from website.files.models import FileNode, StoredFileNode, TrashedFileNode
from website.models import Node, NodeLog, User
from website.profile.utils import get_gravatar
from website.project import decorators
from website.project.decorators import must_be_contributor_or_public, must_be_valid_project
from website.project.model import DraftRegistration, MetaSchema
from website.project.utils import serialize_node
from website.util import rubeus
# import so that associated listener is instantiated and gets emails
from website.notifications.events.files import FileEvent # noqa
ERROR_MESSAGES = {'FILE_GONE': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
The file "{file_name}" stored on {provider} was deleted via the OSF.
</p>
<p>
It was deleted by <a href="/{deleted_by_guid}">{deleted_by}</a> on {deleted_on}.
</p>
</div>''',
'FILE_GONE_ACTOR_UNKNOWN': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
The file "{file_name}" stored on {provider} was deleted via the OSF.
</p>
<p>
It was deleted on {deleted_on}.
</p>
</div>''',
'DONT_KNOW': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
File not found at {provider}.
</p>
</div>''',
'BLAME_PROVIDER': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
<p>
This {provider} link to the file "{file_name}" is currently unresponsive.
The provider ({provider}) may currently be unavailable or "{file_name}" may have been removed from {provider} through another interface.
</p>
<p>
You may wish to verify this through {provider}'s website.
</p>
</div>''',
'FILE_SUSPENDED': u'''
<style>
#toggleBar{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
This content has been removed.
</div>'''}
WATERBUTLER_JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
@decorators.must_have_permission('write')
@decorators.must_not_be_registration
def disable_addon(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
deleted = node.delete_addon(addon_name, auth)
return {'deleted': deleted}
@must_be_logged_in
def get_addon_user_config(**kwargs):
user = kwargs['auth'].user
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
addon = user.get_addon(addon_name)
if addon is None:
raise HTTPError(httplib.BAD_REQUEST)
return addon.to_json(user)
permission_map = {
'create_folder': 'write',
'revisions': 'read',
'metadata': 'read',
'download': 'read',
'upload': 'write',
'delete': 'write',
'copy': 'write',
'move': 'write',
'copyto': 'write',
'moveto': 'write',
'copyfrom': 'read',
'movefrom': 'write',
}
def check_access(node, auth, action, cas_resp):
"""Verify that user can perform requested action on resource. Raise appropriate
error code if action cannot proceed.
"""
permission = permission_map.get(action, None)
if permission is None:
raise HTTPError(httplib.BAD_REQUEST)
if cas_resp:
if permission == 'read':
if node.is_public:
return True
required_scope = oauth_scopes.CoreScopes.NODE_FILE_READ
else:
required_scope = oauth_scopes.CoreScopes.NODE_FILE_WRITE
if not cas_resp.authenticated \
or required_scope not in oauth_scopes.normalize_scopes(cas_resp.attributes['accessTokenScope']):
raise HTTPError(httplib.FORBIDDEN)
if permission == 'read' and node.can_view(auth):
return True
if permission == 'write' and node.can_edit(auth):
return True
# Users attempting to register projects with components might not have
# `write` permissions for all components. This will result in a 403 for
# all `copyto` actions as well as `copyfrom` actions if the component
# in question is not public. To get around this, we have to recursively
# check the node's parent node to determine if they have `write`
# permissions up the stack.
# TODO(hrybacki): is there a way to tell if this is for a registration?
# All nodes being registered that receive the `copyto` action will have
# `node.is_registration` == True. However, we have no way of telling if
# `copyfrom` actions are originating from a node being registered.
    # TODO: This will raise UNAUTHORIZED for registrations that have not been archived yet
if action == 'copyfrom' or (action == 'copyto' and node.is_registration):
parent = node.parent_node
while parent:
if parent.can_edit(auth):
return True
parent = parent.parent_node
# Users with the PREREG_ADMIN_TAG should be allowed to download files
# from prereg challenge draft registrations.
try:
prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', 2)
)
allowed_nodes = [node] + node.parents
prereg_draft_registration = DraftRegistration.find(
Q('branched_from', 'in', [n._id for n in allowed_nodes]) &
Q('registration_schema', 'eq', prereg_schema)
)
if action == 'download' and \
auth.user is not None and \
prereg_draft_registration.count() > 0 and \
settings.PREREG_ADMIN_TAG in auth.user.system_tags:
return True
except NoResultsFound:
pass
raise HTTPError(httplib.FORBIDDEN if auth.user else httplib.UNAUTHORIZED)
def make_auth(user):
if user is not None:
return {
'id': user._id,
'email': '{}@osf.io'.format(user._id),
'name': user.fullname,
}
return {}
@collect_auth
def get_auth(auth, **kwargs):
cas_resp = None
if not auth.user:
# Central Authentication Server OAuth Bearer Token
authorization = request.headers.get('Authorization')
if authorization and authorization.startswith('Bearer '):
client = cas.get_client()
try:
access_token = cas.parse_auth_header(authorization)
cas_resp = client.profile(access_token)
except cas.CasError as err:
sentry.log_exception()
# NOTE: We assume that the request is an AJAX request
return json_renderer(err)
if cas_resp.authenticated:
auth.user = User.load(cas_resp.user)
try:
data = jwt.decode(
jwe.decrypt(request.args.get('payload', '').encode('utf-8'), WATERBUTLER_JWE_KEY),
settings.WATERBUTLER_JWT_SECRET,
options={'require_exp': True},
algorithm=settings.WATERBUTLER_JWT_ALGORITHM
)['data']
except (jwt.InvalidTokenError, KeyError):
raise HTTPError(httplib.FORBIDDEN)
if not auth.user:
auth.user = User.from_cookie(data.get('cookie', ''))
try:
action = data['action']
node_id = data['nid']
provider_name = data['provider']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
node = Node.load(node_id)
if not node:
raise HTTPError(httplib.NOT_FOUND)
check_access(node, auth, action, cas_resp)
provider_settings = node.get_addon(provider_name)
if not provider_settings:
raise HTTPError(httplib.BAD_REQUEST)
try:
credentials = provider_settings.serialize_waterbutler_credentials()
waterbutler_settings = provider_settings.serialize_waterbutler_settings()
except exceptions.AddonError:
log_exception()
raise HTTPError(httplib.BAD_REQUEST)
return {'payload': jwe.encrypt(jwt.encode({
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
'data': {
'auth': make_auth(auth.user), # A waterbutler auth dict not an Auth object
'credentials': credentials,
'settings': waterbutler_settings,
'callback_url': node.api_url_for(
('create_waterbutler_log' if not node.is_registration else 'registration_callbacks'),
_absolute=True,
),
}
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), WATERBUTLER_JWE_KEY)}
LOG_ACTION_MAP = {
'move': NodeLog.FILE_MOVED,
'copy': NodeLog.FILE_COPIED,
'rename': NodeLog.FILE_RENAMED,
'create': NodeLog.FILE_ADDED,
'update': NodeLog.FILE_UPDATED,
'delete': NodeLog.FILE_REMOVED,
'create_folder': NodeLog.FOLDER_CREATED,
}
@must_be_signed
@no_auto_transaction
@must_be_valid_project
def create_waterbutler_log(payload, **kwargs):
with TokuTransaction():
try:
auth = payload['auth']
action = LOG_ACTION_MAP[payload['action']]
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
user = User.load(auth['id'])
if user is None:
raise HTTPError(httplib.BAD_REQUEST)
auth = Auth(user=user)
node = kwargs['node'] or kwargs['project']
if action in (NodeLog.FILE_MOVED, NodeLog.FILE_COPIED):
for bundle in ('source', 'destination'):
for key in ('provider', 'materialized', 'name', 'nid'):
if key not in payload[bundle]:
raise HTTPError(httplib.BAD_REQUEST)
dest = payload['destination']
src = payload['source']
if src is not None and dest is not None:
dest_path = dest['materialized']
src_path = src['materialized']
if dest_path.endswith('/') and src_path.endswith('/'):
dest_path = os.path.dirname(dest_path)
src_path = os.path.dirname(src_path)
if (
os.path.split(dest_path)[0] == os.path.split(src_path)[0] and
dest['provider'] == src['provider'] and
dest['nid'] == src['nid'] and
dest['name'] != src['name']
):
action = LOG_ACTION_MAP['rename']
destination_node = node # For clarity
source_node = Node.load(payload['source']['nid'])
source = source_node.get_addon(payload['source']['provider'])
destination = node.get_addon(payload['destination']['provider'])
payload['source'].update({
'materialized': payload['source']['materialized'].lstrip('/'),
'addon': source.config.full_name,
'url': source_node.web_url_for(
'addon_view_or_download_file',
path=payload['source']['path'].lstrip('/'),
provider=payload['source']['provider']
),
'node': {
'_id': source_node._id,
'url': source_node.url,
'title': source_node.title,
}
})
payload['destination'].update({
'materialized': payload['destination']['materialized'].lstrip('/'),
'addon': destination.config.full_name,
'url': destination_node.web_url_for(
'addon_view_or_download_file',
path=payload['destination']['path'].lstrip('/'),
provider=payload['destination']['provider']
),
'node': {
'_id': destination_node._id,
'url': destination_node.url,
'title': destination_node.title,
}
})
payload.update({
'node': destination_node._id,
'project': destination_node.parent_id,
})
if not payload.get('errors'):
destination_node.add_log(
action=action,
auth=auth,
params=payload
)
if payload.get('email') is True or payload.get('errors'):
mails.send_mail(
user.username,
mails.FILE_OPERATION_FAILED if payload.get('errors')
else mails.FILE_OPERATION_SUCCESS,
action=payload['action'],
source_node=source_node,
destination_node=destination_node,
source_path=payload['source']['materialized'],
destination_path=payload['destination']['materialized'],
source_addon=payload['source']['addon'],
destination_addon=payload['destination']['addon'],
)
if payload.get('errors'):
# Action failed but our function succeeded
# Bail out to avoid file_signals
return {'status': 'success'}
else:
try:
metadata = payload['metadata']
node_addon = node.get_addon(payload['provider'])
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if node_addon is None:
raise HTTPError(httplib.BAD_REQUEST)
metadata['path'] = metadata['path'].lstrip('/')
node_addon.create_waterbutler_log(auth, action, metadata)
with TokuTransaction():
file_signals.file_updated.send(node=node, user=user, event_type=action, payload=payload)
return {'status': 'success'}
@file_signals.file_updated.connect
def addon_delete_file_node(self, node, user, event_type, payload):
""" Get addon StoredFileNode(s), move it into the TrashedFileNode collection
and remove it from StoredFileNode.
Required so that the guids of deleted addon files are not re-pointed when an
addon file or folder is moved or renamed.
"""
if event_type == 'file_removed' and payload.get('provider', None) != 'osfstorage':
provider = payload['provider']
path = payload['metadata']['path']
materialized_path = payload['metadata']['materialized']
if path.endswith('/'):
folder_children = FileNode.resolve_class(provider, FileNode.ANY).find(
Q('provider', 'eq', provider) &
Q('node', 'eq', node) &
Q('materialized_path', 'startswith', materialized_path)
)
for item in folder_children:
if item.kind == 'file' and not TrashedFileNode.load(item._id):
item.delete(user=user)
elif item.kind == 'folder':
StoredFileNode.remove_one(item.stored_object)
else:
try:
file_node = FileNode.resolve_class(provider, FileNode.FILE).find_one(
Q('node', 'eq', node) &
Q('materialized_path', 'eq', materialized_path)
)
except NoResultsFound:
file_node = None
if file_node and not TrashedFileNode.load(file_node._id):
file_node.delete(user=user)
@must_be_valid_project
def addon_view_or_download_file_legacy(**kwargs):
query_params = request.args.to_dict()
node = kwargs.get('node') or kwargs['project']
action = query_params.pop('action', 'view')
provider = kwargs.get('provider', 'osfstorage')
if kwargs.get('path'):
path = kwargs['path']
elif kwargs.get('fid'):
path = kwargs['fid']
if 'download' in request.path or request.path.startswith('/api/v1/'):
action = 'download'
if kwargs.get('vid'):
query_params['version'] = kwargs['vid']
# If provider is OSFstorage, check existence of requested file in the filetree
# This prevents invalid GUIDs from being created
if provider == 'osfstorage':
node_settings = node.get_addon('osfstorage')
try:
path = node_settings.get_root().find_child_by_name(path)._id
except NoResultsFound:
raise HTTPError(
404, data=dict(
message_short='File not found',
message_long='You requested a file that does not exist.'
)
)
return redirect(
node.web_url_for(
'addon_view_or_download_file',
path=path,
provider=provider,
action=action,
**query_params
),
code=httplib.MOVED_PERMANENTLY
)
@must_be_valid_project
@must_be_contributor_or_public
def addon_deleted_file(auth, node, error_type='BLAME_PROVIDER', **kwargs):
"""Shows a nice error message to users when they try to view a deleted file
"""
# Allow file_node to be passed in so other views can delegate to this one
file_node = kwargs.get('file_node') or TrashedFileNode.load(kwargs.get('trashed_id'))
deleted_by, deleted_on = None, None
if isinstance(file_node, TrashedFileNode):
deleted_by = file_node.deleted_by
deleted_by_guid = file_node.deleted_by._id if deleted_by else None
deleted_on = file_node.deleted_on.strftime('%c') + ' UTC'
if file_node.suspended:
error_type = 'FILE_SUSPENDED'
elif file_node.deleted_by is None:
if file_node.provider == 'osfstorage':
error_type = 'FILE_GONE_ACTOR_UNKNOWN'
else:
error_type = 'BLAME_PROVIDER'
else:
error_type = 'FILE_GONE'
else:
error_type = 'DONT_KNOW'
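# Summary of the branches above: a trashed file that was suspended reports
# FILE_SUSPENDED; a trashed file with no recorded deleter is
# FILE_GONE_ACTOR_UNKNOWN on osfstorage and BLAME_PROVIDER elsewhere; a
# trashed file with a known deleter is FILE_GONE; anything else falls back
# to DONT_KNOW.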
file_path = kwargs.get('path', file_node.path)
file_name = file_node.name or os.path.basename(file_path)
file_name_title, file_name_ext = os.path.splitext(file_name)
provider_full = settings.ADDONS_AVAILABLE_DICT[file_node.provider].full_name
try:
file_guid = file_node.get_guid()._id
except AttributeError:
file_guid = None
format_params = dict(
file_name=markupsafe.escape(file_name),
deleted_by=markupsafe.escape(deleted_by),
deleted_on=markupsafe.escape(deleted_on),
provider=markupsafe.escape(provider_full)
)
if deleted_by:
format_params['deleted_by_guid'] = markupsafe.escape(deleted_by_guid)
ret = serialize_node(node, auth, primary=True)
ret.update(rubeus.collect_addon_assets(node))
ret.update({
'error': ERROR_MESSAGES[error_type].format(**format_params),
'urls': {
'render': None,
'sharejs': None,
'mfr': settings.MFR_SERVER_URL,
'gravatar': get_gravatar(auth.user, 25),
'files': node.web_url_for('collect_file_trees'),
},
'extra': {},
'size': 9966699, # Prevent file from being edited, just in case
'sharejs_uuid': None,
'file_name': file_name,
'file_path': file_path,
'file_name_title': file_name_title,
'file_name_ext': file_name_ext,
'file_guid': file_guid,
'file_id': file_node._id,
'provider': file_node.provider,
'materialized_path': file_node.materialized_path or file_path,
'private': getattr(node.get_addon(file_node.provider), 'is_private', False),
'file_tags': [tag._id for tag in file_node.tags],
'allow_comments': file_node.provider in settings.ADDONS_COMMENTABLE,
})
return ret, httplib.GONE
@must_be_valid_project
@must_be_contributor_or_public
def addon_view_or_download_file(auth, path, provider, **kwargs):
extras = request.args.to_dict()
extras.pop('_', None) # Clean up our url params a bit
action = extras.get('action', 'view')
node = kwargs.get('node') or kwargs['project']
node_addon = node.get_addon(provider)
provider_safe = markupsafe.escape(provider)
path_safe = markupsafe.escape(path)
project_safe = markupsafe.escape(node.project_or_component)
if not path:
raise HTTPError(httplib.BAD_REQUEST)
if not isinstance(node_addon, StorageAddonBase):
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Bad Request',
'message_long': 'The {} add-on containing {} is no longer connected to {}.'.format(provider_safe, path_safe, project_safe)
})
if not node_addon.has_auth:
raise HTTPError(httplib.UNAUTHORIZED, data={
'message_short': 'Unauthorized',
'message_long': 'The {} add-on containing {} is no longer authorized.'.format(provider_safe, path_safe)
})
if not node_addon.complete:
raise HTTPError(httplib.BAD_REQUEST, data={
'message_short': 'Bad Request',
'message_long': 'The {} add-on containing {} is no longer configured.'.format(provider_safe, path_safe)
})
file_node = FileNode.resolve_class(provider, FileNode.FILE).get_or_create(node, path)
# Note: the cookie is provided for authentication to WaterButler;
# it is overridden to force authentication as the current user.
# The Authorization header is also passed along to support basic auth.
version = file_node.touch(
request.headers.get('Authorization'),
**dict(
extras,
cookie=request.cookies.get(settings.COOKIE_NAME)
)
)
if version is None:
return addon_deleted_file(file_node=file_node, path=path, **kwargs)
# TODO clean up these urls and unify what is used as a version identifier
if request.method == 'HEAD':
return make_response(('', 200, {
'Location': file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier))
}))
if action == 'download':
return redirect(file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier)))
if len(request.path.strip('/').split('/')) > 1:
guid = file_node.get_guid(create=True)
return redirect(furl.furl('/{}/'.format(guid._id)).set(args=extras).url)
return addon_view_file(auth, node, file_node, version)
def addon_view_file(auth, node, file_node, version):
# TODO: resolve circular import issue
from website.addons.wiki import settings as wiki_settings
if isinstance(version, tuple):
version, error = version
error = error.replace('\n', '').strip()
else:
error = None
ret = serialize_node(node, auth, primary=True)
if file_node._id + '-' + version._id not in node.file_guid_to_share_uuids:
node.file_guid_to_share_uuids[file_node._id + '-' + version._id] = uuid.uuid4()
node.save()
if ret['user']['can_edit']:
sharejs_uuid = str(node.file_guid_to_share_uuids[file_node._id + '-' + version._id])
else:
sharejs_uuid = None
download_url = furl.furl(request.url.encode('utf-8')).set(args=dict(request.args, **{
'direct': None,
'mode': 'render',
'action': 'download',
}))
render_url = furl.furl(settings.MFR_SERVER_URL).set(
path=['render'],
args={'url': download_url.url}
)
ret.update({
'urls': {
'render': render_url.url,
'mfr': settings.MFR_SERVER_URL,
'sharejs': wiki_settings.SHAREJS_URL,
'gravatar': get_gravatar(auth.user, 25),
'files': node.web_url_for('collect_file_trees'),
'archived_from': get_archived_from_url(node, file_node) if node.is_registration else None,
},
'error': error,
'file_name': file_node.name,
'file_name_title': os.path.splitext(file_node.name)[0],
'file_name_ext': os.path.splitext(file_node.name)[1],
'file_path': file_node.path,
'sharejs_uuid': sharejs_uuid,
'provider': file_node.provider,
'materialized_path': file_node.materialized_path,
'extra': version.metadata.get('extra', {}),
'size': version.size if version.size is not None else 9966699,
'private': getattr(node.get_addon(file_node.provider), 'is_private', False),
'file_tags': [tag._id for tag in file_node.tags],
'file_guid': file_node.get_guid()._id,
'file_id': file_node._id,
'allow_comments': file_node.provider in settings.ADDONS_COMMENTABLE
})
ret.update(rubeus.collect_addon_assets(node))
return ret
def get_archived_from_url(node, file_node):
if file_node.copied_from:
trashed = TrashedFileNode.load(file_node.copied_from._id)
if not trashed:
return node.registered_from.web_url_for('addon_view_or_download_file', provider=file_node.provider, path=file_node.copied_from._id)
return None
|
|
from rpython.rlib.objectmodel import specialize, we_are_translated
from rpython.rlib.rarithmetic import intmask
from hippy.objects.base import W_Object
from hippy.objects.reference import W_Reference, VirtualReference
from hippy.objects.convert import force_float_to_int_in_any_way
from hippy.error import ConvertError
from collections import OrderedDict
from rpython.rlib.rstring import StringBuilder
class CannotConvertToIndex(Exception):
pass
def new_rdict():
return OrderedDict()
def try_convert_str_to_int(key):
# Try to convert 'key' from a string to an int, but carefully:
# we must not strip any whitespace, the result must not
# overflow, etc. In general, the result, when converted back
# to a string, must give back exactly the original string.
try:
i = intmask(int(key)) # XXX can be done a bit more efficiently
except (ValueError, OverflowError):
raise ValueError
if str(i) != key:
raise ValueError
return i
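# A few illustrative round trips (comments only, assuming plain int() semantics):
#   try_convert_str_to_int("42")   -> 42
#   try_convert_str_to_int("042")  -> ValueError   (str(42) == "42" != "042")
#   try_convert_str_to_int(" 7")   -> ValueError   (the leading space would be lost)
#   try_convert_str_to_int("abc")  -> ValueError   (not an integer at all)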
def compute_next_idx(dct_w):
next_idx = 0
for key in dct_w.keys():
try:
intkey = try_convert_str_to_int(key)
except ValueError:
continue
if intkey >= next_idx:
next_idx = intkey + 1
return next_idx
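# Sketch of the behaviour (illustrative keys): for {'0': w_a, 'foo': w_b, '5': w_c}
# the integer-looking keys are 0 and 5, so compute_next_idx() returns 6;
# purely textual keys such as 'foo' are ignored.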
def wrap_array_key(space, key):
try:
intkey = try_convert_str_to_int(key)
except ValueError:
return space.newstr(key)
return space.newint(intkey)
def convert_to_index(space, w_arg, allow_bogus=False):
"Returns a pair (int, str), where only one of the two is meaningful"
if w_arg.tp == space.tp_int:
return space.int_w(w_arg), None
elif w_arg.tp == space.tp_str:
return 0, space.str_w(w_arg)
elif w_arg.tp == space.tp_float:
return force_float_to_int_in_any_way(space.float_w(w_arg)), None
elif w_arg.tp == space.tp_null:
return 0, ""
elif w_arg.tp == space.tp_bool:
if space.is_w(w_arg, space.w_False):
if allow_bogus:
return 0, ""
else:
return 0, None
return 1, None
if allow_bogus:
if w_arg.tp == space.tp_file_res:
return 0, space.str_w(w_arg)
elif w_arg.tp == space.tp_object:
return 0, space.str_w(w_arg)
elif w_arg.tp == space.tp_array:
space.ec.notice("Array to string conversion")
return 0, "Array"
else:
space.ec.warn("Illegal offset type")
raise CannotConvertToIndex
else:
# XXX make a real warning
space.ec.warn("Illegal offset type")
raise CannotConvertToIndex
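# Sketch of the mapping above (illustrative inputs only):
#   int 3      -> (3, None)
#   str "abc"  -> (0, "abc")
#   float 3.5  -> (force_float_to_int_in_any_way(3.5), None)
#   null       -> (0, "")
#   bool true  -> (1, None); bool false -> (0, None), or (0, "") if allow_bogus
#   array      -> (0, "Array") plus a notice when allow_bogus, otherwise a
#                 warning and CannotConvertToIndex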
class W_ArrayObject(W_Object):
"""Abstract base class. Concrete subclasses use various strategies.
This base class defines the general methods that can be implemented
without needing to call (too often) the arraylen(), _getitem_str()
and _getitem_int() methods.
"""
@staticmethod
def new_array_from_list(space, lst_w):
return W_ListArrayObject(space, lst_w)
@staticmethod
def new_array_from_rdict(space, dct_w):
return W_RDictArrayObject(space, dct_w, compute_next_idx(dct_w))
@staticmethod
def new_array_from_pairs(space, pairs_ww, allow_bogus=False):
rdct_w = new_rdict()
next_idx = 0
for w_key, w_value in pairs_ww:
if w_key is not None:
try:
as_int, as_str = convert_to_index(space, w_key,
allow_bogus=allow_bogus)
except CannotConvertToIndex:
continue
if as_str is not None:
# it was a string; maybe it can be interpreted as an integer
try:
as_int = try_convert_str_to_int(as_str)
# yes it can
as_str = None
except ValueError:
pass
# no key given: fall back to the next automatic integer index
else:
as_int, as_str = next_idx, None
if as_str is None:
if as_int >= next_idx:
next_idx = as_int + 1
as_str = str(as_int)
rdct_w[as_str] = w_value
return W_RDictArrayObject(space, rdct_w, next_idx=next_idx)
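# Illustration of the key normalisation above (wrapped values elided): for the
# pairs [(str "3", a), (no key, b), (str "x", c)] the resulting keys are
# "3" (re-parsed as int 3, pushing next_idx to 4), "4" (the automatic index),
# and "x"; next_idx ends up at 5.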
def copy_item(self):
return self.copy()
def is_true(self, space):
return self.arraylen() > 0
def int_w(self, space):
return int(self.is_true(space))
def as_int_arg(self, space):
raise ConvertError('cannot use array as integer argument')
def as_number(self, space):
return space.wrap(self.is_true(space))
def abs(self, space):
return space.w_False
def _lookup_item_ref(self, space, w_arg, give_notice=False):
"""Return a possibly virtual reference to the item,
or None if the lookup fails
"""
try:
as_int, as_str = convert_to_index(space, w_arg)
except CannotConvertToIndex:
space.ec.warn("Illegal offset type")
return None
if as_str is None:
r_item = self._getitem_int(as_int)
if r_item is None:
if give_notice:
space.ec.notice("Undefined offset: %d" % as_int)
return None
else:
r_item = self._getitem_str(as_str)
if r_item is None:
if give_notice:
space.ec.notice("Undefined index: %s" % as_str)
return None
return r_item
def getitem(self, space, w_arg, give_notice=False):
try:
as_int, as_str = convert_to_index(space, w_arg)
except CannotConvertToIndex:
space.ec.warn("Illegal offset type")
return space.w_Null
if as_str is None:
r_item = self._getitem_int(as_int)
if r_item is None:
if give_notice:
space.ec.notice("Undefined offset: %d" % as_int)
return space.w_Null
else:
r_item = self._getitem_str(as_str)
if r_item is None:
if give_notice:
space.ec.notice("Undefined index: %s" % as_str)
return space.w_Null
if isinstance(r_item, VirtualReference):
return r_item.deref()
else:
return r_item
def setitem2_maybe_inplace(self, space, w_arg, w_value, unique_item=False):
try:
as_int, as_str = convert_to_index(space, w_arg)
except CannotConvertToIndex:
space.ec.warn("Illegal offset type")
return self, space.w_Null
if as_str is None:
w_arr = self._setitem_int(as_int, w_value, False, unique_item)
else:
w_arr = self._setitem_str(as_str, w_value, False, unique_item)
return w_arr, w_value
def _setitem_ref(self, space, w_arg, w_ref):
try:
as_int, as_str = convert_to_index(space, w_arg)
except CannotConvertToIndex:
space.ec.warn("Illegal offset type")
return self
if as_str is None:
return self._setitem_int(as_int, w_ref, True)
else:
return self._setitem_str(as_str, w_ref, True)
def appenditem_inplace(self, space, w_item, as_ref=False):
# For now this always succeeds in appending the item in-place.
# It may need to be reconsidered if we add more strategies.
self._appenditem(w_item, as_ref)
return w_item
def packitem_maybe_inplace(self, space, w_arg, w_value):
as_int, as_str = convert_to_index(space, w_arg)
if as_str is not None:
try:
try_convert_str_to_int(as_str)
except ValueError:
# really not an int
return self._setitem_str(as_str, w_value, False)
self._appenditem(w_value)
return self
def _unsetitem(self, space, w_arg):
as_int, as_str = convert_to_index(space, w_arg)
if as_str is None:
return self._unsetitem_int(as_int)
else:
return self._unsetitem_str(as_str)
def isset_index(self, space, w_index):
as_int, as_str = convert_to_index(space, w_index)
if as_str is None:
return self._isset_int(as_int)
else:
return self._isset_str(as_str)
def _getitem_int(self, index):
raise NotImplementedError("abstract")
def _getitem_str(self, key):
raise NotImplementedError("abstract")
def _appenditem(self, w_obj, as_ref=False):
raise NotImplementedError("abstract")
def _setitem_int(self, index, w_value, as_ref, unique_item=False):
raise NotImplementedError("abstract")
def _setitem_str(self, key, w_value, as_ref,
unique_array=False, unique_item=False):
# Note: might be occasionally called with a string like "5" too
raise NotImplementedError("abstract")
def _unsetitem_int(self, index):
raise NotImplementedError("abstract")
def _unsetitem_str(self, key):
raise NotImplementedError("abstract")
def _isset_int(self, index):
raise NotImplementedError("abstract")
def _isset_str(self, key):
raise NotImplementedError("abstract")
def arraylen(self):
raise NotImplementedError("abstract")
def as_rdict(self):
raise NotImplementedError("abstract")
def _each(self, space):
raise NotImplementedError("abstract")
def _current(self, space):
raise NotImplementedError("abstract")
def _key(self, space):
raise NotImplementedError("abstract")
def _inplace_pop(self, space):
raise NotImplementedError("abstract")
def get_rdict_from_array(self):
raise NotImplementedError("abstract")
def as_dict(self):
"NOT_RPYTHON: for tests only"
return self.as_rdict()
def var_dump(self, space, indent, recursion):
return array_var_dump(self.as_rdict(), space, indent, recursion,
self, 'array')
def var_export(self, space, indent, recursion, suffix):
return array_var_export(self.as_rdict(), space, indent, recursion,
self, suffix)
def str(self, space, quiet=False):
if not quiet:
space.ec.notice("Array to string conversion")
return "Array"
def repr(self):
return "Array"
def dump(self):
items = []
next = 0
for key, w_value in self.as_rdict().items():
dumpvalue = w_value.dump()
try:
numkey = int(key)
except ValueError:
items.append('%s=>%s' % (key, dumpvalue))
continue
if numkey == next:
items.append(dumpvalue)
else:
items.append('%d=>%s' % (numkey, dumpvalue))
next = numkey + 1
return "array(%s)" % ', '.join(items)
def as_pair_list(self, space):
pairs = []
with space.iter(self) as w_iter:
while not w_iter.done():
w_key, w_val = w_iter.next_item(space)
pairs.append((w_key, w_val))
return pairs
def eval_static(self, space):
return self
def serialize(self, space, builder, memo):
array_serialize(self, space, builder, memo)
return False # counted in array_serialize
def add(self, space, other_array):
assert isinstance(other_array, W_ArrayObject)
d = self.as_rdict()
for k, w_v in other_array.as_rdict().iteritems():
if k not in d:
d[k] = w_v
return space.new_array_from_rdict(d)
class ListItemVRef(VirtualReference):
def __init__(self, w_array, index):
assert isinstance(w_array, W_ListArrayObject)
self.w_array = w_array
self.index = index
def deref(self):
return self.w_array.lst_w[self.index]
def store(self, w_value, unique=False):
self.w_array.lst_w[self.index] = w_value
def __repr__(self):
return '<ListItemVRef>'
class W_ListArrayObject(W_ArrayObject):
_has_string_keys = False
def __init__(self, space, lst_w, current_idx=0):
self.space = space
self.lst_w = lst_w
self.current_idx = current_idx
def as_unique_arraylist(self):
self._note_making_a_copy()
lst_w = [item.copy_item() for item in self.lst_w]
return W_ListArrayObject(self.space, lst_w, current_idx=self.current_idx)
def as_list_w(self):
return self.lst_w[:]
def as_unique_arraydict(self):
self._note_making_a_copy()
d = self.as_rdict() # make a fresh dictionary
return W_RDictArrayObject(self.space, d,
next_idx=len(self.lst_w),
current_idx=self.current_idx)
def arraylen(self):
return len(self.lst_w)
def as_rdict(self):
d = new_rdict()
for i in range(len(self.lst_w)):
d[str(i)] = self.lst_w[i].copy_item()
return d
def get_rdict_from_array(self):
return self.as_rdict()
def _current(self, space):
index = self.current_idx
if 0 <= index < len(self.lst_w):
return self.lst_w[index]
else:
return space.w_False
def _key(self, space):
index = self.current_idx
if 0 <= index < len(self.lst_w):
return space.newint(index)
else:
return space.w_Null
def _getitem_int(self, index):
if index >= 0:
try:
res = self.lst_w[index]
except IndexError:
pass
else:
if isinstance(res, W_Reference):
return res
else:
return ListItemVRef(self, index)
return None
def _getitem_str(self, key):
try:
i = try_convert_str_to_int(key)
except ValueError:
return None
return self._getitem_int(i)
def _appenditem(self, w_obj, as_ref=False):
self.lst_w.append(w_obj)
def _setitem_int(self, index, w_value, as_ref, unique_item=False):
length = self.arraylen()
if index >= length:
if index > length:
return self._convert_and_setitem_int(index, w_value)
self.lst_w.append(w_value)
return self
#
if index < 0:
return self._convert_and_setitem_int(index, w_value)
#
# If overwriting an existing W_Reference object, we only update
# the value in the reference. Else we need to update 'lst_w'.
if not as_ref:
w_old = self.lst_w[index]
if isinstance(w_old, W_Reference):
w_old.store(w_value, unique_item)
return self
self.lst_w[index] = w_value
return self
def _setitem_str(self, key, w_value, as_ref,
unique_array=False, unique_item=False):
try:
i = try_convert_str_to_int(key)
except ValueError:
return self._convert_and_setitem_str(key, w_value)
else:
return self._setitem_int(i, w_value, as_ref, unique_item)
def _convert_and_setitem_int(self, index, w_value):
res = self.as_unique_arraydict()
return res._setitem_int(index, w_value, False)
def _convert_and_setitem_str(self, key, w_value):
res = self.as_unique_arraydict()
return res._setitem_str(key, w_value, False)
def _unsetitem_int(self, index):
if index < 0 or index >= self.arraylen():
return self
if index == self.arraylen() - 1:
del self.lst_w[index]
if self.current_idx > len(self.lst_w):
self.current_idx = len(self.lst_w)
return self
else:
return self.as_unique_arraydict()._unsetitem_int(index)
def _unsetitem_str(self, key):
try:
i = try_convert_str_to_int(key)
except ValueError:
return self # str key, so not in the array at all
else:
return self._unsetitem_int(i)
def _isset_int(self, index):
return 0 <= index < self.arraylen()
def _isset_str(self, key):
try:
i = try_convert_str_to_int(key)
except ValueError:
return False
else:
return self._isset_int(i)
def create_iter(self, space, contextclass=None):
from hippy.objects.arrayiter import W_ListArrayIterator
return W_ListArrayIterator(self.lst_w)
def create_iter_ref(self, space, r_self, contextclass=None):
from hippy.objects.arrayiter import ListArrayIteratorRef
return ListArrayIteratorRef(space, r_self)
def copy(self):
return self.as_unique_arraylist()
def _inplace_pop(self, space):
self.current_idx = 0
return self.lst_w.pop()
def _values(self, space):
return self.lst_w
class DictItemVRef(VirtualReference):
def __init__(self, w_array, index):
self.w_array = w_array
self.index = index
def deref(self):
return self.w_array.dct_w[self.index]
def store(self, w_value, unique=False):
self.w_array.dct_w[self.index] = w_value
class W_RDictArrayObject(W_ArrayObject):
_has_string_keys = True
strategy_name = 'hash'
_keylist = None
def __init__(self, space, dct_w, next_idx, current_idx=0):
if not we_are_translated():
assert isinstance(dct_w, OrderedDict)
self.space = space
self.dct_w = dct_w
self.next_idx = next_idx
self.current_idx = current_idx
def as_rdict(self):
new_dict = OrderedDict()
for key, w_value in self.dct_w.iteritems():
new_dict[key] = w_value.copy_item()
return new_dict
def get_rdict_from_array(self):
return self.dct_w.copy()
def as_unique_arraydict(self):
self._note_making_a_copy()
return W_RDictArrayObject(self.space, self.as_rdict(),
next_idx=self.next_idx,
current_idx=self.current_idx)
def as_list_w(self):
return self.dct_w.values()
def _getkeylist(self):
keylist = self._keylist
if keylist is None:
keylist = self.dct_w.keys()
self._keylist = keylist
return keylist
def _keylist_changed(self):
self._keylist = None
def _current(self, space):
keylist = self._getkeylist()
index = self.current_idx
if 0 <= index < len(keylist):
return self.dct_w[keylist[index]]
else:
return space.w_False
def _key(self, space):
keylist = self._getkeylist()
index = self.current_idx
if 0 <= index < len(keylist):
return wrap_array_key(space, keylist[index])
else:
return space.w_Null
def arraylen(self):
return len(self.dct_w)
def _getitem_int(self, index):
return self._getitem_str(str(index))
def _getitem_str(self, key):
try:
res = self.dct_w[key]
except KeyError:
return None
if isinstance(res, W_Reference):
return res
else:
return DictItemVRef(self, key)
def _appenditem(self, w_obj, as_ref=False):
res = self._setitem_int(self.next_idx, w_obj, as_ref)
assert res is self
def _setitem_int(self, index, w_value, as_ref, unique_item=False):
return self._setitem_str(str(index), w_value, as_ref, unique_item)
def _setitem_str(self, key, w_value, as_ref,
unique_array=False, unique_item=False):
# If overwriting an existing W_Reference object, we only update
# the value in the reference and return 'self'.
if not as_ref:
try:
w_old = self.dct_w[key]
except KeyError:
w_old = None
if isinstance(w_old, W_Reference): # and is not None
w_old.store(w_value, unique_item)
return self
# Else update the 'dct_w'.
if self._keylist is not None and key not in self.dct_w:
self._keylist_changed()
self.dct_w[key] = w_value
# Keep next_idx in sync: an integer-looking key may bump it.
try:
i = try_convert_str_to_int(key)
except ValueError:
pass
else:
if self.next_idx <= i:
self.next_idx = i + 1
return self
def _unsetitem_int(self, index):
return self._unsetitem_str(str(index))
def _unsetitem_str(self, key):
if key not in self.dct_w:
return self
# XXX slow hacks to know if we must decrement current_idx or not:
# this is if and only if the removed item is before current_idx.
current_idx = self.current_idx
if current_idx > 0:
keylist = self._getkeylist()
length = len(self.dct_w)
if current_idx <= length // 2:
# look on the left of current_idx
for i in range(current_idx):
if keylist[i] == key:
# found: decrement current_idx
self.current_idx = current_idx - 1
break
else:
# look on the right of current_idx
for i in range(current_idx, length):
if keylist[i] == key:
# found: don't decrement current_idx
break
else:
# not found: decrement current_idx
self.current_idx = current_idx - 1
#
del self.dct_w[key]
self._keylist_changed()
return self
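# Illustration of the current_idx bookkeeping above (illustrative keys):
# with dct_w = {'a': .., 'b': .., 'c': ..} and current_idx == 2 (pointing at 'c'),
# unsetting 'a' (before the cursor) decrements current_idx to 1 so it still
# points at 'c', while unsetting 'c' (at/after the cursor) leaves it unchanged.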
def _isset_int(self, index):
return self._isset_str(str(index))
def _isset_str(self, key):
return key in self.dct_w
def create_iter(self, space, contextclass=None):
from hippy.objects.arrayiter import W_RDictArrayIterator
return W_RDictArrayIterator(self.dct_w)
def create_iter_ref(self, space, r_self, contextclass=None):
from hippy.objects.arrayiter import RDictArrayIteratorRef
return RDictArrayIteratorRef(space, r_self)
def copy(self):
return self.as_unique_arraydict()
def _inplace_pop(self, space):
key, w_value = self.dct_w.popitem()
self._keylist_changed()
if key == str(self.next_idx - 1):
self.next_idx -= 1
self.current_idx = 0
return w_value
def _values(self, space):
return self.dct_w.values()
def var_export(self, space, indent, recursion, suffix):
return array_var_export(self.as_rdict(), space, indent, recursion,
self, suffix)
def array_var_dump(dct_w, space, indent, recursion, w_reckey, header):
if w_reckey in recursion:
return '%s*RECURSION*\n' % indent
res = StringBuilder()
recursion[w_reckey] = None
res.append('%s%s(%d) {\n' % (indent, header, len(dct_w)))
if indent.endswith('&'):
indent = indent[:-1]
subindent = indent + ' '
for key, w_value in dct_w.iteritems():
try:
index = try_convert_str_to_int(key)
except ValueError:
s = '%s["%s"]=>\n' % (subindent, key)
else:
s = '%s[%d]=>\n' % (subindent, index)
res.append(s)
res.append(w_value.var_dump(space, subindent, recursion))
res.append('%s}\n' % indent)
del recursion[w_reckey]
return res.build()
def array_var_export(dct_w, space, indent, recursion, w_reckey,
header, prefix=' ', suffix='', arr_in_arr=False):
acc = StringBuilder()
if w_reckey in recursion:
return '%s*RECURSION*\n' % indent
recursion[w_reckey] = None
if arr_in_arr:
acc.append('%s%s%s(\n' % (' ', header, prefix))
else:
acc.append('%s%s%s(\n' % (indent, header, prefix))
if indent.endswith('&'):
indent = indent[:-1]
subindent = indent + ' '
for key, w_value in dct_w.iteritems():
w_value = w_value.deref_temp()
# case where the attribute is protected (strip the mangled-name prefix)
if key.startswith('\x00') and len(key) > 1:
key = key[3:]
# case where the key is a single NUL byte
if key == '\x00':
key = '\' . \"\\0\" . \''
if w_value is w_reckey:
# space.ec.error("Nesting level too deep - recursive dependency?")
space.ec.warn("var_export does not handle circular references")
return ""
try:
index = try_convert_str_to_int(key)
s = '%s%d =>' % (subindent, index)
except ValueError:
s = '%s\'%s\' =>' % (subindent, key)
acc.append(s)
if isinstance(w_value, W_ArrayObject):
acc.append(array_var_export(w_value.as_rdict(), space,
' ', recursion, w_value,
'\n array', suffix=',', arr_in_arr=True))
elif w_value.tp == space.tp_object:
acc.append('\n')
acc.append(w_value.var_export(space, ' ', recursion, suffix='),'))
else:
acc.append(w_value.var_export(space, ' ', recursion, suffix=','))
acc.append('\n')
acc.append('%s)%s' % (indent, suffix))
del recursion[w_reckey]
return acc.build()
def array_serialize(w_arr, space, builder, memo):
builder.append("a:%d:{" % w_arr.arraylen())
memo.add_counter()
with space.iter(w_arr) as itr:
while not itr.done():
w_key, w_value = itr.next_item(space)
w_key.serialize(space, builder, memo)
if w_value.serialize(space, builder, memo):
memo.add_counter()
builder.append("}")
|
|
# -*- coding: utf-8 -*-
import copy
import json
import re
import unittest
from django.contrib import admin
from django.contrib.auth import get_permission_codename
from django.contrib.auth.models import Permission
from django.template import RequestContext
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.http import urlencode, urlunquote
from cms.api import add_plugin, create_page, create_title
from cms.models import CMSPlugin, Page, Title
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.compat import get_page_placeholders
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import (
_plugin_tags_to_html, _render_cms_plugin, plugin_tags_to_admin_html,
plugin_tags_to_id_list, plugin_to_tag,
)
from .base import BaseTestCase
try:
from djangocms_transfer.exporter import export_page
HAS_DJANGOCMS_TRANSFER = True
except ImportError:
HAS_DJANGOCMS_TRANSFER = False
try:
import djangocms_translations # noqa
HAS_DJANGOCMS_TRANSLATIONS = True
except ImportError:
HAS_DJANGOCMS_TRANSLATIONS = False
class PluginActionsTestCase(BaseTestCase):
def get_custom_admin_url(self, plugin_class, name):
plugin_type = plugin_class.__name__.lower()
url_name = '%s_%s_%s' % (plugin_class.model._meta.app_label, plugin_type, name)
return admin_reverse(url_name)
def _add_child_plugin(self, text_plugin, plugin_type='PicturePlugin', data_suffix=None):
name = '{} record'.format(plugin_type)
if data_suffix is not None:
name = '{} {}'.format(name, data_suffix)
basic_plugins = {
'LinkPlugin': {
'name': name,
'external_link': 'https://www.django-cms.org',
},
'PreviewDisabledPlugin': {},
'SekizaiPlugin': {},
}
if plugin_type == 'PicturePlugin':
data = {'caption_text': name, 'picture': self.create_filer_image_object()}
else:
data = basic_plugins[plugin_type]
plugin = add_plugin(
text_plugin.placeholder,
plugin_type,
'en',
target=text_plugin,
**data
)
return plugin
def _add_text_plugin(self, placeholder, plugin_type='TextPlugin'):
text_plugin = add_plugin(
placeholder,
plugin_type,
'en',
body='Hello World',
)
return text_plugin
def _replace_plugin_contents(self, text, new_plugin_content):
def _do_replace(obj, match):
return plugin_to_tag(obj, content=new_plugin_content)
return _plugin_tags_to_html(text, output_func=_do_replace)
def add_plugin_to_text(self, text_plugin, plugin):
text_plugin.body = '%s %s' % (text_plugin.body, plugin_to_tag(plugin))
text_plugin.save()
return text_plugin
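# For reference, plugin_to_tag(plugin) above emits a <cms-plugin ...></cms-plugin>
# marker, so the body ends up looking roughly like (id is illustrative):
#   'Hello World <cms-plugin alt="..." title="..." id="42"></cms-plugin>'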
def _give_permission(self, user, model, permission_type, save=True):
codename = get_permission_codename(permission_type, model._meta)
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_cms_permissions(self, user):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type)
def get_page_admin(self):
admin.autodiscover()
return admin.site._registry[Page]
def get_post_request(self, data):
return self.get_request(post_data=data)
def get_plugin_id_from_response(self, response):
url = urlunquote(response.url)
# Ideal case, this looks like:
# /en/admin/cms/page/edit-plugin/1/
return re.findall(r'\d+', url)[0]
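# e.g. re.findall(r'\d+', '/en/admin/cms/page/edit-plugin/1/')[0] == '1'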
def test_add_and_edit_plugin(self):
"""
Test that you can add a text plugin
"""
admin = self.get_superuser()
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin')
with self.login_user_context(admin):
response = self.client.get(endpoint)
text_plugin_pk = self.get_plugin_id_from_response(response)
self.assertIn('?delete-on-cancel', response.url)
self.assertEqual(response.status_code, 302)
# Assert "ghost" plugin has been created
self.assertObjectExist(CMSPlugin.objects.all(), pk=text_plugin_pk)
cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk)
text_plugin_class = cms_plugin.get_plugin_class_instance()
# Assert "real" plugin has not been created yet
self.assertObjectDoesNotExist(Text.objects.all(), pk=text_plugin_pk)
add_url = response.url
with self.login_user_context(admin):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
response = self.client.get(add_url)
self.assertEqual(response.status_code, 200)
# Assert cancel token is present
self.assertContains(response, action_token)
with self.login_user_context(admin):
data = {'body': 'Hello world'}
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
# Assert "real" plugin has been created yet
self.assertObjectExist(Text.objects.all(), pk=text_plugin_pk)
text_plugin = Text.objects.get(pk=text_plugin_pk)
# Assert the text was correctly saved
self.assertEqual(text_plugin.body, 'Hello world')
def test_add_and_cancel_plugin(self):
"""
Test that you can add a text plugin
"""
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin')
with self.login_user_context(self.get_superuser()):
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 302)
# Point to the newly created text plugin
text_plugin_pk = self.get_plugin_id_from_response(response)
cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk)
text_plugin_class = cms_plugin.get_plugin_class_instance()
# Assert "ghost" plugin has been created
self.assertObjectExist(CMSPlugin.objects.all(), pk=text_plugin_pk)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
data = {'token': action_token}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 204)
# Assert "ghost" plugin has been removed
self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=text_plugin_pk)
# Assert "real" plugin was never created
self.assertObjectDoesNotExist(Text.objects.all(), pk=text_plugin_pk)
# Assert user can't delete a non "ghost" plugin
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
data = {'token': action_token}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 400)
def test_add_and_cancel_child_plugin(self):
"""
Test that you can add a text plugin
"""
admin = self.get_superuser()
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin_1 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
child_plugin_2 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
child_plugin_3 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
child_plugin_4 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_1)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_4)
with self.login_user_context(admin):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
# Assert user is unable to delete a saved child plugin
data = {'token': action_token, 'child_plugins': [child_plugin_1.pk]}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 400)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_1.pk)
# Assert user is unable to delete if plugins array contains
# an unsaved plugin.
plugin_ids = [
child_plugin_1.pk,
child_plugin_2.pk,
child_plugin_3.pk,
child_plugin_4.pk,
]
data = {'token': action_token, 'child_plugins': plugin_ids}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 400)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_1.pk)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_2.pk)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_3.pk)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_4.pk)
plugin_ids = [
child_plugin_2.pk,
child_plugin_3.pk,
]
data = {'token': action_token, 'child_plugins': plugin_ids}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 204)
self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=child_plugin_2.pk)
self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=child_plugin_3.pk)
def test_action_token_per_session(self):
# Assert that a cancel token for the same plugin
# is different per user session.
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token_1 = text_plugin_class.get_action_token(request, text_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token_2 = text_plugin_class.get_action_token(request, text_plugin)
self.assertNotEqual(action_token_1, action_token_2)
def test_add_and_cancel_plugin_permissions(self):
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin')
with self.login_user_context(self.user):
response = self.client.post(endpoint, {})
self.assertEqual(response.status_code, 302)
# Point to the newly created text plugin
text_plugin_pk = self.get_plugin_id_from_response(response)
cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk)
text_plugin_class = cms_plugin.get_plugin_class_instance()
endpoint = self.get_custom_admin_url(TextPlugin, 'delete_on_cancel')
# Assert a standard user (no staff) can't delete ghost plugin
with self.login_user_context(self.get_standard_user()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
data = {'token': action_token}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
staff_user = self._create_user('addonly-staff', is_staff=True, is_superuser=False)
self._give_cms_permissions(staff_user)
self._give_permission(staff_user, text_plugin_class.model, 'add')
with self.login_user_context(staff_user):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
data = {'token': action_token}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 204)
def test_change_form_has_rendered_plugin_content(self):
"""
When the text form is rendered in the admin,
the child plugins are rendered and their rendered contents are passed
as initial data to the text field.
"""
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
child_plugins = [
self._add_child_plugin(text_plugin),
self._add_child_plugin(text_plugin),
]
for plugin in child_plugins:
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
context = RequestContext(request)
context['request'] = request
text_with_rendered_plugins = plugin_tags_to_admin_html(
text=text_plugin.body,
context=context,
)
endpoint = self.get_change_plugin_uri(text_plugin)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['adminform'].form['body'].value(),
text_with_rendered_plugins,
)
self.assertContains(
response,
escape(text_with_rendered_plugins),
html=False,
)
def test_user_cant_edit_child_plugins_directly(self):
"""
No user regardless of permissions can modify the contents
of a child plugin directly in the text plugin text.
"""
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
child_plugins = [
self._add_child_plugin(text_plugin),
self._add_child_plugin(text_plugin),
]
for plugin in child_plugins:
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.login_user_context(self.get_superuser()):
expected_text = text_plugin.body
# This returns the child plugins with their content
# overridden to <img src="">
overridden_text = self._replace_plugin_contents(
text_plugin.body,
new_plugin_content='<img src="">',
)
endpoint = self.get_change_plugin_uri(text_plugin)
response = self.client.post(endpoint, {'body': overridden_text})
text_plugin.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertXMLEqual(text_plugin.body, expected_text)
def test_render_child_plugin_endpoint(self):
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(text_plugin)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += '?token={}&plugin={}'.format(action_token, child_plugin.pk)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(child_plugin, context)
rendered_child_plugin = plugin_to_tag(
child_plugin,
content=rendered_content,
admin=True,
)
self.assertEqual(force_text(response.content), rendered_child_plugin)
child_plugin = self._add_child_plugin(text_plugin, plugin_type='PreviewDisabledPlugin')
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += '?token={}&plugin={}'.format(action_token, child_plugin.pk)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
# it is important that we do not add any extra whitespace inside of
# <cms-plugin></cms-plugin>
rendered_child_plugin = ('<cms-plugin render-plugin=false '
'alt="Preview Disabled Plugin - 3 '
'"title="Preview Disabled Plugin - 3" '
'id="3"><span>Preview is disabled for this plugin</span>'
'</cms-plugin>')
self.assertEqual(force_text(response.content), rendered_child_plugin)
def test_render_child_plugin_endpoint_calls_context_processors(self):
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(
text_plugin,
plugin_type='SekizaiPlugin',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += '?token={}&plugin={}'.format(action_token, child_plugin.pk)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(child_plugin, context)
rendered_child_plugin = plugin_to_tag(
child_plugin,
content=rendered_content,
admin=True,
)
self.assertEqual(force_text(response.content), rendered_child_plugin)
def test_render_child_plugin_permissions(self):
"""
Users can't render a child plugin without change permissions
on the placeholder attached object and the text plugin.
"""
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(text_plugin)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_standard_user()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += '?token={}&plugin={}'.format(action_token, child_plugin.pk)
response = self.client.get(endpoint)
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403, html=True)
def test_render_child_plugin_token_validation(self):
"""
Users can only render a child plugin if the token
was created in the current session and its text plugin
matches the child plugin's parent.
"""
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(text_plugin)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
# Tokens are unique per session.
# Users can't render a child plugin with a token
# from another session.
with self.login_user_context(self.get_superuser()):
request = self.get_request()
with self.login_user_context(self.get_superuser()):
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += '?token={}&plugin={}'.format(action_token, child_plugin.pk)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 400)
self.assertEqual(force_text(response.content), 'Unable to process your request. Invalid token.')
text_plugin_2 = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the second",
)
# Tokens are unique per text plugin.
# User can't render a child plugin for a token whose text plugin
# does not match the plugin's parent.
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin_2)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += '?token={}&plugin={}'.format(action_token, child_plugin.pk)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 400)
self.assertEqual(force_text(response.content), 'Unable to process your request.')
def test_render_plugin(self):
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = self._add_text_plugin(simple_placeholder)
for i in range(0, 10):
plugin = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
data_suffix=i
)
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.assertNumQueries(2):
request = self.get_request()
context = RequestContext(request)
context['request'] = request
rendered = _render_cms_plugin(text_plugin, context)
for i in range(0, 10):
self.assertTrue('LinkPlugin record %d' % i in rendered)
def test_render_extended_plugin(self):
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = self._add_text_plugin(simple_placeholder, 'ExtendedTextPlugin')
for i in range(0, 10):
plugin = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
data_suffix=i
)
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.assertNumQueries(2):
request = self.get_request()
context = RequestContext(request)
context['request'] = request
rendered = _render_cms_plugin(text_plugin, context)
for i in range(0, 10):
self.assertTrue('LinkPlugin record %d' % i in rendered)
def test_copy_plugin_integrity(self):
"""
Test that copying of textplugins replaces references to copied plugins
"""
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = self._add_text_plugin(simple_placeholder)
child_plugin_1 = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_1)
child_plugin_2 = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_2)
# create a page translation to copy plugins to
translation = create_title(
'fr',
'test-page-fr',
simple_page,
slug='test-page-fr'
)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 3)
self.assertEqual(CMSPlugin.objects.filter(language=translation.language).count(), 0)
data = {
'source_placeholder_id': simple_placeholder.pk,
'target_placeholder_id': simple_placeholder.pk,
'target_language': translation.language,
'source_language': 'en',
}
endpoint = self.get_admin_url(Page, 'copy_plugins')
endpoint += '?' + urlencode({'cms_path': '/en/'})
with self.login_user_context(self.user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 3)
self.assertEqual(CMSPlugin.objects.filter(language=translation.language).count(), 3)
plugins = list(CMSPlugin.objects.all())
new_plugin = plugins[3].get_plugin_instance()[0]
idlist = sorted(plugin_tags_to_id_list(new_plugin.body))
expected = sorted([plugins[4].pk, plugins[5].pk])
self.assertEqual(idlist, expected)
def test_copy_plugin_callback(self):
simple_page = create_page('test page', 'page.html', u'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin_1 = self._add_text_plugin(simple_placeholder)
child_plugin_1_a = self._add_child_plugin(
text_plugin_1,
plugin_type='LinkPlugin',
)
text_plugin_1 = self.add_plugin_to_text(text_plugin_1, child_plugin_1_a)
child_plugin_1_b = self._add_child_plugin(
text_plugin_1,
plugin_type='LinkPlugin',
)
text_plugin_1 = self.add_plugin_to_text(text_plugin_1, child_plugin_1_b)
text_plugin_2 = copy.copy(text_plugin_1)
text_plugin_2.pk = None
text_plugin_2.save()
child_plugin_2_a = self._add_child_plugin(
text_plugin_2,
plugin_type='LinkPlugin',
)
child_plugin_2_b = self._add_child_plugin(
text_plugin_2,
plugin_type='LinkPlugin',
)
source_map = {
child_plugin_1_a.pk: child_plugin_2_a,
child_plugin_1_b.pk: child_plugin_2_b,
}
TextPlugin.do_post_copy(text_plugin_2, source_map)
text_plugin_2.refresh_from_db()
idlist = sorted(plugin_tags_to_id_list(text_plugin_2.body))
expected = sorted([child_plugin_2_a.pk, child_plugin_2_b.pk])
self.assertEqual(idlist, expected)
def test_plugin_tags_to_id_list(self):
pairs = (
('<cms-plugin id="1"></cms-plugin><cms-plugin id="2"></cms-plugin>', [1, 2]),
('<cms-plugin alt="<h1>markup</h1>" id="1"></cms-plugin><cms-plugin id="1"></cms-plugin>', [1, 1]),
)
for markup, expected in pairs:
self.assertEqual(plugin_tags_to_id_list(markup), expected)
def test_text_plugin_xss(self):
page = create_page('test page', 'page.html', u'en')
placeholder = get_page_placeholders(page, 'en').get(slot='content')
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
endpoint = self.get_change_plugin_uri(plugin)
with self.login_user_context(self.user):
data = {
'body': (
'<div onload="do_evil_stuff();">divcontent</div><a href="javascript:do_evil_stuff();">acontent</a>'
)
}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.reload(plugin).body, '<div>divcontent</div><a>acontent</a>')
@unittest.skipUnless(
HAS_DJANGOCMS_TRANSLATIONS and HAS_DJANGOCMS_TRANSFER,
'Optional dependencies for tests are not installed.'
)
class DjangoCMSTranslationsIntegrationTestCase(BaseTestCase):
def setUp(self):
super(DjangoCMSTranslationsIntegrationTestCase, self).setUp()
self.page = create_page('test page', 'page.html', 'en', published=True)
self.placeholder = get_page_placeholders(self.page, 'en').get(slot='content')
def _export_page(self):
return json.loads(export_page(self.page, 'en'))
def test_textfield_without_children(self):
raw_content = '<p>Please <a href="http://www.google.com">CLICK ON LINK1</a> to go to link1.</p>'
add_plugin(self.placeholder, 'TextPlugin', 'en', body=raw_content)
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
self.assertEquals(result, raw_content)
self.assertEquals(children_included_in_this_content, [])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {})
def test_textfield_with_children(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
parent_body = (
'<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1.</p>'
).format(child1.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
parent_body
.replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1)
)
        self.assertEqual(result, expected)
        self.assertEqual(children_included_in_this_content, [child1.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1'})
def test_textfield_with_multiple_children(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
parent_body = (
'<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1 '
'or <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link2.</p>'
).format(child1.pk, child2.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
parent_body
.replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1)
.replace('></cms-plugin>', '>CLICK ON LINK2</cms-plugin>', 1)
)
        self.assertEqual(result, expected)
        self.assertEqual(children_included_in_this_content, [child1.pk, child2.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1', child2.pk: 'CLICK ON LINK2'})
def test_textfield_with_multiple_children_one_deleted(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
parent_body = (
'<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1 '
'or <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link2.</p>'
).format(child1.pk, child2.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
child1.delete()
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
'<p>Please to go to link1 '
'or <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}">CLICK ON LINK2</cms-plugin> to go to link2.</p>'
).format(child2.pk)
        self.assertEqual(result, expected)
        self.assertEqual(children_included_in_this_content, [child2.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child2.pk: 'CLICK ON LINK2'})
def test_textfield_with_untranslatable_children(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummySpacerPlugin', 'en', target=parent)
parent_body = (
'<p>This is cool <cms-plugin alt="Dummy Spacer Plugin - dummy spacer object "'
'title="Dummy Spacer Plugin - dummy spacer object" id="{}"></cms-plugin> this is nice</p>'
).format(child1.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
parent_body
)
        self.assertEqual(result, expected)
        self.assertEqual(children_included_in_this_content, [child1.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child1.pk: ''})
|
|
# -*- coding: utf-8 -*-
"""
Django settings for taskist project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('taskist')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework',
'djangular',
'polymorphic',
'easy_thumbnails',
'mptt'
)
# Apps specific for this project go here.
LOCAL_APPS = (
'taskist.users', # custom users app
# Your stuff: custom apps go here
'taskist.core',
'taskist.common',
'taskist.blog'
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    # If djangosecure is enabled, its SecurityMiddleware must be listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'taskist.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Dmitry Chekurov""", 'chedv13@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
# 'default': env.db("DATABASE_URL", default="postgres:///taskist"),
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'taskist',
'USER': 'taskist',
'PASSWORD': 'YNL7TCPrn5cvWkG',
'HOST': '', # Set to empty string for localhost.
'PORT': '', # Set to empty string for default.
}
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
str(APPS_DIR.path('static/js/templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
# 'djangular.finders.NamespacedAngularAppDirectoriesFinder'
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Your common stuff: Below this line define 3rd party library settings
REST_FRAMEWORK = {
'PAGINATE_BY': 10,
'PAGINATE_BY_PARAM': 'page_size',
'MAX_PAGINATE_BY': 100,
# # DRF v3.1+
# 'DEFAULT_PAGINATION_CLASS':
# 'rest_framework_json_api.pagination.PageNumberPagination',
# # older than DRF v3.1
# 'DEFAULT_PAGINATION_SERIALIZER_CLASS':
# 'rest_framework_json_api.pagination.PaginationSerializer',
# 'DEFAULT_PARSER_CLASSES': (
# 'rest_framework_json_api.parsers.JSONParser',
# 'rest_framework.parsers.FormParser',
# 'rest_framework.parsers.MultiPartParser'
# ),
# 'DEFAULT_RENDERER_CLASSES': (
# 'rest_framework_json_api.renderers.JSONRenderer',
# 'rest_framework.renderers.BrowsableAPIRenderer',
# ),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
}
THUMBNAIL_ALIASES = {
'': {
'size': (50, 50),
'autocrop': True,
'crop': 'smart',
'upscale': True
}
}
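# Hedged usage note (illustrative, not part of the original settings): the
# env()/env.bool() lookups above mean most values can be overridden from the
# environment without editing this file, e.g.
#   DJANGO_DEBUG=True \
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend \
#   python manage.py runserver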
|
|
# Copyright 2011,2013 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# DHCP Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | op (1) | htype (1) | hlen (1) | hops (1) |
# +---------------+---------------+---------------+---------------+
# | xid (4) |
# +-------------------------------+-------------------------------+
# | secs (2) | flags (2) |
# +-------------------------------+-------------------------------+
# | ciaddr (4) |
# +---------------------------------------------------------------+
# | yiaddr (4) |
# +---------------------------------------------------------------+
# | siaddr (4) |
# +---------------------------------------------------------------+
# | giaddr (4) |
# +---------------------------------------------------------------+
# | |
# | chaddr (16) |
# | |
# | |
# +---------------------------------------------------------------+
# | |
# | sname (64) |
# +---------------------------------------------------------------+
# | |
# | file (128) |
# +---------------------------------------------------------------+
# | |
# | options (variable) |
# +---------------------------------------------------------------+
#
#======================================================================
import struct
import string
from packet_utils import *
from packet_base import packet_base
import pox.lib.util as util
from pox.lib.addresses import *
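# Registry mapping DHCP option codes to unpack() callables.  It is populated
# by the @dhcp_option_def decorator below and consulted in
# dhcp.unpackOptions(); unknown codes fall back to DHCPRawOption.unpack.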
_dhcp_option_unpackers = {}
class dhcp(packet_base):
"DHCP Packet struct"
STRUCT_BOUNDARY = 28
MIN_LEN = 240
SERVER_PORT = 67
CLIENT_PORT = 68
BROADCAST_FLAG = 0x8000
BOOTREQUEST = 1
BOOTREPLY = 2
MSG_TYPE_OPT = 53
NUM_MSG_TYPES = 8
DISCOVER_MSG = 1
OFFER_MSG = 2
REQUEST_MSG = 3
DECLINE_MSG = 4
ACK_MSG = 5
NAK_MSG = 6
RELEASE_MSG = 7
INFORM_MSG = 8
SUBNET_MASK_OPT = 1
GATEWAY_OPT = 3
ROUTERS_OPT = 3 # Synonym for above
TIME_SERVERS_OPT = 4
DNS_SERVER_OPT = 6
HOST_NAME_OPT = 12
DOMAIN_NAME_OPT = 15
MTU_OPT = 26
BCAST_ADDR_OPT = 28
VENDOR_OPT = 43
REQUEST_IP_OPT = 50
REQUEST_LEASE_OPT = 51
OVERLOAD_OPT = 52
SERVER_ID_OPT = 54
PARAM_REQ_OPT = 55
ERROR_MSG_OPT = 56
T1_OPT = 58
T2_OPT = 59
CLIENT_ID_OPT = 61
PAD_OPT = 0
END_OPT = 255
MAGIC = b'\x63\x82\x53\x63'
def __init__(self, raw=None, prev=None, **kw):
packet_base.__init__(self)
self.prev = prev
self.op = 0
self.htype = 0
self.hlen = 0
self.hops = 0
self.xid = 0
self.secs = 0
self.flags = 0
self.ciaddr = IP_ANY
self.yiaddr = IP_ANY
self.siaddr = IP_ANY
self.giaddr = IP_ANY
self.chaddr = None
self.sname = b''
self.file = b''
self.magic = self.MAGIC
self._raw_options = b''
if raw is not None:
self.parse(raw)
else:
self.options = util.DirtyDict()
self._init(kw)
def _to_str(self):
s = '[DHCP op:'+str(self.op)
s += ' htype:'+str(self.htype)
s += ' hlen:'+str(self.hlen)
s += ' hops:'+str(self.hops)
s += ' xid:'+str(self.xid)
s += ' secs:'+str(self.secs)
s += ' flags:'+str(self.flags)
s += ' ciaddr:'+str(self.ciaddr)
s += ' yiaddr:'+str(self.yiaddr)
s += ' siaddr:'+str(self.siaddr)
s += ' giaddr:'+str(self.giaddr)
s += ' chaddr:'
if isinstance(self.chaddr, EthAddr):
s += str(self.chaddr)
elif self.chaddr is not None:
s += ' '.join(["{0:02x}".format(x) for x in self.chaddr])
s += ' magic:'+' '.join(
["{0:02x}".format(ord(x)) for x in self.magic])
#s += ' options:'+' '.join(["{0:02x}".format(ord(x)) for x in
# self._raw_options])
if len(self.options):
s += ' options:'
s += ','.join(repr(x) for x in self.options.values())
s += ']'
return s
def parse(self, raw):
assert isinstance(raw, bytes)
self.raw = raw
dlen = len(raw)
if dlen < dhcp.MIN_LEN:
self.msg('(dhcp parse) warning DHCP packet data too short ' +
'to parse header: data len %u' % (dlen,))
return None
(self.op, self.htype, self.hlen, self.hops, self.xid,self.secs,
self.flags, self.ciaddr, self.yiaddr, self.siaddr,
self.giaddr) = struct.unpack('!BBBBIHHIIII', raw[:28])
self.ciaddr = IPAddr(self.ciaddr)
self.yiaddr = IPAddr(self.yiaddr)
self.siaddr = IPAddr(self.siaddr)
self.giaddr = IPAddr(self.giaddr)
self.chaddr = raw[28:44]
if self.hlen == 6:
# Assume chaddr is ethernet
self.chaddr = EthAddr(self.chaddr[:6])
self.sname = raw[44:108]
        self.file = raw[108:236]  # 'file' field starts right after the 64-byte sname
self.magic = raw[236:240]
self.hdr_len = dlen
self.parsed = True
if self.hlen > 16:
self.warn('(dhcp parse) DHCP hlen %u too long' % (self.hlen),)
return
for i in range(4):
if dhcp.MAGIC[i] != self.magic[i]:
self.warn('(dhcp parse) bad DHCP magic value %s' %
str(self.magic))
return
self._raw_options = raw[240:]
self.parseOptions()
self.unpackOptions()
self.parsed = True
def unpackOptions(self):
for k,v in self.options.items():
unpack = _dhcp_option_unpackers.get(k, DHCPRawOption.unpack)
try:
self.options[k] = unpack(v,k)
except Exception as e:
self.warn("(dhcp parse) bad option %s: %s" % (k,e))
#import traceback
#traceback.print_exc()
self.options[k] = DHCPRawOption.unpack(v,k,True)
def parseOptions(self):
self.options = util.DirtyDict()
self.parseOptionSegment(self._raw_options)
if dhcp.OVERLOAD_OPT in self.options:
opt_val = self.options[dhcp.OVERLOAD_OPT]
if len(opt_val) != 1:
self.warn('DHCP overload option has bad len %u' %
(len(opt_val),))
return
            # The value is still a raw one-byte string here (unpackOptions runs
            # later); convert it so the flags (1=file, 2=sname, 3=both) match.
            opt_val = ord(opt_val)
            if opt_val == 1 or opt_val == 3:
                self.parseOptionSegment(self.file)
            if opt_val == 2 or opt_val == 3:
                self.parseOptionSegment(self.sname)
def parseOptionSegment(self, barr):
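        # Walk a TLV-encoded option segment: each entry is <code><len><data>,
        # PAD (0) is skipped, END (255) terminates the segment, and repeated
        # codes are concatenated per RFC 3396.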
        ofs = 0
l = len(barr)
while ofs < l:
opt = ord(barr[ofs])
if opt == dhcp.END_OPT:
return
ofs += 1
if opt == dhcp.PAD_OPT:
continue
if ofs >= l:
self.warn('DHCP option ofs extends past segment')
return
opt_len = ord(barr[ofs])
ofs += 1 # Account for the length octet
if ofs + opt_len > l:
return False
if opt in self.options:
# Append option, per RFC 3396
self.options[opt] += barr[ofs:ofs+opt_len]
else:
self.options[opt] = barr[ofs:ofs+opt_len]
ofs += opt_len
self.warn('DHCP end of option segment before END option')
def packOptions (self):
o = b''
def addPart (k, v):
o = b''
o += chr(k)
o += chr(len(v))
o += bytes(v)
if len(o) & 1: # Length is not even
o += chr(dhcp.PAD_OPT)
return o
for k,v in self.options.iteritems():
if k == dhcp.END_OPT: continue
if k == dhcp.PAD_OPT: continue
if isinstance(v, DHCPOption):
v = v.pack()
if isinstance(v, bytes) and (len(v) > 255):
# Long option, per RFC 3396
v = [v[i:i+255] for i in range(0, len(v), 255)]
if isinstance(v, list): # Better way to tell?
for part in v:
o += addPart(k, part)
else:
o += addPart(k, v)
o += chr(dhcp.END_OPT)
self._raw_options = o
if isinstance(self.options, util.DirtyDict):
self.options.dirty = False
def add_option(self, option, code=None):
if code is None:
code = option.CODE
self.options[code] = option
def hdr(self, payload):
if isinstance(self.options, util.DirtyDict):
if self.options.dirty:
self.packOptions()
else:
self.packOptions()
        if isinstance(self.chaddr, EthAddr):
            chaddr = self.chaddr.toRaw() + (b'\x00' * 10)
        else:
            # Raw/None chaddr: struct's '16s' below pads or truncates to 16 bytes
            chaddr = self.chaddr or b''
        fmt = '!BBBBIHHiiii16s64s128s4s'
return struct.pack(fmt, self.op, self.htype, self.hlen,
self.hops, self.xid, self.secs, self.flags,
IPAddr(self.ciaddr).toSigned(),
IPAddr(self.yiaddr).toSigned(),
IPAddr(self.siaddr).toSigned(),
IPAddr(self.giaddr).toSigned(),
chaddr, self.sname, self.file,
self.magic) + self._raw_options
def appendRawOption (self, code, val = None, length = None):
"""
In general, a much better way to add options should just be
to add them to the .options dictionary.
"""
self._raw_options += chr(code)
if length is None:
if val is None:
return
length = len(val)
self._raw_options += chr(length)
self._raw_options += val
def dhcp_option_def (msg_type):
"""
    DHCP option decorator: registers the decorated DHCPOption subclass's
    unpack() for option code `msg_type` and stores the code on the class.
"""
def f (cls):
_dhcp_option_unpackers[msg_type] = cls.unpack
cls.CODE = msg_type
return cls
return f
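# Hedged illustration (not part of the original module): new option types are
# registered by decorating a DHCPOption subclass with their option code, e.g.
#
#   @dhcp_option_def(dhcp.CLIENT_ID_OPT)
#   class DHCPClientIdentifierOption (DHCPRawOption):   # hypothetical name
#       pass
#
# after which dhcp.unpackOptions() will route option 61 through its unpack().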
class DHCPOption (object):
CODE = None
@classmethod
def unpack (cls, data, code = None):
pass
def pack (self):
return b''
@property
def _name (self):
n = type(self).__name__
if n.startswith("DHCP"): n = n[4:]
if n.endswith("Option"): n = n[:-6]
if n == "": return "Option"
return n
class DHCPRawOption (DHCPOption):
def __init__ (self, data = b'', bad = False):
self.data = data
self.bad = bad # True if option wasn't parsed right
@classmethod
def unpack (cls, data, code = None, bad = False):
self = cls()
self.data = data
self.bad = bad
self.CODE = code
return self
def pack (self):
return self.data
def __repr__ (self):
data = self.data
if not all(ord(c)<127 and c in string.printable for c in data):
data = " ".join("%02x" % (ord(x),) for x in data)
else:
data = "".join(x if ord(x) >= 32 else "." for x in data)
if len(data) > 30:
data = data[:30] + "..."
n = self._name
if n == 'Raw': n += str(self.CODE)
return "%s(%s)" % (n, data)
class DHCPIPOptionBase (DHCPOption):
"""
Superclass for options which are an IP address
"""
def __init__ (self, addr = None):
self.addr = IPAddr(0) if addr is None else IPAddr(addr)
@classmethod
def unpack (cls, data, code = None):
self = cls()
if len(data) != 4: raise RuntimeError("Bad option length")
self.addr = IPAddr(data)
return self
def pack (self):
return self.addr.toRaw()
def __repr__ (self):
return "%s(%s)" % (self._name, self.addr)
class DHCPIPsOptionBase (DHCPOption):
"""
Superclass for options which are a list of IP addresses
"""
def __init__ (self, addrs=[]):
if isinstance(addrs, (basestring,IPAddr)):
self.addrs = [IPAddr(addrs)]
else:
self.addrs = [IPAddr(a) for a in addrs]
@classmethod
def unpack (cls, data, code = None):
self = cls()
if (len(data) % 4) != 0: raise RuntimeError("Bad option length")
while len(data):
self.addrs.append(IPAddr(data[:4]))
data = data[4:]
return self
def pack (self):
r = b''
for addr in self.addrs:
r += addr.toRaw()
return r
@property
def addr (self):
if len(self.addrs) == 0: return None
return self.addrs[0]
def __repr__ (self):
return "%s(%s)" % (self._name, self.addrs)
class DHCPSecondsOptionBase (DHCPOption):
"""
Superclass for options which are a number of seconds as 4 bytes
"""
def __init__ (self, seconds = None):
self.seconds = seconds
@classmethod
def unpack (cls, data, code = None):
self = cls()
if len(data) != 4: raise RuntimeError("Bad option length")
self.seconds, = struct.unpack('!I', data)
return self
def pack (self):
return struct.pack('!I', self.seconds)
def __repr__ (self):
return "%s(%s)" % (self._name, self.seconds)
@dhcp_option_def(dhcp.MSG_TYPE_OPT)
class DHCPMsgTypeOption (DHCPOption):
def __init__ (self, type=None):
self.type = type
@classmethod
def unpack (cls, data, code = None):
self = cls()
if len(data) != 1: raise RuntimeError("Bad option length")
self.type = ord(data[0])
return self
def pack (self):
return chr(self.type)
def __repr__ (self):
t = {
1:'DISCOVER',
2:'OFFER',
3:'REQUEST',
4:'DECLINE',
5:'ACK',
6:'NAK',
7:'RELEASE',
8:'INFORM',
}.get(self.type, "TYPE"+str(self.type))
return "%s(%s)" % (self._name, t)
@dhcp_option_def(dhcp.SUBNET_MASK_OPT)
class DHCPSubnetMaskOption (DHCPIPOptionBase):
pass
@dhcp_option_def(dhcp.ROUTERS_OPT)
class DHCPRoutersOption (DHCPIPsOptionBase):
pass
@dhcp_option_def(dhcp.TIME_SERVERS_OPT)
class DHCPTimeServersOption (DHCPIPsOptionBase):
pass
@dhcp_option_def(dhcp.DNS_SERVER_OPT)
class DHCPDNSServersOption (DHCPIPsOptionBase):
pass
@dhcp_option_def(dhcp.HOST_NAME_OPT)
class DHCPHostNameOption (DHCPRawOption):
pass
@dhcp_option_def(dhcp.DOMAIN_NAME_OPT)
class DHCPDomainNameOption (DHCPRawOption):
pass
@dhcp_option_def(dhcp.BCAST_ADDR_OPT)
class DHCPBroadcastAddressOption (DHCPIPOptionBase):
pass
@dhcp_option_def(dhcp.VENDOR_OPT)
class DHCPVendorOption (DHCPRawOption):
pass
@dhcp_option_def(dhcp.REQUEST_IP_OPT)
class DHCPRequestIPOption (DHCPIPOptionBase):
pass
@dhcp_option_def(dhcp.REQUEST_LEASE_OPT)
class DHCPIPAddressLeaseTimeOption (DHCPSecondsOptionBase):
pass
@dhcp_option_def(dhcp.OVERLOAD_OPT)
class DHCPOptionOverloadOption (DHCPOption):
def __init__ (self, value = None):
self.value = value
@classmethod
def unpack (cls, data, code = None):
self = cls()
if len(data) != 1: raise RuntimeError("Bad option length")
self.value = ord(data[0])
return self
def pack (self):
return chr(self.value)
def __repr__ (self):
return "%s(%s)" % (self._name, self.value)
@dhcp_option_def(dhcp.SERVER_ID_OPT)
class DHCPServerIdentifierOption (DHCPIPOptionBase):
pass
@dhcp_option_def(dhcp.ERROR_MSG_OPT)
class DHCPErrorMessageOption (DHCPRawOption):
pass
@dhcp_option_def(dhcp.T1_OPT)
class DHCPRenewalTimeOption (DHCPSecondsOptionBase):
pass
@dhcp_option_def(dhcp.T2_OPT)
class DHCPRebindingTimeOption (DHCPSecondsOptionBase):
pass
@dhcp_option_def(dhcp.PARAM_REQ_OPT)
class DHCPParameterRequestOption (DHCPOption):
    def __init__ (self, options = None):
        # Avoid a shared mutable default; copy the caller's list.
        self.options = [] if options is None else list(options)
@classmethod
def unpack (cls, data, code = None):
self = cls()
self.options = [ord(x) for x in data]
return self
def pack (self):
return b''.join(chr(x) for x in self.options)
def __repr__ (self):
names = []
for o in sorted(self.options):
n = _dhcp_option_unpackers.get(o)
if n is None or not hasattr(n, 'im_self'):
n = "Opt/" + str(o)
else:
n = n.im_self.__name__
if n.startswith("DHCP"): n = n[4:]
if n.endswith("Option"): n = n[:-6]
if n == "": n = "Opt"
n += '/' + str(o)
names.append(n)
return "%s(%s)" % (self._name, " ".join(names))
|
|
import copy
from crispy_forms.utils import TEMPLATE_PACK
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied, FieldError
from django.db import models, transaction
from django.forms.models import modelform_factory, modelform_defines_fields
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.template import loader
from django.utils.translation import ugettext as _
from xadmin import widgets
from xadmin.layout import FormHelper, Layout, Fieldset, TabHolder, Container, Column, Col, Field
from xadmin.util import unquote
from xadmin.views.detail import DetailAdminUtil
from base import ModelAdminView, filter_hook, csrf_protect_m
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.IPAddressField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.ForeignKey: {'widget': widgets.AdminSelectWidget},
models.OneToOneField: {'widget': widgets.AdminSelectWidget},
models.ManyToManyField: {'widget': widgets.AdminSelectMultiple},
}
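# Hedged example (illustrative only, not part of xadmin): a concrete admin
# class would typically layer its own widget choices on top of the defaults
# above, e.g.
#
#   class BookAdmin(object):
#       formfield_overrides = {
#           models.TextField: {'widget': widgets.AdminTextareaWidget},
#       }
#
# ModelFormAdminView.__init__ below merges such per-admin overrides over
# FORMFIELD_FOR_DBFIELD_DEFAULTS before building form fields.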
class ReadOnlyField(Field):
template = "xadmin/layout/field_value.html"
def __init__(self, *args, **kwargs):
self.detail = kwargs.pop('detail')
super(ReadOnlyField, self).__init__(*args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
html = ''
for field in self.fields:
result = self.detail.get_field_result(field)
field = {'auto_id': field}
html += loader.render_to_string(
self.template, {'field': field, 'result': result})
return html
class ModelFormAdminView(ModelAdminView):
form = forms.ModelForm
formfield_overrides = {}
readonly_fields = ()
style_fields = {}
exclude = None
relfield_style = None
save_as = False
save_on_top = False
add_form_template = None
change_form_template = None
form_layout = None
def __init__(self, request, *args, **kwargs):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
super(ModelFormAdminView, self).__init__(request, *args, **kwargs)
@filter_hook
def formfield_for_dbfield(self, db_field, **kwargs):
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if isinstance(db_field, models.ManyToManyField) and not db_field.rel.through._meta.auto_created:
return None
attrs = self.get_field_attrs(db_field, **kwargs)
return db_field.formfield(**dict(attrs, **kwargs))
@filter_hook
def get_field_style(self, db_field, style, **kwargs):
if style in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)):
attrs = {'widget': widgets.AdminRadioSelect(
attrs={'inline': 'inline' if style == 'radio-inline' else ''})}
if db_field.choices:
attrs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('Null'))]
)
return attrs
if style in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField):
return {'widget': widgets.AdminCheckboxSelect(attrs={'inline': style == 'checkbox-inline'}),
'help_text': None}
@filter_hook
def get_field_attrs(self, db_field, **kwargs):
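        # Attribute resolution order: explicit style_fields entry, then the
        # related ModelAdmin's relfield_style (for relation fields), then a
        # plain select widget for choice fields, and finally the class-level
        # formfield_overrides matched against the field class's MRO.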
if db_field.name in self.style_fields:
attrs = self.get_field_style(
db_field, self.style_fields[db_field.name], **kwargs)
if attrs:
return attrs
if hasattr(db_field, "rel") and db_field.rel:
related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'):
attrs = self.get_field_style(
db_field, related_modeladmin.relfield_style, **kwargs)
if attrs:
return attrs
if db_field.choices:
return {'widget': widgets.AdminSelectWidget}
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
return self.formfield_overrides[klass].copy()
return {}
@filter_hook
def prepare_form(self):
self.model_form = self.get_model_form()
@filter_hook
def instance_forms(self):
self.form_obj = self.model_form(**self.get_form_datas())
def setup_forms(self):
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
@filter_hook
def valid_forms(self):
return self.form_obj.is_valid()
@filter_hook
def get_model_form(self, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields())
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
        # if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
defaults = {
"form": self.form,
"fields": self.fields and list(self.fields) or None,
"exclude": exclude,
"formfield_callback": self.formfield_for_dbfield,
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__))
@filter_hook
def get_form_layout(self):
layout = copy.deepcopy(self.form_layout)
fields = self.form_obj.fields.keys() + list(self.get_readonly_fields())
if layout is None:
layout = Layout(Container(Col('full',
Fieldset("", *fields, css_class="unsort no_title"), horizontal=True, span=12)
))
elif type(layout) in (list, tuple) and len(layout) > 0:
if isinstance(layout[0], Column):
fs = layout
elif isinstance(layout[0], (Fieldset, TabHolder)):
fs = (Col('full', *layout, horizontal=True, span=12),)
else:
fs = (Col('full', Fieldset("", *layout, css_class="unsort no_title"), horizontal=True, span=12),)
layout = Layout(Container(*fs))
rendered_fields = [i[1] for i in layout.get_field_names()]
container = layout[0].fields
other_fieldset = Fieldset(_(u'Other Fields'), *[f for f in fields if f not in rendered_fields])
if len(other_fieldset.fields):
if len(container) and isinstance(container[0], Column):
container[0].fields.append(other_fieldset)
else:
container.append(other_fieldset)
return layout
@filter_hook
def get_form_helper(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
helper.add_layout(self.get_form_layout())
# deal with readonly fields
readonly_fields = self.get_readonly_fields()
if readonly_fields:
detail = self.get_model_view(
DetailAdminUtil, self.model, self.form_obj.instance)
for field in readonly_fields:
helper[field].wrap(ReadOnlyField, detail=detail)
return helper
@filter_hook
def get_readonly_fields(self):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
@filter_hook
def save_forms(self):
self.new_obj = self.form_obj.save(commit=False)
@filter_hook
def save_models(self):
self.new_obj.save()
@filter_hook
def save_related(self):
self.form_obj.save_m2m()
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
self.instance_forms()
self.setup_forms()
return self.get_response()
@csrf_protect_m
@transaction.atomic
@filter_hook
def post(self, request, *args, **kwargs):
self.instance_forms()
self.setup_forms()
if self.valid_forms():
self.save_forms()
self.save_models()
self.save_related()
response = self.post_response()
if isinstance(response, basestring):
return HttpResponseRedirect(response)
else:
return response
return self.get_response()
@filter_hook
def get_context(self):
add = self.org_obj is None
change = self.org_obj is not None
new_context = {
'form': self.form_obj,
'original': self.org_obj,
'show_delete': self.org_obj is not None,
'add': add,
'change': change,
'errors': self.get_error_list(),
'has_add_permission': self.has_add_permission(),
'has_view_permission': self.has_view_permission(),
'has_change_permission': self.has_change_permission(self.org_obj),
'has_delete_permission': self.has_delete_permission(self.org_obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'form_url': '',
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
}
# for submit line
new_context.update({
'onclick_attrib': '',
'show_delete_link': (new_context['has_delete_permission']
and (change or new_context['show_delete'])),
'show_save_as_new': change and self.save_as,
'show_save_and_add_another': new_context['has_add_permission'] and
(not self.save_as or add),
'show_save_and_continue': new_context['has_change_permission'],
'show_save': True
})
if self.org_obj and new_context['show_delete_link']:
new_context['delete_url'] = self.model_admin_url(
'delete', self.org_obj.pk)
context = super(ModelFormAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_error_list(self):
errors = forms.utils.ErrorList()
if self.form_obj.is_bound:
errors.extend(self.form_obj.errors.values())
return errors
@filter_hook
def get_media(self):
return super(ModelFormAdminView, self).get_media() + self.form_obj.media + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
class CreateAdminView(ModelFormAdminView):
def init_request(self, *args, **kwargs):
self.org_obj = None
if not self.has_add_permission():
raise PermissionDenied
        # common method for both get and post
self.prepare_form()
@filter_hook
def get_form_datas(self):
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
if self.request_method == 'get':
initial = dict(self.request.GET.items())
for k in initial:
try:
f = self.opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return {'initial': initial}
else:
return {'data': self.request.POST, 'files': self.request.FILES}
@filter_hook
def get_context(self):
new_context = {
'title': _('Add %s') % force_unicode(self.opts.verbose_name),
}
context = super(CreateAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelFormAdminView, self).get_breadcrumb()
item = {'title': _('Add %s') % force_unicode(self.opts.verbose_name)}
if self.has_add_permission():
item['url'] = self.model_admin_url('add')
bcs.append(item)
return bcs
@filter_hook
def get_response(self):
context = self.get_context()
context.update(self.kwargs or {})
return TemplateResponse(
self.request, self.add_form_template or self.get_template_list(
'views/model_form.html'),
context, current_app=self.admin_site.name)
@filter_hook
def post_response(self):
"""
Determines the HttpResponse for the add_view stage.
"""
request = self.request
msg = _(
'The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(self.opts.verbose_name),
'obj': "<a class='alert-link' href='%s'>%s</a>" % (self.model_admin_url('change', self.new_obj._get_pk_val()), force_unicode(self.new_obj))}
if "_continue" in request.POST:
self.message_user(
msg + ' ' + _("You may edit it again below."), 'success')
return self.model_admin_url('change', self.new_obj._get_pk_val())
if "_addanother" in request.POST:
self.message_user(msg + ' ' + (_("You may add another %s below.") % force_unicode(self.opts.verbose_name)), 'success')
return request.path
else:
self.message_user(msg, 'success')
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if "_redirect" in request.POST:
return request.POST["_redirect"]
elif self.has_view_permission():
return self.model_admin_url('changelist')
else:
return self.get_admin_url('index')
class UpdateAdminView(ModelFormAdminView):
def init_request(self, object_id, *args, **kwargs):
self.org_obj = self.get_object(unquote(object_id))
if not self.has_change_permission(self.org_obj):
raise PermissionDenied
if self.org_obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})
        # common method for both get and post
self.prepare_form()
@filter_hook
def get_form_datas(self):
params = {'instance': self.org_obj}
if self.request_method == 'post':
params.update(
{'data': self.request.POST, 'files': self.request.FILES})
return params
@filter_hook
def get_context(self):
new_context = {
'title': _('Change %s') % force_unicode(self.org_obj),
'object_id': str(self.org_obj.pk),
}
context = super(UpdateAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelFormAdminView, self).get_breadcrumb()
item = {'title': force_unicode(self.org_obj)}
if self.has_change_permission():
item['url'] = self.model_admin_url('change', self.org_obj.pk)
bcs.append(item)
return bcs
@filter_hook
def get_response(self, *args, **kwargs):
context = self.get_context()
context.update(kwargs or {})
return TemplateResponse(
self.request, self.change_form_template or self.get_template_list(
'views/model_form.html'),
context, current_app=self.admin_site.name)
def post(self, request, *args, **kwargs):
if "_saveasnew" in self.request.POST:
return self.get_model_view(CreateAdminView, self.model).post(request)
return super(UpdateAdminView, self).post(request, *args, **kwargs)
@filter_hook
def post_response(self):
"""
Determines the HttpResponse for the change_view stage.
"""
opts = self.new_obj._meta
obj = self.new_obj
request = self.request
verbose_name = opts.verbose_name
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name':
force_unicode(verbose_name), 'obj': force_unicode(obj)}
if "_continue" in request.POST:
self.message_user(
msg + ' ' + _("You may edit it again below."), 'success')
return request.path
elif "_addanother" in request.POST:
self.message_user(msg + ' ' + (_("You may add another %s below.")
% force_unicode(verbose_name)), 'success')
return self.model_admin_url('add')
else:
self.message_user(msg, 'success')
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if "_redirect" in request.POST:
return request.POST["_redirect"]
elif self.has_view_permission():
change_list_url = self.model_admin_url('changelist')
if 'LIST_QUERY' in self.request.session \
and self.request.session['LIST_QUERY'][0] == self.model_info:
change_list_url += '?' + self.request.session['LIST_QUERY'][1]
return change_list_url
else:
return self.get_admin_url('index')
class ModelFormAdminUtil(ModelFormAdminView):
def init_request(self, obj=None):
self.org_obj = obj
self.prepare_form()
self.instance_forms()
@filter_hook
def get_form_datas(self):
return {'instance': self.org_obj}
|
|
import os
import sys
import pygame
import copy
import textrect
import energybar
import battle_v
from common.constants import *
from client.constants import *
from common.util.rect import Rect
class UnitHUD(object):
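    """Per-team HUD panel docked to the bottom of the screen: character
    selection buttons with mini health bars, the selected character's
    portrait, health/super energy bars, terrain/territory effect icons,
    super icons, structure counts and the damage ("Strength") readout."""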
def __init__(self, inTeam, inPlayers):
self.rect = Rect((0,0), (SCREEN_SIZE[0], UNIT_HUD_HEIGHT))
self.rect.bottom = SCREEN_SIZE[1]
self.team = inTeam
self.characters = inPlayers[self.team]
self.buttonBorderVertLight = pygame.Surface((UNIT_HUD_BUTTON_BORDER_SIZE,
UNIT_HUD_BUTTON_SIZE[1]))
self.buttonBorderVertLight.fill(UNIT_HUD_BUTTON_COLORS_LIGHT_BORDER[self.team])
self.buttonBorderVertDark = pygame.Surface((UNIT_HUD_BUTTON_BORDER_SIZE,
UNIT_HUD_BUTTON_SIZE[1]))
self.buttonBorderVertDark.fill(UNIT_HUD_BUTTON_COLORS_DARK_BORDER[self.team])
self.buttonBorderHorizLight = pygame.Surface((UNIT_HUD_BUTTON_SIZE[0],
UNIT_HUD_BUTTON_BORDER_SIZE))
self.buttonBorderHorizLight.fill(UNIT_HUD_BUTTON_COLORS_LIGHT_BORDER[self.team])
self.buttonBorderHorizDark = pygame.Surface((UNIT_HUD_BUTTON_SIZE[0],
UNIT_HUD_BUTTON_BORDER_SIZE))
self.buttonBorderHorizDark.fill(UNIT_HUD_BUTTON_COLORS_DARK_BORDER[self.team])
self.blankPortrait = pygame.Surface(UNIT_HUD_PORTRAIT_SIZE)
self.blankPortrait.fill(UNIT_HUD_BLANK_PORTRAIT_COLORS[self.team])
self.portraitBase = pygame.Surface( (UNIT_HUD_PORTRAIT_SIZE[0] + (UNIT_HUD_PORTRAIT_BORDER_SIZE * 2),
UNIT_HUD_PORTRAIT_SIZE[1] + (UNIT_HUD_PORTRAIT_BORDER_SIZE * 2)))
self.portraitBase.fill(UNIT_HUD_PORTRAIT_BORDER_COLORS[self.team])
x = ( (UNIT_HUD_BORDER_WIDTH * 4) + (UNIT_HUD_BUTTON_SIZE[0] * 2) +
(UNIT_HUD_PORTRAIT_BORDER_SIZE * 2) + UNIT_HUD_PORTRAIT_SIZE[0] )
y = UNIT_HUD_BORDER_WIDTH
self.healthBarRect = Rect((x, y), UNIT_HUD_ENERGY_BAR_SIZE)
y += UNIT_HUD_BORDER_WIDTH + self.healthBarRect.height + ENERGY_BAR_FONT.get_height() - 4
self.superBarRect = Rect((x, y), UNIT_HUD_ENERGY_BAR_SIZE)
sizeX = SCREEN_SIZE[0] - self.healthBarRect.left - UNIT_HUD_BORDER_WIDTH
sizeY = UNIT_HUD_STRUCTURE_PANEL_HEIGHT
self.structureCountPanelRect = Rect( (0, 0), (sizeX, sizeY))
self.structureCountPanelRect.left = self.healthBarRect.left
self.structureCountPanelRect.bottom = UNIT_HUD_HEIGHT - UNIT_HUD_BORDER_WIDTH
self.fortressIcon = FORTRESS_COUNT_ICONS[self.team]
self.fortressIconRect = Rect( (0,0), self.fortressIcon.get_size() )
self.fortressIconRect.centery = (self.structureCountPanelRect.height / 2)
self.altarIcon = ALTAR_COUNT_ICONS[self.team]
self.altarIconRect = Rect( (0,0), self.altarIcon.get_size() )
self.altarIconRect.left = UNIT_HUD_STRUCTURE_PANEL_SPACING
self.altarIconRect.centery = (self.structureCountPanelRect.height / 2)
self.spireIcon = SPIRE_COUNT_ICONS[self.team]
self.spireIconRect = Rect( (0,0), self.spireIcon.get_size() )
self.spireIconRect.left = UNIT_HUD_STRUCTURE_PANEL_SPACING * 2
self.spireIconRect.centery = (self.structureCountPanelRect.height / 2)
self.iconRect = Rect((0,0), EFFECT_ICON_SIZE)
self.iconRect.top = self.healthBarRect.top
self.iconRect.left = self.healthBarRect.right + UNIT_HUD_BORDER_WIDTH
self.damageTagRect = Rect((0, 0), (80, 100))
self.damageTagRect.right = self.rect.width
self.damageTagRect.top = self.rect.height - 55
self.damageTag = textrect.render_textrect("Strength", STRUCTURE_COUNT_FONT, self.damageTagRect,
ALMOST_BLACK, BLACK, 1, True)
self.damagePercentRect = Rect((0, 0), (80, 100))
self.damagePercentRect.right = self.damageTagRect.right
self.damagePercentRect.top = self.damageTagRect.top + 18
self.createCharacterButtons()
self.createTerrainIcons()
self.currCharacter = None
self.healthBar = None
self.superBar = None
self.createBaseImage()
self.createTerritoryIcons()
self.createSuperIcons()
self.updateStructureCount(0, 0, 0)
def update(self, val):
self.changeCharacter(val)
self.everyFrameUpdate()
def changeCharacter(self, val):
if self.currCharacter != val:
self.currCharacter = val
self.createBaseImage()
if not self.currCharacter is None:
self.healthBar = energybar.EnergyBar(self.currCharacter.battleChar.hp, self.healthBarRect,
(UNIT_HUD_ENERGY_BAR_BORDER_SIZE, UNIT_HUD_ENERGY_BAR_BORDER_SIZE),
HEALTH_BAR_COLORS, 2,
self.currCharacter.name, None, 0)
self.superBar = energybar.EnergyBar(self.currCharacter.battleChar.superEnergy, self.superBarRect,
(UNIT_HUD_ENERGY_BAR_BORDER_SIZE, UNIT_HUD_ENERGY_BAR_BORDER_SIZE),
SUPER_BAR_COLORS, 2,
"Energy", None, 0)
self.healthBar.rect.topleft = self.healthBarRect.topleft
self.superBar.rect.topleft = self.superBarRect.topleft
else:
self.healthBar = None
self.superBar = None
def createCharacterButtons(self):
self.buttons = []
self.buttonRects = []
self.buttonBars = []
for i, c in enumerate(self.characters):
tempList = []
for j in range(2):
isOn = (j == 0)
tempList.append(self.createButton(i, c, isOn))
self.buttons.append(tempList)
def createButton(self, val, character, isOn):
faceColorOn = UNIT_HUD_BUTTON_COLORS_FACE_ON[self.team]
        faceColorOff = UNIT_HUD_BUTTON_COLORS_FACE_OFF[self.team]
if isOn:
faceColor = faceColorOn
borderTop = self.buttonBorderHorizDark
borderLeft = self.buttonBorderVertDark
borderBottom = self.buttonBorderHorizLight
borderRight = self.buttonBorderVertLight
else:
faceColor = faceColorOff
borderTop = self.buttonBorderHorizLight
borderLeft = self.buttonBorderVertLight
borderBottom = self.buttonBorderHorizDark
borderRight = self.buttonBorderVertDark
button = pygame.Surface(UNIT_HUD_BUTTON_SIZE)
button.fill(faceColor)
button.blit(borderTop, (0, 0))
button.blit(borderLeft, (0, 0))
button.blit(borderBottom, (0, UNIT_HUD_BUTTON_SIZE[1] -
UNIT_HUD_BUTTON_BORDER_SIZE))
button.blit(borderRight, (UNIT_HUD_BUTTON_SIZE[0] -
UNIT_HUD_BUTTON_BORDER_SIZE, 0))
textRect = Rect( (UNIT_HUD_BUTTON_BORDER_SIZE,
UNIT_HUD_BUTTON_BORDER_SIZE),
(UNIT_HUD_BUTTON_SIZE[0] - (UNIT_HUD_BUTTON_BORDER_SIZE * 2),
UNIT_HUD_BUTTON_SIZE[1] - (UNIT_HUD_BUTTON_BORDER_SIZE * 2)) )
textSurface = textrect.render_textrect(character.name, UNIT_HUD_NAMES_FONT, textRect,
ALMOST_BLACK, BLACK, 1, True)
button.blit(textSurface, textRect.topleft)
textSurface = textrect.render_textrect(str(val+1) + ".", UNIT_HUD_NAMES_FONT, textRect,
ALMOST_BLACK, BLACK, 0, True)
button.blit(textSurface, textRect.topleft)
numPerColumn = int(len(self.characters) / 2) + 1
if numPerColumn > UNIT_HUD_BUTTONS_PER_COLUMN:
numPerColumn = UNIT_HUD_BUTTONS_PER_COLUMN
if isOn:
#Rect
            column = int(val / numPerColumn)
row = val % numPerColumn
x = ((UNIT_HUD_BUTTON_SIZE[0] + UNIT_HUD_BORDER_WIDTH) * column) + UNIT_HUD_BORDER_WIDTH
y = ((UNIT_HUD_BUTTON_SIZE[1] + UNIT_HUD_BORDER_WIDTH) * row) + UNIT_HUD_BORDER_WIDTH
buttonRect = Rect((x, y), UNIT_HUD_BUTTON_SIZE)
self.buttonRects.append(Rect(buttonRect))
#Bar
sizeX = UNIT_HUD_BUTTON_SIZE[0] - (UNIT_HUD_BUTTON_BORDER_SIZE * 2)
sizeY = 2
x = buttonRect.left + UNIT_HUD_BUTTON_BORDER_SIZE
y = buttonRect.bottom - UNIT_HUD_BUTTON_BORDER_SIZE - 4
barRect = Rect((x, y), (sizeX, sizeY))
newBar = energybar.EnergyBar(character.battleChar.hp, barRect, (0, 0), HEALTH_BAR_COLORS, 2)
self.buttonBars.append(newBar)
return button
def createTerritoryIcons(self):
if self.team == 0:
alliedTeam = 1
enemyTeam = 2
else:
alliedTeam = 2
enemyTeam = 1
contested = 0
options = ["neutral", "allied", "enemy", "contested"]
self.territoryEffectIcons = {}
bgColor = None
flagColor = None
for o in options:
flag = copy.copy(INTERFACE_GRAPHICS[5])
if o == "allied":
flagColor = TERRITORY_FLAG_COLORS[alliedTeam]
bgColor = EFFECT_COLORS["good"]
elif o == "enemy":
flagColor = TERRITORY_FLAG_COLORS[enemyTeam]
bgColor = EFFECT_COLORS["bad"]
elif o == "contested":
flagColor = TERRITORY_FLAG_COLORS[contested]
bgColor = EFFECT_COLORS["bad"]
else:
flag = None
bgColor = EFFECT_COLORS["neutral"]
icon = pygame.Surface(EFFECT_ICON_SIZE)
icon.fill(bgColor)
if not flag is None:
colorSwap(flag, FLAG_NEUTRAL_COLOR, flagColor, 30)
icon.blit(flag, (0,0))
self.territoryEffectIcons[o] = icon
def createTerrainIcons(self):
self.terrainEffectIcons = []
for c in self.characters:
tempList = []
for i in range(7):
terrainIcon = TERRAIN_ICONS[i]
effect = c.speedTerrainModifiers[i]
if effect <= 0.99:
bgColor = EFFECT_COLORS["bad"]
elif effect >= 1.01:
bgColor = EFFECT_COLORS["good"]
else:
bgColor = EFFECT_COLORS["neutral"]
icon = pygame.Surface(EFFECT_ICON_SIZE)
icon.fill(bgColor)
if not terrainIcon is None:
icon.blit(terrainIcon, (0,0))
tempList.append(icon)
self.terrainEffectIcons.append(tempList)
def createSuperIcons(self):
self.superIcons = []
x = self.iconRect.left
y = self.iconRect.top + self.iconRect.height + UNIT_HUD_BORDER_WIDTH
for c in self.characters:
icon = battle_v.SuperIcon(Rect((x, y), BATTLE_SUPER_ICON_SIZE),
c.battleChar.getSuperIcon(), c.battleChar.superEnergy)
self.superIcons.append(icon)
def createBaseImage(self):
self.baseImage = pygame.Surface(self.rect.size)
color = UNIT_HUD_COLORS[self.team]
self.baseImage.fill(color)
for i, c in enumerate(self.characters):
if self.currCharacter == c:
onIndex = 0
else:
onIndex = 1
self.baseImage.blit(self.buttons[i][onIndex], self.buttonRects[i].topleft)
x = ((UNIT_HUD_BUTTON_SIZE[0] + UNIT_HUD_BORDER_WIDTH) * 2) + UNIT_HUD_BORDER_WIDTH
y = UNIT_HUD_BORDER_WIDTH
if self.currCharacter is None:
portrait = self.blankPortrait
else:
portrait = self.currCharacter.portrait
finalPortrait = pygame.Surface( (UNIT_HUD_PORTRAIT_SIZE[0] + (UNIT_HUD_PORTRAIT_BORDER_SIZE * 2),
UNIT_HUD_PORTRAIT_SIZE[1] + (UNIT_HUD_PORTRAIT_BORDER_SIZE * 2)) )
finalPortrait.blit(self.portraitBase, (0,0))
finalPortrait.blit(portrait, (UNIT_HUD_PORTRAIT_BORDER_SIZE, UNIT_HUD_PORTRAIT_BORDER_SIZE))
self.baseImage.blit(finalPortrait, (x, y))
def everyFrameUpdate(self):
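        # Rebuild self.image each frame: blit the cached base image, draw the
        # live health/super bars (switching to respawn-timer colors for dead
        # characters), update the per-button mini bars, blit effect/super
        # icons, the structure-count panel and the damage percentage readout.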
self.image = pygame.Surface(self.rect.size)
self.image.blit(self.baseImage, (0, 0))
if (not self.healthBar is None):
self.healthBar.update()
self.healthBar.draw(self.image)
if (not self.currCharacter is None):
if self.currCharacter.isDead():
if self.healthBar.value != self.currCharacter.respawnTime:
self.healthBar.value = self.currCharacter.respawnTime
self.healthBar.threshold = self.healthBar.value.maximum
self.healthBar.changeColor(RESPAWN_BAR_COLOR)
self.healthBar.changeFullColors(RESPAWN_BAR_COLOR, RESPAWN_BAR_COLOR)
else:
if self.healthBar.value != self.currCharacter.battleChar.hp:
self.healthBar.value = self.currCharacter.battleChar.hp
self.healthBar.threshold = self.healthBar.value.maximum
self.healthBar.changeColor(HEALTH_BAR_COLORS[0])
self.healthBar.changeFullColors(HEALTH_BAR_COLORS[1], HEALTH_BAR_COLORS[2])
if (not self.superBar is None):
self.superBar.update()
self.superBar.draw(self.image)
for i, b in enumerate(self.buttonBars):
b.update()
b.draw(self.image)
c = self.characters[i]
if c.isDead():
if b.value != c.respawnTime:
b.value = c.respawnTime
b.threshold = b.value.maximum
b.changeColor(RESPAWN_BAR_COLOR)
b.changeFullColors(RESPAWN_BAR_COLOR, RESPAWN_BAR_COLOR)
else:
if b.value != c.battleChar.hp:
b.value = c.battleChar.hp
b.threshold = b.value.maximum
b.changeColor(HEALTH_BAR_COLORS[0])
b.changeFullColors(HEALTH_BAR_COLORS[1], HEALTH_BAR_COLORS[2])
terrainIcon = None
territoryIcon = None
superIcon = None
for i, c in enumerate(self.characters):
if c == self.currCharacter:
if c.isDead():
terrainIcon = NEUTRAL_ICON
territoryIcon = NEUTRAL_ICON
else:
terrainIcon = self.terrainEffectIcons[i][self.currCharacter.currTerrain]
territoryIcon = self.territoryEffectIcons[self.currCharacter.currTerritory]
superIcon = self.superIcons[i]
break
if not terrainIcon is None:
self.image.blit(terrainIcon, self.iconRect.topleft)
x = self.iconRect.right
y = self.iconRect.top
if not territoryIcon is None:
self.image.blit(territoryIcon, (x, y))
if not superIcon is None:
superIcon.updateImage()
superIcon.draw(self.image)
self.image.blit(self.structureCountPanel, self.structureCountPanelRect.topleft)
self.updateDamagePercent()
def updateDamagePercent(self):
if (not self.currCharacter is None):
if not self.currCharacter.isDead():
self.image.blit(self.damageTag, self.damageTagRect.topleft)
textSurface = textrect.render_textrect(self.currCharacter.getDamagePercentText(), DAMAGE_PERCENT_FONT, self.damagePercentRect,
ALMOST_BLACK, BLACK, 1, True)
self.image.blit(textSurface, self.damagePercentRect.topleft)
def updateStructureCount(self, fortressCount, spireCount, altarCount):
self.fortressCount = fortressCount
self.spireCount = spireCount
self.altarCount = altarCount
self.structureCountPanel = pygame.Surface(self.structureCountPanelRect.size)
self.structureCountPanel.fill(UNIT_HUD_COLORS[self.team])
self.structureCountPanel.blit(self.fortressIcon, self.fortressIconRect.topleft)
self.structureCountPanel.blit(self.spireIcon, self.spireIconRect.topleft)
self.structureCountPanel.blit(self.altarIcon, self.altarIconRect.topleft)
textRect = Rect((0,0), (100, STRUCTURE_COUNT_FONT.get_height() + 4))
textSurface = textrect.render_textrect(" x" + str(self.fortressCount), STRUCTURE_COUNT_FONT, textRect,
ALMOST_BLACK, BLACK, 0, True)
textRect.bottomleft = self.fortressIconRect.bottomright
self.structureCountPanel.blit(textSurface, textRect.topleft)
textSurface = textrect.render_textrect(" x" + str(self.spireCount), STRUCTURE_COUNT_FONT, textRect,
ALMOST_BLACK, BLACK, 0, True)
textRect.left = self.spireIconRect.right - 3
self.structureCountPanel.blit(textSurface, textRect.topleft)
textSurface = textrect.render_textrect(" x" + str(self.altarCount), STRUCTURE_COUNT_FONT, textRect,
ALMOST_BLACK, BLACK, 0, True)
textRect.left = self.altarIconRect.right - 3
self.structureCountPanel.blit(textSurface, textRect.topleft)
def getButtonAtPos(self, inPos):
pos = [inPos[0] - self.rect.left, inPos[1] - self.rect.top]
for i, r in enumerate(self.buttonRects):
if r.collidepoint(pos):
return self.characters[i]
return None
def draw(self, screen):
screen.blit(self.image, self.rect.topleft)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"sparsity_function_end_step=100", "target_sparsity=0.9",
"weight_sparsity_map=[conv1:0.8,conv2/kernel:0.8]"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.Variable(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
self.assertEqual(p._weight_sparsity_map["conv1"], 0.8)
self.assertEqual(p._weight_sparsity_map["conv2/kernel"], 0.8)
def testInitWithExternalSparsity(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.test_session():
weights = variables.Variable(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.test_session() as session:
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.5, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.Variable(0.0, name="threshold")
sparsity = variables.Variable(0.5, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.test_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.test_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.Variable(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6",
"nbins=100"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.test_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
    # Weights pruned at steps 0, 2, 4, and 6
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
def testWeightSpecificSparsity(self):
param_list = [
"begin_pruning_step=1", "pruning_frequency=1", "end_pruning_step=100",
"target_sparsity=0.5", "weight_sparsity_map=[layer2/weights:0.75]",
"threshold_decay=0.0"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
with variable_scope.variable_scope("layer1"):
w1 = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w1)
with variable_scope.variable_scope("layer2"):
w2 = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w2)
p = pruning.Pruning(pruning_hparams)
mask_update_op = p.conditional_mask_update_op()
increment_global_step = state_ops.assign_add(self.global_step, 1)
with self.test_session() as session:
variables.global_variables_initializer().run()
for _ in range(110):
session.run(mask_update_op)
session.run(increment_global_step)
self.assertAllEqual(
session.run(pruning.get_weight_sparsity()), [0.5, 0.75])
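# Illustrative sketch (pure NumPy, not part of the pruning library): magnitude
# pruning keeps the largest-|w| fraction of weights and zeroes the rest, which
# is the behavior testUpdateSingleMask checks above (sparsity=0.5 leaves 50 of
# 100 weights nonzero). The function name is a hypothetical example.
def _numpy_magnitude_prune_example(weights, sparsity):
  """Return a 0/1 mask keeping the largest-magnitude (1 - sparsity) fraction."""
  flat = np.sort(np.abs(weights).ravel())
  k = int(sparsity * flat.size)  # number of weights to zero out
  if k == 0:
    return np.ones_like(weights)
  threshold = flat[k - 1]  # k-th smallest magnitude becomes the cutoff
  return (np.abs(weights) > threshold).astype(weights.dtype)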
if __name__ == "__main__":
test.main()
|
|
from abc import ABCMeta, abstractmethod
from collections import Iterable
from numbers import Real
import sys
from xml.etree import ElementTree as ET
import openmc.checkvalue as cv
if sys.version_info[0] >= 3:
basestring = str
class Univariate(object):
"""Probability distribution of a single random variable.
The Univariate class is an abstract class that can be derived to implement a
specific probability distribution.
"""
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def to_xml(self):
return ''
class Discrete(Univariate):
"""Distribution characterized by a probability mass function.
The Discrete distribution assigns probability values to discrete values of a
random variable, rather than expressing the distribution as a continuous
random variable.
Parameters
----------
x : Iterable of Real
Values of the random variable
p : Iterable of Real
Discrete probability for each value
Attributes
----------
x : Iterable of Real
Values of the random variable
p : Iterable of Real
Discrete probability for each value
"""
def __init__(self, x, p):
super(Discrete, self).__init__()
self.x = x
self.p = p
@property
def x(self):
return self._x
@property
def p(self):
return self._p
@x.setter
def x(self, x):
        if isinstance(x, Real):
x = [x]
cv.check_type('discrete values', x, Iterable, Real)
self._x = x
@p.setter
def p(self, p):
        if isinstance(p, Real):
p = [p]
cv.check_type('discrete probabilities', p, Iterable, Real)
for pk in p:
cv.check_greater_than('discrete probability', pk, 0.0, True)
self._p = p
def to_xml(self, element_name):
element = ET.Element(element_name)
element.set("type", "discrete")
params = ET.SubElement(element, "parameters")
params.text = ' '.join(map(str, self.x)) + ' ' + ' '.join(map(str, self.p))
return element
class Uniform(Univariate):
"""Distribution with constant probability over a finite interval [a,b]
Parameters
----------
a : float, optional
Lower bound of the sampling interval. Defaults to zero.
b : float, optional
Upper bound of the sampling interval. Defaults to unity.
Attributes
----------
a : float
Lower bound of the sampling interval
b : float
Upper bound of the sampling interval
"""
def __init__(self, a=0.0, b=1.0):
super(Uniform, self).__init__()
self.a = a
self.b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
@a.setter
def a(self, a):
cv.check_type('Uniform a', a, Real)
self._a = a
@b.setter
def b(self, b):
cv.check_type('Uniform b', b, Real)
self._b = b
def to_xml(self, element_name):
element = ET.Element(element_name)
element.set("type", "uniform")
element.set("parameters", '{} {}'.format(self.a, self.b))
return element
class Maxwell(Univariate):
"""Maxwellian distribution in energy.
The Maxwellian distribution in energy is characterized by a single parameter
:math:`\theta` and has a density function :math:`p(E) dE = c E e^{-E/\theta}
dE`.
Parameters
----------
theta : float
Effective temperature for distribution
Attributes
----------
theta : float
Effective temperature for distribution
"""
def __init__(self, theta):
super(Maxwell, self).__init__()
self.theta = theta
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, theta):
cv.check_type('Maxwell temperature', theta, Real)
cv.check_greater_than('Maxwell temperature', theta, 0.0)
self._theta = theta
def to_xml(self, element_name):
element = ET.Element(element_name)
element.set("type", "maxwell")
element.set("parameters", str(self.theta))
return element
class Watt(Univariate):
"""Watt fission energy spectrum.
The Watt fission energy spectrum is characterized by two parameters
:math:`a` and :math:`b` and has density function :math:`p(E) dE = c e^{-E/a}
\sinh \sqrt{b \, E} dE`.
Parameters
----------
a : float
First parameter of distribution
b : float
Second parameter of distribution
Attributes
----------
a : float
First parameter of distribution
b : float
Second parameter of distribution
"""
def __init__(self, a=0.988, b=2.249):
super(Watt, self).__init__()
self.a = a
self.b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
@a.setter
def a(self, a):
cv.check_type('Watt a', a, Real)
cv.check_greater_than('Watt a', a, 0.0)
self._a = a
@b.setter
def b(self, b):
cv.check_type('Watt b', b, Real)
cv.check_greater_than('Watt b', b, 0.0)
self._b = b
def to_xml(self, element_name):
element = ET.Element(element_name)
element.set("type", "watt")
element.set("parameters", '{} {}'.format(self.a, self.b))
return element
class Tabular(Univariate):
"""Piecewise continuous probability distribution.
This class is used to represent a probability distribution whose density
function is tabulated at specific values and is either histogram or linearly
interpolated between points.
Parameters
----------
x : Iterable of Real
Tabulated values of the random variable
p : Iterable of Real
Tabulated probabilities
interpolation : {'histogram', 'linear-linear'}, optional
Indicate whether the density function is constant between tabulated
points or linearly-interpolated.
Attributes
----------
x : Iterable of Real
Tabulated values of the random variable
p : Iterable of Real
Tabulated probabilities
interpolation : {'histogram', 'linear-linear'}, optional
Indicate whether the density function is constant between tabulated
points or linearly-interpolated.
"""
def __init__(self, x, p, interpolation='linear-linear'):
super(Tabular, self).__init__()
self.x = x
self.p = p
self.interpolation = interpolation
@property
def x(self):
return self._x
@property
def p(self):
return self._p
@property
def interpolation(self):
return self._interpolation
@x.setter
def x(self, x):
cv.check_type('tabulated values', x, Iterable, Real)
self._x = x
@p.setter
def p(self, p):
cv.check_type('tabulated probabilities', p, Iterable, Real)
for pk in p:
cv.check_greater_than('tabulated probability', pk, 0.0, True)
self._p = p
@interpolation.setter
def interpolation(self, interpolation):
cv.check_value('interpolation', interpolation,
['linear-linear', 'histogram'])
self._interpolation = interpolation
def to_xml(self, element_name):
element = ET.Element(element_name)
element.set("type", "tabular")
element.set("interpolation", self.interpolation)
params = ET.SubElement(element, "parameters")
params.text = ' '.join(map(str, self.x)) + ' ' + ' '.join(map(str, self.p))
return element
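# Illustrative usage sketch (not part of the openmc API surface): build one of
# the distributions defined above and serialize it to an XML element string.
# The 'energy' element name and the numbers are hypothetical example values.
def _example_tabular_to_xml():
    dist = Tabular([0.0, 1.0, 2.0], [0.25, 0.5, 0.25], interpolation='histogram')
    element = dist.to_xml('energy')
    return ET.tostring(element)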
|
|
#!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import sys
from io import BytesIO
import itertools
import uuid
from optparse import OptionParser
import random
import six
from six.moves.urllib.parse import urlparse, parse_qs, quote
from swift.common.manager import Manager
from swift.common import utils, ring
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.storage_policy import POLICIES
from swift.common.http import HTTP_NOT_FOUND
from swiftclient import client, get_auth, ClientException
from test.probe import PROXY_BASE_URL
from test.probe.common import ENABLED_POLICIES
TIMEOUT = 60
def meta_command(name, bases, attrs):
"""
Look for attrs with a truthy attribute __command__ and add them to an
attribute __commands__ on the type that maps names to decorated methods.
The decorated methods' doc strings also get mapped in __docs__.
Also adds a method run(command_name, *args, **kwargs) that will
execute the method mapped to the name in __commands__.
"""
commands = {}
docs = {}
for attr, value in attrs.items():
if getattr(value, '__command__', False):
commands[attr] = value
            # methods always have a __doc__ attribute, sometimes empty
docs[attr] = (getattr(value, '__doc__', None) or
'perform the %s command' % attr).strip()
attrs['__commands__'] = commands
attrs['__docs__'] = docs
def run(self, command, *args, **kwargs):
return self.__commands__[command](self, *args, **kwargs)
attrs.setdefault('run', run)
return type(name, bases, attrs)
def command(f):
f.__command__ = True
return f
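# Illustrative sketch (not used by the probe tool itself): how meta_command and
# the command decorator cooperate. ToyBrain and its echo command are
# hypothetical examples, not real brain commands.
def _meta_command_example():
    @command
    def echo(self, msg):
        """echo back a message"""
        return msg
    ToyBrain = meta_command('ToyBrain', (object,), {'echo': echo})
    brain = ToyBrain()
    assert brain.run('echo', 'hi') == 'hi'
    assert ToyBrain.__docs__['echo'] == 'echo back a message'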
class BaseBrain(object):
def _setup(self, account, container_name, object_name,
server_type, policy):
self.account = account
self.container_name = container_name
self.object_name = object_name
server_list = ['%s-server' % server_type] if server_type else ['all']
self.servers = Manager(server_list)
policies = list(ENABLED_POLICIES)
random.shuffle(policies)
self.policies = itertools.cycle(policies)
o = object_name if server_type == 'object' else None
c = container_name if server_type in ('object', 'container') else None
if server_type in ('container', 'account'):
if policy:
raise TypeError('Metadata server brains do not '
'support specific storage policies')
self.policy = None
self.ring = ring.Ring(
'/etc/swift/%s.ring.gz' % server_type)
elif server_type == 'object':
if not policy:
raise TypeError('Object BrainSplitters need to '
'specify the storage policy')
self.policy = policy
policy.load_ring('/etc/swift')
self.ring = policy.object_ring
else:
raise ValueError('Unknown server_type: %r' % server_type)
self.server_type = server_type
self.part, self.nodes = self.ring.get_nodes(self.account, c, o)
self.node_numbers = [n['id'] + 1 for n in self.nodes]
if 1 in self.node_numbers and 2 in self.node_numbers:
self.primary_numbers = (1, 2)
self.handoff_numbers = (3, 4)
else:
self.primary_numbers = (3, 4)
self.handoff_numbers = (1, 2)
@command
def start_primary_half(self):
"""
start servers 1 & 2
"""
tuple(self.servers.start(number=n) for n in self.primary_numbers)
@command
def stop_primary_half(self):
"""
stop servers 1 & 2
"""
tuple(self.servers.stop(number=n) for n in self.primary_numbers)
@command
def start_handoff_half(self):
"""
start servers 3 & 4
"""
tuple(self.servers.start(number=n) for n in self.handoff_numbers)
@command
def stop_handoff_half(self):
"""
stop servers 3 & 4
"""
tuple(self.servers.stop(number=n) for n in self.handoff_numbers)
@command
def put_container(self, policy_index=None):
"""
put container with next storage policy
"""
if policy_index is not None:
policy = POLICIES.get_by_index(int(policy_index))
if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
elif not self.policy:
policy = next(self.policies)
else:
policy = self.policy
headers = {'X-Storage-Policy': policy.name}
self.client.put_container(self.container_name, headers=headers)
@command
def delete_container(self):
"""
delete container
"""
self.client.delete_container(self.container_name)
@command
def put_object(self, headers=None, contents=None):
"""
issue put for test object
"""
self.client.put_object(self.container_name, self.object_name,
headers=headers, contents=contents)
@command
def delete_object(self):
"""
issue delete for test object
"""
self.client.delete_object(self.container_name, self.object_name)
@command
def get_object(self):
"""
issue GET for test object
"""
return self.client.get_object(self.container_name, self.object_name)
class PublicBrainClient(object):
def __init__(self, url, token):
self.url = url
self.token = token
self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
def put_container(self, container_name, headers):
return client.put_container(self.url, self.token, container_name,
headers=headers)
def post_container(self, container_name, headers):
return client.post_container(self.url, self.token, container_name,
headers)
def delete_container(self, container_name):
return client.delete_container(self.url, self.token, container_name)
def put_object(self, container_name, object_name, headers, contents,
query_string=None):
return client.put_object(self.url, self.token, container_name,
object_name, headers=headers,
contents=contents, query_string=query_string)
def delete_object(self, container_name, object_name):
try:
client.delete_object(self.url, self.token,
container_name, object_name)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
def head_object(self, container_name, object_name):
return client.head_object(self.url, self.token, container_name,
object_name)
def get_object(self, container_name, object_name, query_string=None):
return client.get_object(self.url, self.token,
container_name, object_name,
query_string=query_string)
def translate_client_exception(m):
@functools.wraps(m)
def wrapper(*args, **kwargs):
try:
return m(*args, **kwargs)
except UnexpectedResponse as err:
raise ClientException(
err.args[0],
http_scheme=err.resp.environ['wsgi.url_scheme'],
http_host=err.resp.environ['SERVER_NAME'],
http_port=err.resp.environ['SERVER_PORT'],
http_path=quote(err.resp.environ['PATH_INFO']),
http_query=err.resp.environ['QUERY_STRING'],
http_status=err.resp.status_int,
http_reason=err.resp.explanation,
http_response_content=err.resp.body,
http_response_headers=err.resp.headers,
)
return wrapper
class InternalBrainClient(object):
def __init__(self, conf_file, account='AUTH_test'):
self.swift = InternalClient(conf_file, 'probe-test', 3)
self.account = account
@translate_client_exception
def put_container(self, container_name, headers):
return self.swift.create_container(self.account, container_name,
headers=headers)
@translate_client_exception
def post_container(self, container_name, headers):
return self.swift.set_container_metadata(self.account, container_name,
headers)
@translate_client_exception
def delete_container(self, container_name):
return self.swift.delete_container(self.account, container_name)
def parse_qs(self, query_string):
if query_string is not None:
return {k: v[-1] for k, v in parse_qs(query_string).items()}
@translate_client_exception
def put_object(self, container_name, object_name, headers, contents,
query_string=None):
return self.swift.upload_object(BytesIO(contents), self.account,
container_name, object_name,
headers=headers,
params=self.parse_qs(query_string))
@translate_client_exception
def delete_object(self, container_name, object_name):
return self.swift.delete_object(
self.account, container_name, object_name)
@translate_client_exception
def head_object(self, container_name, object_name):
return self.swift.get_object_metadata(
self.account, container_name, object_name)
@translate_client_exception
def get_object(self, container_name, object_name, query_string=None):
status, headers, resp_iter = self.swift.get_object(
self.account, container_name, object_name,
params=self.parse_qs(query_string))
return headers, b''.join(resp_iter)
@six.add_metaclass(meta_command)
class BrainSplitter(BaseBrain):
def __init__(self, url, token, container_name='test', object_name='test',
server_type='container', policy=None):
self.client = PublicBrainClient(url, token)
self._setup(self.client.account, container_name, object_name,
server_type, policy)
@six.add_metaclass(meta_command)
class InternalBrainSplitter(BaseBrain):
def __init__(self, conf, container_name='test', object_name='test',
server_type='container', policy=None):
self.client = InternalBrainClient(conf)
self._setup(self.client.account, container_name, object_name,
server_type, policy)
parser = OptionParser('%prog [options] '
'<command>[:<args>[,<args>...]] [<command>...]')
parser.usage += '\n\nCommands:\n\t' + \
'\n\t'.join("%s - %s" % (name, doc) for name, doc in
BrainSplitter.__docs__.items())
parser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(),
help='set container name')
parser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(),
help='set object name')
parser.add_option('-s', '--server_type', default='container',
help='set server type')
parser.add_option('-P', '--policy_name', default=None,
help='set policy')
def main():
options, commands = parser.parse_args()
if not commands:
parser.print_help()
return 'ERROR: must specify at least one command'
for cmd_args in commands:
cmd = cmd_args.split(':', 1)[0]
if cmd not in BrainSplitter.__commands__:
parser.print_help()
return 'ERROR: unknown command %s' % cmd
url, token = get_auth(PROXY_BASE_URL + '/auth/v1.0',
'test:tester', 'testing')
if options.server_type == 'object' and not options.policy_name:
options.policy_name = POLICIES.default.name
if options.policy_name:
options.server_type = 'object'
policy = POLICIES.get_by_name(options.policy_name)
if not policy:
return 'ERROR: unknown policy %r' % options.policy_name
else:
policy = None
brain = BrainSplitter(url, token, options.container, options.object,
options.server_type, policy=policy)
for cmd_args in commands:
parts = cmd_args.split(':', 1)
command = parts[0]
if len(parts) > 1:
args = utils.list_from_csv(parts[1])
else:
args = ()
try:
brain.run(command, *args)
except ClientException as e:
print('**WARNING**: %s raised %s' % (command, e))
print('STATUS'.join(['*' * 25] * 2))
brain.servers.status()
sys.exit()
if __name__ == "__main__":
sys.exit(main())
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class Access(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether the traffic is allowed or denied.
"""
ALLOW = "Allow"
DENY = "Deny"
class ApplicationGatewayBackendHealthServerHealth(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Health of backend server. Possible values are: 'Unknown', 'Up', 'Down', and 'Partial'.
"""
UNKNOWN = "Unknown"
UP = "Up"
DOWN = "Down"
PARTIAL = "Partial"
class ApplicationGatewayCookieBasedAffinity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Cookie based affinity. Possible values are: 'Enabled' and 'Disabled'.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ApplicationGatewayFirewallMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Web application firewall mode. Possible values are: 'Detection' and 'Prevention'.
"""
DETECTION = "Detection"
PREVENTION = "Prevention"
class ApplicationGatewayOperationalState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operational state of the application gateway resource. Possible values are: 'Stopped',
'Started', 'Running', and 'Stopping'.
"""
STOPPED = "Stopped"
STARTING = "Starting"
RUNNING = "Running"
STOPPING = "Stopping"
class ApplicationGatewayProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol. Possible values are: 'Http' and 'Https'.
"""
HTTP = "Http"
HTTPS = "Https"
class ApplicationGatewayRequestRoutingRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Rule type. Possible values are: 'Basic' and 'PathBasedRouting'.
"""
BASIC = "Basic"
PATH_BASED_ROUTING = "PathBasedRouting"
class ApplicationGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of an application gateway SKU. Possible values are: 'Standard_Small', 'Standard_Medium',
'Standard_Large', 'WAF_Medium', and 'WAF_Large'.
"""
STANDARD_SMALL = "Standard_Small"
STANDARD_MEDIUM = "Standard_Medium"
STANDARD_LARGE = "Standard_Large"
WAF_MEDIUM = "WAF_Medium"
WAF_LARGE = "WAF_Large"
class ApplicationGatewaySslProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
TL_SV1_0 = "TLSv1_0"
TL_SV1_1 = "TLSv1_1"
TL_SV1_2 = "TLSv1_2"
class ApplicationGatewayTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Tier of an application gateway. Possible values are: 'Standard' and 'WAF'.
"""
STANDARD = "Standard"
WAF = "WAF"
class AssociationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The association type of the child resource to the parent resource.
"""
ASSOCIATED = "Associated"
CONTAINS = "Contains"
class AuthorizationUseStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'.
"""
AVAILABLE = "Available"
IN_USE = "InUse"
class BgpPeerState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The BGP peer state
"""
UNKNOWN = "Unknown"
STOPPED = "Stopped"
IDLE = "Idle"
CONNECTING = "Connecting"
CONNECTED = "Connected"
class Direction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The direction of the packet represented as a 5-tuple.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class EffectiveRouteSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Who created the route. Possible values are: 'Unknown', 'User', 'VirtualNetworkGateway', and
'Default'.
"""
UNKNOWN = "Unknown"
USER = "User"
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
DEFAULT = "Default"
class EffectiveRouteState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The value of effective route. Possible values are: 'Active' and 'Invalid'.
"""
ACTIVE = "Active"
INVALID = "Invalid"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured',
'Configuring', 'Configured', and 'ValidationNeeded'.
"""
NOT_CONFIGURED = "NotConfigured"
CONFIGURING = "Configuring"
CONFIGURED = "Configured"
VALIDATION_NEEDED = "ValidationNeeded"
class ExpressRouteCircuitPeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of peering. Possible values are: 'Disabled' and 'Enabled'
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class ExpressRouteCircuitPeeringType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and
'MicrosoftPeering'.
"""
AZURE_PUBLIC_PEERING = "AzurePublicPeering"
AZURE_PRIVATE_PEERING = "AzurePrivatePeering"
MICROSOFT_PEERING = "MicrosoftPeering"
class ExpressRouteCircuitSkuFamily(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'.
"""
UNLIMITED_DATA = "UnlimitedData"
METERED_DATA = "MeteredData"
class ExpressRouteCircuitSkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The tier of the SKU. Possible values are 'Standard' and 'Premium'.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
class IPAllocationMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""PrivateIP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
STATIC = "Static"
DYNAMIC = "Dynamic"
class IPVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Available from Api-Version 2016-03-30 onwards, it represents whether the specific
ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and
'IPv6'.
"""
I_PV4 = "IPv4"
I_PV6 = "IPv6"
class LoadDistribution(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and
'SourceIPProtocol'.
"""
DEFAULT = "Default"
SOURCE_IP = "SourceIP"
SOURCE_IP_PROTOCOL = "SourceIPProtocol"
class NetworkOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of the Azure async operation. Possible values are: 'InProgress', 'Succeeded', and
'Failed'.
"""
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class NextHopType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Next hop type.
"""
INTERNET = "Internet"
VIRTUAL_APPLIANCE = "VirtualAppliance"
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
VNET_LOCAL = "VnetLocal"
HYPER_NET_GATEWAY = "HyperNetGateway"
NONE = "None"
class PcError(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
INTERNAL_ERROR = "InternalError"
AGENT_STOPPED = "AgentStopped"
CAPTURE_FAILED = "CaptureFailed"
LOCAL_FILE_FAILED = "LocalFileFailed"
STORAGE_FAILED = "StorageFailed"
class PcProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol to be filtered on.
"""
TCP = "TCP"
UDP = "UDP"
ANY = "Any"
class PcStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the packet capture session.
"""
NOT_STARTED = "NotStarted"
RUNNING = "Running"
STOPPED = "Stopped"
ERROR = "Error"
UNKNOWN = "Unknown"
class ProbeProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The protocol of the end point. Possible values are: 'Http' or 'Tcp'. If 'Tcp' is specified, a
received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK
    response from the specified URI is required for the probe to be successful.
"""
HTTP = "Http"
TCP = "Tcp"
class ProcessorArchitecture(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VPN client Processor Architecture. Possible values are: 'AMD64' and 'X86'.
"""
AMD64 = "Amd64"
X86 = "X86"
class Protocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Protocol to be verified on.
"""
TCP = "TCP"
UDP = "UDP"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the resource.
"""
SUCCEEDED = "Succeeded"
UPDATING = "Updating"
DELETING = "Deleting"
FAILED = "Failed"
class RouteNextHopType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of Azure hop the packet should be sent to. Possible values are:
'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'.
"""
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
VNET_LOCAL = "VnetLocal"
INTERNET = "Internet"
VIRTUAL_APPLIANCE = "VirtualAppliance"
NONE = "None"
class SecurityRuleAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Whether network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
ALLOW = "Allow"
DENY = "Deny"
class SecurityRuleDirection(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The direction of the rule. Possible values are: 'Inbound and Outbound'.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class SecurityRuleProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The network protocol this rule applies to. Possible values are: 'Tcp', 'Udp', and '*'.
"""
TCP = "Tcp"
UDP = "Udp"
ASTERISK = "*"
class ServiceProviderProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The ServiceProviderProvisioningState state of the resource. Possible values are
'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
"""
NOT_PROVISIONED = "NotProvisioned"
PROVISIONING = "Provisioning"
PROVISIONED = "Provisioned"
DEPROVISIONING = "Deprovisioning"
class TransportProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The transport protocol for the external endpoint. Possible values are 'Udp' or 'Tcp'
"""
UDP = "Udp"
TCP = "Tcp"
class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enum describing the unit of measurement.
"""
COUNT = "Count"
class VirtualNetworkGatewayConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Virtual network Gateway connection status
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class VirtualNetworkGatewayConnectionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gateway connection type. Possible values are: 'IPsec','Vnet2Vnet','ExpressRoute', and
'VPNClient.
"""
I_PSEC = "IPsec"
VNET2_VNET = "Vnet2Vnet"
EXPRESS_ROUTE = "ExpressRoute"
VPN_CLIENT = "VPNClient"
class VirtualNetworkGatewaySkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gateway SKU name. Possible values are: 'Basic', 'HighPerformance','Standard', and
'UltraPerformance'.
"""
BASIC = "Basic"
HIGH_PERFORMANCE = "HighPerformance"
STANDARD = "Standard"
ULTRA_PERFORMANCE = "UltraPerformance"
class VirtualNetworkGatewaySkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gateway SKU tier. Possible values are: 'Basic', 'HighPerformance','Standard', and
'UltraPerformance'.
"""
BASIC = "Basic"
HIGH_PERFORMANCE = "HighPerformance"
STANDARD = "Standard"
ULTRA_PERFORMANCE = "UltraPerformance"
class VirtualNetworkGatewayType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'.
"""
VPN = "Vpn"
EXPRESS_ROUTE = "ExpressRoute"
class VirtualNetworkPeeringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the virtual network peering. Possible values are 'Initiated', 'Connected', and
'Disconnected'.
"""
INITIATED = "Initiated"
CONNECTED = "Connected"
DISCONNECTED = "Disconnected"
class VpnType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'.
"""
POLICY_BASED = "PolicyBased"
ROUTE_BASED = "RouteBased"
|
|
"""Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from __future__ import generators
from utils import *
import agents
import math, random, sys, time, bisect, string
#______________________________________________________________________________
class Problem:
"""The abstract class for a formal problem. You should subclass this and
implement the method successor, and possibly __init__, goal_test, and
path_cost. Then you will create instances of your subclass and solve them
with the various search functions."""
def __init__(self, initial, goal=None):
"""The constructor specifies the initial state, and possibly a goal
state, if there is a unique goal. Your subclass's constructor can add
other arguments."""
self.initial = initial; self.goal = goal
def successor(self, state):
"""Given a state, return a sequence of (action, state) pairs reachable
from this state. If there are many successors, consider an iterator
that yields the successors one at a time, rather than building them
all at once. Iterators will work fine within the framework."""
abstract
def goal_test(self, state):
"""Return True if the state is a goal. The default method compares the
state to self.goal, as specified in the constructor. Implement this
method if checking against a single self.goal is not enough."""
return state == self.goal
def path_cost(self, c, state1, action, state2):
"""Return the cost of a solution path that arrives at state2 from
state1 via action, assuming cost c to get up to state1. If the problem
is such that the path doesn't matter, this function will only look at
state2. If the path does matter, it will consider c and maybe state1
and action. The default method costs 1 for every step in the path."""
return c + 1
    def value(self, state):
"""For optimization problems, each state has a value. Hill-climbing
and related algorithms try to maximize this value."""
abstract
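# Illustrative sketch of the subclassing pattern described above (a hypothetical
# toy problem, not one of the AIMA examples): states are integers and each
# action adds 1 or 2, so e.g. breadth_first_graph_search(_CountToProblem(0, 5))
# finds a three-action path such as 0 -> 2 -> 4 -> 5.
class _CountToProblem(Problem):
    "Reach the goal integer from the initial integer by adding 1 or 2."
    def successor(self, state):
        return [('+1', state + 1), ('+2', state + 2)]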
#______________________________________________________________________________
class Node:
"""A node in a search tree. Contains a pointer to the parent (the node
that this is a successor of) and to the actual state for this node. Note
that if a state is arrived at by two paths, then there are two nodes with
the same state. Also includes the action that got us to this state, and
the total path_cost (also known as g) to reach the node. Other functions
may add an f and h value; see best_first_graph_search and astar_search for
an explanation of how the f and h values are handled. You will not need to
subclass this class."""
def __init__(self, state, parent=None, action=None, path_cost=0):
"Create a search tree Node, derived from a parent by an action."
update(self, state=state, parent=parent, action=action,
path_cost=path_cost, depth=0)
if parent:
self.depth = parent.depth + 1
def __repr__(self):
return "<Node %s>" % (self.state,)
def path(self):
"Create a list of nodes from the root to this node."
x, result = self, [self]
while x.parent:
result.append(x.parent)
x = x.parent
return result
def expand(self, problem):
"Return a list of nodes reachable from this node. [Fig. 3.8]"
return [Node(next, self, act,
problem.path_cost(self.path_cost, self.state, act, next))
for (act, next) in problem.successor(self.state)]
#______________________________________________________________________________
class SimpleProblemSolvingAgent(agents.Agent):
"""Abstract framework for problem-solving agent. [Fig. 3.1]"""
    def __init__(self):
        agents.Agent.__init__(self)
        self.state = []
        self.seq = []
        def program(percept):
            self.state = self.update_state(self.state, percept)
            if not self.seq:
                goal = self.formulate_goal(self.state)
                problem = self.formulate_problem(self.state, goal)
                self.seq = self.search(problem)
            action = self.seq[0]
            self.seq[0:1] = []
            return action
        self.program = program
#______________________________________________________________________________
## Uninformed Search algorithms
def tree_search(problem, fringe):
"""Search through the successors of a problem to find a goal.
The argument fringe should be an empty queue.
Don't worry about repeated paths to a state. [Fig. 3.8]"""
fringe.append(Node(problem.initial))
while fringe:
node = fringe.pop()
if problem.goal_test(node.state):
return node
fringe.extend(node.expand(problem))
return None
def breadth_first_tree_search(problem):
"Search the shallowest nodes in the search tree first. [p 74]"
return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
"Search the deepest nodes in the search tree first. [p 74]"
return tree_search(problem, Stack())
def graph_search(problem, fringe):
"""Search through the successors of a problem to find a goal.
The argument fringe should be an empty queue.
If two paths reach a state, only use the best one. [Fig. 3.18]"""
closed = {}
fringe.append(Node(problem.initial))
while fringe:
node = fringe.pop()
if problem.goal_test(node.state):
return node
if node.state not in closed:
closed[node.state] = True
fringe.extend(node.expand(problem))
return None
def breadth_first_graph_search(problem):
"Search the shallowest nodes in the search tree first. [p 74]"
return graph_search(problem, FIFOQueue())
def depth_first_graph_search(problem):
"Search the deepest nodes in the search tree first. [p 74]"
return graph_search(problem, Stack())
def depth_limited_search(problem, limit=50):
"[Fig. 3.12]"
def recursive_dls(node, problem, limit):
cutoff_occurred = False
if problem.goal_test(node.state):
return node
elif node.depth == limit:
return 'cutoff'
else:
for successor in node.expand(problem):
result = recursive_dls(successor, problem, limit)
if result == 'cutoff':
cutoff_occurred = True
elif result != None:
return result
if cutoff_occurred:
return 'cutoff'
else:
return None
# Body of depth_limited_search:
return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
"[Fig. 3.13]"
for depth in xrange(sys.maxint):
result = depth_limited_search(problem, depth)
        if result != 'cutoff':
return result
#______________________________________________________________________________
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
"""Search the nodes with the lowest f scores first.
You specify the function f(node) that you want to minimize; for example,
if f is a heuristic estimate to the goal, then we have greedy best
first search; if f is node.depth then we have depth-first search.
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
values will be cached on the nodes as they are computed. So after doing
a best first search you can examine the f values of the path returned."""
f = memoize(f, 'f')
return graph_search(problem, PriorityQueue(min, f))
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None):
"""A* search is best-first graph search with f(n) = g(n)+h(n).
You need to specify the h function when you call astar_search.
Uses the pathmax trick: f(n) = max(f(n), g(n)+h(n))."""
h = h or problem.h
def f(n):
return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
return best_first_graph_search(problem, f)
#______________________________________________________________________________
## Other search algorithms
def recursive_best_first_search(problem, h=None):
    "[Fig. 4.5]"
    h = h or problem.h
    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0   # the second value is immaterial
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            successors.sort(key=lambda s: s.f) # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f
    node = Node(problem.initial)
    node.f = h(node)
    result, _ = RBFS(problem, node, infinity)
    return result
def hill_climbing(problem):
"""From the initial node, keep choosing the neighbor with highest value,
stopping when no neighbor is better. [Fig. 4.11]"""
current = Node(problem.initial)
while True:
        neighbors = current.expand(problem)
        if not neighbors:
            return current.state
        neighbor = argmax(neighbors, lambda n: problem.value(n.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
return current.state
current = neighbor
def exp_schedule(k=20, lam=0.005, limit=100):
"One possible schedule function for simulated annealing"
return lambda t: if_(t < limit, k * math.exp(-lam * t), 0)
def simulated_annealing(problem, schedule=exp_schedule()):
"[Fig. 4.5]"
current = Node(problem.initial)
for t in xrange(sys.maxint):
T = schedule(t)
if T == 0:
return current
        neighbors = current.expand(problem)
        if not neighbors:
            return current
        next = random.choice(neighbors)
delta_e = next.path_cost - current.path_cost
if delta_e > 0 or probability(math.exp(delta_e/T)):
current = next
def online_dfs_agent(a):
"[Fig. 4.12]"
pass #### more
def lrta_star_agent(a):
"[Fig. 4.12]"
pass #### more
#______________________________________________________________________________
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
"""Call genetic_algorithm on the appropriate parts of a problem.
This requires that the problem has a successor function that generates
reasonable states, and that it has a path_cost function that scores states.
We use the negative of the path_cost function, because costs are to be
minimized, while genetic-algorithm expects a fitness_fn to be maximized."""
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
random.shuffle(states)
fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
return genetic_algorithm(states, fitness_fn, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
"""[Fig. 4.7]"""
def reproduce(p1, p2):
c = random.randrange(len(p1))
return p1[:c] + p2[c:]
for i in range(ngen):
new_population = []
        for _ in range(len(population)):
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
            child = reproduce(p1, p2)
            if random.uniform(0, 1) < pmut:
child.mutate()
new_population.append(child)
population = new_population
return argmax(population, fitness_fn)
def random_weighted_selection(seq, n, weight_fn):
"""Pick n elements of seq, weighted according to weight_fn.
That is, apply weight_fn to each element of seq, add up the total.
Then choose an element e with probability weight[e]/total.
Repeat n times, with replacement. """
totals = []; runningtotal = 0
for item in seq:
runningtotal += weight_fn(item)
totals.append(runningtotal)
selections = []
for s in range(n):
r = random.uniform(0, totals[-1])
for i in range(len(seq)):
if totals[i] > r:
selections.append(seq[i])
break
return selections
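# Illustrative usage sketch: weighted selection with replacement; here 'b' is
# picked roughly twice as often as 'a' (the items and weights are hypothetical
# example data).
def _weighted_selection_example():
    return random_weighted_selection(['a', 'b'], 3, {'a': 1, 'b': 2}.get)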
#_____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.
#______________________________________________________________________________
# Graphs and Graph Problems
class Graph:
"""A graph connects nodes (verticies) by edges (links). Each edge can also
have a length associated with it. The constructor call is something like:
g = Graph({'A': {'B': 1, 'C': 2})
this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
A to B, and an edge of length 2 from A to C. You can also do:
g = Graph({'A': {'B': 1, 'C': 2}, directed=False)
This makes an undirected graph, so inverse links are also added. The graph
stays undirected; if you add more links with g.connect('B', 'C', 3), then
inverse link is also added. You can use g.nodes() to get a list of nodes,
g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
length of the link from A to B. 'Lengths' can actually be any object at
all, and nodes can be any hashable object."""
def __init__(self, dict=None, directed=True):
self.dict = dict or {}
self.directed = directed
if not directed: self.make_undirected()
def make_undirected(self):
"Make a digraph into an undirected graph by adding symmetric edges."
for a in self.dict.keys():
for (b, distance) in self.dict[a].items():
self.connect1(b, a, distance)
def connect(self, A, B, distance=1):
"""Add a link from A and B of given distance, and also add the inverse
link if the graph is undirected."""
self.connect1(A, B, distance)
if not self.directed: self.connect1(B, A, distance)
def connect1(self, A, B, distance):
"Add a link from A to B of given distance, in one direction only."
self.dict.setdefault(A,{})[B] = distance
def get(self, a, b=None):
"""Return a link distance or a dict of {node: distance} entries.
.get(a,b) returns the distance or None;
.get(a) returns a dict of {node: distance} entries, possibly {}."""
links = self.dict.setdefault(a, {})
if b is None: return links
else: return links.get(b)
def nodes(self):
"Return a list of nodes in the graph."
return self.dict.keys()
def UndirectedGraph(dict=None):
"Build a Graph where every edge (including future ones) goes both ways."
return Graph(dict=dict, directed=False)
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
curvature=lambda: random.uniform(1.1, 1.5)):
"""Construct a random graph, with the specified nodes, and random links.
The nodes are laid out randomly on a (width x height) rectangle.
Then each node is connected to the min_links nearest neighbors.
Because inverse links are added, some nodes will have more connections.
The distance between nodes is the hypotenuse times curvature(),
where curvature() defaults to a random number between 1.1 and 1.5."""
g = UndirectedGraph()
g.locations = {}
## Build the cities
for node in nodes:
g.locations[node] = (random.randrange(width), random.randrange(height))
## Build roads from each city to at least min_links nearest neighbors.
for i in range(min_links):
for node in nodes:
if len(g.get(node)) < min_links:
here = g.locations[node]
def distance_to_node(n):
if n is node or g.get(node,n): return infinity
return distance(g.locations[n], here)
neighbor = argmin(nodes, distance_to_node)
d = distance(g.locations[neighbor], here) * curvature()
g.connect(node, neighbor, int(d))
return g
romania = UndirectedGraph(Dict(
A=Dict(Z=75, S=140, T=118),
B=Dict(U=85, P=101, G=90, F=211),
C=Dict(D=120, R=146, P=138),
D=Dict(M=75),
E=Dict(H=86),
F=Dict(S=99),
H=Dict(U=98),
I=Dict(V=92, N=87),
L=Dict(T=111, M=70),
O=Dict(Z=71, S=151),
P=Dict(R=97),
R=Dict(S=80),
U=Dict(V=142)))
romania.locations = Dict(
A=( 91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
T=( 94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
australia = UndirectedGraph(Dict(
T=Dict(),
SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
NT=Dict(WA=1, Q=1),
NSW=Dict(Q=1, V=1)))
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
class GraphProblem(Problem):
"The problem of searching a graph from one node to another."
def __init__(self, initial, goal, graph):
Problem.__init__(self, initial, goal)
self.graph = graph
def successor(self, A):
"Return a list of (action, result) pairs."
return [(B, B) for B in self.graph.get(A).keys()]
def path_cost(self, cost_so_far, A, action, B):
return cost_so_far + (self.graph.get(A,B) or infinity)
def h(self, node):
"h function is straight-line distance from a node's state to goal."
locs = getattr(self.graph, 'locations', None)
if locs:
return int(distance(locs[node.state], locs[self.goal]))
else:
return infinity
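# Illustrative usage sketch: run A* on the Romania map defined above and read
# the solution path back from the goal node; the expected optimal route is the
# classic A -> S -> R -> P -> B tour.
def _romania_astar_example():
    problem = GraphProblem('A', 'B', romania)
    goal_node = astar_search(problem)
    return [n.state for n in reversed(goal_node.path())]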
#______________________________________________________________________________
#### NOTE: NQueensProblem not working properly yet.
class NQueensProblem(Problem):
"""The problem of placing N queens on an NxN board with none attacking
    each other. A state is represented as an N-element array, where a
    value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in yet. We fill in columns left to right."""
def __init__(self, N):
self.N = N
self.initial = [None] * N
def successor(self, state):
"In the leftmost empty column, try all non-conflicting rows."
if state[-1] is not None:
return [] ## All columns filled; no successors
else:
def place(col, row):
new = state[:]
new[col] = row
return new
col = state.index(None)
return [(row, place(col, row)) for row in range(self.N)
if not self.conflicted(state, row, col)]
def conflicted(self, state, row, col):
"Would placing a queen at (row, col) conflict with anything?"
        for c in range(col):
if self.conflict(row, col, state[c], c):
return True
return False
def conflict(self, row1, col1, row2, col2):
"Would putting two queens in (row1, col1) and (row2, col2) conflict?"
return (row1 == row2 ## same row
or col1 == col2 ## same column
or row1-col1 == row2-col2 ## same \ diagonal
or row1+col1 == row2+col2) ## same / diagonal
def goal_test(self, state):
"Check if all columns filled, no conflicts."
if state[-1] is None:
return False
for c in range(len(state)):
if self.conflicted(state, state[c], c):
return False
return True
#______________________________________________________________________________
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
## iterative-repair and related search techniques, as suggested by Justin Boyan.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
"""Return a random Boggle board of size n x n.
We represent a board as a linear list of letters."""
cubes = [cubes16[i % 16] for i in range(n*n)]
random.shuffle(cubes)
return map(random.choice, cubes)
## The best 5x5 board found by Boyan, with our word list this board scores
## 2274 words, for a score of 9837
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
"Print the board in a 2-d array."
n2 = len(board); n = exact_sqrt(n2)
for i in range(n2):
if i % n == 0: print
if board[i] == 'Q': print 'Qu',
else: print str(board[i]) + ' ',
print
def boggle_neighbors(n2, cache={}):
""""Return a list of lists, where the i-th element is the list of indexes
for the neighbors of square i."""
if cache.get(n2):
return cache.get(n2)
n = exact_sqrt(n2)
neighbors = [None] * n2
for i in range(n2):
neighbors[i] = []
on_top = i < n
on_bottom = i >= n2 - n
on_left = i % n == 0
on_right = (i+1) % n == 0
if not on_top:
neighbors[i].append(i - n)
if not on_left: neighbors[i].append(i - n - 1)
if not on_right: neighbors[i].append(i - n + 1)
if not on_bottom:
neighbors[i].append(i + n)
if not on_left: neighbors[i].append(i + n - 1)
if not on_right: neighbors[i].append(i + n + 1)
if not on_left: neighbors[i].append(i - 1)
if not on_right: neighbors[i].append(i + 1)
cache[n2] = neighbors
return neighbors
def exact_sqrt(n2):
"If n2 is a perfect square, return its square root, else raise error."
n = int(math.sqrt(n2))
assert n * n == n2
return n
##_____________________________________________________________________________
class Wordlist:
"""This class holds a list of words. You can use (word in wordlist)
to check if a word is in the list, or wordlist.lookup(prefix)
to see if prefix starts any of the words in the list."""
def __init__(self, filename, min_len=3):
lines = open(filename).read().upper().split()
self.words = [word for word in lines if len(word) >= min_len]
self.words.sort()
self.bounds = {}
for c in ALPHABET:
c2 = chr(ord(c) + 1)
self.bounds[c] = (bisect.bisect(self.words, c),
bisect.bisect(self.words, c2))
def lookup(self, prefix, lo=0, hi=None):
"""See if prefix is in dictionary, as a full word or as a prefix.
Return two values: the first is the lowest i such that
words[i].startswith(prefix), or is None; the second is
True iff prefix itself is in the Wordlist."""
words = self.words
i = bisect.bisect_left(words, prefix, lo, hi)
if i < len(words) and words[i].startswith(prefix):
return i, (words[i] == prefix)
else:
return None, False
def __contains__(self, word):
        i = bisect.bisect_left(self.words, word)
        return i < len(self.words) and self.words[i] == word
def __len__(self):
return len(self.words)
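## Usage sketch (illustrative; the word-list path is the same assumption made
## by BoggleFinder below):
##     wl = Wordlist("../data/wordlist")
##     'HELLO' in wl        # exact membership test
##     wl.lookup('HEL')     # (index of first word starting with 'HEL',
##                          #  whether 'HEL' itself is in the list)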
##_____________________________________________________________________________
class BoggleFinder:
"""A class that allows you to find all the words in a Boggle board. """
wordlist = None ## A class variable, holding a wordlist
def __init__(self, board=None):
if BoggleFinder.wordlist is None:
BoggleFinder.wordlist = Wordlist("../data/wordlist")
self.found = {}
if board:
self.set_board(board)
def set_board(self, board=None):
"Set the board, and find all the words in it."
if board is None:
board = random_boggle()
self.board = board
self.neighbors = boggle_neighbors(len(board))
self.found = {}
for i in range(len(board)):
lo, hi = self.wordlist.bounds[board[i]]
self.find(lo, hi, i, [], '')
return self
def find(self, lo, hi, i, visited, prefix):
"""Looking in square i, find the words that continue the prefix,
considering the entries in self.wordlist.words[lo:hi], and not
revisiting the squares in visited."""
if i in visited:
return
wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
if wordpos is not None:
if is_word:
self.found[prefix] = True
visited.append(i)
c = self.board[i]
if c == 'Q': c = 'QU'
prefix += c
for j in self.neighbors[i]:
self.find(wordpos, hi, j, visited, prefix)
visited.pop()
def words(self):
"The words found."
return self.found.keys()
scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100
def score(self):
"The total score for the words found, according to the rules."
return sum([self.scores[len(w)] for w in self.words()])
def __len__(self):
"The number of words found."
return len(self.found)
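## Usage sketch (illustrative, not part of the original module): score a random
## 4x4 board, assuming the word-list file referenced in BoggleFinder.__init__
## exists.
##     board = random_boggle()
##     finder = BoggleFinder(board)
##     print(len(finder), 'words found, total score', finder.score())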
##_____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
"""Solve inverse Boggle by hill-climbing: find a high-scoring board by
starting with a random one and changing it."""
finder = BoggleFinder()
if board is None:
board = random_boggle()
best = len(finder.set_board(board))
for _ in range(ntimes):
i, oldc = mutate_boggle(board)
new = len(finder.set_board(board))
if new > best:
best = new
            print(best, _, board)
else:
board[i] = oldc ## Change back
if print_it:
print_boggle(board)
return board, best
def mutate_boggle(board):
i = random.randrange(len(board))
oldc = board[i]
board[i] = random.choice(random.choice(cubes16)) ##random.choice(boyan_best)
return i, oldc
#______________________________________________________________________________
## Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
"""Delegates to a problem, and keeps statistics."""
def __init__(self, problem):
self.problem = problem
self.succs = self.goal_tests = self.states = 0
self.found = None
def successor(self, state):
"Return a list of (action, state) pairs reachable from this state."
result = self.problem.successor(state)
self.succs += 1; self.states += len(result)
return result
def goal_test(self, state):
"Return true if the state is a goal."
self.goal_tests += 1
result = self.problem.goal_test(state)
if result:
self.found = state
return result
def __getattr__(self, attr):
if attr in ('succs', 'goal_tests', 'states'):
return self.__dict__[attr]
else:
return getattr(self.problem, attr)
def __repr__(self):
return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
self.states, str(self.found)[0:4])
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
breadth_first_graph_search, depth_first_graph_search,
iterative_deepening_search, depth_limited_search,
astar_search]):
def do(searcher, problem):
p = InstrumentedProblem(problem)
searcher(p)
return p
table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
print_table(table, header)
def compare_graph_searchers():
compare_searchers(problems=[GraphProblem('A', 'B', romania),
GraphProblem('O', 'N', romania),
GraphProblem('Q', 'WA', australia)],
header=['Searcher', 'Romania(A,B)', 'Romania(O, N)', 'Australia'])
|
|
# -*- coding: utf-8 -*-
#
"""Simple HTTP Server.
Supports browser cache and HTTP compression."""
import os
import sys
import datetime
import io
import email
import urllib.parse
from http import HTTPStatus
import http.cookiejar
import http.server as server
from http.server import SimpleHTTPRequestHandler
# Python might be built without gzip / zlib
try:
import zlib
except ImportError:
zlib = None
# List of commonly compressed content types, copied from
# https://github.com/h5bp/server-configs-apache.
# RequestHandler.compressed_types is set to this list when the server is
# started via run() below.
commonly_compressed_types = [ "application/atom+xml",
"application/javascript",
"application/json",
"application/ld+json",
"application/manifest+json",
"application/rdf+xml",
"application/rss+xml",
"application/schema+json",
"application/vnd.geo+json",
"application/vnd.ms-fontobject",
"application/x-font-ttf",
"application/x-javascript",
"application/x-web-app-manifest+json",
"application/xhtml+xml",
"application/xml",
"font/eot",
"font/opentype",
"image/bmp",
"image/svg+xml",
"image/vnd.microsoft.icon",
"image/x-icon",
"text/cache-manifest",
"text/css",
"text/html",
"text/javascript",
"text/plain",
"text/vcard",
"text/vnd.rim.location.xloc",
"text/vtt",
"text/x-component",
"text/x-cross-domain-policy",
"text/xml"
]
# Generators for HTTP compression
def _zlib_producer(fileobj, wbits):
"""Generator that yields data read from the file object fileobj,
compressed with the zlib library.
wbits is the same argument as for zlib.compressobj.
"""
bufsize = 2 << 17
producer = zlib.compressobj(wbits=wbits)
with fileobj:
while True:
buf = fileobj.read(bufsize)
if not buf: # end of file
yield producer.flush()
return
yield producer.compress(buf)
def _gzip_producer(fileobj):
"""Generator for gzip compression."""
return _zlib_producer(fileobj, 25)
def _deflate_producer(fileobj):
"""Generator for deflage compression."""
return _zlib_producer(fileobj, 15)
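# Note on the wbits values above: 25 (16 + 9) selects a gzip container, while
# 15 selects the zlib container expected by the HTTP "deflate" encoding.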
class RequestHandler(SimpleHTTPRequestHandler):
# List of Content Types that are returned with HTTP compression.
# Set to the empty list by default (no compression).
compressed_types = []
# Dictionary mapping an encoding (in an Accept-Encoding header) to a
# generator of compressed data. By default, provided zlib is available,
# the supported encodings are gzip and deflate.
# Override if a subclass wants to use other compression algorithms.
compressions = {}
if zlib:
compressions = {
'deflate': _deflate_producer,
'gzip': _gzip_producer,
'x-gzip': _gzip_producer # alias for gzip
}
def _make_chunk(self, data):
"""Produces a data chunk for Chunked Transfer Encoding."""
return ("{:X}".format(len(data)).encode("ascii") + b"\r\n" + data
+ b"\r\n")
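    # For example, _make_chunk(b"hello") yields b"5\r\nhello\r\n": the chunk
    # size in hex, CRLF, the payload, CRLF (RFC 7230 section 4.1).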
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
try:
if hasattr(f, "read"):
self.copyfile(f, self.wfile)
else:
# Generator for compressed data
if self.protocol_version >= "HTTP/1.1":
# Chunked Transfer
for data in f:
if data:
self.wfile.write(self._make_chunk(data))
self.wfile.write(self._make_chunk(b''))
else:
for data in f:
self.wfile.write(data)
finally:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
parts = urllib.parse.urlsplit(self.path)
if not parts.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(HTTPStatus.MOVED_PERMANENTLY)
new_parts = (parts[0], parts[1], parts[2] + '/',
parts[3], parts[4])
new_url = urllib.parse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except OSError:
self.send_error(HTTPStatus.NOT_FOUND, "File not found")
return None
try:
fs = os.fstat(f.fileno())
content_length = fs[6]
# Use browser cache if possible
if ("If-Modified-Since" in self.headers
and "If-None-Match" not in self.headers):
# compare If-Modified-Since and time of last file modification
try:
ims = email.utils.parsedate_to_datetime(
self.headers["If-Modified-Since"])
except (TypeError, IndexError, OverflowError, ValueError):
# ignore ill-formed values
pass
else:
if ims.tzinfo is None:
# obsolete format with no timezone, cf.
# https://tools.ietf.org/html/rfc7231#section-7.1.1.1
ims = ims.replace(tzinfo=datetime.timezone.utc)
if ims.tzinfo is datetime.timezone.utc:
# compare to UTC datetime of last modification
last_modif = datetime.datetime.fromtimestamp(
fs.st_mtime, datetime.timezone.utc)
# remove microseconds, like in If-Modified-Since
last_modif = last_modif.replace(microsecond=0)
if last_modif <= ims:
self.send_response(HTTPStatus.NOT_MODIFIED)
self.end_headers()
f.close()
return None
self.send_response(HTTPStatus.OK)
self.send_header("Content-type", ctype)
self.send_header("Last-Modified",
self.date_time_string(fs.st_mtime))
# Use HTTP compression if possible
if ctype not in self.compressed_types:
self.send_header("Content-Length", str(content_length))
self.end_headers()
return f
            # Get accepted encodings; "encodings" is a dictionary mapping each
            # encoding to its quality; e.g. for the header "gzip; q=0.8",
            # encodings["gzip"] is set to 0.8.
accept_encoding = self.headers.get_all("Accept-Encoding", ())
encodings = {}
for accept in http.cookiejar.split_header_words(accept_encoding):
params = iter(accept)
encoding = next(params, ("", ""))[0]
quality, value = next(params, ("", ""))
if quality == "q" and value:
try:
q = float(value)
except ValueError:
# Invalid quality : ignore encoding
q = 0
else:
q = 1 # quality defaults to 1
if q:
encodings[encoding] = max(encodings.get(encoding, 0), q)
compressions = set(encodings).intersection(self.compressions)
compression = None
if compressions:
# Take the encoding with highest quality
compression = max((encodings[enc], enc)
for enc in compressions)[1]
elif '*' in encodings and self.compressions:
# If no specified encoding is supported but "*" is accepted,
# take one of the available compressions.
compression = list(self.compressions)[0]
if compression:
# If at least one encoding is accepted, send data compressed
# with the selected compression algorithm.
producer = self.compressions[compression]
self.send_header("Content-Encoding", compression)
if content_length < 2 << 18:
# For small files, load content in memory
with f:
content = b''.join(producer(f))
content_length = len(content)
f = io.BytesIO(content)
else:
chunked = self.protocol_version >= "HTTP/1.1"
if chunked:
# Use Chunked Transfer Encoding (RFC 7230 section 4.1)
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
# Return a generator of pieces of compressed data
return producer(f)
self.send_header("Content-Length", str(content_length))
self.end_headers()
return f
except:
f.close()
raise
def run(port=8080):
server_address, handler = ('', port), RequestHandler
handler.compressed_types = commonly_compressed_types
httpd = server.HTTPServer(server_address, handler)
print(("Server running on port http://localhost:{}.".format(port)))
print("Press CTRL+C to Quit.")
httpd.serve_forever()
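# Illustrative sketch (an assumption, not part of the original module): any
# subclass can restrict compression by overriding compressed_types, e.g. a
# handler that only compresses JSON and plain text, served on another port.
class JSONOnlyRequestHandler(RequestHandler):
    compressed_types = ["application/json", "text/plain"]
def run_json_only(port=8081):
    """Serve the current directory, compressing only JSON and plain text."""
    httpd = server.HTTPServer(('', port), JSONOnlyRequestHandler)
    print("Server running at http://localhost:{}".format(port))
    httpd.serve_forever()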
if __name__ == "__main__":
run()
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose_parameterized import parameterized
from six.moves import range
import pandas as pd
from zipline.errors import BadOrderParameters
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.testing.fixtures import (
WithLogger,
ZiplineTestCase,
WithConstantFutureMinuteBarData
)
from zipline.testing.predicates import assert_equal
class ExecutionStyleTestCase(WithConstantFutureMinuteBarData,
WithLogger,
ZiplineTestCase):
"""
Tests for zipline ExecutionStyle classes.
"""
class ArbitraryObject():
def __str__(self):
return """This should yield a bad order error when
passed as a stop or limit price."""
epsilon = .000001
INVALID_PRICES = [
(-1,),
(-1.0,),
(0 - epsilon,),
(float('nan'),),
(float('inf'),),
(ArbitraryObject(),),
]
# Input, expected on limit buy/stop sell, expected on limit sell/stop buy.
EXPECTED_PRICE_ROUNDING = [
(0.00, 0.00, 0.00),
(0.0005, 0.00, 0.00),
(1.0005, 1.00, 1.00), # Lowest value to round down on sell.
(1.0005 + epsilon, 1.00, 1.01),
(1.0095 - epsilon, 1.0, 1.01),
(1.0095, 1.01, 1.01), # Highest value to round up on buy.
(0.01, 0.01, 0.01)
]
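    # Reading the rows above (illustrative): with the default $0.01 tick size,
    # (1.0005 + epsilon, 1.00, 1.01) means a limit buy (or stop sell) at that
    # price is expected to round down to 1.00, while a limit sell (or stop buy)
    # is expected to round up to 1.01.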
# Testing for an asset with a tick_size of 0.0001
smaller_epsilon = 0.00000001
EXPECTED_PRECISION_ROUNDING = [
(0.00, 0.00, 0.00),
(0.0005, 0.0005, 0.0005),
(0.00005, 0.00, 0.0001),
(0.000005, 0.00, 0.00),
(1.000005, 1.00, 1.00), # Lowest value to round down on sell.
(1.000005 + smaller_epsilon, 1.00, 1.0001),
(1.000095 - smaller_epsilon, 1.0, 1.0001),
(1.000095, 1.0001, 1.0001), # Highest value to round up on buy.
(0.01, 0.01, 0.01)
]
# Testing for an asset with a tick_size of 0.05
EXPECTED_CUSTOM_TICK_SIZE_ROUNDING = [
(0.00, 0.00, 0.00),
(0.0005, 0.00, 0.00),
(1.0025, 1.00, 1.00), # Lowest value to round down on sell.
(1.0025 + epsilon, 1.00, 1.05),
(1.0475 - epsilon, 1.0, 1.05),
(1.0475, 1.05, 1.05), # Highest value to round up on buy.
(0.05, 0.05, 0.05)
]
# Test that the same rounding behavior is maintained if we add between 1
# and 10 to all values, because floating point math is made of lies.
EXPECTED_PRICE_ROUNDING += [
(x + delta, y + delta, z + delta)
for (x, y, z) in EXPECTED_PRICE_ROUNDING
for delta in range(1, 10)
]
EXPECTED_PRECISION_ROUNDING += [
(x + delta, y + delta, z + delta)
for (x, y, z) in EXPECTED_PRECISION_ROUNDING
for delta in range(1, 10)
]
EXPECTED_CUSTOM_TICK_SIZE_ROUNDING += [
(x + delta, y + delta, z + delta)
for (x, y, z) in EXPECTED_CUSTOM_TICK_SIZE_ROUNDING
for delta in range(1, 10)
]
# Combine everything into one parameter set
FINAL_PARAMETER_SET = [
(x, y, z, 1)
for (x, y, z) in EXPECTED_PRICE_ROUNDING
] + [
(x, y, z, 2)
for (x, y, z) in EXPECTED_PRECISION_ROUNDING
] + [
(x, y, z, 3)
for (x, y, z) in EXPECTED_CUSTOM_TICK_SIZE_ROUNDING
]
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict({
1: {
'multiplier': 100,
'tick_size': 0.01,
'symbol': 'F1',
'exchange': 'TEST'
},
2: {
'multiplier': 100,
'tick_size': 0.0001,
'symbol': 'F2',
'exchange': 'TEST'
},
3: {
'multiplier': 100,
'tick_size': 0.05,
'symbol': 'F3',
'exchange': 'TEST'
}
}, orient='index')
@classmethod
def init_class_fixtures(cls):
super(ExecutionStyleTestCase, cls).init_class_fixtures()
@parameterized.expand(INVALID_PRICES)
def test_invalid_prices(self, price):
"""
Test that execution styles throw appropriate exceptions upon receipt
of an invalid price field.
"""
with self.assertRaises(BadOrderParameters):
LimitOrder(price)
with self.assertRaises(BadOrderParameters):
StopOrder(price)
for lmt, stp in [(price, 1), (1, price), (price, price)]:
with self.assertRaises(BadOrderParameters):
StopLimitOrder(lmt, stp)
def test_market_order_prices(self):
"""
Basic unit tests for the MarketOrder class.
"""
style = MarketOrder()
assert_equal(style.get_limit_price(_is_buy=True), None)
assert_equal(style.get_limit_price(_is_buy=False), None)
assert_equal(style.get_stop_price(_is_buy=True), None)
assert_equal(style.get_stop_price(_is_buy=False), None)
@parameterized.expand(FINAL_PARAMETER_SET)
def test_limit_order_prices(self,
price,
expected_limit_buy_or_stop_sell,
expected_limit_sell_or_stop_buy,
asset):
"""
Test price getters for the LimitOrder class.
"""
style = LimitOrder(
price,
asset=self.asset_finder.retrieve_asset(asset)
)
assert_equal(expected_limit_buy_or_stop_sell,
style.get_limit_price(is_buy=True))
assert_equal(expected_limit_sell_or_stop_buy,
style.get_limit_price(is_buy=False))
assert_equal(None, style.get_stop_price(_is_buy=True))
assert_equal(None, style.get_stop_price(_is_buy=False))
@parameterized.expand(FINAL_PARAMETER_SET)
def test_stop_order_prices(self,
price,
expected_limit_buy_or_stop_sell,
expected_limit_sell_or_stop_buy,
asset):
"""
Test price getters for StopOrder class. Note that the expected rounding
direction for stop prices is the reverse of that for limit prices.
"""
style = StopOrder(
price,
asset=self.asset_finder.retrieve_asset(asset)
)
assert_equal(None, style.get_limit_price(_is_buy=False))
assert_equal(None, style.get_limit_price(_is_buy=True))
assert_equal(expected_limit_buy_or_stop_sell,
style.get_stop_price(is_buy=False))
assert_equal(expected_limit_sell_or_stop_buy,
style.get_stop_price(is_buy=True))
@parameterized.expand(FINAL_PARAMETER_SET)
def test_stop_limit_order_prices(self,
price,
expected_limit_buy_or_stop_sell,
expected_limit_sell_or_stop_buy,
asset):
"""
Test price getters for StopLimitOrder class. Note that the expected
rounding direction for stop prices is the reverse of that for limit
prices.
"""
style = StopLimitOrder(
price,
price + 1,
asset=self.asset_finder.retrieve_asset(asset)
)
assert_equal(expected_limit_buy_or_stop_sell,
style.get_limit_price(is_buy=True))
assert_equal(expected_limit_sell_or_stop_buy,
style.get_limit_price(is_buy=False))
assert_equal(expected_limit_buy_or_stop_sell + 1,
style.get_stop_price(is_buy=False))
assert_equal(expected_limit_sell_or_stop_buy + 1,
style.get_stop_price(is_buy=True))
|
|
import csv
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
UCSD = 'UCSD'
BUCKETS = [0.1, 0.2, 1.0, 10.0, 60.0 ] # range(500, 3001, 500)
#COLORS=['#90B0D4', '#90D492', '#D4B490', '#D490D2']
COLORS=['#8dd3c7','#bebada','#ffffb3','#fb8072','#80b1d3','#fdb462']
COLORS_E=['#8dd3c7','#bebada','#80b1d3','#ffffb3','#fdb462','#fb8072']
SAFE = ['S', 'T', 'H']
SAFE_L = ['Safe', 'Timeout', 'Hole']
UNSAFE = ['U', 'B', 'D'] #, 'O']
UNSAFE_L = ['Unsafe', 'Unbound', 'Diverge'] #, 'Output']
ALL = UNSAFE + SAFE
ALL_L = UNSAFE_L + SAFE_L
ALL_D = [ ['U', 'O'], ['B'], ['D'], ['S', 'T', 'H']]
ALL_DL = [ 'Witness', 'Unbound', 'Diverge', 'No Witness'] # ['S', 'T'], ['U'], ['B'], ['D'] ]
ALL_D_E = [ ['U', 'O', 'B', 'D'], ['H'], ['S', 'T']]
ALL_DL_E = [ 'Witness Found', 'Ad-Hoc Polymorphism', 'Non-Parametric Function *', 'Dead Code *', 'Safe Call *', 'Witness Exists *'] # ['S', 'T'], ['U'], ['B'], ['D'] ]
def read_csv(f):
with open(f) as f:
return list(csv.reader(f))
def read_csv_dict(f):
with open(f) as f:
return list(csv.DictReader(f))
def autolabel(ax, rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., height,
'%d' % round(height),
ha='center', va='bottom')
def cumulative_coverage(data):
headers = data[0]
data = data[1:]
return [(l, round(100 * len([r for r in data
if float(r[2]) <= l
and r[4] in UNSAFE])
/ float(len(data))))
for l in BUCKETS]
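# Illustrative reading (hypothetical numbers): an entry (1.0, 62) in the result
# means 62% of all programs in the CSV reached an UNSAFE outcome (a witness was
# found) within 1.0 seconds.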
def plot_coverage(seminal, ucsd):
xy_s = cumulative_coverage(seminal)
xy_u = cumulative_coverage(ucsd)
print ('xy_s', xy_s)
print ('xy_u', xy_u)
N = len(xy_s)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
fig = plt.figure()
p1 = plt.bar(ind, [r[1] for r in xy_s], width,
color=COLORS[0])
p2 = plt.bar(ind + width, [r[1] for r in xy_u], width,
color=COLORS[1])
plt.xlabel('Witness found in <= x seconds', fontsize=20)
plt.ylabel('Witnesses Found (% of total programs)', fontsize=20)
plt.title('Cumulative Coverage', fontsize=24)
plt.xticks(ind + width, [r[0] for r in xy_s], fontsize='large')
plt.yticks(np.arange(0, 101, 10), fontsize='large')
plt.legend((p1[0], p2[0]), ('UW', UCSD), loc='lower right', fontsize=16)
# plt.legend((p1[0], p2[0]), ('Men', 'Women'))
autolabel(plt, p1)
autolabel(plt, p2)
# plt.show()
fig.savefig('coverage.png')
plt.close()
def plot_user_study():
a = read_csv_dict('study-data/A-Eric-scores.csv')
b = read_csv_dict('study-data/B-Eric-scores.csv')
def f(xs):
return [int(x) for x in xs if int(x) >= 0]
def err(xs):
#p = np.average(xs)
#return 100 * np.sqrt(p * (1-p) / len(xs))
s = np.std(xs)
n = len(xs)
return 100 * (s / np.sqrt(n))
## REASON
sumlist_a = f([r['5: sumlist reason'] for r in a])
append_a = f([r['1: append reason'] for r in a])
digitsofint_a = f([r['3: digitsofint reason'] for r in a])
wwhile_a = f([r['7: wwhile reason'] for r in a])
sumlist_b = f([r['1: sumlist reason'] for r in b])
append_b = f([r['5: append reason'] for r in b])
digitsofint_b = f([r['7: digitsofint reason'] for r in b])
wwhile_b = f([r['3: wwhile reason'] for r in b])
ind = np.arange(4)
width = 0.35
print([np.average(sumlist_a) - np.average(sumlist_b),
np.average(append_b) - np.average(append_a),
np.average(digitsofint_b) - np.average(digitsofint_a),
np.average(wwhile_a) - np.average(wwhile_b)])
fig = plt.figure()
p_o = plt.bar(ind,
[100*np.average(sumlist_b), 100*np.average(append_a), 100*np.average(digitsofint_a), 100*np.average(wwhile_b)],
width,
color=COLORS[0],
                  yerr=list(map(err, [sumlist_b, append_a, digitsofint_a, wwhile_b])),
error_kw={'linewidth': 3, 'ecolor': 'gray', 'capsize': 6, 'capthick': 3}
)
p_n = plt.bar(ind + width,
[100*np.average(sumlist_a), 100*np.average(append_b), 100*np.average(digitsofint_b), 100*np.average(wwhile_a)],
width,
color=COLORS[1],
                  yerr=list(map(err, [sumlist_a, append_b, digitsofint_b, wwhile_a])),
error_kw={'linewidth': 3, 'ecolor': 'gray', 'capsize': 6, 'capthick': 3}
)
plt.title('Explanation',fontsize=24)
# plt.xlabel('Problem', fontsize=20)
plt.ylabel('% Correct', fontsize=20)
plt.xticks(ind + width, ['sumList\n(p = 0.061)', 'append\n(p = 0.018)', 'digitsOfInt\n(p = 0.12)', 'wwhile\n(p = 0.14)'], fontsize='large')
plt.legend(('OCaml', 'NanoMaLy'), loc='lower right', fontsize=16)
# autolabel(plt, p_o)
# autolabel(plt, p_n)
fig.savefig('user-study-reason.png')
plt.close()
## FIX
sumlist_a = f([r['6: sumlist fix'] for r in a])
append_a = f([r['2: append fix'] for r in a])
digitsofint_a = f([r['4: digitsofint fix'] for r in a])
wwhile_a = f([r['8: wwhile fix'] for r in a])
sumlist_b = f([r['2: sumlist fix'] for r in b])
append_b = f([r['6: append fix'] for r in b])
digitsofint_b = f([r['8: digitsofint fix'] for r in b])
wwhile_b = f([r['4: wwhile fix'] for r in b])
ind = np.arange(4)
width = 0.35
fig = plt.figure()
p_o = plt.bar(ind,
[100*np.average(sumlist_b), 100*np.average(append_a), 100*np.average(digitsofint_a), 100*np.average(wwhile_b)],
width,
color=COLORS[0],
                  yerr=list(map(err, [sumlist_b, append_a, digitsofint_a, wwhile_b])),
error_kw={'linewidth': 3, 'ecolor': 'gray', 'capsize': 6, 'capthick': 3}
)
p_n = plt.bar(ind + width,
[100*np.average(sumlist_a), 100*np.average(append_b), 100*np.average(digitsofint_b), 100*np.average(wwhile_a)],
width,
color=COLORS[1],
                  yerr=list(map(err, [sumlist_a, append_b, digitsofint_b, wwhile_a])),
error_kw={'linewidth': 3, 'ecolor': 'gray', 'capsize': 6, 'capthick': 3}
)
plt.title('Fix',fontsize=24)
# plt.xlabel('Problem', fontsize=20)
plt.ylabel('% Correct', fontsize=20)
plt.xticks(ind + width, ['sumList\n(p = 0.067)', 'append\n(p = 0.038)', 'digitsOfInt\n(p = 0.083)', 'wwhile\n(p = 0.20)'], fontsize='large')
plt.legend(('OCaml', 'NanoMaLy'), loc='lower right', fontsize=16)
# autolabel(plt, p_o)
# autolabel(plt, p_n)
fig.savefig('user-study-fix.png')
plt.close()
BINS = [5, 10, 20, 50, 100, 1000]
def cumulative_trace_size(data):
return [(len([r for r in data
if r <= l])
/ float(len(data))
* 100)
for l in BINS]
def plot_trace_size(seminal, ucsd):
# xy = cumulative_coverage(data)
# N = len(xy)
# ind = np.arange(N) # the x locations for the groups
# width = 0.5 # the width of the bars: can also be len(x) sequence
# p1 = plt.bar(ind, [r[1] for r in xy], width,
# color=COLORS[0])
step_s = [int(r[5]) for r in seminal[1:] if r[4] in UNSAFE and int(r[5]) > 0]
jump_s = [int(r[6]) for r in seminal[1:] if r[4] in UNSAFE and int(r[6]) > 0]
step_u = [int(r[5]) for r in ucsd[1:] if r[4] in UNSAFE and int(r[5]) > 0]
jump_u = [int(r[6]) for r in ucsd[1:] if r[4] in UNSAFE and int(r[6]) > 0]
binlabels = ['<= 5', '<= 10', '<= 20', '<= 50', '<= 100', 'any']
ind = np.arange(0, len(binlabels))
width = 0.35
# plt.figure(figsize=(100,50))
# fig, ax = plt.subplots()
# fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)
# ax = plt.subplot(1,2,1, aspect='equal', adjustable='box-forced')
# ax = axes[0]
# ax.set(adjustable='box-forced', aspect=4)
fig = plt.figure()
# y,binEdges=np.histogram(step_s,bins=bins)
c_step_s = cumulative_trace_size(step_s)
print('step complexity')
print('seminal:\t{}\t{}\t{}'.format(c_step_s[0], c_step_s[1], len(step_s)))
print('avg/med/max:\t{}\t{}\t{}'.format(np.mean(step_s), np.median(step_s), np.max(step_s)))
p1 = plt.bar(ind, c_step_s, label='UW', width=width, color=COLORS[0])
# y,binEdges=np.histogram(step_u,bins=bins)
c_step_u = cumulative_trace_size(step_u)
print('ucsd:\t\t{}\t{}\t{}'.format(c_step_u[0], c_step_u[1], len(step_u)))
    print('avg/med/max:\t{}\t{}\t{}'.format(np.mean(step_u), np.median(step_u), np.max(step_u)))
p2 = plt.bar(ind + width, c_step_u, label=UCSD, width=width, color=COLORS[1])
plt.legend((p1[0],p2[0]), ('UW',UCSD), loc='lower right', fontsize=16)
plt.title('Step Complexity', fontsize=24)
plt.xlabel('Total Steps', fontsize=20)
plt.ylabel('Traces (%)', fontsize=20)
# ax.set_xlim(0,6)
# ax.set_ylim(0,len(step))
plt.ylim(0,100)
plt.xticks(ind + width, binlabels, fontsize='large')
plt.yticks(fontsize='large')
# autolabel(ax, p1)
plt.savefig('trace_size_step.png')
plt.close()
fig = plt.figure()
# ax = plt.subplot(1,2,2, aspect='equal', adjustable='box-forced', sharex=ax, sharey=ax)
# ax = axes[1]
# ax.set(adjustable='box-forced', aspect=4)
c_jump_s = cumulative_trace_size(jump_s)
# y,binEdges=np.histogram(jump_s,bins=bins)
# foo = y / float(len(jump_s))
print('jump complexity')
print('seminal:\t{}\t{}\t{}'.format(c_jump_s[0], c_jump_s[1], len(jump_s)))
print('avg/med/max:\t{}\t{}\t{}'.format(np.mean(jump_s), np.median(jump_s), np.max(jump_s)))
p1 = plt.bar(ind, c_jump_s, label='UW', width=width, color=COLORS[0])
# y,binEdges=np.histogram(jump_u,bins=bins)
# foo = y / float(len(jump_u))
c_jump_u = cumulative_trace_size(jump_u)
print('ucsd:\t\t{}\t{}\t{}'.format(c_jump_u[0], c_jump_u[1], len(jump_u)))
    print('avg/med/max:\t{}\t{}\t{}'.format(np.mean(jump_u), np.median(jump_u), np.max(jump_u)))
p2 = plt.bar(ind + width, c_jump_u, label=UCSD, width=width, color=COLORS[1])
plt.legend((p1[0],p2[0]), ('UW',UCSD), loc='lower right', fontsize=16)
plt.title('Jump Complexity', fontsize=24)
plt.xlabel('Total Jumps', fontsize=20)
# plt.xlabel('Jumps', fontsize=20)
plt.ylabel('Traces (%)', fontsize=20)
# plt.ylabel('Traces')
# ax.set_xlim(0,6)
# fig.set_ylim(0.0,1.0)
plt.ylim(0,100)
plt.xticks(ind + width, binlabels, fontsize='large')
plt.yticks(fontsize='large')
# autolabel(ax, p2)
t_jump = cumulative_trace_size(jump_s + jump_u)
print('total:\t\t{}\t{}'.format(t_jump[0], t_jump[1]))
print('mean:\t\t{}'.format(np.mean(jump_s + jump_u)))
print('median:\t\t{}'.format(np.median(jump_s + jump_u)))
print('std:\t\t{}'.format(np.std(jump_s + jump_u)))
# plt.suptitle('Size of generated traces', fontsize=16)
# p1 = plt.bar(0.5*(binEdges[1:]+binEdges[:-1]), y, label='Steps')
# p1 = plt.hist([step,jump], bins=bins, label=['Steps', 'Jumps'], range=(0,300), color=COLORS[:2])
# plt.xlabel('Size')
# plt.yticks(np.arange(0.0, 1.1, 0.1))
# plt.legend((p1[0],), ('UW',))
# autolabel(ax, p2)
# plt.show()
plt.savefig('trace_size_jump.png')
plt.close()
def plot_distrib(seminal, ucsd):
# data = data[1:]
rs_s = [len([r for r in seminal[1:] if r[4] in o])
for o in ALL_D]
rs_u = [len([r for r in ucsd[1:] if r[4] in o])
for o in ALL_D]
print ('rs_s', rs_s)
print ('rs_u', rs_u)
# N = len(xy)
# ind = np.arange(N) # the x locations for the groups
# width = 0.5 # the width of the bars: can also be len(x) sequence
ax = plt.subplot(1,2,1, aspect=1)
#plt.figure(figsize=(1,1))
#plt.axes(aspect=1)
p1 = ax.pie(rs_s, labels=ALL_DL,
autopct='%.0f%%',
pctdistance=1.3,
labeldistance=10,
colors=COLORS,
textprops={'fontsize':16},
shadow=True)
ax.set_title('UW', fontsize=20)
ax = plt.subplot(1,2,2, aspect=1)
#ax.figure(figsize=(1,1))
#plt.axes(aspect=1)
p2 = ax.pie(rs_u, labels=ALL_DL,
autopct='%.0f%%',
pctdistance=1.3,
labeldistance=10,
colors=COLORS,
textprops={'fontsize':16},
shadow=True)
ax.set_title(UCSD, fontsize=20)
#plt.tight_layout()
plt.suptitle('Distribution of Test Outcomes', fontsize=24, y=0.9)
plt.figlegend(p1[0], ALL_DL, 'lower center', fontsize=18, ncol=2)
# p2 = plt.pie(rs, labels=ALL_L,
# autopct='%.1f%%',
# shadow=True)
# plt.xticks(ind + width/2.0, [r[0] for r in xy])
# plt.yticks(np.arange(0.0, 1.1, 0.1))
# plt.legend((p1[0],), ('Seminal',))
# plt.legend((p1[0], p2[0]), ('Men', 'Women'))
# plt.show()
plt.savefig('distrib.png')
plt.close()
def plot_distrib_extended(ucsd):
# data = data[1:]
rs_u = [len([r for r in ucsd[1:] if r[4] in o])
for o in ALL_D_E] + [0,0,0]
missed = rs_u[2]
rs_u[2] = round(missed * 0.44) # Non-parametric fun
rs_u[3] = round(missed * 0.12) # Dead code
rs_u[4] = round(missed * 0.28) # Safe call
rs_u[5] = round(missed * 0.16) # True miss
print ('rs_u', rs_u)
plt.axes(aspect=1)
p1 = plt.pie(rs_u,
labels=ALL_DL_E,
explode=[0, 0.2, 0.2, 0.2, 0.2, 0.2],
autopct='%.0f%%',
pctdistance=1.3,
labeldistance=10,
colors=COLORS_E,
textprops={'fontsize':16},
shadow=True)
plt.title('Distribution of Programs Lacking a Witness', fontsize=24)
plt.legend(p1[0], ALL_DL_E,
loc='center left',
bbox_to_anchor=(0.5,0.75),
ncol=1)
# plt.show()
plt.savefig('distrib_ext.png')
plt.close()
def plot_blame():
# xy_s = cumulative_coverage(seminal)
# xy_u = cumulative_coverage(ucsd)
# print ('xy_s', xy_s)
# print ('xy_u', xy_u)
# FIXME: load these numbers from csv...
tools = ['OCaml', 'NanoMaLy', 'Mycroft', 'SHErrLoc']
accs = [47.4, 68.3, 73.2, 75.8 ]
N = len(tools)
ind = np.arange(N) # the x locations for the groups
width = 0.5 # the width of the bars: can also be len(x) sequence
fig = plt.figure()
p1 = plt.bar(ind, accs, width,
align='center',
color=COLORS[0])
#plt.xlabel('Witness found in <= x seconds', fontsize=20)
plt.ylabel('Accuracy (%)', fontsize=20)
plt.title('Accuracy of Type Error Localization', fontsize=24)
plt.xticks(ind, tools, fontsize=20)
plt.yticks(np.arange(0, 101, 10), fontsize='large')
autolabel(plt, p1)
#autolabel(plt, p2)
# plt.show()
fig.savefig('blame.png')
plt.close()
if __name__ == '__main__':
seminal = read_csv('../../seminal.csv')
ucsd = read_csv('../../ucsd-sp14.csv')
plot_distrib(seminal, ucsd)
plot_distrib_extended(ucsd)
plot_trace_size(seminal, ucsd)
# plot_trace_size(seminal, 'Seminal')
# plot_trace_size(ucsd, UCSD)
plot_coverage(seminal, ucsd)
plot_user_study()
plot_blame()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import warnings
from typing import Dict, List, Optional, Sequence, Set, Tuple
from flask import current_app, g
from flask_appbuilder.security.sqla import models as sqla_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import Permission, PermissionView, Role, User, ViewMenu
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from airflow.exceptions import AirflowException
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.www.utils import CustomSQLAInterface
from airflow.www.views import (
CustomPermissionModelView,
CustomPermissionViewModelView,
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomRoleModelView,
CustomUserDBModelView,
CustomUserInfoEditView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
CustomUserStatsChartView,
CustomViewMenuModelView,
)
EXISTING_ROLES = {
'Admin',
'Viewer',
'User',
'Op',
'Public',
}
class AirflowSecurityManager(SecurityManager, LoggingMixin): # pylint: disable=too-many-public-methods
"""Custom security manager, which introduces a permission model adapted to Airflow"""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
]
# global view-menu for dag-level access
DAG_VMS = {permissions.RESOURCE_DAG}
READ_DAG_PERMS = {permissions.ACTION_CAN_READ}
DAG_PERMS = permissions.DAG_PERMS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS = [
{'role': 'Public', 'perms': []},
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS},
{
'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
'role': 'Admin',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
permissionmodelview = CustomPermissionModelView
permissionviewmodelview = CustomPermissionViewModelView
rolemodelview = CustomRoleModelView
viewmenumodelview = CustomViewMenuModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
def __init__(self, appbuilder):
super().__init__(appbuilder)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith('view'):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, 'datamodel', None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def init_role(self, role_name, perms):
"""
Initialize the role with the permissions and related view-menus.
:param role_name:
:param perms:
:return:
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
DeprecationWarning,
stacklevel=2,
)
self.bulk_sync_roles([{'role': role_name, 'perms': perms}])
def bulk_sync_roles(self, roles):
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissionviews()
for config in roles:
role_name = config['role']
perms = config['perms']
role = existing_roles.get(role_name) or self.add_role(role_name)
for perm_name, view_name in perms:
perm_view = non_dag_perms.get((perm_name, view_name)) or self.create_permission(
perm_name, view_name
)
if perm_view not in role.permissions:
self.add_permission_to_role(role, perm_view)
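    # Illustrative sketch (hypothetical role and permission values):
    #   security_manager.bulk_sync_roles([
    #       {'role': 'DataTeam',
    #        'perms': [(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN)]},
    #   ])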
def add_permissions(self, role, perms):
"""Adds resource permissions to a given role."""
for action_name, resource_name in perms:
permission = self.create_permission(action_name, resource_name)
self.add_permission_to_role(role, permission)
def get_resource(self, name: str) -> ViewMenu:
"""
Returns a resource record by name, if it exists.
:param name: Name of resource
:type name: str
:return: Resource record
:rtype: ViewMenu
"""
return self.find_view_menu(name)
def get_all_resources(self) -> List[ViewMenu]:
"""
Gets all existing resource records.
:return: List of all resources
:rtype: List[ViewMenu]
"""
return self.get_all_view_menu()
def get_action(self, name: str) -> Permission:
"""
Gets an existing action record.
:param name: name
:type name: str
:return: Action record, if it exists
:rtype: Permission
"""
return self.find_permission(name)
def get_permission(self, action_name: str, resource_name: str) -> PermissionView:
"""
Gets a permission made with the given action->resource pair, if the permission already exists.
:param action_name: Name of action
:type action_name: str
:param resource_name: Name of resource
:type resource_name: str
:return: The existing permission
:rtype: PermissionView
"""
return self.find_permission_view_menu(action_name, resource_name)
def create_permission(self, action_name: str, resource_name: str) -> PermissionView:
"""
Creates a permission linking an action and resource.
:param action_name: Name of existing action
:type action_name: str
:param resource_name: Name of existing resource
:type resource_name: str
:return: Resource created
:rtype: PermissionView
"""
return self.add_permission_view_menu(action_name, resource_name)
def delete_permission(self, action_name: str, resource_name: str) -> None:
"""
Deletes the permission linking an action->resource pair. Doesn't delete the
underlying action or resource.
:param action_name: Name of existing action
:type action_name: str
:param resource_name: Name of existing resource
:type resource_name: str
:return: None
:rtype: None
"""
self.del_permission_view_menu(action_name, resource_name)
def delete_role(self, role_name):
"""
Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role).filter(sqla_models.Role.name == role_name).first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException(f"Role named '{role_name}' does not exist")
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = current_app.appbuilder.get_app.config["AUTH_ROLE_PUBLIC"]
return [current_app.appbuilder.sm.find_role(public_role)] if public_role else []
return user.roles
def get_current_user_permissions(self):
"""Returns permissions for logged in user as a set of tuples with the action and resource name"""
perms = set()
for role in self.get_user_roles():
perms.update({(perm.permission.name, perm.view_menu.name) for perm in role.permissions})
return perms
def get_readable_dags(self, user):
"""Gets the DAGs readable by authenticated user."""
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
def get_editable_dags(self, user):
"""Gets the DAGs editable by authenticated user."""
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
def get_readable_dag_ids(self, user) -> Set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return {dag.dag_id for dag in self.get_readable_dags(user)}
def get_editable_dag_ids(self, user) -> Set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return {dag.dag_id for dag in self.get_editable_dags(user)}
def get_accessible_dag_ids(self, user) -> Set[str]:
"""Gets the DAG IDs editable or readable by authenticated user."""
accessible_dags = self.get_accessible_dags(
[permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ], user
)
return {dag.dag_id for dag in accessible_dags}
@provide_session
def get_accessible_dags(self, user_actions, user, session=None):
"""Generic function to get readable or writable DAGs for user."""
if user.is_anonymous:
roles = self.get_user_roles(user)
else:
user_query = (
session.query(User)
.options(
joinedload(User.roles)
.subqueryload(Role.permissions)
.options(joinedload(PermissionView.permission), joinedload(PermissionView.view_menu))
)
.filter(User.id == user.id)
.first()
)
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.permission.name
if action not in user_actions:
continue
resource = permission.view_menu.name
if resource == permissions.RESOURCE_DAG:
return session.query(DagModel)
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
else:
resources.add(resource)
return session.query(DagModel).filter(DagModel.dag_id.in_(resources))
def can_access_some_dags(self, action: str, dag_id: Optional[str] = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
return self.has_access(action, permissions.resource_name_for_dag(dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dags(user))
return any(self.get_editable_dags(user))
def can_read_dag(self, dag_id, user=None) -> bool:
"""Determines whether a user has DAG read access."""
if not user:
user = g.user
dag_resource_name = permissions.resource_name_for_dag(dag_id)
return self._has_access(
user, permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG
) or self._has_access(user, permissions.ACTION_CAN_READ, dag_resource_name)
def can_edit_dag(self, dag_id, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
if not user:
user = g.user
dag_resource_name = permissions.resource_name_for_dag(dag_id)
return self._has_access(
user, permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG
) or self._has_access(user, permissions.ACTION_CAN_EDIT, dag_resource_name)
def prefixed_dag_id(self, dag_id):
"""Returns the permission name for a DAG id."""
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
DeprecationWarning,
stacklevel=2,
)
return permissions.resource_name_for_dag(dag_id)
def is_dag_resource(self, resource_name):
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def _has_view_access(self, user, action, resource) -> bool:
"""
        Overriding the method to ensure that it always returns a bool.
        _has_view_access can return None, which causes
        issues later on; this fixes that.
"""
return bool(super()._has_view_access(user, action, resource))
def has_access(self, action_name, resource_name, user=None) -> bool:
"""
Verify whether a given user could perform a certain action
(e.g can_read, can_write) on the given resource.
:param action_name: action_name on resource (e.g can_read, can_edit).
:type action_name: str
:param resource_name: name of view-menu or resource.
:type resource_name: str
:param user: user name
:type user: str
:return: Whether user could perform certain action on the resource.
        :rtype: bool
"""
if not user:
user = g.user
if user.is_anonymous:
user.roles = self.get_user_roles(user)
has_access = self._has_access(user, action_name, resource_name)
# FAB built-in view access method. Won't work for AllDag access.
if self.is_dag_resource(resource_name):
if action_name == permissions.ACTION_CAN_READ:
has_access |= self.can_read_dag(resource_name, user)
elif action_name == permissions.ACTION_CAN_EDIT:
has_access |= self.can_edit_dag(resource_name, user)
return has_access
def _has_access(self, user: User, action_name: str, resource_name: str) -> bool:
"""
Wraps the FAB built-in view access method. Won't work for AllDag access.
:param user: user object
:type user: User
:param action_name: action_name on resource (e.g can_read, can_edit).
:type action_name: str
:param resource_name: name of resource.
:type resource_name: str
:return: a bool whether user could perform certain action on the resource.
        :rtype: bool
"""
return bool(self._has_view_access(user, action_name, resource_name))
def _get_and_cache_perms(self):
"""Cache permissions"""
self.perms = self.get_current_user_permissions()
def _has_role(self, role_name_or_list):
"""Whether the user has this role name"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in self.get_user_roles())
def _has_perm(self, action_name, resource_name):
"""Whether the user has this perm"""
if hasattr(self, 'perms') and self.perms is not None:
if (action_name, resource_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (action_name, resource_name) in self.perms
def has_all_dags_access(self):
"""
Has all the dag access in any of the 3 cases:
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
return (
self._has_role(['Admin', 'Viewer', 'Op', 'User'])
or self._has_perm(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)
or self._has_perm(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)
)
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
perms = sesh.query(sqla_models.PermissionView).filter(
or_(
sqla_models.PermissionView.permission == None, # noqa pylint: disable=singleton-comparison
sqla_models.PermissionView.view_menu == None, # noqa pylint: disable=singleton-comparison
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
def _merge_perm(self, action_name, resource_name):
"""
Add the new (permission, resource) to assoc_permissionview_role if it doesn't exist.
        It will also add the related entries to the ab_permission
        and ab_view_menu meta tables.
:param action_name: Name of the action
:type action_name: str
:param resource_name: Name of the resource
:type resource_name: str
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=action, view_menu=resource)
.first()
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self):
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.get_session.commit()
def add_permission_to_role(self, role: Role, permission: PermissionView) -> None:
"""
Add an existing permission pair to a role.
:param role: The role about to get a new permission.
:type role: Role
:param permission: The permission pair to add to a role.
:type permission: PermissionView
:return: None
:rtype: None
"""
self.add_permission_role(role, permission)
def remove_permission_from_role(self, role: Role, permission: PermissionView) -> None:
"""
Remove a permission pair from a role.
:param role: User role containing permissions.
:type role: Role
:param permission: Object representing resource-> action pair
:type permission: PermissionView
"""
self.del_permission_role(role, permission)
def delete_action(self, name: str) -> bool:
"""
Deletes a permission action.
:param name: Name of action to delete (e.g. can_read).
:type name: str
:return: Whether or not delete was successful.
:rtype: bool
"""
return self.del_permission(name)
def get_all_permissions(self) -> Set[Tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names"""
return set(
self.get_session.query(self.permissionview_model)
.join(self.permission_model)
.join(self.viewmenu_model)
.with_entities(self.permission_model.name, self.viewmenu_model.name)
.all()
)
def _get_all_non_dag_permissionviews(self) -> Dict[Tuple[str, str], PermissionView]:
"""
Returns a dict with a key of (action_name, resource_name) and value of permission
with all permissions except those that are for specific DAGs.
"""
return {
(action_name, resource_name): viewmodel
for action_name, resource_name, viewmodel in (
self.get_session.query(self.permissionview_model)
.join(self.permission_model)
.join(self.viewmenu_model)
.filter(~self.viewmenu_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
.with_entities(
self.permission_model.name, self.viewmenu_model.name, self.permissionview_model
)
.all()
)
}
def _get_all_roles_with_permissions(self) -> Dict[str, Role]:
"""Returns a dict with a key of role name and value of role with eagrly loaded permissions"""
return {
r.name: r
for r in (
self.get_session.query(self.role_model).options(joinedload(self.role_model.permissions)).all()
)
}
def create_dag_specific_permissions(self) -> None:
"""
Creates 'can_read' and 'can_edit' permissions for all DAGs,
along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
dag_resource_name = permissions.resource_name_for_dag(dag.dag_id)
for action_name in self.DAG_PERMS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self._sync_dag_view_permissions(dag_resource_name, dag.access_control)
def update_admin_perm_view(self):
"""
        Admin should have all the permissions, except the per-DAG permissions,
        because Admin already has the global DAG permission.
Add the missing ones to the table for admin.
:return: None.
"""
dag_resources = (
self.get_session.query(sqla_models.ViewMenu)
.filter(sqla_models.ViewMenu.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
.all()
)
resource_ids = [resource.id for resource in dag_resources]
perms = (
self.get_session.query(sqla_models.PermissionView)
.filter(~sqla_models.PermissionView.view_menu_id.in_(resource_ids))
.all()
)
perms = [p for p in perms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(perms))
self.get_session.commit()
def sync_roles(self):
"""
        1. Init the default roles (Admin, Viewer, User, Op, Public)
        with related permissions.
        2. Init the custom roles (dag-user) with related permissions.
:return: None.
"""
# Create global all-dag permissions
self.create_perm_vm_for_all_dag()
# Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
# init existing roles; the remaining roles can be created through the UI.
self.update_admin_perm_view()
self.clean_perms()
def sync_resource_permissions(self, perms=None):
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for the given dag id. The dag id is assumed to exist in our dag bag,
as only the '/' or refresh button, or the DagBag itself, will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g.,
{'can_read'})
:type access_control: dict
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for action_name in self.DAG_PERMS:
self.create_permission(action_name, dag_resource_name)
if access_control:
self._sync_dag_view_permissions(dag_resource_name, access_control)
def get_resource_permissions(self, resource: ViewMenu) -> PermissionView:
"""
Retrieve permission pairs associated with a specific resource object.
:param resource: Object representing a single resource.
:type resource: ViewMenu
:return: Permission objects representing resource->action pair
:rtype: PermissionView
"""
return self.find_permissions_view_menu(resource)
def _sync_dag_view_permissions(self, dag_id, access_control):
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
:type access_control: dict
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name):
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission,
dag_resource_name,
role.name,
)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(dag_id, rolename)
)
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_resource_name, invalid_perms, self.DAG_PERMS)
)
for action_name in perms:
dag_perm = _get_or_create_dag_permission(action_name)
self.add_permission_to_role(role, dag_perm)
def create_resource(self, name: str) -> ViewMenu:
"""
Create a resource with the given name.
:param name: The name of the resource to be created.
:type name: str
:return: The FAB resource created.
:rtype: ViewMenu
"""
return self.add_view_menu(name)
def create_perm_vm_for_all_dag(self):
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_VMS:
for action_name in self.DAG_PERMS:
self._merge_perm(action_name, resource_name)
def check_authorization(
self, perms: Optional[Sequence[Tuple[str, str]]] = None, dag_id: Optional[str] = None
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
def reset_all_permissions(self) -> None:
"""
Deletes all permission records and removes from roles,
then re-syncs them.
:return: None
:rtype: None
"""
session = self.get_session
for role in self.get_all_roles():
role.permissions = []
session.commit()
session.query(PermissionView).delete()
session.query(ViewMenu).delete()
session.query(Permission).delete()
session.commit()
self.sync_roles()
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app"""
def __init__(self, session=None): # pylint: disable=super-init-not-called
self.session = session
@property
def get_session(self):
return self.session
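# Illustrative usage sketch (not part of the original module): shows the shape of
# the ``access_control`` mapping documented in ``sync_perm_for_dag`` above. The
# session, DAG id and role name below are hypothetical placeholders.
def _example_sync_dag_access_control(session):
    """Hedged sketch; assumes an already-configured SQLAlchemy session."""
    security_manager = ApplessAirflowSecurityManager(session=session)
    # Each key is a role name, each value a set of action names (e.g. 'can_read').
    access_control = {"data-team": {"can_read"}}
    security_manager.sync_perm_for_dag("example_dag", access_control=access_control)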
|
|
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import routes
import six
import webob
import webob.exc as webexc
import webtest
import neutron
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron import manager
from neutron.plugins.common import constants
from neutron import quota
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import extension_stubs as ext_stubs
import neutron.tests.unit.extensions
from neutron.tests.unit.extensions import extendedattribute as extattr
from neutron.tests.unit import testlib_api
from neutron import wsgi
LOG = logging.getLogger(__name__)
_uuid = test_base._uuid
_get_path = test_base._get_path
extensions_path = ':'.join(neutron.tests.unit.extensions.__path__)
class ExtensionsTestApp(wsgi.Router):
def __init__(self, options={}):
mapper = routes.Mapper()
controller = ext_stubs.StubBaseAppController()
mapper.resource("dummy_resource", "/dummy_resources",
controller=controller)
super(ExtensionsTestApp, self).__init__(mapper)
class FakePluginWithExtension(object):
"""A fake plugin used only for extension testing in this file."""
supported_extension_aliases = ["FOXNSOX"]
def method_to_support_foxnsox_extension(self, context):
self._log("method_to_support_foxnsox_extension", context)
class ExtensionPathTest(base.BaseTestCase):
def setUp(self):
self.base_path = extensions.get_extensions_path()
super(ExtensionPathTest, self).setUp()
def test_get_extensions_path_with_plugins(self):
path = extensions.get_extensions_path(
{constants.CORE: FakePluginWithExtension()})
self.assertEqual(path,
'%s:neutron/tests/unit/extensions' % self.base_path)
def test_get_extensions_path_no_extensions(self):
# Reset to default value, as it's overridden by the base class
cfg.CONF.set_override('api_extensions_path', '')
path = extensions.get_extensions_path()
self.assertEqual(path, self.base_path)
def test_get_extensions_path_single_extension(self):
cfg.CONF.set_override('api_extensions_path', 'path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
def test_get_extensions_path_multiple_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path2')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1:path2' % self.base_path)
def test_get_extensions_path_duplicate_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
class PluginInterfaceTest(base.BaseTestCase):
def test_issubclass_hook(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
self.assertTrue(issubclass(A, B))
def test_issubclass_hook_class_without_abstract_methods(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
def f(self):
pass
self.assertFalse(issubclass(A, B))
def test_issubclass_hook_not_all_methods_implemented(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
@abc.abstractmethod
def g(self):
pass
self.assertFalse(issubclass(A, B))
class ResourceExtensionTest(base.BaseTestCase):
class ResourceExtensionController(wsgi.Controller):
def index(self, request):
return "resource index"
def show(self, request, id):
return {'data': {'id': id}}
def notimplemented_function(self, request, id):
return webob.exc.HTTPNotImplemented()
def custom_member_action(self, request, id):
return {'member_action': 'value'}
def custom_collection_action(self, request, **kwargs):
return {'collection': 'value'}
class DummySvcPlugin(wsgi.Controller):
def get_plugin_type(self):
return constants.DUMMY
def index(self, request, **kwargs):
return "resource index"
def custom_member_action(self, request, **kwargs):
return {'member_action': 'value'}
def collection_action(self, request, **kwargs):
return {'collection': 'value'}
def show(self, request, id):
return {'data': {'id': id}}
def test_exceptions_notimplemented(self):
controller = self.ResourceExtensionController()
member = {'notimplemented_function': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
# Ideally we would check for a 501 code here but webtest doesn't take
# anything that is below 200 or above 400 so we can't actually check
# it. It throws webtest.AppError instead.
try:
test_app.get("/tweedles/some_id/notimplemented_function")
# Shouldn't be reached
self.assertTrue(False)
except webtest.AppError as e:
self.assertIn('501', str(e))
def test_resource_can_be_added_as_extension(self):
res_ext = extensions.ResourceExtension(
'tweedles', self.ResourceExtensionController())
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/tweedles")
self.assertEqual(200, index_response.status_int)
self.assertEqual(b"resource index", index_response.body)
show_response = test_app.get("/tweedles/25266")
self.assertEqual({'data': {'id': "25266"}}, show_response.json)
def test_resource_gets_prefix_of_plugin(self):
class DummySvcPlugin(wsgi.Controller):
def index(self, request):
return ""
def get_plugin_type(self):
return constants.DUMMY
res_ext = extensions.ResourceExtension(
'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc")
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tweedles")
self.assertEqual(200, index_response.status_int)
def test_resource_extension_with_custom_member_action(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self):
controller = self.DummySvcPlugin()
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/dummy_svc/tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_plugin_prefix_with_parent_resource(self):
controller = self.DummySvcPlugin()
parent = dict(member_name="tenant",
collection_name="tenants")
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller, parent,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
self.assertEqual(200, index_response.status_int)
response = test_app.get("/dummy_svc/tenants/1/"
"tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tenants/2/"
"tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_resource_extension_for_get_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
LOG.debug(jsonutils.loads(response.body))
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_for_put_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "PUT"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.put("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_post_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "POST"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.post("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_delete_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "DELETE"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.delete("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_ext_for_formatted_req_on_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action.json")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_ext_for_nested_resource_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
parent = dict(collection_name='beetles', member_name='beetle')
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections,
parent=parent)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/beetles/beetle_id"
"/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_with_custom_member_action_and_attr_map(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
params = {
'tweedles': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
}
}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member,
attr_map=params)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_returns_404_for_non_existent_extension(self):
test_app = _setup_extensions_test_app(SimpleExtensionManager(None))
response = test_app.get("/non_extistant_extension", status='*')
self.assertEqual(404, response.status_int)
class ActionExtensionTest(base.BaseTestCase):
def setUp(self):
super(ActionExtensionTest, self).setUp()
self.extension_app = _setup_extensions_test_app()
def test_extended_action_for_adding_extra_data(self):
action_name = 'FOXNSOX:add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post('/dummy_resources/1/action',
req_body,
content_type='application/json')
self.assertEqual(b"Tweedle Beetle Added.", response.body)
def test_extended_action_for_deleting_extra_data(self):
action_name = 'FOXNSOX:delete_tweedle'
action_params = dict(name='Bailey')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json')
self.assertEqual(b"Tweedle Bailey Deleted.", response.body)
def test_returns_404_for_non_existent_action(self):
non_existent_action = 'blah_action'
action_params = dict(name="test")
req_body = jsonutils.dumps({non_existent_action: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
def test_returns_404_for_non_existent_resource(self):
action_name = 'add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/asdf/1/action", req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
class RequestExtensionTest(base.BaseTestCase):
def test_headers_can_be_extended(self):
def extend_headers(req, res):
assert req.headers['X-NEW-REQUEST-HEADER'] == "sox"
res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data"
return res
app = self._setup_app_with_request_handler(extend_headers, 'GET')
response = app.get("/dummy_resources/1",
headers={'X-NEW-REQUEST-HEADER': "sox"})
self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'],
"response_header_data")
def test_extend_get_resource_response(self):
def extend_response_data(req, res):
data = jsonutils.loads(res.body)
data['FOXNSOX:extended_key'] = req.GET.get('extended_key')
res.body = jsonutils.dumps(data).encode('utf-8')
return res
app = self._setup_app_with_request_handler(extend_response_data, 'GET')
response = app.get("/dummy_resources/1?extended_key=extended_data")
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('extended_data',
response_data['FOXNSOX:extended_key'])
self.assertEqual('knox', response_data['fort'])
def test_get_resources(self):
app = _setup_extensions_test_app()
response = app.get("/dummy_resources/1?chewing=newblue")
response_data = jsonutils.loads(response.body)
self.assertEqual('newblue', response_data['FOXNSOX:googoose'])
self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands'])
def test_edit_previously_uneditable_field(self):
def _update_handler(req, res):
data = jsonutils.loads(res.body)
data['uneditable'] = req.params['uneditable']
res.body = jsonutils.dumps(data).encode('utf-8')
return res
base_app = webtest.TestApp(setup_base_app(self))
response = base_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(response.json['uneditable'], "original_value")
ext_app = self._setup_app_with_request_handler(_update_handler,
'PUT')
ext_response = ext_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(ext_response.json['uneditable'], "new_value")
def _setup_app_with_request_handler(self, handler, verb):
req_ext = extensions.RequestExtension(verb,
'/dummy_resources/:(id)',
handler)
manager = SimpleExtensionManager(None, None, req_ext)
return _setup_extensions_test_app(manager)
class ExtensionManagerTest(base.BaseTestCase):
def test_invalid_extensions_are_not_registered(self):
class InvalidExtension(object):
"""Invalid extension.
This extension doesn't implement the required extension methods:
get_name, get_description and get_updated.
"""
def get_alias(self):
return "invalid_extension"
ext_mgr = extensions.ExtensionManager('')
ext_mgr.add_extension(InvalidExtension())
ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension"))
self.assertIn('valid_extension', ext_mgr.extensions)
self.assertNotIn('invalid_extension', ext_mgr.extensions)
def test_assignment_of_attr_map(self):
"""Unit test for bug 1443342
In this bug, an extension that extended multiple resources with the
same dict would cause future extensions to inadvertently modify the
attributes of all of those resources, since they were all referencing the same
dictionary.
"""
class MultiResourceExtension(ext_stubs.StubExtension):
"""Generated Extended Resources.
This extension assigns its extended attributes
to more than one resource.
"""
def get_extended_resources(self, version):
EXTENDED_TIMESTAMP = {
'created_at': {'allow_post': False, 'allow_put': False,
'is_visible': True}}
EXTENDED_RESOURCES = ["ext1", "ext2"]
attrs = {}
for resources in EXTENDED_RESOURCES:
attrs[resources] = EXTENDED_TIMESTAMP
return attrs
class AttrExtension(ext_stubs.StubExtension):
def get_extended_resources(self, version):
attrs = {
self.alias: {
'%s-attr' % self.alias: {'allow_post': False,
'allow_put': False,
'is_visible': True}}}
return attrs
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
ext_mgr.add_extension(MultiResourceExtension('timestamp'))
ext_mgr.extend_resources("2.0", attr_map)
ext_mgr.add_extension(AttrExtension("ext1"))
ext_mgr.add_extension(AttrExtension("ext2"))
ext_mgr.extend_resources("2.0", attr_map)
self.assertIn('created_at', attr_map['ext2'])
self.assertIn('created_at', attr_map['ext1'])
# now we need to make sure the attrextensions didn't leak across
self.assertNotIn('ext1-attr', attr_map['ext2'])
self.assertNotIn('ext2-attr', attr_map['ext1'])
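# Illustrative sketch (not from the original suite) of the aliasing pitfall that
# test_assignment_of_attr_map above guards against: if two resources share the
# same attribute dict, a change made through one resource leaks into the other.
# The resource and attribute names below are made up for the example.
def _example_shared_attr_dict_pitfall():
    # Buggy: both resources reference the *same* dict object.
    shared = {'created_at': {'is_visible': True}}
    attrs = {res: shared for res in ("ext1", "ext2")}
    attrs["ext1"]["ext1-attr"] = {'is_visible': True}
    assert "ext1-attr" in attrs["ext2"]  # the change leaked into ext2
    # Fixed: give each resource its own copy of the template dict.
    template = {'created_at': {'is_visible': True}}
    attrs = {res: dict(template) for res in ("ext1", "ext2")}
    attrs["ext1"]["ext1-attr"] = {'is_visible': True}
    assert "ext1-attr" not in attrs["ext2"]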
class PluginAwareExtensionManagerTest(base.BaseTestCase):
def test_unsupported_extensions_are_not_loaded(self):
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
ext_mgr.add_extension(ext_stubs.StubExtension("e2"))
ext_mgr.add_extension(ext_stubs.StubExtension("e3"))
self.assertIn("e1", ext_mgr.extensions)
self.assertNotIn("e2", ext_mgr.extensions)
self.assertIn("e3", ext_mgr.extensions)
def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self):
class ExtensionUnawarePlugin(object):
"""This plugin does not implement supports_extension method.
Extensions will not be loaded when this plugin is used.
"""
pass
plugin_info = {constants.CORE: ExtensionUnawarePlugin()}
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_not_loaded_for_plugin_without_expected_interface(self):
class PluginWithoutExpectedIface(object):
"""Does not implement get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
plugin_info = {constants.CORE: PluginWithoutExpectedIface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_are_loaded_for_plugin_with_expected_interface(self):
class PluginWithExpectedInterface(object):
"""Implements get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
def get_foo(self, bar=None):
pass
plugin_info = {constants.CORE: PluginWithExpectedInterface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertIn("supported_extension", ext_mgr.extensions)
def test_extensions_expecting_neutron_plugin_interface_are_loaded(self):
class ExtensionForQuamtumPluginInterface(ext_stubs.StubExtension):
"""This Extension does not implement get_plugin_interface method.
This will work with any plugin implementing NeutronPluginBase
"""
pass
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ExtensionForQuamtumPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_extensions_without_need_for__plugin_interface_are_loaded(self):
class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension):
"""This Extension does not need any plugin interface.
This will work with any plugin implementing NeutronPluginBase
"""
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_extension_loaded_for_non_core_plugin(self):
class NonCorePluginExtenstion(ext_stubs.StubExtension):
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.DUMMY: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(NonCorePluginExtenstion("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_unloaded_supported_extensions_raises_exception(self):
stub_plugin = ext_stubs.StubPlugin(
supported_extensions=["unloaded_extension"])
plugin_info = {constants.CORE: stub_plugin}
self.assertRaises(exceptions.ExtensionsNotFound,
extensions.PluginAwareExtensionManager,
'', plugin_info)
class ExtensionControllerTest(testlib_api.WebTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.test_app = _setup_extensions_test_app()
def test_index_gets_all_registerd_extensions(self):
response = self.test_app.get("/extensions." + self.fmt)
res_body = self.deserialize(response)
foxnsox = res_body["extensions"][0]
self.assertEqual(foxnsox["alias"], "FOXNSOX")
def test_extension_can_be_accessed_by_alias(self):
response = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
foxnsox_extension = self.deserialize(response)
foxnsox_extension = foxnsox_extension['extension']
self.assertEqual(foxnsox_extension["alias"], "FOXNSOX")
def test_show_returns_not_found_for_non_existent_extension(self):
response = self.test_app.get("/extensions/non_existent" + self.fmt,
status="*")
self.assertEqual(response.status_int, 404)
def app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return ExtensionsTestApp(conf)
def setup_base_app(test):
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return app
def setup_extensions_middleware(extension_manager=None):
extension_manager = (extension_manager or
extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: FakePluginWithExtension()}))
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)
def _setup_extensions_test_app(extension_manager=None):
return webtest.TestApp(setup_extensions_middleware(extension_manager))
class SimpleExtensionManager(object):
def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
self.request_ext = request_ext
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
return resource_exts
def get_actions(self):
action_exts = []
if self.action_ext:
action_exts.append(self.action_ext)
return action_exts
def get_request_extensions(self):
request_extensions = []
if self.request_ext:
request_extensions.append(self.request_ext)
return request_extensions
class ExtensionExtendedAttributeTestPlugin(object):
supported_extension_aliases = [
'ext-obj-test', "extended-ext-attr"
]
def __init__(self, configfile=None):
super(ExtensionExtendedAttributeTestPlugin, self).__init__()
self.objs = []
self.objh = {}
def create_ext_test_resource(self, context, ext_test_resource):
obj = ext_test_resource['ext_test_resource']
id = _uuid()
obj['id'] = id
self.objs.append(obj)
self.objh.update({id: obj})
return obj
def get_ext_test_resources(self, context, filters=None, fields=None):
return self.objs
def get_ext_test_resource(self, context, id, fields=None):
return self.objh[id]
class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
def setUp(self):
super(ExtensionExtendedAttributeTestCase, self).setUp()
plugin = (
"neutron.tests.unit.api.test_extensions."
"ExtensionExtendedAttributeTestPlugin"
)
# point config file to: neutron/tests/etc/neutron.conf
self.config_parse()
self.setup_coreplugin(plugin)
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: ExtensionExtendedAttributeTestPlugin()}
)
ext_mgr.extend_resources("2.0", {})
extensions.PluginAwareExtensionManager._instance = ext_mgr
app = config.load_paste_app('extensions_test_app')
self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
self.saved_attr_map[res] = attrs.copy()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
extattr.EXTENDED_ATTRIBUTES_2_0)
self.agentscheduler_dbMinxin = manager.NeutronManager.get_plugin()
self.addCleanup(self.restore_attribute_map)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def _do_request(self, method, path, data=None, params=None, action=None):
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _ext_test_resource_create(self, attr=None):
data = {
"ext_test_resource": {
"tenant_id": self._tenant_id,
"name": "test",
extattr.EXTENDED_ATTRIBUTE: attr
}
}
res = self._do_request('POST', _get_path('ext_test_resources'), data)
return res['ext_test_resource']
def test_ext_test_resource_create(self):
ext_test_resource = self._ext_test_resource_create()
attr = _uuid()
ext_test_resource = self._ext_test_resource_create(attr)
self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr)
def test_ext_test_resource_get(self):
attr = _uuid()
obj = self._ext_test_resource_create(attr)
obj_id = obj['id']
res = self._do_request('GET', _get_path(
'ext_test_resources/{0}'.format(obj_id)))
obj2 = res['ext_test_resource']
self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr)
|
|
# Copyright 2016 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Model to generate new songs
"""
import numpy as np # To generate random numbers
import tensorflow as tf
from deepmusic.moduleloader import ModuleLoader
from deepmusic.keyboardcell import KeyboardCell
import deepmusic.songstruct as music
class Model:
"""
Base class which manages the different models and experiments.
"""
class TargetWeightsPolicy:
""" Structure to represent the different policy for choosing the target weights
This is used to scale the contribution of each timestep to the global loss
"""
NONE = 'none' # All weights equals (=1.0) (default behavior)
LINEAR = 'linear' # The first outputs are less penalized than the last ones
STEP = 'step' # We start penalizing only after x steps (enco/deco behavior)
def __init__(self, args):
"""
Args:
args: parameters of the model
"""
self.args = args
def get_weight(self, i):
""" Return the target weight for the given step i using the chosen policy
"""
if not self.args.target_weights or self.args.target_weights == Model.TargetWeightsPolicy.NONE:
return 1.0
elif self.args.target_weights == Model.TargetWeightsPolicy.LINEAR:
return i / (self.args.sample_length - 1) # Gradually increment the loss weight
elif self.args.target_weights == Model.TargetWeightsPolicy.STEP:
raise NotImplementedError('Step target weight policy not implemented yet, please consider another policy')
else:
raise ValueError('Unknown chosen target weight policy: {}'.format(self.args.target_weights))
@staticmethod
def get_policies():
""" Return the list of the different modes
Useful when parsing the command lines arguments
"""
return [
Model.TargetWeightsPolicy.NONE,
Model.TargetWeightsPolicy.LINEAR,
Model.TargetWeightsPolicy.STEP
]
class ScheduledSamplingPolicy:
""" Container for the schedule sampling policy
See http://arxiv.org/abs/1506.03099 for more details
"""
NONE = 'none' # No scheduled sampling (always take the given input)
ALWAYS = 'always' # Always samples from the predicted output
LINEAR = 'linear' # Gradually increase the sampling rate
def __init__(self, args):
self.sampling_policy_fct = None
assert args.scheduled_sampling
assert len(args.scheduled_sampling) > 0
policy = args.scheduled_sampling[0]
if policy == Model.ScheduledSamplingPolicy.NONE:
self.sampling_policy_fct = lambda step: 1.0
elif policy == Model.ScheduledSamplingPolicy.ALWAYS:
self.sampling_policy_fct = lambda step: 0.0
elif policy == Model.ScheduledSamplingPolicy.LINEAR:
if len(args.scheduled_sampling) != 5:
raise ValueError('Not the right arguments for the sampling linear policy ({} instead of 4)'.format(len(args.scheduled_sampling)-1))
start_step = int(args.scheduled_sampling[1])
end_step = int(args.scheduled_sampling[2])
start_value = float(args.scheduled_sampling[3])
end_value = float(args.scheduled_sampling[4])
if (start_step >= end_step or
not (0.0 <= start_value <= 1.0) or
not (0.0 <= end_value <= 1.0)):
raise ValueError('Some schedule sampling parameters incorrect.')
# TODO: Add default values (as optional arguments)
def linear_policy(step):
if step < start_step:
threshold = start_value
elif start_step <= step < end_step:
slope = (start_value-end_value)/(start_step-end_step) # < 0 (because end_step>start_step and start_value>end_value)
threshold = slope*(step-start_step) + start_value
elif end_step <= step:
threshold = end_value
else:
raise RuntimeError('Invalid value for the sampling policy') # Parameters have not been correctly defined!
assert 0.0 <= threshold <= 1.0
return threshold
self.sampling_policy_fct = linear_policy
else:
raise ValueError('Unknown chosen schedule sampling policy: {}'.format(policy))
def get_prev_threshold(self, glob_step, i=0):
""" Return the previous sampling probability for the current step.
If above, the RNN should use the previous step instead of the given input.
Args:
glob_step (int): the global iteration step for the training
i (int): the timestep of the RNN (TODO: implement incrementive slope (progression like -\|), remove the '=0')
"""
return self.sampling_policy_fct(glob_step)
def __init__(self, args):
"""
Args:
args: parameters of the model
"""
print('Model creation...')
self.args = args # Keep track of the parameters of the model
# Placeholders
self.inputs = None
self.targets = None
self.use_prev = None  # Boolean tensor which says at graph evaluation time whether we use the input placeholder or the previous output.
self.current_learning_rate = None  # Allows a dynamic learning rate
# Main operators
self.opt_op = None # Optimizer
self.outputs = None # Outputs of the network
self.final_state = None # When testing, we feed this value as initial state ?
# Other options
self.target_weights_policy = None
self.schedule_policy = None
self.learning_rate_policy = None
self.loop_processing = None
# Construct the graphs
self._build_network()
def _build_network(self):
""" Create the computational graph
"""
input_dim = ModuleLoader.batch_builders.get_module().get_input_dim()
# Placeholders (Use tf.SparseTensor with training=False instead) (TODO: Try restoring dynamic batch_size)
with tf.name_scope('placeholder_inputs'):
self.inputs = [
tf.placeholder(
tf.float32, # -1.0/1.0 ? Probably better for the sigmoid
[self.args.batch_size, input_dim], # TODO: Get input size from batch_builder
name='input')
for _ in range(self.args.sample_length)
]
with tf.name_scope('placeholder_targets'):
self.targets = [
tf.placeholder(
tf.int32, # 0/1 # TODO: Int for softmax, Float for sigmoid
[self.args.batch_size,], # TODO: For softmax, only 1d, for sigmoid, 2d (batch_size, num_class)
name='target')
for _ in range(self.args.sample_length)
]
with tf.name_scope('placeholder_use_prev'):
self.use_prev = [
tf.placeholder(
tf.bool,
[],
name='use_prev')
for _ in range(self.args.sample_length) # The first value will never be used (always takes self.input for the first step)
]
# Define the network
self.loop_processing = ModuleLoader.loop_processings.build_module(self.args)
def loop_rnn(prev, i):
""" Loop function used to connect one output of the rnn to the next input.
The previous input and the returned value have to have the same shape.
This is useful to use the same network for both training and testing.
Args:
prev: the previous predicted keyboard configuration at step i-1
i: the current step id (Warning: start at 1, 0 is ignored)
Return:
tf.Tensor: the input at the step i
"""
next_input = self.loop_processing(prev)
# During training we force the correct input; during testing we use the previous output as the next input
return tf.cond(self.use_prev[i], lambda: next_input, lambda: self.inputs[i])
# TODO: Try attention decoder/use dynamic_rnn instead
self.outputs, self.final_state = tf.nn.seq2seq.rnn_decoder(
decoder_inputs=self.inputs,
initial_state=None, # The initial state is defined inside KeyboardCell
cell=KeyboardCell(self.args),
loop_function=loop_rnn
)
# For training only
if not self.args.test:
# Finally, we define the loss function
# The network will predict a mix of wrong and right notes. For the loss function, we would like to
# penalize notes which are wrong. Eventually, the penalty should be smaller if the network predicts the same
# note but not at the right pitch (ex: C4 instead of C5), with a decay the further away the prediction
# is (D5 and D1 penalized more than D4 and D3 if the target is D2).
# For the piano-roll mode, by using sigmoid_cross_entropy_with_logits, the task is formulated as NB_NOTES binary
# classification problems.
# For the relative-note experiment, it uses a standard softmax where the label is the relative position to the
# previous note.
self.schedule_policy = Model.ScheduledSamplingPolicy(self.args)
self.target_weights_policy = Model.TargetWeightsPolicy(self.args)
self.learning_rate_policy = ModuleLoader.learning_rate_policies.build_module(self.args) # Load the chosen policies
# TODO: If train on different length, check that the loss is proportional to the length or average ???
loss_fct = tf.nn.seq2seq.sequence_loss(
self.outputs,
self.targets,
[tf.constant(self.target_weights_policy.get_weight(i), shape=self.targets[0].get_shape()) for i in range(len(self.targets))], # Weights
#softmax_loss_function=tf.nn.softmax_cross_entropy_with_logits, # Previous: tf.nn.sigmoid_cross_entropy_with_logits TODO: Use option to choose. (new module ?)
average_across_timesteps=True, # Before: I think it's best for variables length sequences (specially with the target weights=0), isn't it (it implies also that short sequences are less penalized than long ones) ? (TODO: For variables length sequences, be careful about the target weights)
average_across_batch=True # Before: Penalize by sample (should allows dynamic batch size) Warning: need to tune the learning rate
)
tf.scalar_summary('training_loss', loss_fct) # Keep track of the cost
self.current_learning_rate = tf.placeholder(tf.float32, [])
# Initialize the optimizer
opt = tf.train.AdamOptimizer(
learning_rate=self.current_learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08
)
# TODO: Also keep track of magnitudes (how much is updated)
self.opt_op = opt.minimize(loss_fct)
def step(self, batch, train_set=True, glob_step=-1, ret_output=False):
""" Forward/training step operation.
Does not perform run on itself but just return the operators to do so. Those have then to be run by the
main program.
If the output operator is returned, it will always be the last one on the list
Args:
batch (Batch): input data in testing mode; inputs and targets in training mode
train_set (Bool): indicates if the batch comes from the train or test set (not used when generating)
glob_step (int): indicates the global step for the scheduled sampling
ret_output (Bool): in training mode, if True, the network outputs are also returned
Return:
Tuple[ops], dict: The list of the operators to run (training_step or outputs) with the associated feed dictionary
"""
# TODO: Could optimize feeding between train/test/generating (compress code)
feed_dict = {}
ops = () # For small length, it seems (from my investigations) that tuples are faster than list for merging
batch.generate(target=False if self.args.test else True)
# Feed placeholders and choose the ops
if not self.args.test: # Training
if train_set: # We update the learning rate every x iterations # TODO: What happens when we don't feed the learning rate ??? Stays at the last value ?
assert glob_step >= 0
feed_dict[self.current_learning_rate] = self.learning_rate_policy.get_learning_rate(glob_step)
for i in range(self.args.sample_length):
feed_dict[self.inputs[i]] = batch.inputs[i]
feed_dict[self.targets[i]] = batch.targets[i]
#if np.random.rand() >= self.schedule_policy.get_prev_threshold(glob_step)*self.target_weights_policy.get_weight(i): # Regular Schedule sample (TODO: Try sampling with the weigths or a mix of weights/sampling)
if np.random.rand() >= self.schedule_policy.get_prev_threshold(glob_step): # Weight the threshold by the target weights (don't schedule sample if weight=0)
feed_dict[self.use_prev[i]] = True
else:
feed_dict[self.use_prev[i]] = False
if train_set:
ops += (self.opt_op,)
if ret_output:
ops += (self.outputs,)
else: # Generating (batch_size == 1)
# TODO: What to put for initialisation state (empty ? random ?) ?
# TODO: Modify use_prev
for i in range(self.args.sample_length):
if i < len(batch.inputs):
feed_dict[self.inputs[i]] = batch.inputs[i]
feed_dict[self.use_prev[i]] = False
else: # Even if not used, we still need to feed the placeholder
feed_dict[self.inputs[i]] = batch.inputs[0] # Could be anything, but it needs to have the right shape
feed_dict[self.use_prev[i]] = True # When we don't have an input, we use the previous output instead
ops += (self.loop_processing.get_op(), self.outputs,) # The loop_processing operator correspond to the recorded softmax sampled
# Return one pass operator
return ops, feed_dict
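# Illustrative sketch (not part of the original model) of the two linear policies
# described above: the scheduled-sampling threshold interpolated between
# start_value and end_value over [start_step, end_step], and the linear target
# weight i / (sample_length - 1). All parameter values here are hypothetical.
def _example_linear_policies():
    start_step, end_step = 0, 1000
    start_value, end_value = 1.0, 0.2
    def threshold(step):
        if step < start_step:
            return start_value
        if step >= end_step:
            return end_value
        slope = (start_value - end_value) / (start_step - end_step)  # < 0
        return slope * (step - start_step) + start_value
    sample_length = 40
    def target_weight(i):
        return i / (sample_length - 1)  # 0.0 for the first output, 1.0 for the last
    return threshold(500), target_weight(sample_length - 1)  # -> (0.6, 1.0)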
|
|
"""
Utilities to manage AWS Elastic Block Store volumes and snapshots.
To delete EBS volumes or snapshots, use ``aegea rm``.
"""
import os, sys, re, subprocess, time, json
from functools import lru_cache
from botocore.exceptions import ClientError
from . import register_parser, logger
from .ls import add_name, filter_collection, filter_and_tabulate, register_filtering_parser
from .util import get_mkfs_command
from .util.printing import page_output, get_cell, tabulate
from .util.aws import ARN, resources, clients, ensure_vpc, ensure_subnet, resolve_instance_id, encode_tags, get_metadata
def complete_volume_id(**kwargs):
return [i["VolumeId"] for i in clients.ec2.describe_volumes()["Volumes"]]
def ebs(args):
ebs_parser.print_help()
ebs_parser = register_parser(ebs, help="Manage Elastic Block Store resources", description=__doc__)
def ls(args):
@lru_cache()
def instance_id_to_name(i):
return add_name(resources.ec2.Instance(i)).name
table = [{f: get_cell(i, f) for f in args.columns} for i in filter_collection(resources.ec2.volumes, args)]
if "attachments" in args.columns:
for row in table:
row["attachments"] = ", ".join(instance_id_to_name(a["InstanceId"]) for a in row["attachments"])
page_output(tabulate(table, args))
parser = register_filtering_parser(ls, parent=ebs_parser, help="List EC2 EBS volumes")
def snapshots(args):
page_output(filter_and_tabulate(resources.ec2.snapshots.filter(OwnerIds=[ARN.get_account_id()]), args))
parser = register_filtering_parser(snapshots, parent=ebs_parser, help="List EC2 EBS snapshots")
def create(args):
if (args.format or args.mount) and not args.attach:
raise SystemExit("Arguments --format and --mount require --attach")
if not args.size:
raise SystemExit("Argument --size-gb is required")
create_args = dict(Size=args.size, Encrypted=True)
if args.tags:
create_args.update(TagSpecifications=[dict(ResourceType="volume", Tags=encode_tags(args.tags))])
for arg in "dry_run snapshot_id availability_zone volume_type iops kms_key_id".split():
if getattr(args, arg) is not None:
create_args["".join(x.capitalize() for x in arg.split("_"))] = getattr(args, arg)
if "AvailabilityZone" not in create_args:
if args.attach:
create_args["AvailabilityZone"] = get_metadata("placement/availability-zone")
else:
create_args["AvailabilityZone"] = ensure_subnet(ensure_vpc()).availability_zone
res = clients.ec2.create_volume(**create_args)
clients.ec2.get_waiter("volume_available").wait(VolumeIds=[res["VolumeId"]])
if args.attach:
try:
attach(parser_attach.parse_args([res["VolumeId"]], namespace=args))
except Exception:
print(json.dumps(res, indent=2, default=lambda x: str(x)))
raise
return res
parser_create = register_parser(create, parent=ebs_parser, help="Create an EBS volume")
parser_create.add_argument("--dry-run", action="store_true")
parser_create.add_argument("--snapshot-id")
parser_create.add_argument("--availability-zone")
parser_create.add_argument("--kms-key-id")
parser_create.add_argument("--tags", nargs="+", metavar="TAG_NAME=VALUE")
parser_create.add_argument("--attach", action="store_true",
help="Attach volume to this instance (only valid when running on EC2)")
def snapshot(args):
return clients.ec2.create_snapshot(DryRun=args.dry_run, VolumeId=args.volume_id)
parser_snapshot = register_parser(snapshot, parent=ebs_parser, help="Create an EBS snapshot")
parser_snapshot.add_argument("volume_id").completer = complete_volume_id
def attach_volume(args):
return clients.ec2.attach_volume(DryRun=args.dry_run,
VolumeId=args.volume_id,
InstanceId=args.instance,
Device=args.device)
def find_volume_id(mountpoint):
with open("/proc/mounts") as fh:
for line in fh:
devnode, mount, _ = line.split(" ", 2)
if mountpoint == mount:
break
else:
raise Exception("Mountpoint {} not found in /proc/mounts".format(mountpoint))
for devnode_link in os.listdir("/dev/disk/by-id"):
if "Elastic_Block_Store" in devnode_link and os.path.realpath("/dev/disk/by-id/" + devnode_link) == devnode:
break
else:
raise Exception("EBS volume ID not found for mountpoint {} (devnode {})".format(mountpoint, devnode))
return re.search(r"Elastic_Block_Store_(vol[\w]+)", devnode_link).group(1).replace("vol", "vol-")
def find_devnode(volume_id):
if os.path.exists("/dev/disk/by-id"):
for devnode in os.listdir("/dev/disk/by-id"):
if "Elastic_Block_Store" in devnode and volume_id.replace("-", "") in devnode:
return "/dev/disk/by-id/" + devnode
if os.path.exists("/dev/disk/by-label/" + get_fs_label(volume_id)):
return "/dev/disk/by-label/" + get_fs_label(volume_id)
attachment = resources.ec2.Volume(volume_id).attachments[0]
if get_metadata("instance-id") == attachment["InstanceId"] and os.path.exists("/dev/" + attachment["Device"]):
return "/dev/" + attachment["Device"]
raise Exception("Could not find devnode for {}".format(volume_id))
def get_fs_label(volume_id):
return "aegv" + volume_id[4:12]
def attach(args):
if args.instance is None:
args.instance = get_metadata("instance-id")
devices = args.device if args.device else ["xvd" + chr(i + 1) for i in reversed(range(ord("a"), ord("z")))]
for i, device in enumerate(devices):
try:
args.device = devices[i]
res = attach_volume(args)
break
except ClientError as e:
if re.search("VolumeInUse.+already attached to an instance", str(e)):
if resources.ec2.Volume(args.volume_id).attachments[0]["InstanceId"] == args.instance:
logger.warn("Volume %s is already attached to instance %s", args.volume_id, args.instance)
break
if i + 1 < len(devices) and re.search("InvalidParameterValue.+Attachment point.+is already in use", str(e)):
logger.warn("BDM node %s is already in use, looking for next available node", devices[i])
continue
raise
res = clients.ec2.get_waiter("volume_in_use").wait(VolumeIds=[args.volume_id])
if args.format or args.mount:
for i in range(30):
try:
find_devnode(args.volume_id)
break
except Exception:
logger.debug("Waiting for device node to appear for %s", args.volume_id)
time.sleep(1)
if args.format:
logger.info("Formatting %s (%s)", args.volume_id, find_devnode(args.volume_id))
label = get_fs_label(args.volume_id)
command = get_mkfs_command(fs_type=args.format, label=label) + find_devnode(args.volume_id)
subprocess.check_call(command, shell=True, stdout=sys.stderr.buffer)
if args.mount:
logger.info("Mounting %s at %s", args.volume_id, args.mount)
subprocess.check_call(["mount", find_devnode(args.volume_id), args.mount], stdout=sys.stderr.buffer)
return res
parser_attach = register_parser(attach, parent=ebs_parser, help="Attach an EBS volume to an EC2 instance")
parser_attach.add_argument("volume_id").completer = complete_volume_id
parser_attach.add_argument("instance", type=resolve_instance_id, nargs="?")
parser_attach.add_argument("--device", choices=["xvd" + chr(i + 1) for i in range(ord("a"), ord("z"))],
help="Device node to attach volume to. Default: auto-select the first available node")
for parser in parser_create, parser_attach:
parser.add_argument("--format", nargs="?", const="xfs",
help="Use this command and arguments to format volume after attaching (only valid on EC2)")
parser.add_argument("--mount", nargs="?", const="/mnt", help="Mount volume on given mountpoint (only valid on EC2)")
def detach(args):
"""
Detach an EBS volume from an EC2 instance.
If *volume_id* does not start with "vol-", it is interpreted as a mountpoint on the local instance,
mapped to its underlying EBS volume, unmounted and detached.
"""
if args.volume_id.startswith("vol-"):
volume_id = args.volume_id
else:
volume_id = find_volume_id(mountpoint=args.volume_id)
args.unmount = True
if args.unmount:
cmd = "umount {devnode} || (kill -9 $(lsof -t +f -- $(readlink -f {devnode}) | sort | uniq); umount {devnode} || umount -l {devnode})" # noqa
subprocess.call(cmd.format(devnode=find_devnode(volume_id)), shell=True)
attachment = resources.ec2.Volume(volume_id).attachments[0]
res = clients.ec2.detach_volume(DryRun=args.dry_run,
VolumeId=volume_id,
InstanceId=attachment["InstanceId"],
Device=attachment["Device"],
Force=args.force)
clients.ec2.get_waiter("volume_available").wait(VolumeIds=[volume_id])
if args.delete:
logger.info("Deleting EBS volume {}".format(volume_id))
clients.ec2.delete_volume(VolumeId=volume_id, DryRun=args.dry_run)
return res
parser_detach = register_parser(detach, parent=ebs_parser)
parser_detach.add_argument("volume_id", help="EBS volume ID or mountpoint").completer = complete_volume_id
parser_detach.add_argument("--unmount", action="store_true", help="Unmount the volume before detaching")
parser_detach.add_argument("--delete", action="store_true", help="Delete the volume after detaching")
parser_detach.add_argument("--force", action="store_true")
def modify(args):
modify_args = dict(VolumeId=args.volume_id, DryRun=args.dry_run)
if args.size:
modify_args.update(Size=args.size)
if args.volume_type:
modify_args.update(VolumeType=args.volume_type)
if args.iops:
modify_args.update(Iops=args.iops)
res = clients.ec2.modify_volume(**modify_args)["VolumeModification"]
# if args.wait:
# waiter = make_waiter(clients.ec2.describe_volumes_modifications, "VolumesModifications[].ModificationState",
# "optimizing", "pathAny")
# waiter.wait(VolumeIds=[args.volume_id])
return res
parser_modify = register_parser(modify, parent=ebs_parser, help="Change the size, type, or IOPS of an EBS volume")
parser_modify.add_argument("volume_id").completer = complete_volume_id
for parser in parser_create, parser_modify:
parser.add_argument("--size-gb", dest="size", type=int, help="Volume size in gigabytes")
parser.add_argument("--volume-type", choices={"standard", "io1", "gp2", "sc1", "st1"},
help="io1, PIOPS SSD; gp2, general purpose SSD; sc1, cold HDD; st1, throughput optimized HDD")
parser.add_argument("--iops", type=int)
for parser in parser_snapshot, parser_attach, parser_detach, parser_modify:
parser.add_argument("--dry-run", action="store_true")
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import re
import mock
from oslo.vmware import exceptions as vexc
from nova import context
from nova import exception
from nova.network import model as network_model
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
class partialObject(object):
def __init__(self, path='fake-path'):
self.path = path
self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
stubs.set_stubs(self.stubs)
vm_util.vm_refs_cache_reset()
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_1)]
prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
respool_resource_usage = fake.DataObject()
respool_resource_usage.maxUsage = 5368709120
respool_resource_usage.overallUsage = 2147483648
def fake_call_method(*args):
if "get_dynamic_properties" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
return respool_resource_usage
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
cpu_info = {}
mem_info = {}
if connection_state == "connected" and not maintenance_mode:
cpu_info['vcpus'] = 32
cpu_info['cores'] = 16
cpu_info['vendor'] = ["Intel", "Intel"]
cpu_info['model'] = ["Intel(R) Xeon(R)",
"Intel(R) Xeon(R)"]
else:
cpu_info['vcpus'] = 16
cpu_info['cores'] = 8
cpu_info['vendor'] = ["Intel"]
cpu_info['model'] = ["Intel(R) Xeon(R)"]
mem_info['total'] = 5120
mem_info['free'] = 3072
expected_stats = {'cpu': cpu_info, 'mem': mem_info}
self.assertEqual(expected_stats, result)
def test_get_stats_from_cluster_hosts_connected_and_active(self):
self._test_get_stats_from_cluster()
def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
self._test_get_stats_from_cluster(connection_state="disconnected")
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
def test_get_host_ref_no_hosts_in_cluster(self):
self.assertRaises(exception.NoValidHost,
vm_util.get_host_ref,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
def test_get_resize_spec(self):
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
fake_instance)
expected = """{'memoryMB': 2048,
'numCPUs': 2,
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_cdrom_attach_config_spec(self):
result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
fake.Datastore(),
"/tmp/foo.iso",
200, 0)
expected = """{
'deviceChange': [
{
'device': {
'connectable': {
'allowGuestControl': False,
'startConnected': True,
'connected': True,
'obj_name': 'ns0: VirtualDeviceConnectInfo'
},
'backing': {
'datastore': {
"summary.maintenanceMode": "normal",
"summary.type": "VMFS",
"summary.accessible":true,
"summary.name": "fake-ds",
"summary.capacity": 1099511627776,
"summary.freeSpace": 536870912000,
"browser": ""
},
'fileName': '/tmp/foo.iso',
'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
},
'controllerKey': 200,
'unitNumber': 0,
'key': -1,
'obj_name': 'ns0: VirtualCdrom'
},
'operation': 'add',
'obj_name': 'ns0: VirtualDeviceConfigSpec'
}
],
'obj_name': 'ns0: VirtualMachineConfigSpec'
}
"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="lsiLogicsas")
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def test_paravirtual_controller_spec(self):
# Test controller spec returned for paraVirtual adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="paraVirtual")
self.assertEqual("ns0:ParaVirtualSCSIController",
config_spec.device.obj_name)
def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
if parent:
disk_backing.parent = parent
disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, controller]
return devices
def test_get_vmdk_path(self):
uuid = '00000000-0000-0000-0000-000000000000'
filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
devices = self._vmdk_path_and_adapter_type_devices(filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=devices):
instance = {'uuid': uuid}
vmdk_path = vm_util.get_vmdk_path(session, None, instance)
self.assertEqual(filename, vmdk_path)
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] test_file.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(vmdk_info[0], filename)
def test_get_vmdk_path_and_adapter_type_with_match(self):
n_filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(n_filename, vmdk_info[0])
def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
n_filename = '[test_datastore] diuu/diuu.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertIsNone(vmdk_info[0])
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic
# and ParaVirtual
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_allocate_controller_key_and_unit_number_ide_default(self):
# Test that default IDE controllers are used when there is a free slot
# on them
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [disk1, disk2, ide0, ide1]
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
None,
devices,
'ide')
self.assertEqual(201, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_ide(self):
# Test that a new controller is created when there is no free slot on
# the default IDE controllers
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [ide0, ide1]
for controller_key in [200, 201]:
for unit_number in [0, 1]:
disk = fake.VirtualDisk(controller_key, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'ide')
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on existing SCSI controller if there is a free
# slot on it
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'lsiLogic')
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
def _test_get_vnc_config_spec(self, port):
result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
port)
return result
def test_get_vnc_config_spec(self):
result = self._test_get_vnc_config_spec(7)
expected = """{'extraConfig': [
{'value': 'true',
'key': 'RemoteDisplay.vnc.enabled',
'obj_name': 'ns0:OptionValue'},
{'value': 7,
'key': 'RemoteDisplay.vnc.port',
'obj_name': 'ns0:OptionValue'}],
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def _create_fake_vms(self):
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
def test_get_vnc_port(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10000, group='vmware')
actual = vm_util.get_vnc_port(
fake.FakeObjectRetrievalSession(fake_vms))
self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake.FakeObjectRetrievalSession(fake_vms))
def test_get_all_cluster_refs_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
self.assertEqual({}, refs)
def test_get_all_cluster_refs_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
self.assertEqual(1, len(refs))
def test_get_all_cluster_refs_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
self.assertEqual({}, refs)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
def test_get_vm_create_spec(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [])
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_allocations(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations={'cpu_limit': 7,
'cpu_reservation': 6})
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'reservation': 6,
'limit': 7,
'obj_name': 'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_limit(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations={'cpu_limit': 7})
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'limit': 7,
'obj_name': 'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
shares = {'cpu_shares_level': 'high'}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations=shares)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'shares': {'level': 'high',
'shares': 0,
'obj_name':'ns0:SharesInfo'},
'obj_name':'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share_custom(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
shares = {'cpu_shares_level': 'custom',
'cpu_shares_share': 1948}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations=shares)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'managedBy': {'extensionKey': 'org.openstack.compute',
'type': 'instance',
'obj_name': 'ns0:ManagedByInfo'},
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'shares': {'level': 'custom',
'shares': 1948,
'obj_name':'ns0:SharesInfo'},
'obj_name':'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_create_vm(self):
method_list = ['CreateVM_Task', 'get_dynamic_property']
def fake_call_method(module, method, *args, **kwargs):
expected_method = method_list.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'CreateVM_Task'):
return 'fake_create_vm_task'
elif (expected_method == 'get_dynamic_property'):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
else:
self.fail('Should not get here....')
def fake_wait_for_task(self, *args):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
session = fake.FakeSession()
fake_instance = mock.MagicMock()
fake_call_mock = mock.Mock(side_effect=fake_call_method)
fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
with contextlib.nested(
mock.patch.object(session, '_wait_for_task',
fake_wait_mock),
mock.patch.object(session, '_call_method',
fake_call_mock)
) as (wait_for_task, call_method):
vm_ref = vm_util.create_vm(
session,
fake_instance,
'fake_vm_folder',
'fake_config_spec',
'fake_res_pool_ref')
self.assertEqual('fake_vm_ref', vm_ref)
call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
'fake_vm_folder', config='fake_config_spec',
pool='fake_res_pool_ref')
wait_for_task.assert_called_once_with('fake_create_vm_task')
@mock.patch.object(vm_util.LOG, 'warning')
def test_create_vm_invalid_guestid(self, mock_log_warn):
"""Ensure we warn when create_vm() fails after we passed an
unrecognised guestId
"""
found = [False]
def fake_log_warn(msg, values):
if not isinstance(values, dict):
return
if values.get('ostype') == 'invalid_os_type':
found[0] = True
mock_log_warn.side_effect = fake_log_warn
instance_values = {'id': 7, 'name': 'fake-name',
'uuid': uuidutils.generate_uuid(),
'vcpus': 2, 'memory_mb': 2048}
instance = fake_instance.fake_instance_obj(
context.RequestContext('fake', 'fake', is_admin=False),
**instance_values)
session = driver.VMwareAPISession()
config_spec = vm_util.get_vm_create_spec(
session.vim.client.factory,
instance, instance.name, 'fake-datastore', [],
os_type='invalid_os_type')
self.assertRaises(vexc.VMwareDriverException,
vm_util.create_vm, session, instance, 'folder',
config_spec, 'res-pool')
self.assertTrue(found[0])
def test_convert_vif_model(self):
expected = "VirtualE1000"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet"]
for type in types:
self.assertEqual(type,
vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_without_vm_ref(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance)
fake_get_vm_ref.assert_called_once_with(session, fake_instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_exception(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task",
side_effect=exception.NovaException('fake')),
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_on_instance,
session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_power_state_exception(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(
session, "_wait_for_task",
side_effect=vexc.InvalidPowerStateException),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with contextlib.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session.vim.client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session.vim,
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_copy_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.copy_virtual_disk(session, 'fake-dc-ref',
'fake-source', 'fake-dest')
fake_call_method.assert_called_once_with(
session.vim,
"CopyVirtualDisk_Task",
dm,
sourceName='fake-source',
sourceDatacenter='fake-dc-ref',
destName='fake-dest')
fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_get_values(self):
objects = self._create_fake_vm_objects()
query = vm_util.get_values_from_object_properties(
fake.FakeObjectRetrievalSession(objects), objects)
self.assertEqual('poweredOn', query['runtime.powerState'])
self.assertEqual('guestToolsRunning',
query['summary.guest.toolsRunningStatus'])
self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
def test_reconfigure_vm(self):
session = fake.FakeSession()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake_reconfigure_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
_call_method.assert_called_once_with(mock.ANY,
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
def test_get_network_attach_config_spec_opaque(self):
vif_info = {'network_name': 'br-int',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'opaque'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
result = vm_util.get_network_attach_config_spec(
fake.FakeFactory(), vif_info, 1)
card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
expected = """{
'extraConfig': [{'value': 7,
'key': 'nvp.iface-id.1',
'obj_name':'ns0:OptionValue'}],
'deviceChange': [
{'device': {
'macAddress':'00:00:00:ca:fe:01',
'addressType': 'manual',
'connectable': {
'allowGuestControl':True,
'startConnected': True,
'connected': True,
'obj_name':'ns0:VirtualDeviceConnectInfo'},
'backing': {
'opaqueNetworkType': 'opaque',
'opaqueNetworkId': 'fake-network-id',
'obj_name': '%(card)s'},
'key': -47,
'obj_name': 'ns0:VirtualE1000',
'wakeOnLanEnabled': True},
'operation': 'add',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_network_attach_config_spec_dvs(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
result = vm_util.get_network_attach_config_spec(
fake.FakeFactory(), vif_info, 1)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
expected = """{
'extraConfig': [{'value': 7,
'key': 'nvp.iface-id.1',
'obj_name': 'ns0:OptionValue'}],
'deviceChange': [
{'device': {'macAddress': '00:00:00:ca:fe:01',
'addressType': 'manual',
'connectable': {
'allowGuestControl': True,
'startConnected': True,
'connected': True,
'obj_name': 'ns0:VirtualDeviceConnectInfo'},
'backing': {
'port': {
'portgroupKey': 'fake-group',
'switchUuid': 'fake-network-id',
'obj_name': '%(obj_name_port)s'},
'obj_name': '%(obj_name_backing)s'},
'key': -47,
'obj_name': 'ns0:VirtualE1000',
'wakeOnLanEnabled': True},
'operation': 'add',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
'obj_name_backing': backing,
'obj_name_port': port}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_network_detach_config_spec(self):
result = vm_util.get_network_detach_config_spec(
fake.FakeFactory(), 'fake-device', 2)
expected = """{
'extraConfig': [{'value': 'free',
'key': 'nvp.iface-id.2',
'obj_name': 'ns0:OptionValue'}],
'deviceChange': [{'device': 'fake-device',
'operation': 'remove',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
def test_power_off_instance_no_vm_ref(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance)
fake_get_ref.assert_called_once_with(session, fake_instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_with_exception(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task',
side_effect=exception.NovaException('fake'))
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_off_instance,
session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_power_state_exception(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(
session, '_wait_for_task',
side_effect=vexc.InvalidPowerStateException)
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
# N.B. Mocking on the class only mocks test_*(), but we need
# VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
# setUp causes object initialisation to fail. Not mocking in tests results
# in vim calls not using FakeVim.
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
def setUp(self):
super(VMwareVMUtilGetHostRefTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
self.session = driver.VMwareAPISession()
# Create a fake VirtualMachine running on a known host
self.host_ref = fake._db_content['HostSystem'].keys()[0]
self.vm_ref = fake.create_vm(host_ref=self.host_ref)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_ref_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(self.host_ref, ret)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_name_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
host = fake._get_object(self.host_ref)
ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(host.name, ret)
|
|
"""Acts like a Pymongo client to TinyDB"""
# coding: utf-8
from __future__ import absolute_import
import copy
from functools import reduce
import logging
import os
from math import ceil
from operator import itemgetter
from uuid import uuid1
from tinydb import Query, TinyDB, where
from .results import (
InsertOneResult,
InsertManyResult,
UpdateResult,
DeleteResult
)
from .errors import DuplicateKeyError
try:
basestring
except NameError:
basestring = str
logger = logging.getLogger(__name__)
def Q(query, key):
return reduce(lambda partial_query, field: partial_query[field], key.split('.'), query)
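# Note (added): Q(Query(), 'a.b.c') folds the dotted key into
# Query()['a']['b']['c'], so Mongo-style dotted paths can address nested
# fields, e.g. Q(Query(), 'profile.age') >= 18.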
class TinyMongoClient(object):
"""Represents the Tiny `db` client"""
def __init__(self, foldername=u"tinydb", **kwargs):
"""Initialize container folder"""
self._foldername = foldername
try:
os.mkdir(foldername)
except OSError as x:
logger.info('{}'.format(x))
@property
def _storage(self):
"""By default return Tiny.DEFAULT_STORAGE and can be overwritten to
return custom storages and middlewares.
class CustomClient(TinyMongoClient):
@property
def _storage(self):
return CachingMiddleware(OtherMiddleware(JSONMiddleware))
This property is also useful for defining serializers with the
`tinydb-serialization` module.
from tinymongo.serializers import DateTimeSerializer
from tinydb_serialization import SerializationMiddleware
class CustomClient(TinyMongoClient):
@property
def _storage(self):
serialization = SerializationMiddleware()
serialization.register_serializer(
DateTimeSerializer(), 'TinyDate')
# register other custom serializers
return serialization
"""
return TinyDB.DEFAULT_STORAGE
def __getitem__(self, key):
"""Gets a new or existing database based in key"""
return TinyMongoDatabase(key, self._foldername, self._storage)
def close(self):
"""Do nothing"""
pass
def __getattr__(self, name):
"""Gets a new or existing database based in attribute"""
return TinyMongoDatabase(name, self._foldername, self._storage)
class TinyMongoDatabase(object):
"""Representation of a Pymongo database"""
def __init__(self, database, foldername, storage):
"""Initialize a TinyDB file named as the db name in the given folder
"""
self._foldername = foldername
self.tinydb = TinyDB(
os.path.join(foldername, database + u".json"),
storage=storage
)
def __getattr__(self, name):
"""Gets a new or existing collection"""
return TinyMongoCollection(name, self)
def __getitem__(self, name):
"""Gets a new or existing collection"""
return TinyMongoCollection(name, self)
def collection_names(self):
"""Get a list of all the collection names in this database"""
return list(self.tinydb.tables())
class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
Accepts **kwargs only for the optional "writeConcern" field, which has no effect in TinyDB.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If the document contains an '_id' key it is used, otherwise one is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
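# Example (added, illustrative): insert_one({'name': 'ada'}) auto-generates a
# hex '_id', stores the document, and returns an InsertOneResult whose
# inserted_id is that generated value.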
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# fetch all existing _id values at once to reduce I/O (no projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item: {}'.format(q))
return q
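# Example (added, illustrative): parse_query({'age': {'$gte': 30}, 'name': 'ada'})
# builds roughly (Query()['age'] >= 30) & ((Query()['name'] == 'ada') |
# Query()['name'].any(['ada'])), i.e. all top-level conditions AND-ed together.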
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: dictionary representing the mongo query
:param prev_key: the key at the next-higher level
:param last_prev_key: the key two levels up, used to invert $not conditions
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
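# Note (added): collapse doubled/tripled backslashes to a single one and drop
# stray single backslashes so the Mongo-style pattern can be handed to
# TinyDB's .matches(); '|||' is only a temporary placeholder.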
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except Exception:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param filter: dictionary representing the mongo query
:param sort: optional sort specification
:param skip: optional number of documents to skip
:param limit: optional maximum number of documents to return
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param filter: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
class TinyMongoCursor(object):
"""Mongo iterable cursor"""
def __init__(self, cursordat, sort=None, skip=None, limit=None):
"""Initialize the mongo iterable cursor with data"""
self.cursordat = cursordat
self.cursorpos = -1
if len(self.cursordat) == 0:
self.currentrec = None
else:
self.currentrec = self.cursordat[self.cursorpos]
if sort:
self.sort(sort)
self.paginate(skip, limit)
def __getitem__(self, key):
"""Gets record by index or value by key"""
if isinstance(key, int):
return self.cursordat[key]
return self.currentrec[key]
def paginate(self, skip, limit):
"""Paginate list of records"""
if not self.count() or not limit:
return
skip = skip or 0
pages = int(ceil(self.count() / float(limit)))
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62
# {0: 20, 20: 40, 40: 60, 60: 62}
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit]
def _order(self, value, is_reverse=None):
"""Parsing data to a sortable form
By giving each data type an ID(int), and assemble with the value
into a sortable tuple.
"""
def _dict_parser(dict_doc):
""" dict ordered by:
valueType_N -> key_N -> value_N
"""
result = list()
for key in dict_doc:
data = self._order(dict_doc[key])
res = (data[0], key, data[1])
result.append(res)
return tuple(result)
def _list_parser(list_doc):
"""list will iter members to compare
"""
result = list()
for member in list_doc:
result.append(self._order(member))
return result
# TODO: support more data types
if value is None or not isinstance(value, (dict,
list,
basestring,
bool,
float,
int)):
# unsupported / unsortable value type
value = (0, None)
elif isinstance(value, bool):
value = (5, value)
elif isinstance(value, (int, float)):
value = (1, value)
elif isinstance(value, basestring):
value = (2, value)
elif isinstance(value, dict):
value = (3, _dict_parser(value))
elif isinstance(value, list):
if len(value) == 0:
# an empty list sorts before None
value = [(-1, [])]
else:
value = _list_parser(value)
if is_reverse is not None:
# a list is compared with other docs by its smallest
# or largest member first
value = max(value) if is_reverse else min(value)
else:
# if the smallest or largest member is a list
# then compare with its sub-members in list index order
value = (4, tuple(value))
return value
def sort(self, key_or_list, direction=None):
"""
Sorts a cursor object based on the input
:param key_or_list: a list/tuple containing the sort specification,
e.g. [('user_number', -1)], or a basestring
:param direction: sorting direction, 1 or -1, needed if key_or_list
is a basestring
:return:
"""
# checking input format
sort_specifier = list()
if isinstance(key_or_list, list):
if direction is not None:
raise ValueError('direction can not be set separately '
'if sorting by multiple fields.')
for pair in key_or_list:
if not (isinstance(pair, list) or isinstance(pair, tuple)):
raise TypeError('key pair should be a list or tuple.')
if not len(pair) == 2:
raise ValueError('Need to be (key, direction) pair')
if not isinstance(pair[0], basestring):
raise TypeError('first item in each key pair must '
'be a string')
if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
raise TypeError('bad sort specification.')
sort_specifier = key_or_list
elif isinstance(key_or_list, basestring):
if direction is not None:
if not isinstance(direction, int) or not abs(direction) == 1:
raise TypeError('bad sort specification.')
else:
# default ASCENDING
direction = 1
sort_specifier = [(key_or_list, direction)]
else:
raise ValueError('Wrong input, pass a field name and a direction,'
' or pass a list of (key, direction) pairs.')
# sorting
_cursordat = self.cursordat
total = len(_cursordat)
pre_sect_stack = list()
for pair in sort_specifier:
is_reverse = bool(1-pair[1])
value_stack = list()
for index, data in enumerate(_cursordat):
# get field value
not_found = None
for key in pair[0].split('.'):
not_found = True
if isinstance(data, dict) and key in data:
data = copy.deepcopy(data[key])
not_found = False
elif isinstance(data, list):
if not is_reverse and len(data) == 1:
# MongoDB treats [{data}] as {data}
# when finding fields
if isinstance(data[0], dict) and key in data[0]:
data = copy.deepcopy(data[0][key])
not_found = False
elif is_reverse:
# MongoDB will keep finding field in reverse mode
for _d in data:
if isinstance(_d, dict) and key in _d:
data = copy.deepcopy(_d[key])
not_found = False
break
if not_found:
break
# parsing data for sorting
if not_found:
# treat no match as None
data = None
value = self._order(data, is_reverse)
# read previous section
pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
# inverse if in reverse mode
# for keeping order as ASCENDING after sort
pre_sect = (total - pre_sect) if is_reverse else pre_sect
_ind = (total - index) if is_reverse else index
value_stack.append((pre_sect, value, _ind))
# sorting cursor data
value_stack.sort(reverse=is_reverse)
ordereddat = list()
sect_stack = list()
sect_id = -1
last_dat = None
for dat in value_stack:
# restore if in reverse mode
_ind = (total - dat[-1]) if is_reverse else dat[-1]
ordereddat.append(_cursordat[_ind])
# define section
# maintain the sorting result in next level sorting
if not dat[1] == last_dat:
sect_id += 1
sect_stack.append(sect_id)
last_dat = dat[1]
# save result for next level sorting
_cursordat = ordereddat
pre_sect_stack = sect_stack
# done
self.cursordat = _cursordat
return self
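# Example (added, illustrative): cursor.sort([('age', -1), ('name', 1)]) orders
# by age descending and breaks ties by name ascending, mirroring pymongo.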
def hasNext(self):
"""
Returns True if the cursor has a next position, False if not
:return:
"""
cursor_pos = self.cursorpos + 1
try:
self.cursordat[cursor_pos]
return True
except IndexError:
return False
def next(self):
"""
Returns the next record
:return:
"""
self.cursorpos += 1
return self.cursordat[self.cursorpos]
def count(self, with_limit_and_skip=False):
"""
Returns the number of records in the current cursor
:return: number of records
"""
return len(self.cursordat)
class TinyGridFS(object):
"""GridFS for tinyDB"""
def __init__(self, *args, **kwargs):
self.database = None
def GridFS(self, tinydatabase):
"""TODO: Must implement yet"""
self.database = tinydatabase
return self
def generate_id():
"""Generate new UUID"""
# TODO: use six.string_types for Py3 compat
try:
return unicode(uuid1()).replace(u"-", u"")
except NameError:
return str(uuid1()).replace(u"-", u"")
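# Minimal usage sketch (added for illustration, not part of the upstream
# module): exercises the pymongo-like surface defined above against a local
# JSON folder named "tinydb_demo".
if __name__ == '__main__':  # pragma: no cover
    client = TinyMongoClient(u'tinydb_demo')   # creates ./tinydb_demo/ if missing
    users = client.demo_db.users               # database "demo_db", collection "users"
    users.insert_one({'name': 'ada', 'age': 36})
    users.insert_one({'name': 'bob', 'age': 25})
    for doc in users.find({'age': {'$gte': 30}}, sort=[('age', -1)]):
        print(doc)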
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from webob import exc
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack.compute import flavor_access \
as flavor_access_v21
from nova.api.openstack.compute import flavors as flavors_api
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def generate_flavor(flavorid, ispublic):
return {
'id': flavorid,
'flavorid': str(flavorid),
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'test',
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'memory_mb': 512,
'vcpus': 1,
'swap': 512,
'rxtx_factor': 1.0,
'disabled': False,
'extra_specs': {},
'vcpu_weight': None,
'is_public': bool(ispublic),
'description': None
}
INSTANCE_TYPES = {
'0': generate_flavor(0, True),
'1': generate_flavor(1, True),
'2': generate_flavor(2, False),
'3': generate_flavor(3, False)}
ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
{'flavor_id': '2', 'project_id': 'proj3'},
{'flavor_id': '3', 'project_id': 'proj3'}]
def fake_get_flavor_access_by_flavor_id(context, flavorid):
res = []
for access in ACCESS_LIST:
if access['flavor_id'] == flavorid:
res.append(access['project_id'])
return res
def fake_get_flavor_by_flavor_id(context, flavorid):
return INSTANCE_TYPES[flavorid]
def _has_flavor_access(flavorid, projectid):
for access in ACCESS_LIST:
if access['flavor_id'] == flavorid and \
access['project_id'] == projectid:
return True
return False
def fake_get_all_flavors_sorted_list(context, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
if filters is None or filters['is_public'] is None:
return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
res = {}
for k, v in INSTANCE_TYPES.items():
if filters['is_public'] and _has_flavor_access(k, context.project_id):
res.update({k: v})
continue
if v['is_public'] == filters['is_public']:
res.update({k: v})
res = sorted(res.values(), key=lambda item: item[sort_key])
return res
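# Note (added): when filters['is_public'] is truthy, this fake also returns
# private flavors that the requesting project was granted access to via
# ACCESS_LIST, mirroring how the real flavor listing exposes accessible flavors.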
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
api_version_request = api_version.APIVersionRequest("2.1")
def get_db_flavor(self, flavor_id):
return INSTANCE_TYPES[flavor_id]
def is_legacy_v2(self):
return False
class FakeResponse(object):
obj = {'flavor': {'id': '0'},
'flavors': [
{'id': '0'},
{'id': '2'}]
}
def attach(self, **kwargs):
pass
def fake_get_flavor_projects_from_db(context, flavorid):
raise exception.FlavorNotFound(flavor_id=flavorid)
class FlavorAccessTestV21(test.NoDBTestCase):
api_version = "2.1"
FlavorAccessController = flavor_access_v21.FlavorAccessController
FlavorActionController = flavor_access_v21.FlavorActionController
_prefix = "/v2/fake"
validation_ex = exception.ValidationError
def setUp(self):
super(FlavorAccessTestV21, self).setUp()
self.flavor_controller = flavors_api.FlavorsController()
# We need to stub out verify_project_id so that it doesn't
# generate an EndpointNotFound exception and result in a
# server error.
self.stub_out('nova.api.openstack.identity.verify_project_id',
lambda ctx, project_id: True)
self.req = FakeRequest()
self.req.environ = {"nova.context": context.RequestContext('fake_user',
'fake')}
self.stub_out('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
fake_get_flavor_by_flavor_id)
self.stub_out('nova.objects.flavor._flavor_get_all_from_db',
fake_get_all_flavors_sorted_list)
self.stub_out('nova.objects.flavor._get_projects_from_db',
fake_get_flavor_access_by_flavor_id)
self.flavor_access_controller = self.FlavorAccessController()
self.flavor_action_controller = self.FlavorActionController()
def _verify_flavor_list(self, result, expected):
# result already sorted by flavor_id
self.assertEqual(len(result), len(expected))
for d1, d2 in zip(result, expected):
self.assertEqual(d1['id'], d2['id'])
@mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
side_effect=exception.FlavorNotFound(flavor_id='foo'))
def test_list_flavor_access_public(self, mock_api_get):
# query os-flavor-access on public flavor should return 404
self.assertRaises(exc.HTTPNotFound,
self.flavor_access_controller.index,
self.req, '1')
def test_list_flavor_access_private(self):
expected = {'flavor_access': [
{'flavor_id': '2', 'tenant_id': 'proj2'},
{'flavor_id': '2', 'tenant_id': 'proj3'}]}
result = self.flavor_access_controller.index(self.req, '2')
self.assertEqual(result, expected)
def test_list_flavor_with_admin_default_proj1(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
use_admin_context=True)
req.environ['nova.context'].project_id = 'proj1'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_default_proj2(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
use_admin_context=True)
req.environ['nova.context'].project_id = 'proj2'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_true(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=true'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_false(self):
expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
url = self._prefix + '/flavors?is_public=false'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_false_proj2(self):
expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
url = self._prefix + '/flavors?is_public=false'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
req.environ['nova.context'].project_id = 'proj2'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_none(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
{'id': '3'}]}
url = self._prefix + '/flavors?is_public=none'
req = fakes.HTTPRequest.blank(url,
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_default(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_true(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=true'
req = fakes.HTTPRequest.blank(url,
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_false(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=false'
req = fakes.HTTPRequest.blank(url,
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_none(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
url = self._prefix + '/flavors?is_public=none'
req = fakes.HTTPRequest.blank(url,
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_add_tenant_access(self):
def stub_add_flavor_access(context, flavor_id, projectid):
self.assertEqual(3, flavor_id, "flavor_id")
self.assertEqual("proj2", projectid, "projectid")
self.stub_out('nova.objects.Flavor._flavor_add_project',
stub_add_flavor_access)
expected = {'flavor_access':
[{'flavor_id': '3', 'tenant_id': 'proj3'}]}
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
result = self.flavor_action_controller._add_tenant_access(
req, '3', body=body)
self.assertEqual(result, expected)
@mock.patch('nova.objects.Flavor.get_by_flavor_id',
side_effect=exception.FlavorNotFound(flavor_id='1'))
def test_add_tenant_access_with_flavor_not_found(self, mock_get):
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound,
self.flavor_action_controller._add_tenant_access,
req, '2', body=body)
def test_add_tenant_access_with_no_tenant(self):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'addTenantAccess': {'foo': 'proj2'}}
self.assertRaises(self.validation_ex,
self.flavor_action_controller._add_tenant_access,
req, '2', body=body)
body = {'addTenantAccess': {'tenant': ''}}
self.assertRaises(self.validation_ex,
self.flavor_action_controller._add_tenant_access,
req, '2', body=body)
def test_add_tenant_access_with_already_added_access(self):
def stub_add_flavor_access(context, flavorid, projectid):
raise exception.FlavorAccessExists(flavor_id=flavorid,
project_id=projectid)
self.stub_out('nova.objects.Flavor._flavor_add_project',
stub_add_flavor_access)
body = {'addTenantAccess': {'tenant': 'proj2'}}
self.assertRaises(exc.HTTPConflict,
self.flavor_action_controller._add_tenant_access,
self.req, '3', body=body)
def test_remove_tenant_access_with_bad_access(self):
def stub_remove_flavor_access(context, flavorid, projectid):
raise exception.FlavorAccessNotFound(flavor_id=flavorid,
project_id=projectid)
self.stub_out('nova.objects.Flavor._flavor_del_project',
stub_remove_flavor_access)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
self.assertRaises(exc.HTTPNotFound,
self.flavor_action_controller._remove_tenant_access,
self.req, '3', body=body)
def test_add_tenant_access_is_public(self):
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
req.api_version_request = api_version.APIVersionRequest('2.7')
self.assertRaises(exc.HTTPConflict,
self.flavor_action_controller._add_tenant_access,
req, '1', body=body)
@mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
side_effect=exception.FlavorNotFound(flavor_id='foo'))
def test_delete_tenant_access_with_no_tenant(self, mock_api_get):
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'foo': 'proj2'}}
self.assertRaises(self.validation_ex,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
body = {'removeTenantAccess': {'tenant': ''}}
self.assertRaises(self.validation_ex,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
def test_add_tenant_access_with_invalid_tenant(self, mock_verify):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'addTenantAccess': {'tenant': 'proj2'}}
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._add_tenant_access,
req, '2', body=body)
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
class FlavorAccessPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FlavorAccessPolicyEnforcementV21, self).setUp()
self.act_controller = flavor_access_v21.FlavorActionController()
self.access_controller = flavor_access_v21.FlavorAccessController()
self.req = fakes.HTTPRequest.blank('')
def test_add_tenant_access_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access:add_tenant_access"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.act_controller._add_tenant_access, self.req, fakes.FAKE_UUID,
body={'addTenantAccess': {'tenant': fakes.FAKE_UUID}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_remove_tenant_access_policy_failed(self):
rule_name = ("os_compute_api:os-flavor-access:"
"remove_tenant_access")
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.act_controller._remove_tenant_access, self.req,
fakes.FAKE_UUID,
body={'removeTenantAccess': {'tenant': fakes.FAKE_UUID}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-flavor-access"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.access_controller.index, self.req,
fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
from unittest import TestCase
class TestFunctionalConv3D(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight = np.random.uniform(
-1, 1, (self.out_channels, self.in_channels // self.groups
) + filter_shape).astype(self.dtype)
if not self.no_bias:
self.bias = np.random.uniform(-1, 1, (
self.out_channels, )).astype(self.dtype)
self.channel_last = (self.data_format == "NDHWC")
if self.channel_last:
self.input_shape = (self.batch_size, ) + self.spatial_shape + (
self.in_channels, )
else:
self.input_shape = (self.batch_size, self.in_channels
) + self.spatial_shape
self.input = np.random.uniform(-1, 1,
self.input_shape).astype(self.dtype)
def static_graph_case_1(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
y = fluid.layers.conv3d(
x,
self.out_channels,
self.filter_shape,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def static_graph_case_2(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv3d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
if not self.no_bias:
feed_dict["bias"] = self.bias
out, = exe.run(main, feed=feed_dict, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard(self.place):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv3d(
x,
weight,
bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
out = y.numpy()
return out
def _test_identity(self):
self.prepare()
out1 = self.static_graph_case_1()
out2 = self.static_graph_case_2()
out3 = self.dygraph_case()
np.testing.assert_array_almost_equal(out1, out2)
np.testing.assert_array_almost_equal(out2, out3)
def test_identity_cpu(self):
self.place = fluid.CPUPlace()
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
class TestFunctionalConv3DError(TestCase):
batch_size = 4
spatial_shape = (8, 8, 8)
dtype = "float32"
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "not_valid"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
def test_exception(self):
self.prepare()
with self.assertRaises(ValueError):
self.static_graph_case()
def prepare(self):
if isinstance(self.filter_shape, int):
filter_shape = (self.filter_shape, ) * 3
else:
filter_shape = tuple(self.filter_shape)
self.weight_shape = (self.out_channels, self.in_channels // self.groups
) + filter_shape
self.bias_shape = (self.out_channels, )
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
self.channel_last = self.data_format == "NDHWC"
if self.channel_last:
                    x = fluid.data(
"input", (-1, -1, -1, -1, self.in_channels),
dtype=self.dtype)
else:
x = fluid.data(
"input", (-1, self.in_channels, -1, -1, -1),
dtype=self.dtype)
weight = fluid.data(
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv3d(
x,
weight,
None if self.no_bias else bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
class TestFunctionalConv3DCase2(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase3(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 3, 1, 2, 3]
self.stride = 2
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase4(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 1, 2, 2, 3, 3]
self.stride = 1
self.dilation = 2
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase5(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [2, 2], [1, 1], [0, 0]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DCase6(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [1, 1], [2, 2], [2, 2]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DCase7(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 6
self.out_channels = 8
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DCase8(TestFunctionalConv3D):
def setUp(self):
self.in_channels = 6
self.out_channels = 12
self.filter_shape = 3
self.padding = "valid"
self.stride = 1
self.dilation = 1
self.groups = 6
self.no_bias = True
self.act = None
self.use_cudnn = False
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase2(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [1, 1], [1, 2], [3, 4], [5, 6]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase3(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "not_valid"
class TestFunctionalConv3DErrorCase4(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 3
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase7(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "not_valid"
class TestFunctionalConv3DErrorCase8(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 5
self.filter_shape = 3
self.padding = [1, 2, 1, 2, 1]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase9(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = -5
self.out_channels = 5
self.filter_shape = 3
self.padding = [[0, 0], [0, 0], [3, 2], [1, 2], [1, 1]]
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NCDHW"
class TestFunctionalConv3DErrorCase10(TestFunctionalConv3DError):
def setUp(self):
self.in_channels = 3
self.out_channels = 4
self.filter_shape = 3
self.padding = "same"
self.stride = 1
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.data_format = "NDHWC"
class TestFunctionalConv3DErrorCase11(TestCase):
def setUp(self):
self.input = np.array([])
self.filter = np.array([])
self.num_filters = 0
self.filter_size = 0
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCDHW"
def static_graph_case(self):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data("input", self.input.shape, dtype=paddle.float32)
y = fluid.layers.conv3d(
x,
self.num_filters,
self.filter_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
param_attr=I.NumpyArrayInitializer(self.filter),
bias_attr=False if self.bias is None else
I.NumpyArrayInitializer(self.bias),
act=None,
data_format=self.data_format)
exe = fluid.Executor()
exe.run(start)
out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return out
def dygraph_case(self):
with dg.guard():
x = dg.to_variable(self.input, dtype=paddle.float32)
w = dg.to_variable(self.filter, dtype=paddle.float32)
b = None if self.bias is None else dg.to_variable(
self.bias, dtype=paddle.float32)
y = F.conv3d(
x,
w,
b,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format)
def test_dygraph_exception(self):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
class TestFunctionalConv3DErrorCase12(TestFunctionalConv3DErrorCase11):
def setUp(self):
self.input = np.random.randn(1, 3, 3, 3, 3)
self.filter = np.random.randn(3, 3, 1, 1, 1)
self.num_filters = 3
self.filter_size = 1
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 0
self.data_format = "NCDHW"
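# A minimal, hedged sketch (not part of the test suite and never invoked by it): it shows
# the kind of direct dygraph F.conv3d call that the tests above cross-check against the
# static-graph results. The shapes are illustrative; for the "NCDHW" data format the weight
# layout is (out_channels, in_channels // groups, kD, kH, kW).
def _example_direct_conv3d():
    with dg.guard(fluid.CPUPlace()):
        x = dg.to_variable(np.random.uniform(-1, 1, (4, 3, 8, 8, 8)).astype("float32"))
        weight = dg.to_variable(np.random.uniform(-1, 1, (5, 3, 3, 3, 3)).astype("float32"))
        bias = dg.to_variable(np.random.uniform(-1, 1, (5, )).astype("float32"))
        y = F.conv3d(x, weight, bias, padding=1, stride=1, dilation=1, groups=1,
                     data_format="NCDHW")
        # With padding=1, stride=1 and a 3x3x3 kernel the spatial size is preserved,
        # so the result has shape (4, 5, 8, 8, 8).
        return y.numpy()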
if __name__ == "__main__":
unittest.main()
|
|
#!/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to create API keys for your AWS IAM user accounts.
It can be used for individual accounts or for all your accounts at once.
Note: we have a limit of 2 access keys per user, so by default this will delete the old ones
Usage:
For individual accounts:
aws_api_key_manager -p ded-stage-aws -p ded-int-aws -p <some-other-account>
For all accounts found in /etc/openshift_tools/aws_accounts.txt:
aws_api_key_manager --all
To manage keys for another user, use the '-u' option:
aws_api_key_manager -u <some-other-user> -p ded-stage-aws
To remove a stale entry found in your credentials file, use the '-c' option:
aws_api_key_manager -p <outdated-profile> -c
"""
from __future__ import print_function
import argparse
import ConfigParser
import getpass
import os
import pwd
import re
import sys
import time
import yaml
# pylint: disable=import-error
import boto3
import botocore
# pylint: disable=no-name-in-module
from openshift_tools import saml_aws_creds
class ManageKeys(object):
""" Class to create and update IAM user account API keys. """
def __init__(self):
""" constructor """
self.response = None
@staticmethod
def check_arguments():
""" Ensure that an argument was passed in from the command line.
Returns:
Parsed argument(s), if provided
"""
parser = argparse.ArgumentParser(description='Create API keys for IAM accounts.')
parser.add_argument('-a', '--all',
help='Create API keys for every ops aws account.',
action='store_true')
parser.add_argument('-p', '--profile',
help='Create new API keys for the specified profile.',
action='append')
parser.add_argument('-c', '--clean',
help='Specify an unwanted profile entry to remove.',
action='store_true')
parser.add_argument('-u', '--user',
help='Specify a username for the account.')
args = parser.parse_args()
if not args.all and not args.profile:
print('Specify an account ID or profile name.\n'
'To generate the keys for all ops accounts, use "--all"\n'
'Usage:\n'
'example: {0} -p <account-name>\n'
'example: {0} -u <some-other-user> -p <account-name>\n'
'example: {0} --all\n'
'To clean an outdated profile entry from the credentials file, use "-c"\n'
'example: {0} -p <outdated-account-name> -c'.format(parser.prog))
sys.exit(10)
if not args.user:
if getpass.getuser() != 'root' and os.getegid() < 1000:
args.user = getpass.getuser()
return args
@staticmethod
def check_accounts():
""" Retrieves a list of the config-managed ops AWS accounts.
Returns:
A list containing each of the lines found in the aws accounts file
Raises:
A ValueError if the path does not exist
"""
config_path = '/etc/openshift_tools/sso-config.yaml'
if os.path.isfile(config_path):
with open(config_path, 'r') as sso_config:
yaml_config = yaml.load(sso_config)
if yaml_config["aws_account_file"]:
path = yaml_config["aws_account_file"]
accounts_list = []
if os.path.isfile(path):
with open(path) as open_file:
stripped_line = list([line.rstrip() for line in open_file.readlines()])
for line in stripped_line:
if line is not None:
accounts_list.append(line)
return accounts_list
else:
raise ValueError(path + ' does not exist.')
def check_user(self, aws_account, user_name, client):
""" Check if the user exists locally and in aws. creates iam user if not found.
Returns:
True, after checking if the IAM user exists in the specified AWS account
and creating a user account for them if one does not already exist
"""
try:
client.get_user(UserName=user_name)
except botocore.exceptions.ClientError as client_exception:
if client_exception.response['Error']['Code'] == 'NoSuchEntity':
system_users = []
for user in pwd.getpwall():
system_users.append(user[0])
if user_name in system_users and user_name != 'root' and os.getegid() < 1000:
print("User does not have an existing IAM account for %s, \
creating new account for user %s" % (aws_account, user_name))
self.create_user(aws_account, user_name, client)
return True
@staticmethod
def create_user(aws_account, user_name, client):
""" Create an IAM user account and add them to the admin group.
Returns:
True, after successful account creation.
"""
client.create_user(
UserName=user_name
)
client.add_user_to_group(GroupName='admin', UserName=user_name)
print("A new user account was added.\n"
"Use change_iam_password -p %s to set your password" % aws_account.split(':')[0])
return True
@staticmethod
def get_all_profiles():
""" If -a is specified, generate a list of all profiles found in ~/.aws/credentials.
        Returns:
            Each profile from the credentials file, stored in a list.
        Raises:
            A ValueError if the path does not exist.
"""
path = os.path.join(os.path.expanduser('~'), '.aws/credentials')
profile_list = []
if os.path.isfile(path):
with open(path) as open_file:
stripped_line = list([line.rstrip() for line in open_file.readlines()])
for line in stripped_line:
account = re.match(r"^\[([A-Za-z0-9_\-]+)\]", line)
if account is not None:
profile_list.append(account.group(1))
return profile_list
else:
raise ValueError(path + ' does not exist.')
@staticmethod
def get_keys(user_name, client):
""" Get the Access Key IDs of the user, and return them in a list.
Returns:
All access keys found for the IAM user, in a list.
List will be empty if the user has no keys.
"""
existing_keys = client.list_access_keys(
UserName=user_name)
all_keys = existing_keys['AccessKeyMetadata']
keys_list = []
if all_keys:
for ekey in all_keys:
keys_list.append(ekey['AccessKeyId'])
return keys_list
@staticmethod
def check_v4_ignore(aws_account):
""" Checks aws_account against an ignore list.
Requires config file containing ARN's to be ignored.
Returns:
False if ignored ARN is matched.
Breaks caller loop.
"""
v4_arn_ignore_list = '/etc/openshift_tools/v4_arn_ignore.yaml'
if os.path.isfile(v4_arn_ignore_list):
with open(v4_arn_ignore_list, 'r') as ignore_list:
ignore = (yaml.safe_load(ignore_list))
for arn in ignore:
if int(aws_account) == int(arn):
return True
@staticmethod
def get_token(aws_account):
""" Generate temporary SSO access credentials.
Requires the config file containing the IDP hostname.
Returns:
A temporary boto3 client created with a session token provided by the IDP host.
Raises:
A ValueError if the config path can not be found.
"""
sso_config_path = '/etc/openshift_tools/sso-config.yaml'
if os.path.isfile(sso_config_path):
with open(sso_config_path, 'r') as sso_config:
yaml_config = yaml.load(sso_config)
if yaml_config["idp_host"]:
ops_idp_host = yaml_config["idp_host"]
try:
creds = saml_aws_creds.get_temp_credentials(
metadata_id='urn:amazon:webservices:%s' % aws_account,
idp_host=ops_idp_host
)
client = boto3.client(
'iam',
aws_access_key_id=creds['AccessKeyId'],
aws_secret_access_key=creds['SecretAccessKey'],
aws_session_token=creds['SessionToken']
)
return client
except botocore.exceptions.ClientError as client_exception:
print(client_exception)
print('Skipping account %s' % aws_account)
except ValueError as client_exception:
if 'Error retrieving SAML token' in client_exception.message and \
'Metadata not found' in client_exception.message:
print(client_exception)
print('Metadata for %s missing or misconfigured, skipping' % aws_account)
else:
raise
else:
            raise ValueError(sso_config_path + ' does not exist.')
@staticmethod
def clean_entry(args):
""" Cleans an unwanted entry from the credentials file.
Returns:
True, after cleaning the unwanted entry.
Raises:
A ValueError if the path to the credentials file does not exist.
"""
path = os.path.join(os.path.expanduser('~'), '.aws/credentials')
if os.path.isfile(path):
config = ConfigParser.RawConfigParser()
config.read(path)
for aws_account in args.profile:
try:
config.remove_section(aws_account)
with open(path, 'w') as configfile:
config.write(configfile)
print('Successfully removed entry for %s' % aws_account)
except ConfigParser.NoSectionError:
print('Section for account %s could not be found, skipping' % aws_account)
else:
raise ValueError(path + ' does not exist.')
@staticmethod
def create_key(aws_account, user_name, client):
""" Create a new API key for the specified account.
Returns:
A response object from boto3, which contains information about the new IAM key.
Their values can be accessed like:
['AccessKey']['AccessKeyId']
['AccessKey']['SecretAccessKey']
"""
response = client.create_access_key(
UserName=user_name
)
print('Key successfully created for:', aws_account)
return response
@staticmethod
def delete_key(aws_account, user_name, key, client):
""" Delete an API key for the specified account. """
client.delete_access_key(
UserName=user_name,
AccessKeyId=key
)
print('Key successfully deleted for:', aws_account)
return True
@staticmethod
def manage_timestamp(update=False):
""" Update the expiration file, or create it if it does not already exist. """
path = os.path.join(os.path.expanduser('~'), '.aws/credentials_expiration')
exp_date = str(int(time.time())+180*24*60*60)
if os.path.isfile(path) and update is True:
print('File exists, overwriting.')
with open(path, 'w') as open_file:
open_file.write(exp_date)
elif not os.path.isfile(path):
print('File does not exist, creating.')
with open(path, 'w') as open_file:
open_file.write(exp_date)
else:
print('Checked for stamp file and it exists. No write was called, nothing to do here.')
return True
@staticmethod
def write_credentials(aws_account, key_object):
""" Write the profile for the user account to the AWS credentials file.
Raises:
A ValueError if the path to the credentials file does not exist.
"""
path = os.path.join(os.path.expanduser('~'), '.aws/credentials')
if os.path.isfile(path):
config = ConfigParser.RawConfigParser()
config.read(path)
try:
config.get(aws_account, 'aws_access_key_id')
except ConfigParser.NoSectionError:
config.add_section(aws_account)
config.set(aws_account, 'aws_access_key_id',\
key_object['AccessKey']['AccessKeyId'])
config.set(aws_account, 'aws_secret_access_key',\
key_object['AccessKey']['SecretAccessKey'])
with open(path, 'w') as configfile:
config.write(configfile)
else:
config.set(aws_account, 'aws_access_key_id',\
key_object['AccessKey']['AccessKeyId'])
config.set(aws_account, 'aws_secret_access_key',\
key_object['AccessKey']['SecretAccessKey'])
with open(path, 'w') as configfile:
config.write(configfile)
else:
raise ValueError(path + ' does not exist.')
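    # For illustration only (the values are made up): a successful write_credentials()
    # call leaves a ConfigParser-style section like the following in ~/.aws/credentials:
    #
    #     [ded-stage-aws]
    #     aws_access_key_id = AKIAEXAMPLEEXAMPLE
    #     aws_secret_access_key = examplesecretkeyexamplesecretkey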
def run_all(self, args, ops_accounts):
""" Loop through a list of every ops-managed AWS account and create API keys for each. """
for aws_account in ops_accounts:
account_name = aws_account.split(':')[0]
account_number = aws_account.split(':')[1]
if not self.check_v4_ignore(account_number):
client = self.get_token(account_number)
if client:
self.check_user(aws_account, args.user, client)
current_accounts = self.get_all_profiles()
existing_keys = self.get_keys(args.user, client)
if existing_keys:
for key in existing_keys:
self.delete_key(aws_account, args.user, key, client)
if aws_account not in current_accounts:
key_object = self.create_key(aws_account, args.user, client)
self.write_credentials(account_name, key_object)
else:
print("Ignoring v4 resource: %s:%s" % (account_name, account_number))
self.manage_timestamp(True)
def run_one(self, args, ops_accounts):
""" Create API keys for only the specified ops-managed AWS accounts. """
match_list = []
for aws_account in args.profile:
for line in ops_accounts:
new_reg = r'(?P<account_name>\b' + aws_account + r'\b)'\
+ ':' + r'(?P<account_number>\d+)'
match = re.search(new_reg, line)
if match:
match_list.append(match)
account_name = match.group('account_name')
account_number = match.group('account_number')
if self.check_v4_ignore(account_number):
print("Ignoring v4 resource: %s:%s" % (account_name, account_number))
break
else:
client = self.get_token(account_number)
if client:
self.check_user(aws_account, args.user, client)
existing_keys = self.get_keys(args.user, client)
if existing_keys:
for key in existing_keys:
self.delete_key(aws_account, args.user, key, client)
key_object = self.create_key(aws_account, args.user, client)
self.write_credentials(account_name, key_object)
else:
key_object = self.create_key(aws_account, args.user, client)
self.write_credentials(account_name, key_object)
if not match_list:
print('Account %s does not match any current ops accounts.' % aws_account)
self.manage_timestamp()
def main(self):
""" Main function. """
args = self.check_arguments()
ops_accounts = self.check_accounts()
if args.clean and args.profile:
self.clean_entry(args)
elif args.profile and args.user:
self.run_one(args, ops_accounts)
elif args.all and args.user:
self.run_all(args, ops_accounts)
else:
raise ValueError('No suitable arguments provided.')
if __name__ == '__main__':
MANAGE = ManageKeys()
MANAGE.main()
|
|
# encoding: utf-8
"""
Provides a low-level, read-only API to a serialized Open Packaging Convention
(OPC) package.
"""
from __future__ import absolute_import
from .constants import RELATIONSHIP_TARGET_MODE as RTM
from .oxml import oxml_fromstring
from .packuri import PACKAGE_URI, PackURI
from .phys_pkg import PhysPkgReader
from .shared import CaseInsensitiveDict
class PackageReader(object):
"""
Provides access to the contents of a zip-format OPC package via its
:attr:`serialized_parts` and :attr:`pkg_srels` attributes.
"""
def __init__(self, content_types, pkg_srels, sparts):
super(PackageReader, self).__init__()
self._pkg_srels = pkg_srels
self._sparts = sparts
@staticmethod
def from_file(pkg_file):
"""
Return a |PackageReader| instance loaded with contents of *pkg_file*.
"""
phys_reader = PhysPkgReader(pkg_file)
content_types = _ContentTypeMap.from_xml(phys_reader.content_types_xml)
pkg_srels = PackageReader._srels_for(phys_reader, PACKAGE_URI)
sparts = PackageReader._load_serialized_parts(
phys_reader, pkg_srels, content_types
)
phys_reader.close()
return PackageReader(content_types, pkg_srels, sparts)
def iter_sparts(self):
"""
Generate a 4-tuple `(partname, content_type, reltype, blob)` for each
of the serialized parts in the package.
"""
for s in self._sparts:
yield (s.partname, s.content_type, s.reltype, s.blob)
def iter_srels(self):
"""
Generate a 2-tuple `(source_uri, srel)` for each of the relationships
in the package.
"""
for srel in self._pkg_srels:
yield (PACKAGE_URI, srel)
for spart in self._sparts:
for srel in spart.srels:
yield (spart.partname, srel)
@staticmethod
def _load_serialized_parts(phys_reader, pkg_srels, content_types):
"""
Return a list of |_SerializedPart| instances corresponding to the
parts in *phys_reader* accessible by walking the relationship graph
starting with *pkg_srels*.
"""
sparts = []
part_walker = PackageReader._walk_phys_parts(phys_reader, pkg_srels)
for partname, blob, reltype, srels in part_walker:
content_type = content_types[partname]
spart = _SerializedPart(
partname, content_type, reltype, blob, srels
)
sparts.append(spart)
return tuple(sparts)
@staticmethod
def _srels_for(phys_reader, source_uri):
"""
Return |_SerializedRelationships| instance populated with
relationships for source identified by *source_uri*.
"""
rels_xml = phys_reader.rels_xml_for(source_uri)
return _SerializedRelationships.load_from_xml(
source_uri.baseURI, rels_xml)
@staticmethod
def _walk_phys_parts(phys_reader, srels, visited_partnames=None):
"""
Generate a 4-tuple `(partname, blob, reltype, srels)` for each of the
parts in *phys_reader* by walking the relationship graph rooted at
srels.
"""
if visited_partnames is None:
visited_partnames = []
for srel in srels:
if srel.is_external:
continue
partname = srel.target_partname
if partname in visited_partnames:
continue
visited_partnames.append(partname)
reltype = srel.reltype
part_srels = PackageReader._srels_for(phys_reader, partname)
blob = phys_reader.blob_for(partname)
yield (partname, blob, reltype, part_srels)
next_walker = PackageReader._walk_phys_parts(
phys_reader, part_srels, visited_partnames
)
for partname, blob, reltype, srels in next_walker:
yield (partname, blob, reltype, srels)
class _ContentTypeMap(object):
"""
Value type providing dictionary semantics for looking up content type by
part name, e.g. ``content_type = cti['/ppt/presentation.xml']``.
"""
def __init__(self):
super(_ContentTypeMap, self).__init__()
self._overrides = CaseInsensitiveDict()
self._defaults = CaseInsensitiveDict()
def __getitem__(self, partname):
"""
Return content type for part identified by *partname*.
"""
if not isinstance(partname, PackURI):
tmpl = "_ContentTypeMap key must be <type 'PackURI'>, got %s"
raise KeyError(tmpl % type(partname))
if partname in self._overrides:
return self._overrides[partname]
if partname.ext in self._defaults:
return self._defaults[partname.ext]
tmpl = "no content type for partname '%s' in [Content_Types].xml"
raise KeyError(tmpl % partname)
@staticmethod
def from_xml(content_types_xml):
"""
Return a new |_ContentTypeMap| instance populated with the contents
of *content_types_xml*.
"""
types_elm = oxml_fromstring(content_types_xml)
ct_map = _ContentTypeMap()
for o in types_elm.overrides:
ct_map._add_override(o.partname, o.content_type)
for d in types_elm.defaults:
ct_map._add_default(d.extension, d.content_type)
return ct_map
def _add_default(self, extension, content_type):
"""
Add the default mapping of *extension* to *content_type* to this
content type mapping.
"""
self._defaults[extension] = content_type
def _add_override(self, partname, content_type):
"""
Add the default mapping of *partname* to *content_type* to this
content type mapping.
"""
self._overrides[partname] = content_type
class _SerializedPart(object):
"""
Value object for an OPC package part. Provides access to the partname,
content type, blob, and serialized relationships for the part.
"""
def __init__(self, partname, content_type, reltype, blob, srels):
super(_SerializedPart, self).__init__()
self._partname = partname
self._content_type = content_type
self._reltype = reltype
self._blob = blob
self._srels = srels
@property
def partname(self):
return self._partname
@property
def content_type(self):
return self._content_type
@property
def blob(self):
return self._blob
@property
def reltype(self):
"""
The referring relationship type of this part.
"""
return self._reltype
@property
def srels(self):
return self._srels
class _SerializedRelationship(object):
"""
Value object representing a serialized relationship in an OPC package.
Serialized, in this case, means any target part is referred to via its
partname rather than a direct link to an in-memory |Part| object.
"""
def __init__(self, baseURI, rel_elm):
super(_SerializedRelationship, self).__init__()
self._baseURI = baseURI
self._rId = rel_elm.rId
self._reltype = rel_elm.reltype
self._target_mode = rel_elm.target_mode
self._target_ref = rel_elm.target_ref
@property
def is_external(self):
"""
True if target_mode is ``RTM.EXTERNAL``
"""
return self._target_mode == RTM.EXTERNAL
@property
def reltype(self):
"""Relationship type, like ``RT.OFFICE_DOCUMENT``"""
return self._reltype
@property
def rId(self):
"""
Relationship id, like 'rId9', corresponds to the ``Id`` attribute on
the ``CT_Relationship`` element.
"""
return self._rId
@property
def target_mode(self):
"""
String in ``TargetMode`` attribute of ``CT_Relationship`` element,
one of ``RTM.INTERNAL`` or ``RTM.EXTERNAL``.
"""
return self._target_mode
@property
def target_ref(self):
"""
String in ``Target`` attribute of ``CT_Relationship`` element, a
relative part reference for internal target mode or an arbitrary URI,
e.g. an HTTP URL, for external target mode.
"""
return self._target_ref
@property
def target_partname(self):
"""
|PackURI| instance containing partname targeted by this relationship.
Raises ``ValueError`` on reference if target_mode is ``'External'``.
Use :attr:`target_mode` to check before referencing.
"""
if self.is_external:
msg = ('target_partname attribute on Relationship is undefined w'
'here TargetMode == "External"')
raise ValueError(msg)
# lazy-load _target_partname attribute
if not hasattr(self, '_target_partname'):
self._target_partname = PackURI.from_rel_ref(self._baseURI,
self.target_ref)
return self._target_partname
class _SerializedRelationships(object):
"""
Read-only sequence of |_SerializedRelationship| instances corresponding
to the relationships item XML passed to constructor.
"""
def __init__(self):
super(_SerializedRelationships, self).__init__()
self._srels = []
def __iter__(self):
"""Support iteration, e.g. 'for x in srels:'"""
return self._srels.__iter__()
@staticmethod
def load_from_xml(baseURI, rels_item_xml):
"""
Return |_SerializedRelationships| instance loaded with the
relationships contained in *rels_item_xml*. Returns an empty
collection if *rels_item_xml* is |None|.
"""
srels = _SerializedRelationships()
if rels_item_xml is not None:
rels_elm = oxml_fromstring(rels_item_xml)
for rel_elm in rels_elm.Relationship_lst:
srels._srels.append(_SerializedRelationship(baseURI, rel_elm))
return srels
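# A minimal, hedged usage sketch (not part of the reader API above): *pkg_file* is assumed
# to be a path to, or file-like object for, an OPC package (e.g. a .docx or .pptx file)
# that PhysPkgReader can open.
def _example_dump_package(pkg_file):
    """Print the partname, content type and blob size of every serialized part."""
    reader = PackageReader.from_file(pkg_file)
    for partname, content_type, reltype, blob in reader.iter_sparts():
        print('%s (%s): %d bytes' % (partname, content_type, len(blob)))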
|
|
from typing import Any, Dict, Iterator, List, Tuple
from graphql import GraphQLSchema
from ..compiler.blocks import (
Backtrack,
CoerceType,
ConstructResult,
EndOptional,
Filter,
Fold,
GlobalOperationsStart,
MarkLocation,
OutputSource,
QueryRoot,
Recurse,
Traverse,
Unfold,
)
from ..compiler.compiler_entities import BasicBlock
from ..compiler.compiler_frontend import IrAndMetadata, graphql_to_ir
from ..compiler.helpers import BaseLocation, get_only_element_from_collection
from ..compiler.metadata import QueryMetadataTable
from ..query_formatting.common import validate_arguments
from .block_ops import generate_block_outputs, generate_construct_result_outputs
from .debugging import print_tap
from .hinting import get_hints_for_location_via_readthrough_cache
from .typedefs import DataContext, DataToken, InterpreterAdapter, InterpreterHints
def _get_local_operation_post_block_locations(
query_metadata_table: QueryMetadataTable, local_operations_blocks: List[BasicBlock]
) -> List[BaseLocation]:
"""Return a parallel list of the locations into which the block's operation moves us."""
location_at_index: Dict[int, BaseLocation] = {}
location_stack: List[BaseLocation] = []
block_indexes_at_next_mark_location: List[int] = []
for block_index, block in enumerate(local_operations_blocks):
if isinstance(block, GlobalOperationsStart):
raise AssertionError(
f"GlobalOperationsStart found in local operations blocks: {local_operations_blocks}"
)
elif isinstance(block, MarkLocation):
current_location = block.location
location_stack.append(current_location)
location_at_index[block_index] = current_location
# Drain the queued-up block indexes, setting them all to the current location.
for index in block_indexes_at_next_mark_location:
location_at_index[index] = current_location
block_indexes_at_next_mark_location = []
elif isinstance(block, (EndOptional)):
# This blocks "happens" and stays at the current location,
# given by the preceding MarkLocation block.
location_at_index[block_index] = location_stack[-1]
elif isinstance(block, (Backtrack, Unfold)):
# Each of these blocks unwinds the location stack one step as its effect.
# The post-block location is therefore whatever is on the stack after the pop.
location_stack.pop()
location_at_index[block_index] = location_stack[-1]
elif isinstance(
block, (QueryRoot, Traverse, Recurse, Fold, OutputSource, Filter, CoerceType)
):
# These blocks all "happen" at the location given by the first subsequent MarkLocation.
block_indexes_at_next_mark_location.append(block_index)
else:
raise AssertionError(f"Unexpected block type '{type(block).__name__}': {block}")
if block_indexes_at_next_mark_location:
raise AssertionError(
f"Unassigned block indexes: {block_indexes_at_next_mark_location} "
f"for blocks {local_operations_blocks}"
)
return [location_at_index[i] for i in range(len(local_operations_blocks))]
def _split_out_global_operations(
ir_blocks: List[BasicBlock],
) -> Tuple[List[BasicBlock], List[BasicBlock]]:
# TODO(bojanserafimov): Maybe use emit_sql._traverse_and_validate_blocks
for block_index, block in enumerate(ir_blocks):
if isinstance(block, GlobalOperationsStart):
global_operations_index = block_index
break
else:
raise AssertionError(
f"Unexpectedly, did not find GlobalOperationsStart block in IR blocks: {ir_blocks}."
)
local_operations = ir_blocks[:global_operations_index]
global_operations = ir_blocks[global_operations_index:]
return local_operations, global_operations
def _get_initial_data_contexts(
adapter: InterpreterAdapter[DataToken],
start_class: str,
hints: InterpreterHints,
) -> Iterator[DataContext[DataToken]]:
# N.B.: Do not replace the below for-yield with a generator, and do not inline this function
# into the caller! It's important to have an explicit generator to start the computation.
# Without this setup, get_tokens_of_type() is *immediately* called by interpret_ir(),
# even if the returned generator is never advanced. That violates our minimality property:
# data was loaded via a call to get_tokens_of_type(), even though it wasn't (yet) needed.
for token in adapter.get_tokens_of_type(start_class, **hints):
yield DataContext.make_empty_context_from_token(token)
# ##############
# # Public API #
# ##############
def interpret_ir(
adapter: InterpreterAdapter[DataToken],
ir_and_metadata: IrAndMetadata,
query_arguments: Dict[str, Any],
) -> Iterator[Dict[str, Any]]:
validate_arguments(ir_and_metadata.input_metadata, query_arguments)
ir_blocks = ir_and_metadata.ir_blocks
query_metadata_table = ir_and_metadata.query_metadata_table
per_query_hint_cache: Dict[BaseLocation, InterpreterHints] = {}
if not ir_blocks:
raise AssertionError()
local_operations, global_operations = _split_out_global_operations(ir_blocks)
if not local_operations or not global_operations:
raise AssertionError()
first_block = local_operations[0]
if not isinstance(first_block, QueryRoot):
raise AssertionError()
last_block = global_operations[-1]
if not isinstance(last_block, ConstructResult):
raise AssertionError()
local_operation_post_block_locations = _get_local_operation_post_block_locations(
query_metadata_table, local_operations
)
# Process the first block.
start_class = get_only_element_from_collection(first_block.start_class)
root_location = query_metadata_table.root_location
root_location_hints = get_hints_for_location_via_readthrough_cache(
query_metadata_table, query_arguments, per_query_hint_cache, root_location
)
current_data_contexts: Iterator[DataContext[DataToken]] = _get_initial_data_contexts(
adapter, start_class, root_location_hints
)
current_data_contexts = print_tap("starting contexts", current_data_contexts)
# Process all local operation blocks after the first one (already processed above).
for block, block_location in zip(
local_operations[1:], local_operation_post_block_locations[1:]
):
current_data_contexts = generate_block_outputs(
adapter,
query_metadata_table,
query_arguments,
per_query_hint_cache,
block_location,
block,
current_data_contexts,
)
# Process all global operations except the last block, which constructs the final result.
for block in global_operations[:-1]:
current_data_contexts = generate_block_outputs(
adapter,
query_metadata_table,
query_arguments,
per_query_hint_cache,
None,
block,
current_data_contexts,
)
current_data_contexts = print_tap("ending contexts", current_data_contexts)
# Process the final block.
return generate_construct_result_outputs(
adapter,
query_metadata_table,
query_arguments,
per_query_hint_cache,
last_block,
current_data_contexts,
)
def interpret_query(
adapter: InterpreterAdapter[DataToken],
schema: GraphQLSchema,
query: str,
query_arguments: Dict[str, Any],
) -> Iterator[Dict[str, Any]]:
ir_and_metadata = graphql_to_ir(schema, query)
return interpret_ir(adapter, ir_and_metadata, query_arguments)
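# A minimal, hedged usage sketch (never invoked here): `adapter` is assumed to be a
# concrete InterpreterAdapter implementation and `schema` a GraphQLSchema exposing an
# "Animal" type with a "name" property -- both are hypothetical stand-ins.
def _example_interpret_query(
    adapter: InterpreterAdapter[DataToken], schema: GraphQLSchema
) -> None:
    query = """{
        Animal {
            name @output(out_name: "animal_name")
        }
    }"""
    # interpret_query() compiles the query to IR and evaluates it lazily: rows are only
    # produced as the returned iterator is advanced.
    for row in interpret_query(adapter, schema, query, {}):
        print(row["animal_name"])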
|
|
import requests
import pandas as pd
from lcc.db_tier.TAP_query import TapClient
from lcc.entities.exceptions import QueryInputError
from lcc.entities.light_curve import LightCurve
from lcc.entities.star import Star
class VizierTapBase(TapClient):
"""
    Base class for all TAP connectors using the VizieR database. In most
    situations new connectors will contain just a few class attributes, and
    there will be no need to write new methods or overwrite the current ones.
Attributes
-----------
TAP_URL : str
Url to tap server
FILES_URL : str
Path to light curve files storage
TABLE : str
Name of queried table
    RA : str
        Name of the right ascension column. It should be given in degrees;
        if not, it is necessary to convert the values.
    DEC : str
        Name of the declination column. It should be given in degrees;
        if not, it is necessary to convert the values.
EXAMPLE
--------
"{Field}.{Tile}.{Seqn}"
        Keys represent the names of columns
LC_FILE : str
Column name which can be used for obtaining light curve files.
        By default it is set to None, which means that it is not necessary
        to include any other column in order to get light curves.
LC_META : dict
Meta data for light curve.
Example
--------
{"xlabel" : "Terrestrial time",
"xlabel_unit" : "days",
"ylabel" : "Flux",
"ylabel_unit" : "Electrons per second",
"color" : "N/A",
"invert_yaxis" : False}
Light curve is expected by default (magnitudes and Julian days)
    TIME_COL : int
        Zero-based index of the time column in the data file
    MAG_COL : int
        Zero-based index of the magnitude column in the data file
    ERR_COL : int
        Zero-based index of the error column in the data file
    ERR_MAG_RATIO : float
        Ratio between error and magnitude values
Note:
Added because of Corot Archive of Faint Stars.
IDENT_MAP : ordered dict
Ordered dictionary of "name of database" : "column name/s
of identifiers"
Example
--------
IDENT_MAP = {"Macho" : ("Field", "Tile", "Seqn") }
        For one-item mappings a simple dictionary can be used, because
        there is no need to keep the order of items.
MORE_MAP : ordered dict
Ordered dictionary of "column names" : "key in new dictionary which
is be stored in Star object"
Example
--------
MORE_MAP = collections.OrderedDict((("Per", "period"),
("Class" , "var_type"),
("Jmag" , "j_mag"),
("Kmag" , "k_mag"),
("Hmag" , "h_mag")))
Methods
--------
    This class inherits from TapClient, which provides methods for creating,
    posting and returning TAP queries. Methods of this class process the
    results and create Star objects and light curves.
    In most cases there is no need to overwrite methods in inherited classes.
    However, obtaining light curves can differ between databases; in such a
    case it is sufficient to implement a new _getLightCurve method.
    A brief description of the methods can be found below at their declaration.
"""
# Common attribute for all vizier tap connectors
TAP_URL = "http://tapvizier.u-strasbg.fr/TAPVizieR/tap"
# Most common attributes - can be overwritten #
RA = "RAJ2000"
DEC = "DEJ2000"
LC_FILE = None
LC_META = dict()
TIME_COL = 0
MAG_COL = 1
ERR_COL = 2
ERR_MAG_RATIO = 1.
# Split at any number of white spaces
DELIM = None
def __init__(self, queries):
"""
Parameters
-----------
queries : list, dict
            List of queries. Each query is a dictionary of query parameters
            and their values
"""
# Case of just one query
if isinstance(queries, dict):
queries = [queries]
self.queries = queries
# TODO multiprocessing
def getStars(self, load_lc=True, **kwargs):
"""
Get star objects with light curves
Parameters
----------
        load_lc : bool
            If True, light curves are attached to the returned stars
kwargs : dict
            Optional parameters which only take effect if the particular
            database supports them. For example, the CoRoT archive contains
            very large light curves, so their size can be reduced via the
            `max_bins` keyword.
Returns
--------
list
List of stars with their light curves
"""
select = set([self.RA, self.DEC, self.LC_FILE] + list(self.MORE_MAP.keys()))
for val in self.IDENT_MAP.values():
if isinstance(val, (tuple, list, set)):
for it in val:
select.add(it)
else:
select.add(val)
select = [s for s in select if s]
select = list(select)
raw_stars = pd.DataFrame()
for _que in self.queries:
que = _que.copy()
if "ra" in que and "dec" in que:
que[self.RA] = que.pop("ra")
que[self.DEC] = que.pop("dec")
if "delta" in que:
delta = que.pop("delta")
que[self.RA], que[self.DEC] = self._areaSearch(
que[self.RA], que[self.DEC], delta)
conditions = []
for key, value in que.items():
if isinstance(value, (list, tuple)):
if len(value) == 2:
conditions.append((key, value[0], value[1]))
else:
raise QueryInputError("Invalid query range")
else:
if key != "nearest":
conditions.append((key, value))
query_inp = {"table": self.TABLE,
"select": select,
"conditions": conditions,
"URL": self.TAP_URL}
res = self.postQuery(query_inp)
if not res.empty:
raw_stars = pd.concat([raw_stars, res])
return self._createStar(raw_stars, load_lc, **kwargs)
def _createStar(self, df, lc_opt, **kwargs):
"""
Create Star objects from query result
Parameters
----------
        df : pandas.DataFrame
            Query result with one row per star
        lc_opt : bool
            Obtain light curves if True
Returns
--------
list
List of Star objects
"""
stars = []
for _, _raw_star in df.iterrows():
raw_star_dict = _raw_star.to_dict()
ident = {}
for key, value in self.IDENT_MAP.items():
db_ident = {}
if isinstance(value, (list, tuple)):
for ide in value:
db_ident[ide] = raw_star_dict.get(ide)
name = self.get_name(db_ident)
else:
name = raw_star_dict.get(value)
if not db_ident:
db_ident = None
ident[key] = {"name": name, "db_ident": db_ident}
more = {}
for key, value in self.MORE_MAP.items():
more_item = raw_star_dict.get(key)
more[value] = more_item
star = Star(name=self.get_name(raw_star_dict),
coo=(raw_star_dict[self.RA],
raw_star_dict[self.DEC]),
ident=ident,
more=more)
if lc_opt:
star.putLightCurve(self._getLightCurve(star=star,
file_name=raw_star_dict.get(
self.LC_FILE, None),
**kwargs))
stars.append(star)
return stars
def _getLightCurve(self, star, do_per=False, period_key="period",
**kwargs):
"""
Obtain the light curve
Parameters
-----------
star : Star instance
Star object constructed from the query, for which the light
curve is to be obtained
do_per : bool
If True, a phase curve is returned instead
period_key : str
Key in the star.more dictionary holding the period length
Returns
-------
tuple
Tuple of times, mags, errors lists
"""
if do_per:
period = star.more.get(period_key, None)
if period:
self.LC_META = {"xlabel": "Period",
"xlabel_unit": "phase"}
else:
period = 0
url = self.LC_URL.format(macho_name=star.name, period=period)
meta = None
response = requests.get(url)
time = []
mag = []
err = []
lcs = []
for line in response.iter_lines(decode_unicode=True):
line = line.strip()
if not line.startswith((" ", "#")):
parts = line.split(self.DELIM)
if len(parts) == 3:
time.append(float(parts[self.TIME_COL]))
mag.append(float(parts[self.MAG_COL]))
err.append(float(parts[self.ERR_COL]) / self.ERR_MAG_RATIO)
else:
if line.startswith("# m = -1"):
meta = self.LC_META.copy()
meta["color"] = "B"
elif line.startswith("# m = -2"):
lcs.append(LightCurve([time, mag, err], meta))
time, mag, err = [], [], []
meta = self.LC_META.copy()
meta["color"] = "R"
lcs.append(LightCurve([time, mag, err], meta))
return lcs
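# Illustrative usage sketch: the "queries" argument accepted by __init__ above
# is a single dict, or a list of dicts, of query parameters. "ra", "dec" and
# "delta" trigger a cone search via _areaSearch, a two-item list is treated as
# a range condition, and any other key is matched exactly. The column names
# and values below are placeholders, not a real catalogue query.
EXAMPLE_QUERIES = [
    {"ra": 80.2, "dec": -69.5, "delta": 10},        # cone search around a position
    {"Field": 1, "Tile": 3441, "Per": [0.2, 1.5]},  # exact match plus a range
]
# A concrete connector subclass (one that defines TABLE, IDENT_MAP, MORE_MAP
# and LC_URL) would then be used roughly as:
#   stars = SomeConcreteConnector(EXAMPLE_QUERIES).getStars(load_lc=True)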
|
|
#!/usr/bin/python
'''
Extracts data from an LDIF into subfiles. Skips over people and accounts and disables services unless -a is given. Picks 10 random people to create an import.
Useful for converting Prod data to a subset that is safe to import into Dev and QA without exposing confidential data.
dataextractor.py [-a][-d] <name of the ldif>
-a to extract all data. If no -a is supplied the data is truncated and modified for non-Prod environments. E.g. only 10 random people are exported, services are disabled by modifying erurl, and service-supporting data (groups etc.) is skipped.
-d to create removal ldifs, so data can be replaced. It uses DNs from the input LDIF. The side effect is that any DNs that are in the LDAP, but not in the input LDIF, will not be removed.
To clean all of the existing entries, run dataextractor on the ldapdump from the current LDAP or just use the build-cleaner-from-ldif.sh script.
This code assumes the base DN is dc=com. The recycle bin is always skipped.
* extract-acletc.ldif, extract-acletc-del.ldif - ou=[name],DC=COM and eracl attributes of erglobalid=00000000000000000000,ou=[name],DC=COM
* extract-system.ldif, extract-system-del.ldif - ou=systemUser,ou=itim,ou=[name],DC=COM
* extract-config.ldif, extract-config-del.ldif - ou=itim,ou=[name],DC=COM
* ou=policies
* ou=config
* ou=accesstype
* ou=assemblyline
* ou=privilegerules
* cn=challenges
* ou=operations
* ou=objectprofile
* ou=serviceprofile
* ou=lifecycleprofile
* ou=formtemplates
* ou=category
* ou=joindirectives
* extract-custom.ldif, extract-custom-del.ldif - ou=data,ou=[name],dc=com
* extract-people.ldif - ou=people,erglobalid=00000000000000000000,ou=[name],DC=COM
* ou=people
* ou=accounts
* extract-srvics.ldif, extract-srvics-del.ldif - ou=services,erglobalid=00000000000000000000,ou=[name],DC=COM
* extract-tenant.ldif, extract-tenant-del.ldif - erglobalid=00000000000000000000,ou=[name],dc=com:
* ou=policies
* ou=sysroles
* ou=roles
* ou=orgchart
* ou=workflow
* extract-other.ldif - everything else that did not fit into the above categories
2012-2017
@author: Alex Ivkin
'''
import base64, sys, re, traceback, os, pprint, operator, csv, math, subprocess, random, textwrap
from collections import defaultdict # dicts that need no pre-init, for simpler code
def Tree(): # recursive dict storage representing an [ldap] tree
return defaultdict(Tree)
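# Illustrative sketch of how Tree() behaves (the DNs below are made up):
# because the default factory is Tree itself, nested keys can be assigned
# without creating the intermediate levels first, which is what makes it
# convenient for holding an LDAP DN hierarchy.
def _tree_demo():
    t = Tree()
    t["dc=com"]["ou=acme"]["ou=people"]["cn=jdoe"] = {"objectclass": ["person"]}
    return t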
class LdifParser:
def __init__(self,filename,allpeople,deldata):
self.ldif=filename
self.allpeople=allpeople
self.deldata=deldata
self.testcount=10 # how many random test people to export/generate
#self.accountsf=os.path.splitext(filename)[0]+".accounts"+ext
# hash-o-hashes
self.accounts={}
self.services={}
self.people={}
self.neededpeople={}
self.roles={}
self.ppolicies={}
self.ous={}
self.other={}
self.objects=defaultdict(int) # a dict that auto inits to 0 for new keys
self.peoplebyclass=defaultdict(list)
self.ldaptree=Tree()
self.serviceprofiles={'eritimservice':'Built-in'} # init in with a default entry
self.serviceprofileskeys={}
self.plaintext=False; # false for db2ldif, true for ldapsearch formatted files
self.tenant_dns=["ou=policies","ou=sysroles","ou=roles","ou=orgchart","ou=workflow"]
self.people_dns=["ou=people","ou=accounts"]
self.system_dns=["ou=systemuser"]
self.srvics_dns=["ou=services"]
self.custom_dns=["ou=data","*"] # star means all non-parent (i.e. leaf) entries
self.config_dns=["ou=constraints","erdictionaryname=password","ou=policies","ou=config","ou=accesstype","ou=assemblyline","ou=privilegerules","cn=challenges","ou=operations","ou=objectprofile","ou=serviceprofile","ou=lifecycleprofile","ou=formtemplates","ou=category","ou=joindirectives"]
# the following is in enrole.properties password.attributes. Lowercase it
self.encrypted_attributes=['ersynchpassword','erservicepassword','erservicepwd1','erservicepwd2','erservicepwd3','erservicepwd4','eraddomainpassword','erpersonpassword','ernotespasswdaddcert','eritamcred','erep6umds','erposixpassphrase']
self.extradc=False # true if there is a one more [useless] dc below dc=com
def parseOut(self):
i=0
last=-1
with open("extract-tenant.ldif","w") as self.tenantfh, open("extract-srvics.ldif","w") as self.srvicsfh, open("extract-custom.ldif","w") as self.customfh,open("extract-system.ldif","w") as self.systemfh,\
open("extract-people.ldif","w") as self.peoplefh, open("extract-config.ldif","w") as self.configfh, open("extract-acletc.ldif","w") as self.aclsfh, open("extract-others.ldif","w") as self.othersfh, open(self.ldif,'r') as ldiffile:
print "Opening...",
# fastest line count using wc
p = subprocess.Popen(['wc', '-l', self.ldif], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
result, err = p.communicate()
if p.returncode != 0:
raise IOError(err)
num_lines=int(result.strip().split()[0]) # 118593960
print "%s lines." % num_lines
if self.deldata:
self.tenantdfh = open("extract-tenant-del.ldif","w")
self.srvicsdfh = open("extract-srvics-del.ldif","w")
self.customdfh = open("extract-custom-del.ldif","w")
self.configdfh = open("extract-config-del.ldif","w")
self.aclsdfh = open("extract-acletc-del.ldif","w")
self.systemdfh = open("extract-system-del.ldif","w")
# ldiffile.seek(0)
entry=defaultdict(list)
key=''
try:
for fullline in ldiffile:
line=fullline.rstrip('\n\r') # keep spaces but remove EOLs
if not self.plaintext and not entry and line.startswith("erglobalid="):
self.plaintext=True;
print "plaintext format ",
if self.plaintext: # ldapsearch plaintext format
if re.match("erglobalid=.*DC=COM$",line,re.I): # analyze old and start a new entry
if entry:
if 'objectclass' in entry and "ou=recycleBin" not in entry['dn'][0] : # if it is a valid entry and not in the trash
self.dumpEntry(entry)
entry={}
entry['dn']=[line]
elif re.match(r"[a-zA-Z]+=.*[^;]$",line): # it's so specific to make sure we ignore any javascript - the side effect is skipping the ldap attributes that have values ending in ;
(key,value)=line.split("=",1)
key=key.lower() # ldap is case insensitive
value=value.strip("=")
if value != "NOT ASCII": # this means this value is lost in ldapsearch export
if key in entry:
entry[key].append(value)
else:
entry[key]=[value]
elif len(line)>0 and len(entry) > 0: # tag line onto the last value. Skipping empty lines to make sure we don't duplicate \n, but the side effect is removal of blank lines from the multiline attribute values
#line=line.lstrip(' ') # remove the leading space
if len(entry[key]) == 1:
entry[key]=[entry[key][0]+line+"\n"] # add \n for readability (it's plaintext not base64)
else:
entry[key][-1]+=line+"\n" # extend the last value
#else:
# print "Error: ", line
else: # classical format (softerra, db2ldif)
if line=='': # end of an entry
if 'objectclass' in entry and "ou=recycleBin" not in entry['dn'][0] : # if it is a valid entry and not in the trash
self.dumpEntry(entry)
entry=defaultdict(list)
entry['raw']=""
elif line.startswith("#"): # skip comment
continue
elif ":" in line:
(key,value)=line.split(":",1)
key=key.lower() # ldap is case insensitive
value=value.strip(": ")
entry[key].append(value)
elif len(entry) > 0: # tag line onto the last value
line=line.lstrip(' ') # remove the leading space
if len(entry[key]) == 1:
entry[key]=[entry[key][0]+line]
else:
entry[key][-1]+=line # extend the last value
entry['raw']+=fullline
#if i>16000090: break
# print progress
percent = math.ceil(i/float(num_lines)*100*1000)/1000 # round to the tenth of a percent
if percent > last: # can't simply use modulo because of freaky float imprecision
sys.stdout.write('\rParsing %s: %s' % (self.ldif, "{:>5.1f}%".format(percent)))
last=percent
i+=1
except:
print "\nFailure parsing \"%s\" for %s\n%s, %s" % (line, entry, sys.exc_info()[0],sys.exc_info()[1])
traceback.print_exc()
sys.exit(2)
if self.plaintext: # plaintext parser is backfilling, need to process the last entry
if 'objectclass' in entry and "ou=recycleBin" not in entry['dn'][0]:
self.dumpEntry(entry)
# second pass to dump required people records
if not self.allpeople:
if len(self.people.keys()) == 0:
print "Could not find any person records to export"
else:
print "\nExporting people...%s from roles, %s from workflows, %s test." % (len([k for k,v in self.neededpeople.items() if v==1]),len([k for k,v in self.neededpeople.items() if v==2]),self.testcount)
# extract required entries
for k in self.neededpeople.keys():
#print "%s=%s" % (k,self.neededpeople[k])
if k in self.people:
print >> self.peoplefh, self.people[k]['raw'],
else:
print "Missing %s" % k
print >> self.peoplefh, ""
# now extract random ppl, and mix in their attributes from other random people
for i in range(self.testcount):
print >> self.peoplefh, self.people[random.choice(self.people.keys())]['raw']
# mix their attributes with random people of the same set of object classes
'''
person=self.people[random.choice(self.people.keys())]
personClasses=tuple(sorted([o.lower() for o in person['objectclass']]))
print >> self.peoplefh, "dn:", person['dn'][0]
cndonor=self.people[random.choice(self.peoplebyclass[personClasses])]
for k in person.keys():
if k=='raw' or k=='dn':
continue
if k=='cn' or k=='sn' or k.lower()=='givenname' or k.lower()=='displayname':
similarperson=cndonor
else:
similarperson=self.people[random.choice(self.peoplebyclass[personClasses])]
if k in similarperson:
person[k]=similarperson[k]
#else:
# print "Person %s: Similar person %s is missing %s" % (person['cn'],similarperson['cn'],k)
for j in person[k]:
print >> self.peoplefh, "%s: %s" % (k, "\n ".join(textwrap.wrap(j, 100)))
'''
print "done"
#except IOError:
# print "can't open %s!" % self.ldif
#else:
# ldiffile.close()
def dumpEntry(self,entry):
entryObjectclass=[o.lower() for o in entry['objectclass']]
dn=entry['dn'][0] if ',' in entry['dn'][0] or ('=' in entry['dn'][0] and '=' != entry['dn'][0][-1]) else base64.b64decode(entry['dn'][0]) # guessing if it's base64
dnlist=re.split(r'(?<!\\),',dn.lower()) # split by , but not \,
dnlist.reverse() # LDAP tree style addressing (root at the beginning)
if "domain" in entryObjectclass and len(entry["dc"])>1:
self.extradc=True
if self.extradc:
dnlist.pop(1) # remove extra dc if present
# TODO: refactor - a more elegant way would be to tie the dn lists to file handles and do the output in a generic way
if len(dnlist) == 2 and dnlist[0] == "dc=com":
if "ertenant" in entryObjectclass:
# grab the tenant props
print >> self.aclsfh, "dn: "+dn
print >> self.aclsfh, "changetype: modify"
for (attr,val) in entry.items():
if attr not in ["dn","control","ou","objectclass","ibm-entryuuid","raw"]: # raw is the one we create
print >> self.aclsfh, "replace: "+attr
print >> self.aclsfh, attr+": "+val[0]
print >> self.aclsfh, "-"
print >> self.aclsfh, ""
elif "*" in self.custom_dns and "ibm-replicagroup" not in entryObjectclass: # entryObjectclass is lowercased
print >> self.configfh, entry['raw'],
elif len(dnlist) == 3 and dnlist[2] == "erglobalid=00000000000000000000" and "eracl" in entry and len(entry["eracl"]) > 0:
# special format - start with the ldapmodify header
print >> self.aclsfh, "dn: "+dn
print >> self.aclsfh, "changetype: modify"
for acl in entry["eracl"]:
print >> self.aclsfh, "add: eracl"
print >> self.aclsfh, "eracl:: "+acl # double colon to indicate base64 encoded data
print >> self.aclsfh, "-"
if self.deldata:
# nukem all
print >> self.aclsdfh, "dn: "+dn
print >> self.aclsdfh, "changetype: modify"
print >> self.aclsdfh, "delete: eracl"
elif len(dnlist) > 3:
if dnlist[2] == "erglobalid=00000000000000000000":
if dnlist[3] in self.tenant_dns:
print >> self.tenantfh, entry['raw'],
if self.deldata:
print >> self.tenantdfh, "dn: "+dn
print >> self.tenantdfh, "changetype: delete\n"
if self.allpeople:
if dnlist[3] in self.people_dns:
if not (len(dnlist)==4 or (len(dnlist)==5 and dnlist[4]=="ou=0")): # or dnlist[5]=="erglobalid=00000000000000000007"): # skip already existing base entries and System Administrator
print >> self.peoplefh, entry['raw'],
else:
if dnlist[3] == "ou=people":
self.people[dn.lower()]=entry # hash ppl for later
self.peoplebyclass[tuple(sorted(entryObjectclass))].append(dn.lower())
if dnlist[3] == "ou=roles" and "owner" in entry: # for maintaining referential integrity
self.neededpeople[entry["owner"][0].lower()]=1
if dnlist[3] in self.srvics_dns:
if self.allpeople:
print >> self.srvicsfh, entry['raw'],
else:
if len(dnlist) > 4 and 'erITIMService' not in entry['objectclass']: # skip over the basic itim service and the main OU container
# add len(dnlist) == 5 to skip over subentries (service groups)
disabledservice=re.sub(r'er(itdi|)*url: (.*)',r'er\1url: disabled|\2',entry['raw'],flags=re.IGNORECASE)
#print disabledservice
print >> self.srvicsfh, disabledservice,
if self.deldata:
print >> self.srvicsdfh, "dn: "+dn
print >> self.srvicsdfh, "changetype: delete\n"
elif dnlist[2] == "ou=itim":
if dnlist[3] in self.config_dns:
print >> self.configfh, entry['raw'],
if self.deldata:
print >> self.configdfh, "dn: "+dn
print >> self.configdfh, "changetype: delete\n"
if self.allpeople and dnlist[3] in self.system_dns:
print >> self.systemfh, entry['raw'],
if self.deldata:
print >> self.systemdfh, "dn: "+dn
print >> self.systemdfh, "changetype: delete\n"
if 'erWorkflowDefinition'.lower() in entryObjectclass and 'erxml' in entry: # Lifecycle workflows, grep them for people references
data=base64.b64decode(entry['erxml'][0]) if not self.plaintext else entry['erxml'][0]
#print "Unwrapping ", entry["erprocessname"][0]
persondn=re.search("erglobalid=[^,]*,ou=0,ou=people,erglobalid=00000000000000000000,ou=[^,]*,dc=com",data,flags=re.MULTILINE|re.IGNORECASE)
if persondn is not None:
self.neededpeople[persondn.group()]=2
elif dnlist[2] in self.config_dns: # for entries under the ou=itim,dc=com
print >> self.configfh, entry['raw'],
if self.deldata:
print >> self.configdfh, "dn: "+dn
print >> self.configdfh, "changetype: delete\n"
elif dnlist[2] in self.custom_dns: # dumpall
print >> self.customfh, entry['raw'],
if self.deldata:
print >> self.customdfh, "dn: "+dn
print >> self.customdfh, "changetype: delete\n"
else:
print >> self.othersfh, entry['raw'],
else:
print >> self.othersfh, entry['raw'],
# remove encrypted attributes
#enc_att=[x in entry for x in self.encrypted_attributes]
#if any(enc_att):
# print "%s matches %s" % (dn,[x for x,e in zip(self.encrypted_attributes,enc_att) if e])
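# Illustrative sketch of the DN handling used in dumpEntry() above (the DN is
# made up): split on commas that are not escaped with a backslash, then reverse
# so the tree root (dc=com) comes first - the dnlist[0]/dnlist[2]/dnlist[3]
# checks above rely on that ordering.
def _dn_split_demo():
    dn = "cn=Smith\\, John,ou=people,erglobalid=00000000000000000000,ou=acme,dc=com"
    dnlist = re.split(r'(?<!\\),', dn.lower())
    dnlist.reverse()
    # dnlist is now ['dc=com', 'ou=acme', 'erglobalid=00000000000000000000',
    #                'ou=people', 'cn=smith\\, john']
    return dnlist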
if __name__ == '__main__':
# reopen stdout file descriptor with write mode and 0 as the buffer size (unbuffered output)
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
if len(sys.argv) < 2:
print __doc__
sys.exit(1)
filename=sys.argv[len(sys.argv)-1] # last argument
allpeople=True if sys.argv[1] == "-a" or (len(sys.argv)>=3 and sys.argv[2] == "-a") else False
deldata=True if sys.argv[1] == "-d" or (len(sys.argv)>=3 and sys.argv[2] == "-d") else False
parser=LdifParser(filename,allpeople,deldata)
parser.parseOut()
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
import operator
import os
import struct
import threading
import time
import unittest
from google.cloud.proto.spanner.v1.type_pb2 import ARRAY
from google.cloud.proto.spanner.v1.type_pb2 import BOOL
from google.cloud.proto.spanner.v1.type_pb2 import BYTES
from google.cloud.proto.spanner.v1.type_pb2 import DATE
from google.cloud.proto.spanner.v1.type_pb2 import FLOAT64
from google.cloud.proto.spanner.v1.type_pb2 import INT64
from google.cloud.proto.spanner.v1.type_pb2 import STRING
from google.cloud.proto.spanner.v1.type_pb2 import TIMESTAMP
from google.cloud.proto.spanner.v1.type_pb2 import Type
from google.cloud._helpers import UTC
from google.cloud.exceptions import GrpcRendezvous
from google.cloud.spanner._helpers import TimestampWithNanoseconds
from google.cloud.spanner.client import Client
from google.cloud.spanner.keyset import KeyRange
from google.cloud.spanner.keyset import KeySet
from google.cloud.spanner.pool import BurstyPool
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
IS_CIRCLE = os.getenv('CIRCLECI') == 'true'
CREATE_INSTANCE = IS_CIRCLE or os.getenv(
'GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE') is not None
if CREATE_INSTANCE:
INSTANCE_ID = 'google-cloud' + unique_resource_id('-')
else:
INSTANCE_ID = os.environ.get('GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE',
'google-cloud-python-systest')
DATABASE_ID = 'test_database'
EXISTING_INSTANCES = []
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE_CONFIG = None
INSTANCE = None
def _retry_on_unavailable(exc):
"""Retry only errors whose status code is 'UNAVAILABLE'."""
from grpc import StatusCode
return exc.code() == StatusCode.UNAVAILABLE
def _has_all_ddl(database):
return len(database.ddl_statements) == len(DDL_STATEMENTS)
def _list_instances():
return list(Config.CLIENT.list_instances())
def setUpModule():
Config.CLIENT = Client()
retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
# Defend against back-end returning configs for regions we aren't
# actually allowed to use.
configs = [config for config in configs if '-us-' in config.name]
if len(configs) < 1:
raise ValueError('List instance configs failed in module set up.')
Config.INSTANCE_CONFIG = configs[0]
config_name = configs[0].name
instances = retry(_list_instances)()
EXISTING_INSTANCES[:] = instances
if CREATE_INSTANCE:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, config_name)
created_op = Config.INSTANCE.create()
created_op.result(30) # block until completion
else:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
Config.INSTANCE.reload()
def tearDownModule():
if CREATE_INSTANCE:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (instance in EXISTING_INSTANCES or
instance == Config.INSTANCE)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(
INSTANCE_ID, Config.INSTANCE_CONFIG.name)
# Make sure metadata unset before reloading.
instance.display_name = None
instance.reload()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, 'Skipping instance creation')
def test_create_instance(self):
ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
instance = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new Instance object and make sure it is the same.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = 'Foo Bar Baz'
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new Instance object and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class _TestData(object):
TABLE = 'contacts'
COLUMNS = ('contact_id', 'first_name', 'last_name', 'email')
ROW_DATA = (
(1, u'Phred', u'Phlyntstone', u'phred@example.com'),
(2, u'Bharney', u'Rhubble', u'bharney@example.com'),
(3, u'Wylma', u'Phlyntstone', u'wylma@example.com'),
)
ALL = KeySet(all_=True)
SQL = 'SELECT * FROM contacts ORDER BY contact_id'
def _assert_timestamp(self, value, nano_value):
self.assertIsInstance(value, datetime.datetime)
self.assertIsNone(value.tzinfo)
self.assertIs(nano_value.tzinfo, UTC)
self.assertEqual(value.year, nano_value.year)
self.assertEqual(value.month, nano_value.month)
self.assertEqual(value.day, nano_value.day)
self.assertEqual(value.hour, nano_value.hour)
self.assertEqual(value.minute, nano_value.minute)
self.assertEqual(value.second, nano_value.second)
self.assertEqual(value.microsecond, nano_value.microsecond)
if isinstance(value, TimestampWithNanoseconds):
self.assertEqual(value.nanosecond, nano_value.nanosecond)
else:
self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
def _check_row_data(self, row_data, expected=None):
if expected is None:
expected = self.ROW_DATA
self.assertEqual(len(row_data), len(expected))
for found, expected in zip(row_data, expected):
self.assertEqual(len(found), len(expected))
for found_cell, expected_cell in zip(found, expected):
if isinstance(found_cell, TimestampWithNanoseconds):
self._assert_timestamp(expected_cell, found_cell)
elif isinstance(found_cell, float) and math.isnan(found_cell):
self.assertTrue(math.isnan(expected_cell))
else:
self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
pool = BurstyPool()
cls._db = Config.INSTANCE.database(
DATABASE_ID, ddl_statements=DDL_STATEMENTS, pool=pool)
cls._db.create()
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.drop()
def test_list_databases(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the
# database created in `setUpClass` here will be the only one.
databases = list(Config.INSTANCE.list_databases())
self.assertEqual(databases, [self._db])
def test_create_database(self):
pool = BurstyPool()
temp_db_id = 'temp-db' # test w/ hyphen
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
name_attr = operator.attrgetter('name')
expected = sorted([temp_db, self._db], key=name_attr)
databases = list(Config.INSTANCE.list_databases())
found = sorted(databases, key=name_attr)
self.assertEqual(found, expected)
def test_update_database_ddl(self):
pool = BurstyPool()
temp_db_id = 'temp_db'
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
create_op.result(90) # raises on failure / timeout.
operation = temp_db.update_ddl(DDL_STATEMENTS)
# We want to make sure the operation completes.
operation.result(90) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(DDL_STATEMENTS))
def test_db_batch_insert_then_db_snapshot_read_and_db_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(from_snap)
from_db = list(self._db.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(from_db)
def test_db_run_in_transaction_then_db_execute_sql(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
test.assertEqual(rows, [])
transaction.insert_or_update(
test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
rows = list(self._db.execute_sql(self.SQL))
self._check_row_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(
test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
rows = list(self._db.execute_sql(self.SQL))
self._check_row_data(rows)
class TestSessionAPI(unittest.TestCase, _TestData):
ALL_TYPES_TABLE = 'all_types'
ALL_TYPES_COLUMNS = (
'list_goes_on',
'are_you_sure',
'raw_data',
'hwhen',
'approx_value',
'eye_d',
'description',
'exactly_hwhen',
)
COUNTERS_TABLE = 'counters'
COUNTERS_COLUMNS = (
'name',
'value',
)
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = TimestampWithNanoseconds(1995, 8, 31, nanosecond=987654321)
OTHER_NAN, = struct.unpack('<d', b'\x01\x00\x01\x00\x00\x00\xf8\xff')
BYTES_1 = b'Ymlu'
BYTES_2 = b'Ym9vdHM='
ALL_TYPES_ROWDATA = (
([], False, None, None, 0.0, None, None, None),
([1], True, BYTES_1, SOME_DATE, 0.0, 19, u'dog', SOME_TIME),
([5, 10], True, BYTES_1, None, 1.25, 99, u'cat', None),
([], False, BYTES_2, None, float('inf'), 107, u'frog', None),
([3, None, 9], False, None, None, float('-inf'), 207, None, None),
([], False, None, None, float('nan'), 1207, None, None),
([], False, None, None, OTHER_NAN, 2000, None, NANO_TIME),
)
@classmethod
def setUpClass(cls):
pool = BurstyPool()
cls._db = Config.INSTANCE.database(
DATABASE_ID, ddl_statements=DDL_STATEMENTS, pool=pool)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
batch = session.batch()
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
batch.commit()
snapshot = session.snapshot(read_timestamp=batch.committed)
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.ALL_TYPES_TABLE, self.ALL)
batch.insert(
self.ALL_TYPES_TABLE,
self.ALL_TYPES_COLUMNS,
self.ALL_TYPES_ROWDATA)
snapshot = session.snapshot(read_timestamp=batch.committed)
rows = list(snapshot.read(
self.ALL_TYPES_TABLE, self.ALL_TYPES_COLUMNS, self.ALL))
self._check_row_data(rows, expected=self.ALL_TYPES_ROWDATA)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
snapshot = session.snapshot(read_timestamp=batch.committed)
rows = list(snapshot.execute_sql(self.SQL))
self._check_row_data(rows)
@RetryErrors(exception=GrpcRendezvous)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=GrpcRendezvous)
def test_transaction_read_and_insert_then_exception(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
session.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=GrpcRendezvous)
def test_transaction_read_and_insert_or_update_then_commit(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(
self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows)
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.insert_or_update(
self.COUNTERS_TABLE,
self.COUNTERS_COLUMNS,
[[pkey, INITIAL_VALUE]])
# We don't want to run the threads' transactions in the current
# session, which would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_session = self._db.session()
txn_sessions.append(txn_session)
txn_session.create()
self.to_delete.append(txn_session)
threads = [
threading.Thread(
target=txn_session.run_in_transaction,
args=(unit_of_work, pkey))
for txn_session in txn_sessions]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
keyset = KeySet(keys=[(pkey,)])
rows = list(session.read(
self.COUNTERS_TABLE, self.COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(
self.COUNTERS_TABLE, self.COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(
self.COUNTERS_TABLE,
self.COUNTERS_COLUMNS,
[[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
PKEY = 'read_w_concurrent_updates'
self._transaction_concurrency_helper(
self._read_w_concurrent_update, PKEY)
def _query_w_concurrent_update(self, transaction, pkey):
SQL = 'SELECT * FROM counters WHERE name = @name'
rows = list(transaction.execute_sql(
SQL,
params={'name': pkey},
param_types={'name': Type(code=STRING)},
))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(
self.COUNTERS_TABLE,
self.COUNTERS_COLUMNS,
[[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
PKEY = 'query_w_concurrent_updates'
self._transaction_concurrency_helper(
self._query_w_concurrent_update, PKEY)
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield [
index,
'First%09d' % (index,),
'Last%09d' % (max_index - index),
'test-%09d@example.com' % (index,),
]
def _set_up_table(self, row_count, db=None):
if db is None:
db = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(db.reload)()
session = db.session()
session.create()
self.to_delete.append(session)
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(
test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = session.run_in_transaction(_unit_of_work, test=self)
return session, committed
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
ROW_COUNT = 400
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
read_tx = session.snapshot(read_timestamp=committed)
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
min_read_ts = session.snapshot(min_read_timestamp=committed)
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
max_staleness = session.snapshot(max_staleness=staleness)
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
exact_staleness = session.snapshot(exact_staleness=staleness)
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
strong = session.snapshot()
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
strong = session.snapshot(multi_use=True)
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
read_ts = session.snapshot(read_timestamp=committed, multi_use=True)
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
exact = session.snapshot(exact_staleness=delta, multi_use=True)
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_manual_consume(self):
ROW_COUNT = 4000
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
streamed = snapshot.read(self.TABLE, self.COLUMNS, self.ALL)
retrieved = 0
while True:
try:
streamed.consume_next()
except StopIteration:
break
retrieved += len(streamed.rows)
streamed.rows[:] = ()
self.assertEqual(retrieved, ROW_COUNT)
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def test_read_w_index(self):
ROW_COUNT = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = [
'CREATE INDEX contacts_by_last_name ON contacts(last_name)',
]
pool = BurstyPool()
temp_db = Config.INSTANCE.database(
'test_read_w_index', ddl_statements=DDL_STATEMENTS + EXTRA_DDL,
pool=pool)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
session, committed = self._set_up_table(ROW_COUNT, db=temp_db)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index='contacts_by_last_name'))
expected = list(reversed(
[(row[0], row[2]) for row in self._row_data(ROW_COUNT)]))
self._check_row_data(rows, expected)
def test_read_w_single_key(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
def test_read_w_multiple_keys(self):
ROW_COUNT = 40
indices = [0, 5, 17]
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, self.COLUMNS,
KeySet(keys=[(index,) for index in indices])))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
ROW_COUNT = 4000
LIMIT = 100
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, self.ALL, limit=LIMIT))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = all_data_rows[:LIMIT]
self._check_row_data(rows, expected)
def test_read_w_ranges(self):
ROW_COUNT = 4000
START = 1000
END = 2000
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed, multi_use=True)
all_data_rows = list(self._row_data(ROW_COUNT))
closed_closed = KeyRange(start_closed=[START], end_closed=[END])
keyset = KeySet(ranges=(closed_closed,))
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START:END+1]
self._check_row_data(rows, expected)
closed_open = KeyRange(start_closed=[START], end_open=[END])
keyset = KeySet(ranges=(closed_open,))
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START:END]
self._check_row_data(rows, expected)
open_open = KeyRange(start_open=[START], end_open=[END])
keyset = KeySet(ranges=(open_open,))
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START+1:END]
self._check_row_data(rows, expected)
open_closed = KeyRange(start_open=[START], end_closed=[END])
keyset = KeySet(ranges=(open_closed,))
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[START+1:END+1]
self._check_row_data(rows, expected)
def test_execute_sql_w_manual_consume(self):
ROW_COUNT = 4000
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
streamed = snapshot.execute_sql(self.SQL)
retrieved = 0
while True:
try:
streamed.consume_next()
except StopIteration:
break
retrieved += len(streamed.rows)
streamed.rows[:] = ()
self.assertEqual(retrieved, ROW_COUNT)
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(self, snapshot, sql, params, param_types, expected):
if 'ORDER' not in sql:
sql += ' ORDER BY eye_d'
rows = list(snapshot.execute_sql(
sql, params=params, param_types=param_types))
self._check_row_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
ROW_COUNT = 40
SQL = 'SELECT * FROM {}'.format(self.TABLE)
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
strong = session.snapshot(multi_use=True)
before = list(strong.execute_sql(SQL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.execute_sql(SQL))
self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
SQL = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 'a' AS C1, 1 AS C2 "
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
"ORDER BY C1 ASC)"
)
session = self._db.session()
session.create()
self.to_delete.append(session)
snapshot = session.snapshot()
self._check_sql_results(
snapshot,
sql=SQL,
params=None,
param_types=None,
expected=[
[[['a', 1], ['b', 2]]],
])
def test_execute_sql_w_query_param(self):
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.ALL_TYPES_TABLE, self.ALL)
batch.insert(
self.ALL_TYPES_TABLE,
self.ALL_TYPES_COLUMNS,
self.ALL_TYPES_ROWDATA)
snapshot = session.snapshot(
read_timestamp=batch.committed, multi_use=True)
# Cannot equality-test array values. See below for a test w/
# array of IDs.
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE are_you_sure = @sure',
params={'sure': True},
param_types={'sure': Type(code=BOOL)},
expected=[(19,), (99,)],
)
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE raw_data = @bytes_1',
params={'bytes_1': self.BYTES_1},
param_types={'bytes_1': Type(code=BYTES)},
expected=[(19,), (99,)],
)
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE hwhen = @hwhen',
params={'hwhen': self.SOME_DATE},
param_types={'hwhen': Type(code=DATE)},
expected=[(19,)],
)
self._check_sql_results(
snapshot,
sql=('SELECT eye_d FROM all_types WHERE approx_value >= @lower'
' AND approx_value < @upper '),
params={'lower': 0.0, 'upper': 1.0},
param_types={
'lower': Type(code=FLOAT64), 'upper': Type(code=FLOAT64)},
expected=[(None,), (19,)],
)
# Find +inf
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE approx_value = @pos_inf',
params={'pos_inf': float('+inf')},
param_types={'pos_inf': Type(code=FLOAT64)},
expected=[(107,)],
)
# Find -inf
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE approx_value = @neg_inf',
params={'neg_inf': float('-inf')},
param_types={'neg_inf': Type(code=FLOAT64)},
expected=[(207,)],
)
self._check_sql_results(
snapshot,
sql='SELECT description FROM all_types WHERE eye_d = @my_id',
params={'my_id': 19},
param_types={'my_id': Type(code=INT64)},
expected=[(u'dog',)],
)
self._check_sql_results(
snapshot,
sql='SELECT description FROM all_types WHERE eye_d = @my_id',
params={'my_id': None},
param_types={'my_id': Type(code=INT64)},
expected=[],
)
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE description = @description',
params={'description': u'dog'},
param_types={'description': Type(code=STRING)},
expected=[(19,)],
)
# NaNs cannot be searched for by equality.
self._check_sql_results(
snapshot,
sql='SELECT eye_d FROM all_types WHERE exactly_hwhen = @hwhen',
params={'hwhen': self.SOME_TIME},
param_types={'hwhen': Type(code=TIMESTAMP)},
expected=[(19,)],
)
array_type = Type(code=ARRAY, array_element_type=Type(code=INT64))
self._check_sql_results(
snapshot,
sql=('SELECT description FROM all_types '
'WHERE eye_d in UNNEST(@my_list)'),
params={'my_list': [19, 99]},
param_types={'my_list': array_type},
expected=[(u'dog',), (u'cat',)],
)
class TestStreamingChunking(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
from tests.system.utils.streaming_utils import INSTANCE_NAME
from tests.system.utils.streaming_utils import DATABASE_NAME
instance = Config.CLIENT.instance(INSTANCE_NAME)
if not instance.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable.")
database = instance.database(DATABASE_NAME)
if not database.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable.")
cls._db = database
def _verify_one_column(self, table_desc):
sql = 'SELECT chunk_me FROM {}'.format(table_desc.table)
rows = list(self._db.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
def _verify_two_columns(self, table_desc):
sql = 'SELECT chunk_me, chunk_me_2 FROM {}'.format(table_desc.table)
rows = list(self._db.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
self.assertEqual(row[1], expected)
def test_four_kay(self):
from tests.system.utils.streaming_utils import FOUR_KAY
self._verify_one_column(FOUR_KAY)
def test_forty_kay(self):
from tests.system.utils.streaming_utils import FORTY_KAY
self._verify_one_column(FORTY_KAY)
def test_four_hundred_kay(self):
from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
self._verify_one_column(FOUR_HUNDRED_KAY)
def test_four_meg(self):
from tests.system.utils.streaming_utils import FOUR_MEG
self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
"""Placeholder for any user-defined exception."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
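# Small illustrative helper, not used by the tests above: the four KeyRange
# variants exercised in test_read_w_ranges map onto Python slices of the
# ordered row data (where the integer primary key equals the row index).
def _expected_range_rows(all_rows, start, end, start_closed=True, end_closed=True):
    """Rows a single-column KeyRange between `start` and `end` should select."""
    lo = start if start_closed else start + 1
    hi = end + 1 if end_closed else end
    return all_rows[lo:hi]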
|
|
import Cookie
from datetime import datetime, timedelta
import hmac
import md5
import os
import random
import re
import sys
import time
import UserDict
import warnings
try:
from paste.registry import StackedObjectProxy
beaker_session = StackedObjectProxy(name="Beaker Session")
except:
beaker_session = None
from beaker.container import namespace_registry
from beaker.util import coerce_session_params
__all__ = ['SignedCookie', 'Session']
class SignedCookie(Cookie.BaseCookie):
"extends python cookie to give digital signature support"
def __init__(self, secret, input=None):
self.secret = secret
Cookie.BaseCookie.__init__(self, input)
def value_decode(self, val):
sig = val[0:32]
value = val[32:]
if hmac.new(self.secret, value).hexdigest() != sig:
return None, val
return val[32:], val
def value_encode(self, val):
return val, ("%s%s" % (hmac.new(self.secret, val).hexdigest(), val))
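# Illustrative sketch of the signing scheme above (the secret and cookie value
# are made-up examples): value_encode() prepends the 32-character hex HMAC
# digest of the value, and value_decode() recomputes it and returns None for
# the value when the signature does not match.
def _signed_cookie_demo():
    jar = SignedCookie('not-a-real-secret')
    jar['beaker.session.id'] = 'abc123'            # coded_value is "<sig>abc123"
    raw = jar['beaker.session.id'].coded_value
    value, _ = jar.value_decode(raw)
    assert value == 'abc123'                       # genuine signature verifies
    tampered, _ = jar.value_decode('0' * 32 + 'abc123')
    assert tampered is None                        # forged signature is rejected
    return raw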
class Session(UserDict.DictMixin):
"session object that uses container package for storage"
def __init__(self, request, id=None, invalidate_corrupt=False,
use_cookies=True, type=None, data_dir=None,
key='beaker.session.id', timeout=None, cookie_expires=True,
secret=None, log_file=None, namespace_class=None, **kwargs):
if type is None:
if data_dir is None:
self.type = 'memory'
else:
self.type = 'file'
else:
self.type = type
if namespace_class is None:
self.namespace_class = namespace_registry(self.type)
else:
self.namespace_class = namespace_class
self.kwargs = kwargs
self.request = request
self.data_dir = data_dir
self.key = key
self.timeout = timeout
self.use_cookies = use_cookies
self.cookie_expires = cookie_expires
self.log_file = log_file
self.was_invalidated = False
self.secret = secret
self.id = id
if self.use_cookies:
try:
cookieheader = request['cookie']
except KeyError:
cookieheader = ''
if secret is not None:
try:
self.cookie = SignedCookie(secret, input = cookieheader)
except Cookie.CookieError:
self.cookie = SignedCookie(secret, input = None)
else:
self.cookie = Cookie.SimpleCookie(input = cookieheader)
if self.id is None and self.cookie.has_key(self.key):
self.id = self.cookie[self.key].value
if self.id is None:
self._create_id()
else:
self.is_new = False
if not self.is_new:
try:
self.load()
except:
if invalidate_corrupt:
self.invalidate()
else:
raise
else:
self.dict = {}
def _create_id(self):
self.id = md5.new(
md5.new("%f%s%f%d" % (time.time(), id({}), random.random(), os.getpid()) ).hexdigest(),
).hexdigest()
self.is_new = True
if self.use_cookies:
self.cookie[self.key] = self.id
self.cookie[self.key]['path'] = '/'
if self.cookie_expires is not True:
if self.cookie_expires is False:
expires = datetime.fromtimestamp( 0x7FFFFFFF )
elif isinstance(self.cookie_expires, timedelta):
expires = datetime.today() + self.cookie_expires
elif isinstance(self.cookie_expires, datetime):
expires = self.cookie_expires
else:
raise ValueError("Invalid argument for cookie_expires: %s"
% repr(self.cookie_expires))
self.cookie[self.key]['expires'] = \
expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
self.request['cookie_out'] = self.cookie[self.key].output(header='')
self.request['set_cookie'] = False
created = property(lambda self: self.dict['_creation_time'])
def delete(self):
"""deletes the persistent storage for this session; the session itself remains valid."""
self.namespace.acquire_write_lock()
try:
for k in self.namespace.keys():
if not re.match(r'_creation_time|_accessed_time', k):
del self.namespace[k]
self.namespace['_accessed_time'] = time.time()
finally:
self.namespace.release_write_lock()
def __getitem__(self, key):
return self.dict.__getitem__(key)
def __setitem__(self, key, value):
self.dict.__setitem__(key, value)
def __delitem__(self, key):
del self.dict[key]
def keys(self):
return self.dict.keys()
def __contains__(self, key):
return self.dict.has_key(key)
def has_key(self, key):
return self.dict.has_key(key)
def __iter__(self):
return iter(self.dict.keys())
def iteritems(self):
return self.dict.iteritems()
def invalidate(self):
"invalidates this session, creates a new session id, returns to the is_new state"
namespace = self.namespace
namespace.acquire_write_lock()
try:
namespace.remove()
finally:
namespace.release_write_lock()
self.was_invalidated = True
self._create_id()
self.load()
def load(self):
"loads the data from this session from persistent storage"
self.namespace = self.namespace_class(self.id, data_dir=self.data_dir,
digest_filenames=False, **self.kwargs)
namespace = self.namespace
self.request['set_cookie'] = True
namespace.acquire_write_lock()
try:
self.debug("session loading keys")
self.dict = {}
now = time.time()
if not namespace.has_key('_creation_time'):
namespace['_creation_time'] = now
self.is_new = True
try:
self.accessed = namespace['_accessed_time']
namespace['_accessed_time'] = now
except KeyError:
namespace['_accessed_time'] = self.accessed = now
if self.timeout is not None and now - self.accessed > self.timeout:
self.invalidate()
else:
for k in namespace.keys():
self.dict[k] = namespace[k]
finally:
namespace.release_write_lock()
def save(self):
"saves the data for this session to persistent storage"
if not hasattr(self, 'namespace'):
curdict = self.dict
self.load()
self.dict = curdict
self.namespace.acquire_write_lock()
try:
self.debug("session saving keys")
todel = []
for k in self.namespace.keys():
if not self.dict.has_key(k):
todel.append(k)
for k in todel:
del self.namespace[k]
for k in self.dict.keys():
self.namespace[k] = self.dict[k]
self.namespace['_accessed_time'] = self.dict['_accessed_time'] \
= time.time()
self.namespace['_creation_time'] = self.dict['_creation_time'] \
= time.time()
finally:
self.namespace.release_write_lock()
if self.is_new:
self.request['set_cookie'] = True
def lock(self):
"""locks this session against other processes/threads. this is
automatic when load/save is called.
***use with caution*** and always with a corresponding 'unlock'
inside a "finally:" block,
as a stray lock typically cannot be unlocked
without shutting down the whole application.
"""
self.namespace.acquire_write_lock()
def unlock(self):
"""unlocks this session against other processes/threads. this is
automatic when load/save is called.
***use with caution*** and always within a "finally:" block,
as a stray lock typically cannot be unlocked
without shutting down the whole application.
"""
self.namespace.release_write_lock()
def debug(self, message):
if self.log_file is not None:
self.log_file.write(message)
class SessionObject(object):
"""Session proxy/lazy creator
This object proxies access to the actual session object, so that in the
case that the session hasn't been used before, it will be set up. This
avoids creating and loading the session from persistent storage unless
it is actually used during the request.
"""
def __init__(self, environ, **params):
self.__dict__['_params'] = params
self.__dict__['_environ'] = environ
self.__dict__['_sess'] = None
self.__dict__['_headers'] = []
def _session(self):
"""Lazy initial creation of session object"""
if self.__dict__['_sess'] is None:
params = self.__dict__['_params']
environ = self.__dict__['_environ']
self.__dict__['_headers'] = req = {'cookie_out':None}
req['cookie'] = environ.get('HTTP_COOKIE')
self.__dict__['_sess'] = Session(req, use_cookies=True, **params)
return self.__dict__['_sess']
def __getattr__(self, attr):
return getattr(self._session(), attr)
def __setattr__(self, attr, value):
setattr(self._session(), attr, value)
def __delattr__(self, name):
self._session().__delattr__(name)
def __getitem__(self, key):
return self._session()[key]
def __setitem__(self, key, value):
self._session()[key] = value
def __delitem__(self, key):
self._session().__delitem__(key)
def __repr__(self):
return self._session().__repr__()
def __iter__(self):
"""Only works for proxying to a dict"""
return iter(self._session().keys())
def __contains__(self, key):
return self._session().has_key(key)
def get_by_id(self, id):
params = self.__dict__['_params']
session = Session({}, use_cookies=False, id=id, **params)
if session.is_new:
session.namespace.remove()
return None
return session
class SessionMiddleware(object):
deprecated = True
session = beaker_session
def __init__(self, wrap_app, config=None, environ_key='beaker.session', **kwargs):
"""Initialize the Session Middleware
The Session middleware will make a lazy session instance available
every request under the ``environ['beaker.session']`` key by default. The
location in environ can be changed by setting ``environ_key``.
``config``
dict All settings should be prefixed by 'session.'. This method of
passing variables is intended for Paste and other setups that
accumulate multiple component settings in a single dictionary. If
config contains *no session. prefixed args*, then *all* of the config
options will be used to initialize the Session objects.
``environ_key``
Location where the Session instance will be keyed in the WSGI environ
``**kwargs``
All keyword arguments are assumed to be session settings and will
override any settings found in ``config``
"""
if self.deprecated:
warnings.warn('SessionMiddleware is moving to beaker.middleware in '
'0.8', DeprecationWarning, 2)
config = config or {}
# Load up the default params
self.options = dict(invalidate_corrupt=True, type=None,
data_dir=None, key='beaker.session.id',
timeout=None, secret=None, log_file=None)
# Pull out any config args meant for beaker session, if there are any
for dct in [config, kwargs]:
for key, val in dct.iteritems():
if key.startswith('beaker.session.'):
self.options[key[15:]] = val
if key.startswith('session.'):
self.options[key[8:]] = val
if key.startswith('session_'):
warnings.warn('Session options should start with session. '
'instead of session_.', DeprecationWarning, 2)
self.options[key[8:]] = val
# Coerce and validate session params
coerce_session_params(self.options)
# Assume all keys are intended for the session if none are prefixed with 'session.'
if not self.options and config:
self.options = config
self.options.update(kwargs)
self.wrap_app = wrap_app
self.environ_key = environ_key
def __call__(self, environ, start_response):
session = SessionObject(environ, **self.options)
if environ.get('paste.registry'):
environ['paste.registry'].register(self.session, session)
environ[self.environ_key] = session
environ['beaker.get_session'] = self._get_session
def session_start_response(status, headers, exc_info = None):
if session.__dict__['_sess'] is not None:
if session.__dict__['_headers']['set_cookie']:
cookie = session.__dict__['_headers']['cookie_out']
if cookie:
headers.append(('Set-cookie', cookie))
return start_response(status, headers, exc_info)
try:
response = self.wrap_app(environ, session_start_response)
except:
ty, val = sys.exc_info()[:2]
if isinstance(ty, str):
raise ty, val, sys.exc_info()[2]
if ty.__name__ == 'HTTPFound' and \
session.__dict__['_sess'] is not None:
cookie = session.__dict__['_headers']['cookie_out']
if cookie:
val.headers.append(('Set-cookie', cookie))
raise ty, val, sys.exc_info()[2]
else:
return response
def _get_session(self):
return Session({}, use_cookies=False, **self.options)
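# A minimal WSGI wiring sketch for the middleware above. The demo app, the
# cookie key and the 'memory' backend choice are illustrative; only the
# dict-style access and save() proxied by SessionObject are relied upon.
def _demo_session_app(environ, start_response):
    session = environ['beaker.session']
    session['visits'] = session['visits'] + 1 if 'visits' in session else 1
    session.save()
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Visit number %d\n' % session['visits']]

# wrapped_app = SessionMiddleware(_demo_session_app,
#                                 config={'session.type': 'memory',
#                                         'session.key': 'demo_sid'})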
|
|
# Copyright 2011, VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import functools
import importlib
import os
import os.path
import random
import re
import signal
import sys
import threading
import time
import uuid
import weakref
import eventlet
from eventlet.green import subprocess
import netaddr
from neutron_lib import constants as n_const
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
import six
import neutron
from neutron._i18n import _
from neutron.db import api as db_api
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
DEFAULT_THROTTLER_VALUE = 2
_SEPARATOR_REGEX = re.compile(r'[/\\]+')
class WaitTimeout(Exception):
"""Default exception coming from wait_until_true() function."""
class LockWithTimer(object):
def __init__(self, threshold):
self._threshold = threshold
self.timestamp = 0
self._lock = threading.Lock()
def acquire(self):
return self._lock.acquire(False)
def release(self):
return self._lock.release()
def time_to_wait(self):
return self.timestamp - time.time() + self._threshold
# REVISIT(jlibosva): Some parts of throttler may be similar to what
# neutron.notifiers.batch_notifier.BatchNotifier does. They
# could be refactored and unified.
def throttler(threshold=DEFAULT_THROTTLER_VALUE):
"""Throttle number of calls to a function to only once per 'threshold'.
"""
def decorator(f):
lock_with_timer = LockWithTimer(threshold)
@functools.wraps(f)
def wrapper(*args, **kwargs):
if lock_with_timer.acquire():
try:
fname = f.__name__
time_to_wait = lock_with_timer.time_to_wait()
if time_to_wait > 0:
LOG.debug("Call of function %s scheduled, sleeping "
"%.1f seconds", fname, time_to_wait)
# Decorated function has been called recently, wait.
eventlet.sleep(time_to_wait)
lock_with_timer.timestamp = time.time()
finally:
lock_with_timer.release()
LOG.debug("Calling throttled function %s", fname)
return f(*args, **kwargs)
return wrapper
return decorator
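# Usage sketch for the throttler decorator above (the function below is
# illustrative only). A call made less than 'threshold' seconds after the
# previous one sleeps until the window reopens, so the wrapped function runs
# at most once per threshold period.
@throttler(threshold=5)
def _example_throttled_resync():
    LOG.debug("throttled resync running")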
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
env=None, preexec_fn=_subprocess_setup, close_fds=True):
return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, preexec_fn=preexec_fn,
close_fds=close_fds, env=env)
def get_first_host_ip(net, ip_version):
return str(netaddr.IPAddress(net.first + 1, ip_version))
def is_extension_supported(plugin, ext_alias):
return ext_alias in getattr(
plugin, "supported_extension_aliases", [])
def log_opt_values(log):
cfg.CONF.log_opt_values(log, logging.DEBUG)
def get_dhcp_agent_device_id(network_id, host):
# Split host so as to always use only the hostname and
# not the domain name. This will guarantee consistency
# whether a local hostname or an fqdn is passed in.
local_hostname = host.split('.')[0]
host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname))
return 'dhcp%s-%s' % (host_uuid, network_id)
class exception_logger(object):
"""Wrap a function and log raised exception
:param logger: the logger to log the exception default is LOG.exception
:returns: origin value if no exception raised; re-raise the exception if
any occurred
"""
def __init__(self, logger=None):
self.logger = logger
def __call__(self, func):
if self.logger is None:
LOG = logging.getLogger(func.__module__)
self.logger = LOG.exception
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
self.logger(e)
return call
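# Usage sketch for exception_logger above (the function is illustrative):
# the decorator logs any exception via LOG.exception for the function's
# module and then re-raises it unchanged.
@exception_logger()
def _example_risky_call():
    raise RuntimeError("boom")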
def get_other_dvr_serviced_device_owners():
"""Return device_owner names for ports that should be serviced by DVR
This doesn't return DEVICE_OWNER_COMPUTE_PREFIX since it is a
prefix, not a complete device_owner name, so should be handled
separately (see is_dvr_serviced() below)
"""
return [n_const.DEVICE_OWNER_LOADBALANCER,
n_const.DEVICE_OWNER_LOADBALANCERV2,
n_const.DEVICE_OWNER_DHCP]
def get_dvr_allowed_address_pair_device_owners():
"""Return device_owner names for allowed_addr_pair ports serviced by DVR
This just returns the device owners that are used by the
allowed_address_pair ports. Right now only the device_owners shown
below are used by the allowed_address_pair ports.
Later if other device owners are used for allowed_address_pairs those
device_owners should be added to the list below.
"""
# TODO(Swami): Convert these methods to constants.
# Add the constants variable to the neutron-lib
return [n_const.DEVICE_OWNER_LOADBALANCER,
n_const.DEVICE_OWNER_LOADBALANCERV2]
def is_dvr_serviced(device_owner):
"""Check if the port need to be serviced by DVR
Helper function to check the device owners of the
ports in the compute and service node to make sure
if they are required for DVR or any service directly or
indirectly associated with DVR.
"""
return (device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX) or
device_owner in get_other_dvr_serviced_device_owners())
def ip_to_cidr(ip, prefix=None):
"""Convert an ip with no prefix to cidr notation
:param ip: An ipv4 or ipv6 address. Convertible to netaddr.IPNetwork.
:param prefix: Optional prefix. If None, the default 32 will be used for
ipv4 and 128 for ipv6.
"""
net = netaddr.IPNetwork(ip)
if prefix is not None:
# Can't pass ip and prefix separately. Must concatenate strings.
net = netaddr.IPNetwork(str(net.ip) + '/' + str(prefix))
return str(net)
def fixed_ip_cidrs(fixed_ips):
"""Create a list of a port's fixed IPs in cidr notation.
:param fixed_ips: A neutron port's fixed_ips dictionary
"""
return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen'))
for fixed_ip in fixed_ips]
def is_cidr_host(cidr):
"""Determines if the cidr passed in represents a single host network
:param cidr: Either an ipv4 or ipv6 cidr.
:returns: True if the cidr is /32 for ipv4 or /128 for ipv6.
:raises ValueError: raises if cidr does not contain a '/'. This disallows
plain IP addresses specifically to avoid ambiguity.
"""
if '/' not in str(cidr):
raise ValueError(_("cidr doesn't contain a '/'"))
net = netaddr.IPNetwork(cidr)
if net.version == 4:
return net.prefixlen == n_const.IPv4_BITS
return net.prefixlen == n_const.IPv6_BITS
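# Illustrative expected values for the CIDR helpers above; the addresses are
# documentation ranges chosen only for the example.
def _cidr_helper_examples():
    assert ip_to_cidr('192.0.2.10') == '192.0.2.10/32'
    assert ip_to_cidr('192.0.2.10', 24) == '192.0.2.10/24'
    assert fixed_ip_cidrs([{'ip_address': '2001:db8::1',
                            'prefixlen': 64}]) == ['2001:db8::1/64']
    assert is_cidr_host('192.0.2.10/32')
    assert not is_cidr_host('192.0.2.0/24')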
def get_ip_version(ip_or_cidr):
return netaddr.IPNetwork(ip_or_cidr).version
def ip_version_from_int(ip_version_int):
if ip_version_int == 4:
return n_const.IPv4
if ip_version_int == 6:
return n_const.IPv6
raise ValueError(_('Illegal IP version number'))
class DelayedStringRenderer(object):
"""Takes a callable and its args and calls when __str__ is called
Useful for when an argument to a logging statement is expensive to
create. This will prevent the callable from being called if it's
never converted to a string.
"""
def __init__(self, function, *args, **kwargs):
self.function = function
self.args = args
self.kwargs = kwargs
def __str__(self):
return str(self.function(*self.args, **self.kwargs))
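# Usage sketch for DelayedStringRenderer: the (hypothetical) expensive
# state dump below only runs if the debug record is actually emitted.
def _delayed_render_example(agent):
    LOG.debug("agent state: %s",
              DelayedStringRenderer(agent.dump_state))  # dump_state is illustrative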
def _hex_format(port, mask=0):
def hex_str(num):
return format(num, '#06x')
if mask > 0:
return "%s/%s" % (hex_str(port), hex_str(0xffff & ~mask))
return hex_str(port)
def _gen_rules_port_min(port_min, top_bit):
"""
Encode a port range range(port_min, (port_min | (top_bit - 1)) + 1) into
a set of bit value/masks.
"""
# Processing starts with setting up mask and top_bit variables to their
# maximum. Top_bit has the form (1000000) with '1' pointing to the register
# being processed, while mask has the form (0111111) with '1' showing
# possible range to be covered.
# With each rule generation cycle, mask and top_bit are bit shifted to the
# right. When top_bit reaches 0 it means that last register was processed.
# Let port_min be n bits long, top_bit = 1 << k, 0<=k<=n-1.
# Each cycle step checks the following conditions:
# 1). port & mask == 0
# This means that remaining bits k..1 are equal to '0' and can be
# covered by a single port/mask rule.
# If condition 1 doesn't fit, then both top_bit and mask are bit
# shifted to the right and condition 2 is checked:
# 2). port & top_bit == 0
# This means that kth port bit is equal to '0'. By setting it to '1'
# and masking other (k-1) bits all ports in range
# [P, P + 2^(k-1)-1] are guaranteed to be covered.
# Let p_k be equal to port first (n-k) bits with rest set to 0.
# Then P = p_k | top_bit.
# Correctness proof:
# The remaining range to be encoded in a cycle is calculated as follows:
# R = [port_min, port_min | mask].
# If condition 1 holds, then a rule that covers R is generated and the job
# is done.
# If condition 2 holds, then the rule emitted will cover 2^(k-1) values
# from the range. Remaining range R will shrink by 2^(k-1).
# If condition 2 doesn't hold, then even after top_bit/mask shift in next
# iteration the value of R won't change.
# Full cycle example for range [40, 64):
# port=0101000, top_bit=1000000, k=6
# * step 1, k=6, R=[40, 63]
# top_bit=1000000, mask=0111111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0100000, mask=0011111 -> condition 2 doesn't hold
# * step 2, k=5, R=[40, 63]
# top_bit=0100000, mask=0011111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0010000, mask=0001111 -> condition 2 holds -> 011xxxx or
# 0x0030/fff0
# * step 3, k=4, R=[40, 47]
# top_bit=0010000, mask=0001111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0001000, mask=0000111 -> condition 2 doesn't hold
# * step 4, k=3, R=[40, 47]
# top_bit=0001000, mask=0000111 -> condition 1 holds -> 0101xxx or
# 0x0028/fff8
# rules=[0x0030/fff0, 0x0028/fff8]
rules = []
mask = top_bit - 1
while True:
if (port_min & mask) == 0:
# greedy matched a streak of '0' in port_min
rules.append(_hex_format(port_min, mask))
break
top_bit >>= 1
mask >>= 1
if (port_min & top_bit) == 0:
# matched next '0' in port_min to substitute for '1' in resulting
# rule
rules.append(_hex_format(port_min & ~mask | top_bit, mask))
return rules
def _gen_rules_port_max(port_max, top_bit):
"""
Encode a port range range(port_max & ~(top_bit - 1), port_max + 1) into
a set of bit value/masks.
"""
# Processing starts with setting up mask and top_bit variables to their
# maximum. Top_bit has the form (1000000) with '1' pointing to the register
# being processed, while mask has the form (0111111) with '1' showing
# possible range to be covered.
# With each rule generation cycle, mask and top_bit are bit shifted to the
# right. When top_bit reaches 0 it means that last register was processed.
# Let port_max be n bits long, top_bit = 1 << k, 0<=k<=n-1.
# Each cycle step checks the following conditions:
# 1). port & mask == mask
# This means that remaining bits k..1 are equal to '1' and can be
# covered by a single port/mask rule.
# If condition 1 doesn't fit, then both top_bit and mask are bit
# shifted to the right and condition 2 is checked:
# 2). port & top_bit == top_bit
# This means that kth port bit is equal to '1'. By setting it to '0'
# and masking other (k-1) bits all ports in range
# [P, P + 2^(k-1)-1] are guaranteed to be covered.
# Let p_k be equal to port first (n-k) bits with rest set to 0.
# Then P = p_k | ~top_bit.
# Correctness proof:
# The remaining range to be encoded in a cycle is calculated as follows:
# R = [port_max & ~mask, port_max].
# If condition 1 holds, then a rule that covers R is generated and the job
# is done.
# If condition 2 holds, then the rule emitted will cover 2^(k-1) values
# from the range. Remaining range R will shrink by 2^(k-1).
# If condition 2 doesn't hold, then even after top_bit/mask shift in next
# iteration the value of R won't change.
# Full cycle example for range [64, 105]:
# port=1101001, top_bit=1000000, k=6
# * step 1, k=6, R=[64, 105]
# top_bit=1000000, mask=0111111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0100000, mask=0011111 -> condition 2 holds -> 10xxxxx or
# 0x0040/ffe0
# * step 2, k=5, R=[96, 105]
# top_bit=0100000, mask=0011111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0010000, mask=0001111 -> condition 2 doesn't hold
# * step 3, k=4, R=[96, 105]
# top_bit=0010000, mask=0001111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0001000, mask=0000111 -> condition 2 holds -> 1100xxx or
# 0x0060/fff8
# * step 4, k=3, R=[104, 105]
# top_bit=0001000, mask=0000111 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0000100, mask=0000011 -> condition 2 doesn't hold
# * step 5, k=2, R=[104, 105]
# top_bit=0000100, mask=0000011 -> condition 1 doesn't hold, shifting
# mask/top_bit
# top_bit=0000010, mask=0000001 -> condition 2 doesn't hold
# * step 6, k=1, R=[104, 105]
# top_bit=0000010, mask=0000001 -> condition 1 holds -> 1101001 or
# 0x0068
# rules=[0x0040/ffe0, 0x0060/fff8, 0x0068]
rules = []
mask = top_bit - 1
while True:
if (port_max & mask) == mask:
# greedy matched a streak of '1' in port_max
rules.append(_hex_format(port_max & ~mask, mask))
break
top_bit >>= 1
mask >>= 1
if (port_max & top_bit) == top_bit:
# matched next '1' in port_max to substitute for '0' in resulting
# rule
rules.append(_hex_format(port_max & ~mask & ~top_bit, mask))
return rules
def port_rule_masking(port_min, port_max):
"""Translate a range [port_min, port_max] into a set of bitwise matches.
Each match has the form 'port/mask'. The port and mask are 16-bit numbers
written in hexadecimal prefixed by 0x. Each 1-bit in mask requires that
the corresponding bit in port must match. Each 0-bit in mask causes the
corresponding bit to be ignored.
"""
# Let binary representation of port_min and port_max be n bits long and
# have first m bits in common, 0 <= m <= n.
# If remaining (n - m) bits of given ports define 2^(n-m) values, then
# [port_min, port_max] range is covered by a single rule.
# For example:
# n = 6
# port_min = 16 (binary 010000)
# port_max = 23 (binary 010111)
# Ports have m=3 bits in common with the remaining (n-m)=3 bits
# covering range [0, 2^3), which equals to a single 010xxx rule. The algo
# will return [0x0010/fff8].
# Else [port_min, port_max] range will be split into 2: range [port_min, T)
# and [T, port_max]. Let p_m be the common part of port_min and port_max
# with other (n-m) bits set to 0. Then T = p_m | 1 << (n-m-1).
# For example:
# n = 7
# port_min = 40 (binary 0101000)
# port_max = 105 (binary 1101001)
# Ports have m=0 bits in common, p_m=000000. Then T=1000000 and the
# initial range [40, 105] is divided into [40, 64) and [64, 105].
# Each of the ranges will be processed separately, then the generated rules
# will be merged.
# Check port_max >= port_min.
if port_max < port_min:
raise ValueError(_("'port_max' is smaller than 'port_min'"))
bitdiff = port_min ^ port_max
if bitdiff == 0:
# port_min == port_max
return [_hex_format(port_min)]
# for python3.x, bit_length could be used here
top_bit = 1
while top_bit <= bitdiff:
top_bit <<= 1
if (port_min & (top_bit - 1) == 0 and
port_max & (top_bit - 1) == top_bit - 1):
# special case, range of 2^k ports is covered
return [_hex_format(port_min, top_bit - 1)]
top_bit >>= 1
rules = []
rules.extend(_gen_rules_port_min(port_min, top_bit))
rules.extend(_gen_rules_port_max(port_max, top_bit))
return rules
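# A small self-check sketch for port_rule_masking; the expected strings
# follow _hex_format's '#06x' formatting of both port and mask.
def _port_rule_masking_examples():
    # a single port collapses to one exact match with no mask
    assert port_rule_masking(80, 80) == ['0x0050']
    # a 2^k aligned range is covered by a single rule (the 010xxx case above)
    assert port_rule_masking(16, 23) == ['0x0010/0xfff8']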
def create_object_with_dependency(creator, dep_getter, dep_creator,
dep_id_attr, dep_deleter):
"""Creates an object that binds to a dependency while handling races.
creator is a function that is expected to take the result of either
dep_getter or dep_creator.
The result of dep_getter and dep_creator must have an attribute named
dep_id_attr, which is used to determine if the dependency changed during
object creation.
dep_deleter will be called with the result of dep_creator if the creator
function fails due to a non-dependency reason or the retries are exceeded.
dep_getter should return None if the dependency does not exist.
dep_creator can raise a DBDuplicateEntry to indicate that a concurrent
create of the dependency occurred and the process will restart to get the
concurrently created one.
This function will return both the created object and the dependency it
used/created.
This function protects against all of the cases where the dependency can
be concurrently removed by catching exceptions and restarting the
process of creating the dependency if one no longer exists. It will
give up after neutron.db.api.MAX_RETRIES and raise the exception it
encounters after that.
"""
result, dependency, dep_id, made_locally = None, None, None, False
for attempts in range(1, db_api.MAX_RETRIES + 1):
# we go to max + 1 here so the exception handlers can raise their
# errors at the end
try:
dependency = dep_getter()
if not dependency:
dependency = dep_creator()
made_locally = True
dep_id = getattr(dependency, dep_id_attr)
except db_exc.DBDuplicateEntry:
# dependency was concurrently created.
with excutils.save_and_reraise_exception() as ctx:
if attempts < db_api.MAX_RETRIES:
# sleep for a random time between 0 and 1 second to
# make sure a concurrent worker doesn't retry again
# at exactly the same time
time.sleep(random.uniform(0, 1))
ctx.reraise = False
continue
try:
result = creator(dependency)
break
except Exception:
with excutils.save_and_reraise_exception() as ctx:
# check if dependency we tried to use was removed during
# object creation
if attempts < db_api.MAX_RETRIES:
dependency = dep_getter()
if not dependency or dep_id != getattr(dependency,
dep_id_attr):
ctx.reraise = False
continue
# we have exceeded retries or have encountered a non-dependency
# related failure so we try to clean up the dependency if we
# created it before re-raising
if made_locally and dependency:
try:
dep_deleter(dependency)
except Exception:
LOG.exception("Failed cleaning up dependency %s",
dep_id)
return result, dependency
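# Minimal in-memory sketch of create_object_with_dependency; the _Dep class
# and the callables below are made up for illustration (real callers pass
# DB-backed getters/creators/deleters).
def _create_object_with_dependency_example():
    class _Dep(object):
        id = 'dep-1'
    deps = []

    def dep_getter():
        return deps[0] if deps else None

    def dep_creator():
        deps.append(_Dep())
        return deps[0]

    def dep_deleter(dep):
        deps.remove(dep)

    def creator(dep):
        return {'bound_to': dep.id}

    return create_object_with_dependency(creator, dep_getter, dep_creator,
                                          'id', dep_deleter)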
def transaction_guard(f):
"""Ensures that the context passed in is not in a transaction.
Various Neutron methods modifying resources have assumptions that they will
not be called inside of a transaction because they perform operations that
expect all data to be committed to the database (e.g. ML2 postcommit calls)
and/or they have side effects on external systems.
So calling them in a transaction can lead to consistency errors on failures
since the side effect will not be reverted on a DB rollback.
If you receive this error, you must alter your code to handle the fact that
the thing you are calling can have side effects, so using transactions to
undo on failures is not possible.
"""
@functools.wraps(f)
def inner(self, context, *args, **kwargs):
# FIXME(kevinbenton): get rid of all uses of this flag
if (context.session.is_active and
getattr(context, 'GUARD_TRANSACTION', True)):
raise RuntimeError(_("Method %s cannot be called within a "
"transaction.") % f)
return f(self, context, *args, **kwargs)
return inner
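# Tiny sketch of transaction_guard in action; the fake plugin and context
# classes are made up for illustration.
def _transaction_guard_example():
    class _FakeSession(object):
        is_active = False

    class _FakeContext(object):
        session = _FakeSession()

    class _FakePlugin(object):
        @transaction_guard
        def delete_port(self, context, port_id):
            return 'deleted %s' % port_id

    # passes because the fake session is not inside a transaction
    return _FakePlugin().delete_port(_FakeContext(), 'port-1')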
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
"""
Wait until callable predicate is evaluated as True
:param predicate: Callable deciding whether waiting should continue.
Best practice is to instantiate predicate with functools.partial()
:param timeout: Timeout in seconds; how long the function should wait.
:param sleep: Polling interval for results in seconds.
:param exception: Exception instance to raise on timeout. If None is passed
(default) then WaitTimeout exception is raised.
"""
try:
with eventlet.Timeout(timeout):
while not predicate():
eventlet.sleep(sleep)
except eventlet.Timeout:
if exception is not None:
#pylint: disable=raising-bad-type
raise exception
raise WaitTimeout("Timed out after %d seconds" % timeout)
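# Usage sketch for wait_until_true, binding arguments with functools.partial
# as the docstring suggests; 'agent.is_alive' is an illustrative predicate.
def _wait_until_true_example(agent):
    wait_until_true(functools.partial(agent.is_alive), timeout=30, sleep=0.5)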
class _AuthenticBase(object):
def __init__(self, addr, **kwargs):
super(_AuthenticBase, self).__init__(addr, **kwargs)
self._initial_value = addr
def __str__(self):
if isinstance(self._initial_value, six.string_types):
return self._initial_value
return super(_AuthenticBase, self).__str__()
# NOTE(ihrachys): override deepcopy because netaddr.* classes are
# slot-based and hence would not copy _initial_value
def __deepcopy__(self, memo):
return self.__class__(self._initial_value)
class AuthenticEUI(_AuthenticBase, netaddr.EUI):
'''
This class retains the format of the MAC address string passed during
initialization.
This is useful when we want to make sure that we retain the format passed
by a user through API.
'''
class AuthenticIPNetwork(_AuthenticBase, netaddr.IPNetwork):
'''
This class retains the format of the IP network string passed during
initialization.
This is useful when we want to make sure that we retain the format passed
by a user through API.
'''
class classproperty(object):
def __init__(self, f):
self.func = f
def __get__(self, obj, owner):
return self.func(owner)
_NO_ARGS_MARKER = object()
def attach_exc_details(e, msg, args=_NO_ARGS_MARKER):
e._error_context_msg = msg
e._error_context_args = args
def extract_exc_details(e):
for attr in ('_error_context_msg', '_error_context_args'):
if not hasattr(e, attr):
return u'No details.'
details = e._error_context_msg
args = e._error_context_args
if args is _NO_ARGS_MARKER:
return details
return details % args
def import_modules_recursively(topdir):
'''Import and return all modules below the topdir directory.'''
topdir = _SEPARATOR_REGEX.sub('/', topdir)
modules = []
for root, dirs, files in os.walk(topdir):
for file_ in files:
if file_[-3:] != '.py':
continue
module = file_[:-3]
if module == '__init__':
continue
import_base = _SEPARATOR_REGEX.sub('.', root)
# NOTE(ihrachys): in Python3, or when we are not located in the
# directory containing neutron code, __file__ is absolute, so we
# should truncate it to exclude PYTHONPATH prefix
prefixlen = len(os.path.dirname(neutron.__file__))
import_base = 'neutron' + import_base[prefixlen:]
module = '.'.join([import_base, module])
if module not in sys.modules:
importlib.import_module(module)
modules.append(module)
return modules
def get_rand_name(max_length=None, prefix='test'):
"""Return a random string.
The string will start with 'prefix' and will be exactly 'max_length'
characters long. If 'max_length' is None, then exactly 8 random characters,
each hexadecimal, will be added. If 'max_length' is not greater than
len(prefix), ValueError will be raised to indicate the problem.
"""
return get_related_rand_names([prefix], max_length)[0]
def get_rand_device_name(prefix='test'):
return get_rand_name(
max_length=n_const.DEVICE_NAME_MAX_LEN, prefix=prefix)
def get_related_rand_names(prefixes, max_length=None):
"""Returns a list of the prefixes with the same random characters appended
:param prefixes: A list of prefix strings
:param max_length: The maximum length of each returned string
:returns: A list with each prefix appended with the same random characters
"""
if max_length:
length = max_length - max(len(p) for p in prefixes)
if length <= 0:
raise ValueError(
_("'max_length' must be longer than all prefixes"))
else:
length = 8
rndchrs = helpers.get_random_string(length)
return [p + rndchrs for p in prefixes]
def get_related_rand_device_names(prefixes):
return get_related_rand_names(prefixes,
max_length=n_const.DEVICE_NAME_MAX_LEN)
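# Illustrative use of the random-name helpers above: related names share the
# same random suffix, and an explicit max_length bounds the total length.
def _rand_name_examples():
    veth0, veth1 = get_related_rand_device_names(['veth0-', 'veth1-'])
    assert veth0[len('veth0-'):] == veth1[len('veth1-'):]
    return get_rand_name(max_length=12, prefix='test-')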
try:
# PY3
weak_method = weakref.WeakMethod
except AttributeError:
# PY2
import weakrefmethod
weak_method = weakrefmethod.WeakMethod
def make_weak_ref(f):
"""Make a weak reference to a function accounting for bound methods."""
return weak_method(f) if hasattr(f, '__self__') else weakref.ref(f)
def resolve_ref(ref):
"""Handles dereference of weakref."""
if isinstance(ref, weakref.ref):
ref = ref()
return ref
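# Sketch of the weak-reference helpers above: store a weak ref to a callback
# (bound method or plain function) and invoke it only if it is still alive.
# The no-argument callback is illustrative.
def _weak_ref_example(callback):
    ref = make_weak_ref(callback)
    target = resolve_ref(ref)
    if target is not None:
        target()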
|
|
# -*- coding: utf-8 -*-
#
"""
TODO.
"""
from __future__ import (print_function, division)
import logging
import numpy as np
from ..particles.allparticles import ParticleSystem
from ..lib.utils.timing import decallmethods, timings
__all__ = ['Plummer']
logger = logging.getLogger(__name__)
@decallmethods(timings)
class Plummer(object):
""" """
def __init__(self, n, eps, imf, seed=None, mfrac=0.999, softening_type=0):
self.n = n
self.eps2 = eps*eps
self.imf = imf
self.mfrac = mfrac
self.softening_type = softening_type
self.ps = ParticleSystem(n)
np.random.seed(seed)
def set_eps2(self, mass):
n = self.n
if self.softening_type == 0:
# eps2 ~ constant
eps2 = np.ones(n)
elif self.softening_type == 1:
# eps2 ~ m^2 ~ 1/n^2 if m ~ 1/n
eps2 = mass**2
elif self.softening_type == 2:
# eps2 ~ m/n ~ 1/n^2 if m ~ 1/n
eps2 = mass / n
elif self.softening_type == 3:
# eps2 ~ (m/n^2)^(2/3) ~ 1/n^2 if m ~ 1/n
eps2 = (mass / n**2)**(2.0/3)
elif self.softening_type == 4:
# eps2 ~ (1/(m*n^2))^2 ~ 1/n^2 if m ~ 1/n
eps2 = (1.0 / (mass * n**2))**2
else:
logger.critical(
"Unexpected value for softening_type: %d.",
self.softening_type)
raise ValueError(
"Unexpected value for softening_type: {}.".format(
self.softening_type))
# normalizes by the provided scale of eps2
eps2 *= self.eps2 / np.mean(eps2)
# return half of the real value in order to avoid doing this in the force loop.
return eps2 / 2
def set_pos(self, irand):
n = self.n
mfrac = self.mfrac
mrand = (irand + np.random.random(n)) * mfrac / n
radius = 1.0 / np.sqrt(np.power(mrand, -2.0 / 3.0) - 1.0)
theta = np.arccos(np.random.uniform(-1.0, 1.0, size=n))
phi = np.random.uniform(0.0, 2.0 * np.pi, size=n)
rx = radius * np.sin(theta) * np.cos(phi)
ry = radius * np.sin(theta) * np.sin(phi)
rz = radius * np.cos(theta)
return (rx, ry, rz)
def set_vel(self, pot):
count = 0
n = self.n
rnd = np.empty(n)
while count < n:
r1 = np.random.random()
r2 = np.random.random()
if (r2 < r1):
rnd[count] = r2
count += 1
velocity = np.sqrt(-2 * rnd * pot)
theta = np.arccos(np.random.uniform(-1.0, 1.0, size=n))
phi = np.random.uniform(0.0, 2.0 * np.pi, size=n)
vx = velocity * np.sin(theta) * np.cos(phi)
vy = velocity * np.sin(theta) * np.sin(phi)
vz = velocity * np.cos(theta)
return (vx, vy, vz)
def set_bodies(self):
""" """
n = self.n
ilist = np.arange(n)
# set index
self.ps.id[...] = ilist
srand = np.random.get_state()
# set mass
self.ps.mass[...] = self.imf.sample(n)
self.ps.mass /= self.ps.total_mass
# set eps2
self.ps.eps2[...] = self.set_eps2(self.ps.mass)
np.random.set_state(srand)
# set pos
pos = self.set_pos(np.random.permutation(ilist))
self.ps.rx[...] = pos[0]
self.ps.ry[...] = pos[1]
self.ps.rz[...] = pos[2]
# set phi
self.ps.set_phi(self.ps)
# set vel
vel = self.set_vel(self.ps.phi)
self.ps.vx[...] = vel[0]
self.ps.vy[...] = vel[1]
self.ps.vz[...] = vel[2]
def make_model(self):
self.set_bodies()
self.ps.com_to_origin()
self.ps.to_nbody_units()
self.ps.scale_to_virial()
def show(self, nbins=32):
from scipy import optimize
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
mass = self.imf._mtot * self.ps.mass.copy()
###################################
(hist, bins) = np.histogram(np.log10(mass), bins=nbins)
linbins = np.power(10.0, bins)
selection = np.where(hist > 0)
fitfunc = lambda k, m: k * self.imf.func(m)
errfunc = lambda k, m, y: fitfunc(k, m)[selection] - y[selection]
k0 = 1.0
k1, success = optimize.leastsq(errfunc, k0, args=(linbins[:-1], hist))
x = np.logspace(np.log10(self.imf.min_mlow),
np.log10(self.imf.max_mhigh),
num=128, base=10.0)
y = fitfunc(k1, x)
###################################
# IMF plot
fig = plt.figure(figsize=(13.5, 6))
ax1 = fig.add_subplot(1, 2, 1)
ax1.plot(bins[selection], np.log10(hist[selection]),
'bo', label='IMF sample')
ax1.plot(np.log10(x), np.log10(y), 'r--',
label='IMF distribution', linewidth=1.5)
ax1.grid(True)
ax1.set_xlabel(r'$\log_{10}(m)$', fontsize=18)
ax1.set_ylabel(r'$\log_{10}(dN/d\log_{10}(m))$', fontsize=18)
ax1.legend(loc='lower left', shadow=True,
fancybox=True, borderaxespad=0.75)
###################################
b = self.ps
n = b.n
rx = b.rx
ry = b.ry
radius = 2 * n * b.mass
color = n * b.mass
###################################
# Scatter plot
ax2 = fig.add_subplot(1, 2, 2)
# ax.set_axis_bgcolor('0.75')
ax2.scatter(rx, ry, c=color, s=radius, cmap='gist_rainbow',
alpha=0.75, label=r'$Stars$')
circle = Circle(
(0, 0), 1,
facecolor='none',
edgecolor=(1, 0.25, 0),
linewidth=1.5,
label=r'$R_{Vir}$'
)
ax2.add_patch(circle)
ax2.set_xlim(-4, +4)
ax2.set_ylim(-4, +4)
ax2.set_xlabel(r'$x$', fontsize=18)
ax2.set_ylabel(r'$y$', fontsize=18)
ax2.legend(loc='upper right', shadow=True,
fancybox=True, borderaxespad=0.75)
###################################
# Show
plt.savefig('show.png', bbox_inches="tight")
# plt.savefig('show.pdf', bbox_inches="tight")
plt.show()
plt.close()
def make_plummer(n, eps, imf, seed=None, mfrac=0.999, softening_type=0):
if n < 2:
n = 2
from tupan.ics.imf import IMF
imf = getattr(IMF, imf[0])(*imf[1:])
p = Plummer(n, eps, imf, seed=seed, mfrac=mfrac,
softening_type=softening_type)
p.make_model()
return p.ps
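# Usage sketch for make_plummer; the IMF spec tuple below is hypothetical
# (consult tupan.ics.imf.IMF for the constructors actually available):
#
#     ps = make_plummer(n=1024, eps=4.0/1024, imf=('salpeter1955', 0.5, 120.0),
#                       seed=1)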
########## end of file ##########
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
distributions = distributions_lib
sge = stochastic_gradient_estimators
st = stochastic_tensor_impl
class StochasticTensorTest(test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
sigma2 = constant_op.constant([0.1, 0.2, 0.3])
prior_default = st.StochasticTensor(
distributions.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_default.value_type, st.SampleValue))
prior_0 = st.StochasticTensor(
distributions.Normal(loc=mu, scale=sigma),
dist_value_type=st.SampleValue())
self.assertTrue(isinstance(prior_0.value_type, st.SampleValue))
with st.value_type(st.SampleValue()):
prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.SampleValue))
likelihood = st.StochasticTensor(
distributions.Normal(loc=prior, scale=sigma2))
self.assertTrue(isinstance(likelihood.value_type, st.SampleValue))
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [prior_default, prior_0, prior, likelihood])
# Also works: tf.convert_to_tensor(prior)
prior_default = array_ops.identity(prior_default)
prior_0 = array_ops.identity(prior_0)
prior = array_ops.identity(prior)
likelihood = array_ops.identity(likelihood)
# Mostly a smoke test for now...
prior_0_val, prior_val, prior_default_val, _ = sess.run(
[prior_0, prior, prior_default, likelihood])
self.assertEqual(prior_0_val.shape, prior_val.shape)
self.assertEqual(prior_default_val.shape, prior_val.shape)
# These are different random samples from the same distribution,
# so the values should differ.
self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
self.assertGreater(np.abs(prior_default_val - prior_val).sum(), 1e-6)
def testMeanValue(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.MeanValue))
prior_mean = prior.mean()
prior_value = prior.value()
prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
self.assertAllEqual(prior_mean_val, mu)
self.assertAllEqual(prior_mean_val, prior_value_val)
def testSampleValueScalar(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = constant_op.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with st.value_type(st.SampleValue()):
prior_single = st.StochasticTensor(
distributions.Normal(loc=mu, scale=sigma))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (2, 3))
with st.value_type(st.SampleValue(1)):
prior_single = st.StochasticTensor(
distributions.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_single.value_type, st.SampleValue))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (1, 2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (1, 2, 3))
with st.value_type(st.SampleValue(2)):
prior_double = st.StochasticTensor(
distributions.Normal(loc=mu, scale=sigma))
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (2, 2, 3))
def testDistributionEntropy(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
entropy = prior.entropy()
deep_entropy = prior.distribution.entropy()
expected_deep_entropy = distributions.Normal(
loc=mu, scale=sigma).entropy()
entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
self.assertAllEqual(entropies[2], entropies[0])
self.assertAllEqual(entropies[1], entropies[0])
def testSurrogateLoss(self):
with self.test_session():
mu = [[3.0, -4.0, 5.0], [6.0, -7.0, 8.0]]
sigma = constant_op.constant(1.0)
# With default
with st.value_type(st.MeanValue(stop_gradient=True)):
dt = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose(
dt.distribution.log_prob(mu).eval() * 2.0, loss.eval())
# With passed-in loss_fn.
dt = st.StochasticTensor(
distributions.Normal(loc=mu, scale=sigma),
dist_value_type=st.MeanValue(stop_gradient=True),
loss_fn=sge.get_score_function_with_constant_baseline(
baseline=constant_op.constant(8.0)))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose((dt.distribution.log_prob(mu) * (2.0 - 8.0)).eval(),
loss.eval())
class ValueTypeTest(test.TestCase):
def testValueType(self):
type_mean = st.MeanValue()
type_reshape = st.SampleValue()
type_full = st.SampleValue()
with st.value_type(type_mean):
self.assertEqual(st.get_current_value_type(), type_mean)
with st.value_type(type_reshape):
self.assertEqual(st.get_current_value_type(), type_reshape)
with st.value_type(type_full):
self.assertEqual(st.get_current_value_type(), type_full)
self.assertEqual(st.get_current_value_type(), type_mean)
with self.assertRaisesRegexp(ValueError, "No value type currently set"):
st.get_current_value_type()
class ObservedStochasticTensorTest(test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
obs = array_ops.zeros((2, 3))
z = st.ObservedStochasticTensor(
distributions.Normal(loc=mu, scale=sigma), value=obs)
[obs_val, z_val] = sess.run([obs, z.value()])
self.assertAllEqual(obs_val, z_val)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [z])
def testConstructionWithUnknownShapes(self):
mu = array_ops.placeholder(dtypes.float32)
sigma = array_ops.placeholder(dtypes.float32)
obs = array_ops.placeholder(dtypes.float32)
z = st.ObservedStochasticTensor(
distributions.Normal(loc=mu, scale=sigma), value=obs)
mu2 = array_ops.placeholder(dtypes.float32, shape=[None])
sigma2 = array_ops.placeholder(dtypes.float32, shape=[None])
obs2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
z2 = st.ObservedStochasticTensor(
distributions.Normal(loc=mu2, scale=sigma2), value=obs2)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [z, z2])
def testConstructionErrors(self):
mu = [0., 0.]
sigma = [1., 1.]
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
distributions.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((3,)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
distributions.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((3, 1)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
distributions.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((1, 2), dtype=dtypes.int32))
if __name__ == "__main__":
test.main()
|
|
'''tzinfo timezone information for Mexico/General.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class General(DstTzInfo):
'''Mexico/General timezone definition. See datetime.tzinfo for details'''
zone = 'Mexico/General'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1922,1,1,7,0,0),
d(1927,6,11,6,0,0),
d(1930,11,15,6,0,0),
d(1931,5,2,6,0,0),
d(1931,10,1,6,0,0),
d(1932,4,1,7,0,0),
d(1939,2,5,6,0,0),
d(1939,6,25,5,0,0),
d(1940,12,9,6,0,0),
d(1941,4,1,5,0,0),
d(1943,12,16,6,0,0),
d(1944,5,1,5,0,0),
d(1950,2,12,6,0,0),
d(1950,7,30,5,0,0),
d(1996,4,7,8,0,0),
d(1996,10,27,7,0,0),
d(1997,4,6,8,0,0),
d(1997,10,26,7,0,0),
d(1998,4,5,8,0,0),
d(1998,10,25,7,0,0),
d(1999,4,4,8,0,0),
d(1999,10,31,7,0,0),
d(2000,4,2,8,0,0),
d(2000,10,29,7,0,0),
d(2001,5,6,8,0,0),
d(2001,9,30,7,0,0),
d(2002,4,7,8,0,0),
d(2002,10,27,7,0,0),
d(2003,4,6,8,0,0),
d(2003,10,26,7,0,0),
d(2004,4,4,8,0,0),
d(2004,10,31,7,0,0),
d(2005,4,3,8,0,0),
d(2005,10,30,7,0,0),
d(2006,4,2,8,0,0),
d(2006,10,29,7,0,0),
d(2007,4,1,8,0,0),
d(2007,10,28,7,0,0),
d(2008,4,6,8,0,0),
d(2008,10,26,7,0,0),
d(2009,4,5,8,0,0),
d(2009,10,25,7,0,0),
d(2010,4,4,8,0,0),
d(2010,10,31,7,0,0),
d(2011,4,3,8,0,0),
d(2011,10,30,7,0,0),
d(2012,4,1,8,0,0),
d(2012,10,28,7,0,0),
d(2013,4,7,8,0,0),
d(2013,10,27,7,0,0),
d(2014,4,6,8,0,0),
d(2014,10,26,7,0,0),
d(2015,4,5,8,0,0),
d(2015,10,25,7,0,0),
d(2016,4,3,8,0,0),
d(2016,10,30,7,0,0),
d(2017,4,2,8,0,0),
d(2017,10,29,7,0,0),
d(2018,4,1,8,0,0),
d(2018,10,28,7,0,0),
d(2019,4,7,8,0,0),
d(2019,10,27,7,0,0),
d(2020,4,5,8,0,0),
d(2020,10,25,7,0,0),
d(2021,4,4,8,0,0),
d(2021,10,31,7,0,0),
d(2022,4,3,8,0,0),
d(2022,10,30,7,0,0),
d(2023,4,2,8,0,0),
d(2023,10,29,7,0,0),
d(2024,4,7,8,0,0),
d(2024,10,27,7,0,0),
d(2025,4,6,8,0,0),
d(2025,10,26,7,0,0),
d(2026,4,5,8,0,0),
d(2026,10,25,7,0,0),
d(2027,4,4,8,0,0),
d(2027,10,31,7,0,0),
d(2028,4,2,8,0,0),
d(2028,10,29,7,0,0),
d(2029,4,1,8,0,0),
d(2029,10,28,7,0,0),
d(2030,4,7,8,0,0),
d(2030,10,27,7,0,0),
d(2031,4,6,8,0,0),
d(2031,10,26,7,0,0),
d(2032,4,4,8,0,0),
d(2032,10,31,7,0,0),
d(2033,4,3,8,0,0),
d(2033,10,30,7,0,0),
d(2034,4,2,8,0,0),
d(2034,10,29,7,0,0),
d(2035,4,1,8,0,0),
d(2035,10,28,7,0,0),
d(2036,4,6,8,0,0),
d(2036,10,26,7,0,0),
d(2037,4,5,8,0,0),
d(2037,10,25,7,0,0),
]
_transition_info = [
i(-23820,0,'LMT'),
i(-25200,0,'MST'),
i(-21600,0,'CST'),
i(-25200,0,'MST'),
i(-21600,0,'CST'),
i(-25200,0,'MST'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
]
General = General()
|
|
from __future__ import annotations
import textwrap
from pandas._libs import (
NaT,
lib,
)
from pandas.errors import InvalidIndexError
from pandas.core.indexes.base import (
Index,
_new_Index,
ensure_index,
ensure_index_from_sequences,
get_unanimous_names,
)
from pandas.core.indexes.category import CategoricalIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
_sort_msg = textwrap.dedent(
"""\
Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silence the warning, pass 'sort=True'.
"""
)
__all__ = [
"Index",
"MultiIndex",
"NumericIndex",
"Float64Index",
"Int64Index",
"CategoricalIndex",
"IntervalIndex",
"RangeIndex",
"UInt64Index",
"InvalidIndexError",
"TimedeltaIndex",
"PeriodIndex",
"DatetimeIndex",
"_new_Index",
"NaT",
"ensure_index",
"ensure_index_from_sequences",
"get_objs_combined_axis",
"union_indexes",
"get_unanimous_names",
"all_indexes_same",
]
def get_objs_combined_axis(
objs, intersect: bool = False, axis=0, sort: bool = True, copy: bool = False
) -> Index:
"""
Extract combined index: return intersection or union (depending on the
value of "intersect") of indexes on given axis, or None if all objects
lack indexes (e.g. they are numpy arrays).
Parameters
----------
objs : list
Series or DataFrame objects, may be mix of the two.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
axis : {0 or 'index', 1 or 'outer'}, default 0
The axis to extract indexes from.
sort : bool, default True
Whether the result index should come out sorted or not.
copy : bool, default False
If True, return a copy of the combined index.
Returns
-------
Index
"""
obs_idxes = [obj._get_axis(axis) for obj in objs]
return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)
def _get_distinct_objs(objs: list[Index]) -> list[Index]:
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids: set[int] = set()
res = []
for obj in objs:
if id(obj) not in ids:
ids.add(id(obj))
res.append(obj)
return res
def _get_combined_index(
indexes: list[Index],
intersect: bool = False,
sort: bool = False,
copy: bool = False,
) -> Index:
"""
Return the union or intersection of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
copy : bool, default False
If True, return a copy of the combined index.
Returns
-------
Index
"""
# TODO: handle index names!
indexes = _get_distinct_objs(indexes)
if len(indexes) == 0:
index = Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
else:
index = union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_values()
except TypeError:
pass
# GH 29879
if copy:
index = index.copy()
return index
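# Illustrative use of the internal helper above (not public pandas API): the
# same list of indexes can yield either the union or the intersection.
def _get_combined_index_example():
    left = Index([1, 2, 3])
    right = Index([2, 3, 4])
    union = _get_combined_index([left, right])
    overlap = _get_combined_index([left, right], intersect=True)
    return union, overlap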
def union_indexes(indexes, sort: bool = True) -> Index:
"""
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
if len(indexes) == 0:
raise AssertionError("Must have at least 1 Index to union")
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds) -> Index:
"""
Convert indexes to lists and concatenate them, removing duplicates.
The final dtype is inferred.
Parameters
----------
inds : list of Index or list objects
Returns
-------
Index
"""
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
if kind == "special":
result = indexes[0]
if hasattr(result, "union_many"):
# DatetimeIndex
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == "array":
index = indexes[0]
if not all(index.equals(other) for other in indexes[1:]):
index = _unique_indices(indexes)
name = get_unanimous_names(*indexes)[0]
if name != index.name:
index = index.rename(name)
return index
else: # kind='list'
return _unique_indices(indexes)
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [
Index(list(x)) if not isinstance(x, Index) else x for x in indexes
]
kinds.remove(list)
else:
return indexes, "list"
if len(kinds) > 1 or Index not in kinds:
return indexes, "special"
else:
return indexes, "array"
def all_indexes_same(indexes) -> bool:
"""
Determine if all indexes contain the same elements.
Parameters
----------
indexes : iterable of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
"""
itr = iter(indexes)
first = next(itr)
return all(first.equals(index) for index in itr)
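# Small sketch exercising union_indexes/all_indexes_same above; the string
# labels are arbitrary example values.
def _all_indexes_same_example():
    a = Index(["x", "y"])
    b = Index(["x", "y"])
    c = Index(["x", "z"])
    assert all_indexes_same([a, b])
    assert not all_indexes_same([a, c])
    return union_indexes([a, c], sort=False)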
|
|
#!/usr/bin/env python
import eventlet
from eventlet.green import urllib, urllib2
from stalkerutils.stalkerutils import get_basic_auth
smtplib = eventlet.import_patched('smtplib')
try:
import simplejson as json
except ImportError:
import json
class PagerDuty(object):
"""Pagerduty Notifications"""
def __init__(self, conf, logger, redis_client):
self.conf = conf
self.logger = logger
self.rc = redis_client
self.service_keys = {}
for e in conf.keys():
if (e.startswith("pagerduty_") and e.endswith("_id")):
pagerduty_id = int(conf[e])
pagerduty_key = conf[e.replace("_id", "_key")]
self.service_keys[pagerduty_id] = pagerduty_key
if len(self.service_keys) < 1:
raise Exception('No pagerduty service keys found in conf')
self.url = conf.get('pagerduty_url', 'https://events.pagerduty.com/generic/2010-04-15/create_event.json')
self.prefix = conf.get('pagerduty_incident_key_prefix', "")
def _resolve(self, check, incident_key, priority):
headers = {'Content-Type': 'application/json'}
data = json.dumps({'service_key': self.service_keys[priority],
'incident_key': incident_key,
'event_type': 'resolve',
'description': '%s on %s is UP' % (check['check'],
check[
'hostname']),
'details': check})
try:
req = urllib2.Request(self.url, data, headers)
response = urllib2.urlopen(req, timeout=10)
result = json.loads(response.read())
response.close()
if 'status' in result:
if result['status'] == 'success':
self.logger.info('Resolved pagerduty event: %s' % result)
return True
else:
self.logger.info(
'Failed to resolve pagerduty event: %s' % result)
return False
else:
self.logger.info(
'Failed to resolve pagerduty event: %s' % result)
return False
except Exception:
self.logger.exception('Error resolving pagerduty event.')
return False
def _trigger(self, check, incident_key, priority):
headers = {'Content-Type': 'application/json'}
data = json.dumps({'service_key': self.service_keys[priority],
'incident_key': incident_key,
'event_type': 'trigger',
'description': '%s on %s is DOWN' %
(check['check'], check['hostname']),
'details': check})
try:
req = urllib2.Request(self.url, data, headers)
response = urllib2.urlopen(req, timeout=10)
result = json.loads(response.read())
response.close()
if 'status' in result:
if result['status'] == 'success':
self.logger.info('Triggered pagerduty event: %s' % result)
return True
else:
self.logger.info(
'Failed to trigger pagerduty event: %s' % result)
return False
else:
self.logger.info(
'Failed to trigger pagerduty event: %s' % result)
return False
except Exception:
self.logger.exception('Error triggering pagerduty event.')
return False
def clear(self, check):
"""Send clear"""
priority = check.get('priority', 1)
if priority == 0:
self.logger.info('Alert is priority 0. Skipping notification.')
return
incident_key = "%s%s:%s" % (self.prefix, check['hostname'],
check['check'])
check['_id'] = str(check['_id'])
ok = self._resolve(check, incident_key, priority)
if not ok:
# TODO: cleanup
pass
def fail(self, check):
"""Send failure if not already notified"""
priority = check.get('priority', 1)
if priority == 0:
self.logger.info('Alert is priority 0. Skipping notification.')
return
incident_key = "%s%s:%s" % (self.prefix, check['hostname'],
check['check'])
check['_id'] = str(check['_id'])
ok = self._trigger(check, incident_key, priority)
if not ok:
# TODO: do backup notifications
pass
class GenericHTTP(object):
"""Generic HTTP callback Notifications, following the pagerduty format"""
def __init__(self, conf, logger, redis_client):
self.conf = conf
self.logger = logger
self.rc = redis_client
standard_service_key = conf.get('http_callback_service_key')
crit_service_key = standard_service_key
self.service_keys = {1: standard_service_key, 2: crit_service_key}
self.url = conf.get('http_callback_url', 'http://localhost/')
self.prefix = conf.get('http_callback_incident_key_prefix', "")
def _resolve(self, check, incident_key, priority):
headers = {'Content-Type': 'application/json'}
data = json.dumps({'service_key': self.service_keys[priority],
'incident_key': incident_key,
'event_type': 'resolve',
'description': '%s on %s is UP' % (check['check'],
check[
'hostname']),
'details': check})
try:
req = urllib2.Request(self.url, data, headers)
response = urllib2.urlopen(req, timeout=10)
result = json.loads(response.read())
response.close()
if 'status' in result:
if result['status'] == 'success':
self.logger.info('Resolved http event: %s' % result)
return True
else:
self.logger.info(
'Failed to resolve http event: %s' % result)
return False
else:
self.logger.info(
'Failed to resolve http event: %s' % result)
return False
except Exception:
self.logger.exception('Error resolving http event.')
return False
def _trigger(self, check, incident_key, priority):
headers = {'Content-Type': 'application/json'}
data = json.dumps({'service_key': self.service_keys[priority],
'incident_key': incident_key,
'event_type': 'trigger',
'description': '%s on %s is DOWN' %
(check['check'], check['hostname']),
'details': check})
try:
req = urllib2.Request(self.url, data, headers)
response = urllib2.urlopen(req, timeout=10)
result = json.loads(response.read())
response.close()
if 'status' in result:
if result['status'] == 'success':
self.logger.info('Triggered http event: %s' % result)
return True
else:
self.logger.info(
'Failed to trigger http event: %s' % result)
return False
else:
self.logger.info(
'Failed to trigger http event: %s' % result)
return False
except Exception:
self.logger.exception('Error triggering http event.')
return False
def clear(self, check):
"""Send clear"""
priority = check.get('priority', 1)
if priority == 0:
self.logger.info('Alert is priority 0. Skipping notification.')
return
incident_key = "%s%s:%s" % (self.prefix, check['hostname'],
check['check'])
check['_id'] = str(check['_id'])
ok = self._resolve(check, incident_key, priority)
if not ok:
# TODO: cleanup
pass
def fail(self, check):
"""Send failure if not already notified"""
priority = check.get('priority', 1)
if priority == 0:
self.logger.info('Alert is priority 0. Skipping notification.')
return
incident_key = "%s%s:%s" % (self.prefix, check['hostname'],
check['check'])
check['_id'] = str(check['_id'])
ok = self._trigger(check, incident_key, priority)
if not ok:
# TODO: do backup notifications
pass
class Mailgun(object):
"""Mailgun Notifications"""
def __init__(self, conf, logger, redis_client):
self.conf = conf
self.logger = logger
self.rc = redis_client
self.domain = conf.get('mailgun_domain')
if not self.domain:
raise Exception('No mailgun domain in conf.')
self.api_user = 'api'
self.api_key = conf.get('mailgun_api_key')
if not self.api_key:
raise Exception('No mailgun api key in conf.')
self.url = 'https://api.mailgun.net/v2/%s/messages' % self.domain
self.recipients = conf.get('mailgun_recipients')
self.from_addr = conf.get('mailgun_from_addr')
if not self.recipients:
raise Exception('No mailgun recipients in conf.')
self.basic_auth_creds = get_basic_auth(self.api_user, self.api_key)
def _send_email(self, check):
check_name = check['check']
hostname = check['hostname']
if check['status'] is True:
status = 'UP'
else:
status = 'DOWN'
subject = "[stalker] %s on %s is %s" % (check_name, hostname, status)
data = {"from": "Stalker <%s>" % self.from_addr,
"to": self.recipients,
"subject": subject,
"text": "%s" % check}
headers = {
'Authorization': 'Basic %s' % self.basic_auth_creds,
'Content-Type': 'application/x-www-form-urlencoded'
}
try:
post_data = urllib.urlencode(data)
req = urllib2.Request(self.url, post_data, headers)
response = urllib2.urlopen(req)
result = response.read()
response.close()
self.logger.info('Mailgun: %s' % result)
return True
except Exception:
self.logger.exception('Mailgun notification error.')
return False
def clear(self, check):
"""Send clear"""
# TODO: better clear notifications
incident_key = '%s:%s' % (check['hostname'], check['check'])
ok = self._send_email(check)
self.logger.info('Sent mailgun clear for %s' % incident_key)
if not ok:
# TODO: do backup notifications
pass
def fail(self, check):
"""Send failure if not already notified"""
incident_key = '%s:%s' % (check['hostname'], check['check'])
ok = self._send_email(check)
if ok:
self.logger.info('Sent mailgun alert for %s' % incident_key)
else:
# TODO: do backup notifications
pass
class EmailNotify(object):
"""Email (smtplib) based Notifications"""
def __init__(self, conf, logger, redis_client):
self.conf = conf
self.logger = logger
self.rc = redis_client
self.smtp_host = conf.get('smtplib_host')
if not self.smtp_host:
raise Exception('No smtplib_host in conf.')
self.smtp_port = int(conf.get('smtplib_port', '25'))
self.from_addr = conf.get('smtplib_from_addr')
if not self.from_addr:
raise Exception('No smtplib_from_addr in config.')
self.recipients = [x.strip() for x in conf.get(
'smtplib_recipients').split(',')]
if not self.recipients:
raise Exception('No smtplib recipients in conf.')
def _send_email(self, check):
check_name = check['check']
hostname = check['hostname']
if check['status'] is True:
status = 'UP'
else:
status = 'DOWN'
subject = "[stalker] %s on %s is %s" % (check_name, hostname, status)
message = """From: %s
To: %s
Subject: %s
%s
""" % (self.from_addr, self.recipients, subject, check)
try:
conn = smtplib.SMTP(self.smtp_host, self.smtp_port)
conn.ehlo()
conn.sendmail(self.from_addr, self.recipients, message)
conn.close()
self.logger.info('Email sent for: %s' % check)
return True
except Exception:
self.logger.exception('Email notification error.')
return False
def clear(self, check):
"""Send clear"""
# TODO: better clear notifications
incident_key = '%s:%s' % (check['hostname'], check['check'])
ok = self._send_email(check)
self.logger.info('Sent email clear for %s' % incident_key)
if not ok:
# TODO: do backup notifications
pass
def fail(self, check):
"""Send failure if not already notified"""
incident_key = '%s:%s' % (check['hostname'], check['check'])
ok = self._send_email(check)
if ok:
self.logger.info('Sent email alert for %s' % incident_key)
else:
# TODO: do backup notifications
pass
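# --- Illustrative usage sketch (not part of the original module) ---
# The notification backends above share a duck-typed interface:
# __init__(conf, logger, redis_client) plus fail(check) / clear(check), where
# `check` is a dict carrying at least 'hostname', 'check', '_id', 'status' and
# an optional integer 'priority'. A minimal sketch, assuming a plain conf dict
# and the standard logging module:
#
#     import logging
#     conf = {'smtplib_host': 'localhost',
#             'smtplib_from_addr': 'stalker@example.com',
#             'smtplib_recipients': 'ops@example.com'}
#     notifier = EmailNotify(conf, logging.getLogger('stalker'), redis_client=None)
#     check = {'hostname': 'web01', 'check': 'http', '_id': 'abc123',
#              'status': False, 'priority': 1}
#     notifier.fail(check)   # sends the DOWN email
#     check['status'] = True
#     notifier.clear(check)  # sends the UP email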
|
|
import hashlib
import json
import locale
import os
import re
import time
import urllib
from collections import OrderedDict
from distutils.version import LooseVersion
from PIL import Image  # assumed Pillow dependency; used by configure()/configureVideo()
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
from .Utils import *
from .http import HttpInterface, UserAgent
from .http.Response import *
from .InstagramException import InstagramException
from .Constants import Constants
from .SignatureUtils import SignatureUtils
locale.setlocale(locale.LC_NUMERIC, '')
class Instagram:
def __init__(self, username, password, debug=False, IGDataPath=None, truncatedDebug=False):
"""
Default class constructor.
:type username: str
:param username: Your Instagram username.
:type password: str
:param password: Your Instagram password.
:param debug: Debug on or off, False by default.
:param IGDataPath: Default folder to store data, you can change it.
:param truncatedDebug: Truncate long debug output, False by default.
"""
self.username = None  # Instagram username
self.password = None  # Instagram password
self.debug = None  # Debug flag
self.truncatedDebug = None
self.uuid = None  # UUID
self.device_id = None  # Device ID
self.username_id = None  # Username ID
self.token = None  # _csrftoken
self.isLoggedIn = False  # Session status
self.rank_token = None  # Rank token
self.IGDataPath = None  # Data storage path
self.customPath = False
self.http = None
self.settings = None
self.proxy = None  # Full proxy string
self.proxyHost = None  # Proxy host and port
self.proxyAuth = None  # Proxy user and pass
self.debug = debug
self.truncatedDebug = truncatedDebug
self.device_id = SignatureUtils.generateDeviceId(hashlib.md5((username + password).encode("utf-8")))
if IGDataPath is not None:
self.IGDataPath = IGDataPath
self.customPath = True
else:
self.IGDataPath = os.path.join(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data'),
username,
''
)
if not os.path.isdir(self.IGDataPath):
os.mkdir(self.IGDataPath, 0o777)
self.checkSettings(username)
self.http = HttpInterface(self)
self.setUser(username, password)
def setUser(self, username, password):
"""
Set the user. Manage multiple accounts.
:type username: str
:param username: Your Instagram username.
:type password: str
:param password: Your Instagram password.
"""
self.username = username
self.password = password
self.checkSettings(username)
self.uuid = SignatureUtils.generateUUID(True)
if os.path.isfile(self.IGDataPath + self.username + '-cookies.dat') and \
(self.settings.get('username_id') is not None) and \
(self.settings.get('token') is not None):
self.isLoggedIn = True
self.username_id = self.settings.get('username_id')
self.rank_token = self.username_id + '_' + self.uuid
self.token = self.settings.get('token')
else:
self.isLoggedIn = False
def checkSettings(self, username):
if not self.customPath:
self.IGDataPath = os.path.join(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data'),
username,
''
)
if not os.path.isdir(self.IGDataPath): os.mkdir(self.IGDataPath, 0o777)
self.settings = Settings(
os.path.join(self.IGDataPath, 'settings-' + username + '.dat')
)
if self.settings.get('version') is None:
self.settings.set('version', Constants.VERSION)
if (self.settings.get('user_agent') is None) or (
LooseVersion(self.settings.get('version')) < LooseVersion(Constants.VERSION)):
userAgent = UserAgent(self)
ua = userAgent.buildUserAgent()
self.settings.set('version', Constants.VERSION)
self.settings.set('user_agent', ua)
def setProxy(self, proxy, port=None, username=None, password=None):
"""
Set the proxy.
:type proxy: str
:param proxy: Full proxy string. Ex: user:pass@192.168.0.0:8080
Use $proxy = "" to clear proxy
:type port: int
:param port: Port of proxy
:type username: str
:param username: Username for proxy
:type password: str
:param password: Password for proxy
:raises: InstagramException
"""
self.proxy = proxy
if proxy == '':
return
proxy = parse_url(proxy)
if port and isinstance(port, int):
proxy['port'] = int(port)
if username and password:
proxy['user'] = username
proxy['pass'] = password
if proxy['host'] and proxy['port'] and isinstance(proxy['port'], int):
self.proxyHost = proxy['host'] + ':' + str(proxy['port'])
else:
raise InstagramException('Proxy host error. Please check ip address and port of proxy.')
if proxy['user'] and proxy['pass']:
self.proxyAuth = proxy['user'] + ':' + proxy['pass']
def login(self, force=False):
"""
Login to Instagram.
:type force: bool
:param force: Force login to Instagram, this will create a new session
:return: Login data
:rtype List:
"""
if (not self.isLoggedIn) or force:
self.syncFeatures(True)
fetch = self.http.request(
'si/fetch_headers/?challenge_type=signup&guid=' + SignatureUtils.generateUUID(False), None, True)
header = fetch[0]
response = ChallengeResponse(fetch[1])
if not header or not response.isOk():
raise InstagramException("Couldn't get challenge, check your connection")
# return response #FIXME unreachable code
match = re.search(r'^Set-Cookie: csrftoken=([^;]+)', fetch[0], re.MULTILINE)
if match:
self.token = match.group(1)
else:
raise InstagramException('Missing csrftoken')
data = OrderedDict([
('username', self.username),
('guid', self.uuid),
('device_id', self.device_id),
('password', self.password),
('login_attempt_count', 0)
])
login = self.http.request('accounts/login/', SignatureUtils.generateSignature(json.dumps(data)), True)
response = LoginResponse(login[1])
if not response.isOk(): raise InstagramException(response.getMessage())
self.isLoggedIn = True
self.username_id = response.getUsernameId()
self.settings.set('username_id', self.username_id)
self.rank_token = self.username_id + '_' + self.uuid
match = re.search(r'^Set-Cookie: csrftoken=([^;]+)', login[0], re.MULTILINE)
if match: self.token = match.group(1)
self.settings.set('token', self.token)
self.syncFeatures()
self.autoCompleteUserList()
self.timelineFeed()
self.getRankedRecipients()
self.getRecentRecipients()
self.megaphoneLog()
self.getv2Inbox()
self.getRecentActivity()
self.getReelsTrayFeed()
self.explore()
return response
check = self.timelineFeed()
if check.getMessage() == 'login_required':
self.login(True)
self.autoCompleteUserList()
self.getReelsTrayFeed()
self.getRankedRecipients()
# push register
self.getRecentRecipients()
# push register
self.megaphoneLog()
self.getv2Inbox()
self.getRecentActivity()
self.explore()
def syncFeatures(self, prelogin=False):
if prelogin:
data = json.dumps(
OrderedDict([
('id', SignatureUtils.generateUUID(True)),
('experiments', Constants.LOGIN_EXPERIMENTS)
])
)
return SyncResponse(self.http.request('qe/sync/', SignatureUtils.generateSignature(data), True)[1])
else:
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('id', self.username_id),
('experiments', Constants.EXPERIMENTS)
])
)
return SyncResponse(self.http.request('qe/sync/', SignatureUtils.generateSignature(data))[1])
def autoCompleteUserList(self):
return autoCompleteUserListResponse(self.http.request('friendships/autocomplete_user_list/?version=2')[1])
def pushRegister(self, gcmToken):
deviceToken = json.dumps(
OrderedDict([
('k', gcmToken),
('v', 0),
('t', 'fbns-b64')
])
)
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('guid', self.uuid),
('phone_id', SignatureUtils.generateUUID(True)),
('device_type', 'android_mqtt'),
('device_token', deviceToken),
('is_main_push_channel', True),
('_csrftoken', self.token),
('users', self.username_id)
])
)
return self.http.request(
'push/register/?platform=10&device_type=android_mqtt',
SignatureUtils.generateSignature(data)
)[1]
def timelineFeed(self):
return TimelineFeedResponse(self.http.request('feed/timeline/')[1])
def megaphoneLog(self):
data = OrderedDict([
('type', 'feed_aysf'),
('action', 'seen'),
('reason', ''),
('_uuid', self.uuid),
('device_id', self.device_id),
('_csrftoken', self.token),
('uuid', hashlib.md5(str(int(time.time())).encode("utf-8")).hexdigest())
])
return MegaphoneLogResponse(self.http.request('megaphone/log/', compat_urllib_parse.urlencode(data))[1])
def getPendingInbox(self):
"""
Pending Inbox
:rtype: object
:return: Pending Inbox Data
"""
pendingInbox = PendingInboxResponse(self.http.request('direct_v2/pending_inbox/?')[1])
if not pendingInbox.isOk():
raise InstagramException(pendingInbox.getMessage() + "\n")
# return FIXME unreachable code
return pendingInbox
def getRankedRecipients(self):
"""
Ranked recipients.
:rtype:list
:return: Ranked recipients Data
"""
ranked_recipients = RankedRecipientsResponse(
self.http.request('direct_v2/ranked_recipients/?show_threads=true')[1]
)
if not ranked_recipients.isOk():
raise InstagramException(ranked_recipients.getMessage() + "\n")
# return todo unreachable code
return ranked_recipients
def getRecentRecipients(self):
"""
Recent recipients.
:rtype: list
:return: Recent recipients data
"""
recent_recipients = RecentRecipientsResponse(self.http.request('direct_share/recent_recipients/')[1])
if not recent_recipients.isOk():
raise InstagramException(recent_recipients.getMessage() + "\n")
# return todo unreachable code
return recent_recipients
def explore(self):
"""
Explore Tab
:rtype: object
:return: Explore data
"""
explore = ExploreResponse(self.http.request('discover/explore/')[1])
if not explore.isOk():
raise InstagramException(explore.getMessage() + "\n")
# return todo unreachable code
return explore
def expose(self):
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('id', self.username_id),
('_csrftoken', self.token),
('experiment', 'ig_android_profile_contextual_feed')
])
)
return ExposeResponse(self.http.request('qe/expose/', SignatureUtils.generateSignature(data))[1])
def logout(self):
"""
Logout of Instagram.
:rtype: bool
:return: Returns true if logged out correctly
"""
logout = LogoutResponse(self.http.request('accounts/logout/')[1])
if logout.isOk():
return True
else:
return False
def uploadPhoto(self, photo, caption=None, upload_id=None, customPreview=None, location=None, filter_=None):
"""
Upload photo to Instagram.
:type photo: str
:param photo: Path to your photo
:type caption: str
:param caption: Caption to be included in your photo.
:rtype: object
:return: Upload data
"""
return self.http.uploadPhoto(photo, caption, upload_id, customPreview, location, filter_)
def uploadPhotoStory(self, photo, caption=None, upload_id=None, customPreview=None):
return self.http.uploadPhoto(photo, caption, upload_id, customPreview, None, None, True)
def uploadVideo(self, video, caption=None, customPreview=None):
"""
Upload video to Instagram.
:type video: str
:param video: Path to your video
:type caption: str
:param caption: Caption to be included in your video.
:rtype: object
:return: Upload data
"""
return self.http.uploadVideo(video, caption, customPreview)
def direct_share(self, media_id, recipients, text=None):
self.http.direct_share(media_id, recipients, text)
def direct_message(self, recipients, text):
"""
Send direct message to user by inbox.
:type recipients: list|int
:param recipients: Users id
:type text: str
:param text: Text message
:return: void
"""
self.http.direct_message(recipients, text)
def directThread(self, threadId):
"""
Direct Thread Data
:type threadId: str
:param threadId: Thread Id
:rtype: object
:return: Direct Thread Data
"""
directThread = self.http.request("direct_v2/threads/" + str(threadId) + "/?")[1]
if directThread['status'] != 'ok':
raise InstagramException(directThread['message'] + "\n")
# return Fixme unreachable code
return directThread
def directThreadAction(self, threadId, threadAction):
"""
Direct Thread Action
:type threadId: str
:param threadId: Thread Id
:type threadAction: str
:param threadAction: Thread Action 'approve' OR 'decline' OR 'block'
:rtype: object
:return: Direct Thread Action Data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token)
])
)
return self.http.request(
"direct_v2/threads/" + str(threadId) + "/" + str(threadAction) + "/",
SignatureUtils.generateSignature(data)
)[1]
def configureVideo(self, upload_id, video, caption='', customPreview=None):
self.uploadPhoto(video, caption, upload_id, customPreview)
size = Image.open(video).size[0]
post = json.dumps(
OrderedDict([
('upload_id', upload_id),
('source_type', 3),
('poster_frame_index', 0),
('length', 0.00),
('audio_muted', False),
('filter_type', '0'),
('video_result', 'deprecated'),
('clips', OrderedDict([
('length', Utils.getSeconds(video)),
('source_type', '3'),
('camera_position', 'back')
])),
('extra', OrderedDict([
('source_width', 960),
('source_height', 1280)
])),
('device', OrderedDict([
('manufacturer', self.settings.get('manufacturer')),
('model', self.settings.get('model')),
('android_version', Constants.ANDROID_VERSION),
('android_release', Constants.ANDROID_RELEASE)
])),
('_csrftoken', self.token),
('_uuid', self.uuid),
('_uid', self.username_id),
('caption', caption)
])
)
post = post.replace('"length":0', '"length":0.00')
return ConfigureVideoResponse(
self.http.request('media/configure/?video=1', SignatureUtils.generateSignature(post))[1])
def configure(self, upload_id, photo, caption='', location=None, filter_=None):
caption = caption if caption else ''
size = Image.open(photo).size[0]
post = OrderedDict([
('upload_id', upload_id),
('camera_model', self.settings.get('model').replace(" ", "")),
('source_type', 3),
('date_time_original', time.strftime('%Y:%m:%d %H:%M:%S')),
('camera_make', self.settings.get('manufacturer')),
('edits', OrderedDict([
('crop_original_size', [size, size]),
('crop_zoom', 1.3333334),
('crop_center', [0.0, -0.0])
])),
('extra', OrderedDict([
('source_width', size),
('source_height', size)
])),
('device', OrderedDict([
('manufacturer', self.settings.get('manufacturer')),
('model', self.settings.get('model')),
('android_version', Constants.ANDROID_VERSION),
('android_release', Constants.ANDROID_RELEASE)
])),
('_csrftoken', self.token),
('_uuid', self.uuid),
('_uid', self.username_id),
('caption', caption)
])
if location:
loc = OrderedDict([
(str(location.getExternalIdSource()) + '_id', location.getExternalId()),
('name', location.getName()),
('lat', location.getLatitude()),
('lng', location.getLongitude()),
('address', location.getAddress()),
('external_source', location.getExternalIdSource())
])
post['location'] = json.dumps(loc)
post['geotag_enabled'] = True
post['media_latitude'] = location.getLatitude()
post['posting_latitude'] = location.getLatitude()
post['media_longitude'] = location.getLongitude()
post['posting_longitude'] = location.getLongitude()
post['altitude'] = mt_rand(10, 800)
if filter_:
post['edits']['filter_type'] = Utils.getFilterCode(filter_)
post = json.dumps(post)
post = post.replace('"crop_center":[0,0]', '"crop_center":[0.0,-0.0]')
return ConfigureResponse(self.http.request('media/configure/', SignatureUtils.generateSignature(post))[1])
def configureToReel(self, upload_id, photo):
size = Image.open(photo).size[0]
post = json.dumps(
OrderedDict([
('upload_id', upload_id),
('source_type', 3),
('edits', OrderedDict([
('crop_original_size', [size, size]),
('crop_zoom', 1.3333334),
('crop_center', [0.0, 0.0])
])),
('extra', OrderedDict([
('source_width', size),
('source_height', size)
])),
('device', OrderedDict([
('manufacturer', self.settings.get('manufacturer')),
('model', self.settings.get('model')),
('android_version', Constants.ANDROID_VERSION),
('android_release', Constants.ANDROID_RELEASE)
])),
('_csrftoken', self.token),
('_uuid', self.uuid),
('_uid', self.username_id),
])
)
post = post.replace('"crop_center":[0,0]', '"crop_center":[0.0,0.0]')
return ConfigureResponse(
self.http.request('media/configure_to_reel/', SignatureUtils.generateSignature(post))[1])
def editMedia(self, mediaId, captionText=''):
"""
Edit media.
:type mediaId: str
:param mediaId: Media id
:type captionText: str
:param captionText: Caption text
:rtype: object
:return: edit media data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('caption_text', captionText)
])
)
# Unresolved Reference MediaResponse
return MediaResponse(
self.http.request("media/" + mediaId + "/edit_media/", SignatureUtils.generateSignature(data))[1]['media']
)
def removeSelftag(self, mediaId):
"""
Remove yourself from a tagged media.
:type mediaId: str
:param mediaId: Media id
:rtype: object
:return: edit media data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token)
])
)
# Unresolved Reference MediaResponse
return MediaResponse(
self.http.request("usertags/" + mediaId + "/remove/", SignatureUtils.generateSignature(data))[1]
)
def mediaInfo(self, mediaId):
"""
Media info
:type mediaId: str
:param mediaId: Media id
:rtype: object
:return: delete request data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('media_id', mediaId)
])
)
return MediaInfoResponse(
self.http.request("media/" + mediaId + "/info/", SignatureUtils.generateSignature(data))[1])
def deleteMedia(self, mediaId):
"""
Delete photo or video.
:type mediaId: str
:param mediaId: Media id
:rtype: object
:return: delete request data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('media_id', mediaId)
])
)
return self.http.request("media/" + mediaId + "/delete/", SignatureUtils.generateSignature(data))[1]
def comment(self, mediaId, commentText):
"""
Comment media.
:type mediaId: str
:param mediaId: Media id
:type commentText: str
:param commentText: Comment Text
:rtype: object
:return: comment media data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('comment_text', commentText)
])
)
return CommentResponse(
self.http.request("media/" + mediaId + "/comment/", SignatureUtils.generateSignature(data))[1]
)
def deleteComment(self, mediaId, commentId):
"""
Delete Comment.
:type mediaId: str
:param mediaId: Media ID
:type commentId: str
:param commentId: Comment ID
:rtype: object
:return: Delete comment data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token)
])
)
return \
self.http.request("media/" + mediaId + "/comment/" + commentId + "/delete/",
SignatureUtils.generateSignature(data))[1]
def deleteCommentsBulk(self, mediaId, commentIds):
"""
Delete Comment Bulk.
:type mediaId: str
:param mediaId: Media ID
:type commentIds: list
:param commentIds: List of comments to delete
:rtype: object
:return: Delete Comment Bulk Data
"""
if not isinstance(commentIds, list):
commentIds = [commentIds]
string = []
for commentId in commentIds:
string.append(str(commentId))
comment_ids_to_delete = ','.join(string)
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('comment_ids_to_delete', comment_ids_to_delete)
])
)
return self.http.request("media/" + mediaId + "/comment/bulk_delete/",
SignatureUtils.generateSignature(data))[1]
def changeProfilePicture(self, photo):
"""
Change profile picture.
:type photo: str
:param photo: Path to photo
"""
self.http.changeProfilePicture(photo)
def removeProfilePicture(self):
"""
Remove profile picture.
:rtype: object
:return: status request data
"""
data = json.dumps(
OrderedDict([('_uuid', self.uuid), ('_uid', self.username_id), ('_csrftoken', self.token)])
)
return self.http.request('accounts/remove_profile_picture/', SignatureUtils.generateSignature(data))[1]
def setPrivateAccount(self):
"""
Sets account to private.
:rtype: object
:return: status request data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token)
])
)
return self.http.request('accounts/set_private/', SignatureUtils.generateSignature(data))[1]
def setPublicAccount(self):
"""
Sets account to public.
:rtype: object
:return: status request data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token)
])
)
return self.http.request('accounts/set_public/', SignatureUtils.generateSignature(data))[1]
def getProfileData(self):
"""
Get personal profile data.
:rtype: object
:return:
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token)
])
)
return ProfileResponse(
self.http.request('accounts/current_user/?edit=true', SignatureUtils.generateSignature(data))[1])
def editProfile(self, url, phone, first_name, biography, email, gender):
"""
Edit profile.
:type url: str
:param url: Url - website. "" for nothing
:type phone: str
:param phone: Phone number. "" for nothing
:type first_name: str
:param first_name: Name. "" for nothing
:type email: str
:param email: Email. Required.
:type gender: int
:param gender: Gender. male = 1 , female = 0
:rtype: object
:return: edit profile data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('external_url', url),
('phone_number', phone),
('username', self.username),
('first_name', first_name),
('biography', biography),
('email', email),
('gender', gender)
])
)
return ProfileResponse(self.http.request('accounts/edit_profile/', SignatureUtils.generateSignature(data))[1])
def changePassword(self, oldPassword, newPassword):
"""
Change Password.
:type oldPassword: str
:param oldPassword: Old Password
:type newPassword: str
:param newPassword: New Password
:rtype: object
:return: Change Password Data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('old_password', oldPassword),
('new_password1', newPassword),
('new_password2', newPassword)
])
)
return self.http.request('accounts/change_password/', SignatureUtils.generateSignature(data))[1]
def getUsernameInfo(self, usernameId):
"""
Get username info.
:param usernameId: Username id
:rtype: object
:return: Username data
"""
return UsernameInfoResponse(self.http.request("users/" + str(usernameId) + "/info/")[1])
def getSelfUsernameInfo(self):
"""
Get self username info.
:rtype: object
:return: Username data
"""
return self.getUsernameInfo(self.username_id)
def getRecentActivity(self):
"""
Get recent activity.
:rtype: object
:return: Recent activity data
"""
activity = self.http.request('news/inbox/')[1]
if activity['status'] != 'ok':
raise InstagramException(activity['message'] + "\n")
return activity
def getFollowingRecentActivity(self):
"""
Get recent activity from accounts followed.
:rtype: object
:return: Recent activity data of follows
"""
activity = self.http.request('news/?')[1]
if activity['status'] != 'ok':
raise InstagramException(activity['message'] + "\n")
return activity
def getv2Inbox(self):
"""
Get the direct v2 inbox.
:rtype: object
:return: v2 inbox data
"""
inbox = V2InboxResponse(self.http.request('direct_v2/inbox/?')[1])
if not inbox.isOk():
raise InstagramException(inbox.getMessage() + "\n")
return inbox
def getUserTags(self, usernameId):
"""
Get user tags.
:type usernameId: str
:param usernameId:
:rtype: object
:return: user tags data
"""
tags = UsertagsResponse(self.http.request("usertags/" + str(usernameId) + "/feed/?rank_token=" + self.rank_token
+ "&ranked_content=true&")[1])
if not tags.isOk():
raise InstagramException(tags.getMessage() + "\n")
return tags
def getSelfUserTags(self):
"""
Get self user tags.
:rtype: object
:return: self user tags data
"""
return self.getUserTags(self.username_id)
def tagFeed(self, tag):
"""
Get tagged media.
:type tag: str
:param tag:
:rtype: object
:return:
"""
userFeed = TagFeedResponse(
self.http.request("feed/tag/" + tag + "/?rank_token=" + self.rank_token + "&ranked_content=true&")[1])
if not userFeed.isOk():
raise InstagramException(userFeed.getMessage() + "\n")
return userFeed
def getMediaLikers(self, mediaId):
"""
Get media likers.
:type mediaId: str
:param mediaId:
:rtype: object
:return:
"""
likers = MediaLikersResponse(self.http.request("media/" + mediaId + "/likers/")[1])
if not likers.isOk():
raise InstagramException(likers.getMessage() + "\n")
# return #fixme unreachable code
return likers
def getGeoMedia(self, usernameId):
"""
Get user locations media.
:type usernameId: str
:param usernameId: Username id
:rtype: object
:return: Geo Media data
"""
locations = self.http.request("maps/user/" + str(usernameId) + "/")[1]
if locations['status'] != 'ok':
raise InstagramException(locations['message'] + "\n")
return locations
def getSelfGeoMedia(self):
"""
Get self user locations media.
:rtype: object
:return: Geo Media data
"""
return self.getGeoMedia(self.username_id)
def searchLocation(self, latitude, longitude, query=None):
locationQuery = OrderedDict([
('rank_token', self.rank_token),
('latitude', latitude),
('longitude', longitude)
])
if query:
locationQuery['search_query'] = query
else:
locationQuery['timestamp'] = int(time.time())
locations = LocationResponse(self.http.request("location_search/?" + urllib.urlencode(locationQuery))[1])
if not locations.isOk():
raise InstagramException(locations.getMessage() + "\n")
# return fixme unreachable code
return locations
def fbUserSearch(self, query):
"""
facebook user search.
:type query: str
:param query:
:rtype: object
:return: query data
"""
query = urllib.quote(query)
query = self.http.request("fbsearch/topsearch/?context=blended&query=" + query +
"&rank_token=" + self.rank_token)[1]
if query['status'] != 'ok':
raise InstagramException(query['message'] + "\n")
return query
def searchUsers(self, query):
"""
Search users.
:type query: str
:param query:
:rtype: object
:return: query data
"""
query = self.http.request(
'users/search/?ig_sig_key_version=' + Constants.SIG_KEY_VERSION \
+ "&is_typeahead=true&query=" + query + "&rank_token=" + self.rank_token)[1]
if query['status'] != 'ok':
raise InstagramException(query['message'] + "\n")
return query
def searchUsername(self, usernameName):
"""
Search exact username
:type usernameName: str
:param usernameName: username as STRING not an id
:rtype: object
:return: query data
"""
query = UsernameInfoResponse(self.http.request("users/" + usernameName + "/usernameinfo/")[1])
if not query.isOk():
raise InstagramException(query.getMessage() + "\n")
return query
def getUsernameId(self, username):
return self.searchUsername(username).getUsernameId()
def syncFromAdressBook(self, contacts):
"""
Search users using address book.
:type contacts: list
:param contacts:
:rtype: object
:return:
"""
data = OrderedDict([
('contacts=', json.dumps(contacts))
])
return self.http.request('address_book/link/?include=extra_display_name,thumbnails', data)[1]
def searchTags(self, query):
"""
Search tags.
:type query: str
:param query:
:rtype: object
:return: query data
"""
query = self.http.request("tags/search/?is_typeahead=true&q=" + query + "&rank_token=" + self.rank_token)[1]
if query['status'] != 'ok':
raise InstagramException(query['message'] + "\n")
return query
def getTimeline(self, maxid=None):
"""
Get timeline data.
:rtype: object
:return: timeline data
"""
timeline = self.http.request(
"feed/timeline/?rank_token=" + self.rank_token + "&ranked_content=true" +
(("&max_id=" + str(maxid)) if maxid is not None else '')
)[1]
if timeline['status'] != 'ok':
raise InstagramException(timeline['message'] + "\n")
return timeline
def getReelsTrayFeed(self):
feed = ReelsTrayFeedResponse(self.http.request('feed/reels_tray/')[1])
if not feed.isOk():
raise InstagramException(feed.getMessage() + "\n")
# return todo Unreachable code
return feed
def getUserFeed(self, usernameId, maxid=None, minTimestamp=None):
"""
Get user feed.
:type usernameId: str
:param usernameId: Username id
:type maxid: str
:param maxid: Max Id
:type minTimestamp: str
:param minTimestamp: Min timestamp
:rtype: object
:return: User feed data
:raises: InstagramException
"""
userFeed = UserFeedResponse(self.http.request("feed/user/" + str(usernameId) + "/?rank_token=" + self.rank_token
+ (("&max_id=" + str(maxid)) if maxid is not None else '') \
+ (("&min_timestamp=" + str(
minTimestamp)) if minTimestamp is not None else '') \
+ "&ranked_content=true"
)[1])
if not userFeed.isOk():
raise InstagramException(userFeed.getMessage() + "\n")
return userFeed
def getHashtagFeed(self, hashtagString, maxid=''):
"""
Get hashtag feed.
:type hashtagString: str
:param hashtagString: Hashtag string, not including the #
:rtype: object
:return: Hashtag feed data
"""
if maxid == '':
endpoint = "feed/tag/" + hashtagString + "/?rank_token=" + self.rank_token + "&ranked_content=true&"
else:
endpoint = "feed/tag/" + hashtagString + "/?max_id=" \
+ maxid + "&rank_token=" + self.rank_token + "&ranked_content=true&"
hashtagFeed = self.http.request(endpoint)[1]
if hashtagFeed['status'] != 'ok':
raise InstagramException(hashtagFeed['message'] + "\n")
return hashtagFeed
def searchFBLocation(self, query):
"""
Get locations.
:type query: str
:param query: search query
:rtype: object
:return: Location location data
"""
query = urllib.quote(query)
endpoint = "fbsearch/places/?rank_token=" + self.rank_token + "&query=" + query
locationFeed = self.http.request(endpoint)[1]
if locationFeed['status'] != 'ok':
raise InstagramException(locationFeed['message'] + "\n")
return locationFeed
def getLocationFeed(self, locationId, maxid=''):
"""
Get location feed.
:type locationId: str
:param locationId: location id
:rtype: object
:return: Location feed data
"""
if maxid == '':
endpoint = "feed/location/" + locationId + "/?rank_token=" + self.rank_token + "&ranked_content=true&"
else:
endpoint = "feed/location/" + locationId + "/?max_id=" \
+ maxid + "&rank_token=" + self.rank_token + "&ranked_content=true&"
locationFeed = self.http.request(endpoint)[1]
if locationFeed['status'] != 'ok':
raise InstagramException(locationFeed['message'] + "\n")
return locationFeed
def getSelfUserFeed(self, max_id=None):
"""
Get self user feed.
:rtype: object
:return: User feed data
"""
return self.getUserFeed(self.username_id, max_id)
def getPopularFeed(self):
"""
Get popular feed.
:rtype: object
:return: popular feed data
"""
popularFeed = self.http.request("feed/popular/?people_teaser_supported=1&rank_token=" \
+ self.rank_token + "&ranked_content=true&")[1]
if popularFeed['status'] != 'ok':
raise InstagramException(popularFeed['message'] + "\n")
return popularFeed
def getUserFollowings(self, usernameId, maxid=''):
"""
Get user followings.
:type usernameId: str
:param usernameId: Username id
:rtype: object
:return: followers data
"""
return FollowingResponse(self.http.request(
"friendships/" + usernameId + "/following/?max_id=" + maxid + "&ig_sig_key_version="
+ Constants.SIG_KEY_VERSION + "&rank_token=" + self.rank_token)[1])
def getUserFollowers(self, usernameId, maxid=''):
"""
Get user followers.
:type usernameId: str
:param usernameId: Username id
:rtype: object
:return: followers data
"""
return FollowerResponse(self.http.request(
"friendships/" + usernameId + "/followers/?max_id=" + maxid
+ "&ig_sig_key_version=" + Constants.SIG_KEY_VERSION + "&rank_token=" + self.rank_token)[1])
def getSelfUserFollowers(self):
"""
Get self user followers.
:rtype: object
:return: followers data
"""
return self.getUserFollowers(self.username_id)
def getSelfUsersFollowing(self):
"""
Get self users we are following.
:rtype: object
:return: users we are following data
"""
return self.getUserFollowings(self.username_id)
def like(self, mediaId):
"""
Like photo or video.
:type mediaId: str
:param mediaId: Media id
:rtype: object
:return: status request
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('media_id', mediaId)
])
)
return self.http.request("media/" + mediaId + "/like/", SignatureUtils.generateSignature(data))[1]
def unlike(self, mediaId):
"""
Unlike photo or video.
:type mediaId: str
:param mediaId: Media id
:rtype: object
:return: status request
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('_csrftoken', self.token),
('media_id', mediaId)
])
)
return self.http.request("media/" + mediaId + "/unlike/", SignatureUtils.generateSignature(data))[1]
def getMediaComments(self, mediaId, maxid=''):
"""
Get media comments.
:type mediaId: str
:param mediaId: Media id
:rtype: object
:return: Media comments data
"""
return MediaCommentsResponse(self.http.request("media/" + str(mediaId) + "/comments/?max_id=" + str(maxid)
+ "&ig_sig_key_version=" + Constants.SIG_KEY_VERSION)[1])
def setNameAndPhone(self, name='', phone=''):
"""
Set name and phone (Optional).
:type name: str
:param name:
:type phone: str
:param phone:
:rtype: object
:return: Set status data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('first_name', name),
('phone_number', phone),
('_csrftoken', self.token)
])
)
return self.http.request("accounts/set_phone_and_name/", SignatureUtils.generateSignature(data))[1]
def getDirectShare(self):
"""
Get direct share.
:rtype: object
:return: Direct share data
"""
return self.http.request('direct_share/inbox/?')[1]
def backup(self):
"""
Backs up all your uploaded photos and videos.
"""
go = False
while True:
if not go:
myUploads = self.getSelfUserFeed()
else:
myUploads = self.getSelfUserFeed(myUploads.getNextMaxId() if myUploads.getNextMaxId() else None)
# fixme local variable `myUploads` might be referenced before assignment
if not os.path.isdir(self.IGDataPath + 'backup/'):
os.mkdir(self.IGDataPath + 'backup/')
for item in myUploads.getItems():
dir_name = self.IGDataPath + 'backup/' + self.username + "-" + time.strftime('%Y-%m-%d')
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
if item.getVideoVersions():
file_put_contents(
os.path.join(dir_name, item.getMediaId() + '.mp4'),
urllib.urlopen(item.getVideoVersions()[0].getUrl()).read()
) # todo test and remove below
else:
file_put_contents(
os.path.join(dir_name, item.getMediaId() + '.jpg'),
urllib.urlopen(item.getImageVersions()[0].getUrl()).read()
) # todo test and remove below
# urllib.urlretrieve(
# item['image_versions2']['candidates'][0]['url'],
# self.IGDataPath + 'backup/' + self.username + "-" + time.strftime('%Y-%m-%d') + '/' + item['id'] + '.jpg'
# )
go = True
if not myUploads.getNextMaxId():
break
def follow(self, userId):
"""
Follow.
:param userId:
:type userId: str
:rtype: object
:return: Friendship status data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('user_id', userId),
('_csrftoken', self.token)
])
)
return self.http.request("friendships/create/" + userId + "/", SignatureUtils.generateSignature(data))[1]
def unfollow(self, userId):
"""
Unfollow.
:param userId:
:type userId: str
:rtype: object
:return: Friendship status data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('user_id', userId),
('_csrftoken', self.token)
])
)
return self.http.request("friendships/destroy/" + userId + "/", SignatureUtils.generateSignature(data))[1]
def block(self, userId):
"""
Block.
:param userId:
:type userId: str
:rtype: object
:return: Friendship status data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('user_id', userId),
('_csrftoken', self.token)
])
)
return self.http.request("friendships/block/" + userId + "/", SignatureUtils.generateSignature(data))[1]
def unblock(self, userId):
"""
Unblock.
:param userId:
:type userId: str
:rtype: object
:return: Friendship status data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('user_id', userId),
('_csrftoken', self.token)
])
)
return self.http.request("friendships/unblock/" + userId + "/", SignatureUtils.generateSignature(data))[1]
def userFriendship(self, userId):
"""
Show User Friendship.
:type userId: str
:param userId:
:rtype: object
:return: Friendship relationship data
"""
data = json.dumps(
OrderedDict([
('_uuid', self.uuid),
('_uid', self.username_id),
('user_id', userId),
('_csrftoken', self.token)
])
)
return self.http.request("friendships/show/" + userId + "/", SignatureUtils.generateSignature(data))[1]
def getLikedMedia(self, maxid=None):
"""
Get liked media.
:rtype: object
:return: Liked media data
"""
endpoint = 'feed/liked/?' + (('max_id=' + str(maxid) + '&') if maxid is not None else '')
return self.http.request(endpoint)[1]
def verifyPeer(self, enable):
self.http.verifyPeer(enable)
def verifyHost(self, enable):
self.http.verifyHost(enable)
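# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how this client is typically driven, assuming valid
# credentials and that the package-relative imports above resolve:
#
#     ig = Instagram('my_username', 'my_password', debug=False)
#     ig.login()                     # reuses the saved session in IGDataPath when possible
#     timeline = ig.getTimeline()    # raw timeline dict; raises InstagramException on error
#     me = ig.getSelfUsernameInfo()  # UsernameInfoResponse for the logged-in account
#     ig.logout()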
|
|
"""
mocks.py
Various mock objects for testing
"""
import cPickle
import base64
import codecs
from StringIO import StringIO
class MockAtom(object):
""" Mocks an atom in the GData service. """
def __init__(self, value):
self.text = value
class MockEntry(object):
""" Mocks and entry returned from the GData service. """
def __init__(self, title, ID):
self.title = MockAtom(title)
self.id = MockAtom('http://mock.example.com/%s' % ID)
self.ID = ID # simpler lookup for key value
def GetEditMediaLink(self):
return MockLink()
class MockHTTPClient(object):
""" Mocks the functionality of an http client. """
def request(*args, **kwargs):
pass
class MockGDataService(object):
""" Provides the common functionality of a Google Service. """
http_client = MockHTTPClient()
def __init__(self, email=None, password=None,
account_type='HOSTED_OR_GOOGLE', service=None,
auth_service_url=None, source=None, server=None,
additional_headers=None, handler=None, tokens=None,
http_client=None, token_store=None):
""" Create the Service with the default parameters. """
self.email = email
self.password = password
self.account_type = account_type
self.service = service
self.auth_service_url = auth_service_url
self.server = server
self.login_token = None
def GetClientLoginToken(self):
return self.login_token
def SetClientLoginToken(self, token):
self.login_token = token
def ClientLogin(self, username, password, account_type=None, service=None,
auth_service_url=None, source=None, captcha_token=None,
captcha_response=None):
""" Client side login to the service. """
if hasattr(self, '_login_err'):
raise self._login_err()
class MockDocumentService(MockGDataService):
"""
Implements the minimum functionality of the Google Document service.
"""
def Upload(self, media_source, title, folder_or_uri=None, label=None):
"""
Upload a document.
"""
if hasattr(self, '_upload_err'):
raise self._upload_err()
if not hasattr(self, '_upload_count'):
self._upload_count = 0
# save the data for asserting against
self._upload_data = dict(media_source=media_source, title=title,
folder_or_uri=folder_or_uri, label=label)
self._upload_count += 1
return MockEntry(title, 'mockentry%3A' + title)
def QueryDocumentListFeed(self, uri):
if hasattr(self, '_listfeed'):
return self._listfeed
return MockListFeed()
def CreateFolder(self, title, folder_or_uri=None):
if hasattr(self, '_create_folder_err'):
raise self._create_folder_err()
if hasattr(self, '_create_folder'):
return self._create_folder
return MockListEntry()
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=3, media_source=None,
converter=None):
import gdata
self._put_data = None
if not hasattr(self, '_put_count'):
self._put_count = 0
if hasattr(self, '_put_err'):
# allow for a list of errors
if type(self._put_err) == list:
put_err = self._put_err.pop(0)
if not len(self._put_err):
delattr(self, '_put_err')
else:
put_err = self._put_err
if type(put_err) == tuple:
raise put_err[0], put_err[1]
else:
raise put_err()
# save the data for asserting against
assert isinstance(data, basestring), \
'Should be a string'
self._put_data = cPickle.loads(base64.urlsafe_b64decode(data))
self._put_count += 1
return MockEntry('', 'mockentry%3A' + '')
def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
if hasattr(self, '_export_err'):
raise self._export_err()
if hasattr(self, '_export_data'):
export_file = open(file_path, 'wb')
export_file.write(self._export_data)
export_file.close()
def request(self, data, uri):
if hasattr(self, '_request_err'):
if type(self._request_err) == tuple:
raise self._request_err[0], self._request_err[1]
else:
raise self._request_err()
if hasattr(self, '_request_response'):
return MockHttpResponse(self._request_response)
class MockHttpResponse(StringIO, object):
def __init__(self, response_dict):
super(MockHttpResponse, self).__init__(response_dict.get('data', ''))
self.status = response_dict.get('status', 200)
self.reason = response_dict.get('reason', '')
class MockListFeed(object):
@property
def entry(self):
if hasattr(self, '_entry'):
return self._entry
return []
class MockListEntry(object):
pass
class MockLink(object):
@property
def href(self):
return ''
class MockContent(object):
@property
def src(self):
return 'src'
class MockDocumentListEntry(object):
@property
def content(self):
return MockContent()
def GetEditMediaLink(self):
return MockLink()
class MockKeyczarReader(object):
def __init__(self, location):
self.location = location
class MockKeyczarEncryptedReader(object):
def __init__(self, reader, crypter):
self._reader = reader
self._crypter = crypter
class MockKeyczarReaders(object):
@staticmethod
def CreateReader(location):
return MockKeyczarReader(location)
@staticmethod
def EncryptedReader(reader, crypter):
return MockKeyczarEncryptedReader(reader, crypter)
class MockKeyczarCrypter(object):
def __init__(self, reader):
self.reader = reader
@staticmethod
def Read(location):
return MockKeyczarCrypter(MockKeyczarReader(location))
class MockKeyczar(object):
@property
def readers(self):
return MockKeyczarReaders
@property
def Crypter(self):
return MockKeyczarCrypter
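# --- Illustrative usage sketch (not part of the original module) ---
# These mocks stand in for gdata clients in unit tests. A minimal sketch,
# assuming the test injects MockDocumentService in place of the real service:
#
#     service = MockDocumentService(email='user@example.com', password='secret')
#     entry = service.Upload(media_source=None, title='report')
#     assert service._upload_count == 1
#     assert entry.title.text == 'report'
#     feed = service.QueryDocumentListFeed('http://mock.example.com/feed')
#     assert feed.entry == []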
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception): pass
class MultipleGTestFiltersSpecified(Exception): pass
class BuildDirNotFound(Exception): pass
class BuildDirAmbiguous(Exception): pass
class ChromeTests:
SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 1500
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/valgrind/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
self._command_preamble = ["--source_dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build_dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build_dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build_dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
'''Generates the default command array that most tests will use.'''
if exe and common.IsWindows():
exe += '.exe'
cmd = list(self._command_preamble)
# Find all suppressions matching the following pattern:
# tools/valgrind/TOOL/suppressions[_PLATFORM].txt
# and list them with --suppressions= prefix.
script_dir = path_utils.ScriptDir()
tool_name = tool.ToolName()
suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
if os.path.exists(suppression_file):
cmd.append("--suppressions=%s" % suppression_file)
# Platform-specific suppression
for platform in common.PlatformNames():
platform_suppression_file = \
os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
if os.path.exists(platform_suppression_file):
cmd.append("--suppressions=%s" % platform_suppression_file)
if self._options.valgrind_tool_flags:
cmd += self._options.valgrind_tool_flags.split(" ")
if self._options.keep_logs:
cmd += ["--keep_logs"]
if valgrind_test_args != None:
for arg in valgrind_test_args:
cmd.append(arg)
if exe:
self._EnsureBuildDirFound()
cmd.append(os.path.join(self._options.build_dir, exe))
# Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
# so we can find the slowpokes.
cmd.append("--gtest_print_time")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
'''Append an appropriate --gtest_filter flag to the googletest binary
invocation.
If the user passed their own filter mentioning only one test, just use it.
Otherwise, filter out tests listed in the appropriate gtest_exclude files.
'''
if (self._gtest_filter and
":" not in self._gtest_filter and
"?" not in self._gtest_filter and
"*" not in self._gtest_filter):
cmd.append("--gtest_filter=%s" % self._gtest_filter)
return
filters = []
gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
gtest_filter_files = [
os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
# Use ".gtest.txt" files only for slow tools, as they now contain
# Valgrind- and Dr.Memory-specific filters.
# TODO(glider): rename the files to ".gtest_slow.txt"
if tool.ToolName() in ChromeTests.SLOW_TOOLS:
gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
for platform_suffix in common.PlatformNames():
gtest_filter_files += [
os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
(tool.ToolName(), platform_suffix))]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace("\\", "/") # '\' on Windows
readable_filename = readable_filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
test_prefixes = ["FLAKY", "FAILS"]
for p in test_prefixes:
# Strip prefixes from the test names.
line = line.replace(".%s_" % p, ".")
# Exclude the original test name.
filters.append(line)
if line[-2:] != ".*":
# List all possible prefixes if line doesn't end with ".*".
for p in test_prefixes:
filters.append(line.replace(".", ".%s_" % p))
# Get rid of duplicates.
filters = set(filters)
gtest_filter = self._gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
def TestAsh(self):
return self.SimpleTest("ash", "aura_shell_unittests")
def TestAura(self):
return self.SimpleTest("aura", "aura_unittests")
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestFFmpeg(self):
return self.SimpleTest("chrome", "ffmpeg_unittests")
def TestFFmpegRegressions(self):
return self.SimpleTest("chrome", "ffmpeg_regression_tests")
def TestGfx(self):
return self.SimpleTest("chrome", "gfx_unittests")
def TestGPU(self):
return self.SimpleTest("gpu", "gpu_unittests")
def TestGURL(self):
return self.SimpleTest("chrome", "googleurl_unittests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests",
valgrind_test_args=["--trace_children"])
def TestJingle(self):
return self.SimpleTest("chrome", "jingle_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestPPAPI(self):
return self.SimpleTest("chrome", "ppapi_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests",
cmd_args=[
"--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000"])
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestSync(self):
return self.SimpleTest("chrome", "sync_unit_tests")
def TestTestShell(self):
return self.SimpleTest("webkit", "test_shell_tests")
def TestUnit(self):
# http://crbug.com/51716
# Disabling all unit tests
# Problems reappeared after r119922
if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
logging.warning("unit_tests are disabled for memcheck on MacOS.")
return 0
return self.SimpleTest("chrome", "unit_tests")
def TestUIUnit(self):
return self.SimpleTest("chrome", "ui_unittests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
# Valgrind timeouts are in seconds.
UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
# UI test timeouts are in milliseconds.
UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000"]
def TestAutomatedUI(self):
return self.SimpleTest("chrome", "automated_ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestInteractiveUI(self):
return self.SimpleTest("chrome", "interactive_ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestReliability(self):
script_dir = path_utils.ScriptDir()
url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
return self.SimpleTest("chrome", "reliability_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(self.UI_TEST_ARGS +
["--list=%s" % url_list_file]))
def TestSafeBrowsing(self):
return self.SimpleTest("chrome", "safe_browsing_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestSyncIntegration(self):
return self.SimpleTest("chrome", "sync_integration_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestUI(self):
return self.SimpleTest("chrome", "ui_tests",
valgrind_test_args=self.UI_VALGRIND_ARGS,
cmd_args=self.UI_TEST_ARGS)
def TestLayoutChunk(self, chunk_num, chunk_size):
# Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
# list of tests. Wrap around to beginning of list at end.
# If chunk_size is zero, run all tests in the list once.
# If a text file is given as argument, it is used as the list of tests.
#
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python valgrind_test.py ... python run_webkit_tests.py ...
# but we'll use the --indirect flag to valgrind_test.py
# to avoid valgrinding python.
# Start by building the valgrind_test.py commandline.
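# Worked example (illustrative): with the default chunk_size of 1500 and
# chunk_num == 2, this pass covers tests [3000 .. 4500) via
# --run-chunk=2:1500; TestLayout() bumps the counter stored in
# valgrind_layout_chunk.txt afterwards, so the next run starts at test 4500.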
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool)
cmd.append("--trace_children")
cmd.append("--indirect_webkit_layout")
cmd.append("--ignore_exit_code")
# Now build script_cmd, the run_webkits_tests.py commandline
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
test_shell = os.path.join(self._options.build_dir, "test_shell")
out_dir = os.path.join(path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
"run_webkit_tests.py")
script_cmd = ["python", script, "-v",
"--run-singly", # run a separate DumpRenderTree for each test
"--fully-parallel",
"--time-out-ms=200000",
"--noshow-results",
"--nocheck-sys-deps"]
# Pass build mode to run_webkit_tests.py. We aren't passed it directly,
# so parse it out of build_dir. run_webkit_tests.py can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build_dir / --debug)
if self._options.build_dir.endswith("Debug"):
      script_cmd.append("--debug")
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._AppendGtestFilter(tool, "layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
    # Layout tests often fail quickly, but the buildbot remains green.
# Detect this situation when running with the default chunk size.
if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
min_runtime_in_seconds=120
else:
min_runtime_in_seconds=0
ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
return ret
def TestLayout(self):
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under valgrind rather
# than having to run all of them in one shot.
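    # Example of the state round-trip (hypothetical value): if
    # valgrind_layout_chunk.txt contains "7", this run covers chunk 7 and, once
    # the tests complete, writes "8" back so the next run picks up from there.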
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("valgrind_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
str = f.read()
if len(str):
chunk_num = int(str)
# This should be enough so that we have a couple of complete runs
    # of test data stored in the archive (although note that when we loop,
    # we are almost guaranteed not to be at the end of the test list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
ret = self.TestLayoutChunk(chunk_num, chunk_size)
# Wait until after the test runs to completion to write out the new chunk
# number. This way, if the bot is killed, we'll start running again from
# the current chunk rather than skipping it.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return ret
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
_test_list = {
"cmdline" : RunCmdLine,
"ash": TestAsh, "aura_shell_unittests": TestAsh,
"aura": TestAura, "aura_unittests": TestAura,
"automated_ui" : TestAutomatedUI,
"base": TestBase, "base_unittests": TestBase,
"browser": TestBrowser, "browser_tests": TestBrowser,
"crypto": TestCrypto, "crypto_unittests": TestCrypto,
"ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
"ffmpeg_regression_tests": TestFFmpegRegressions,
"googleurl": TestGURL, "googleurl_unittests": TestGURL,
"content": TestContent, "content_unittests": TestContent,
"courgette": TestCourgette, "courgette_unittests": TestCourgette,
"ipc": TestIpc, "ipc_tests": TestIpc,
"interactive_ui": TestInteractiveUI,
"layout": TestLayout, "layout_tests": TestLayout,
"webkit": TestLayout,
"media": TestMedia, "media_unittests": TestMedia,
"net": TestNet, "net_unittests": TestNet,
"jingle": TestJingle, "jingle_unittests": TestJingle,
"ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
"printing": TestPrinting, "printing_unittests": TestPrinting,
"reliability": TestReliability, "reliability_tests": TestReliability,
"remoting": TestRemoting, "remoting_unittests": TestRemoting,
"safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
"sync": TestSync, "sync_unit_tests": TestSync,
"sync_integration_tests": TestSyncIntegration,
"sync_integration": TestSyncIntegration,
"test_shell": TestTestShell, "test_shell_tests": TestTestShell,
"ui": TestUI, "ui_tests": TestUI,
"unit": TestUnit, "unit_tests": TestUnit,
"sql": TestSql, "sql_unittests": TestSql,
"ui_unit": TestUIUnit, "ui_unittests": TestUIUnit,
"gfx": TestGfx, "gfx_unittests": TestGfx,
"gpu": TestGPU, "gpu_unittests": TestGPU,
"views": TestViews, "views_unittests": TestViews,
}
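# Typical invocations (a sketch only; the script file name and build
# directories are assumptions, the flags are defined in _main() below):
#   python chrome_tests.py -b out/Release -t unit
#   python chrome_tests.py -b out/Debug -t base:MessageLoop* --tool memcheck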
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.disable_interspersed_args()
parser.add_option("-b", "--build_dir",
help="the location of the compiler output")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("", "--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("", "--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("", "--gtest_repeat",
help="argument for --gtest_repeat")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("", "--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
options, args = parser.parse_args()
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
|
|
###############################################################################
# copyright 2012, Marouen Mechtri (Marouen.Mechtri@it-sudparis.eu) #
# Institut Mines-Telecom - TELECOM & Management SudParis #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Implementation of category actions
import sys
import pycompdev
import pypacksrc
srcdirectory=pypacksrc.srcpydir+"/pyaccords/pysrc/"
srcdirectoryc=pypacksrc.srcpydir+"/cocarrier/src/"
sys.path.append(srcdirectory)
sys.path.append(srcdirectoryc)
from gwClass import *
from actionClass import *
from client import OCCIclient
""" Note:respAction is a python class to describe the occi response with the status and the message
gw is a python class to interface the accords category :gw.
-Attributes of this category are members of this class.
-List of attributes:
- name
- publicaddr
- privateaddr
- ethername
- intercloudGW
- contract
- provider_type
- provider_platform
- connection
- account
- state
"""
def gw_start(gw):
response=respAction("200","ok")
"""Implement here your function"""
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print(' Receiving action START gw ')
#-------------------------------------------------contract---------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle contract category """
attributePUB= {
'what': 'contract'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
attribute = {}
client = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', 'contract', attribute)
#----------------------------------------------start Node----------------------------------------------------------#
gwcontract = client.GetElement_pathuuid(gw.contract)
print('Sending START to the contract of the gateway source')
client.action(gwcontract['uuid'], 'start')
gwcontract = client.GetElement(gwcontract['uuid'])
#-------------------------------------------------provider gw------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle procci provider category """
attributePUB= {
'what': gwcontract['occi.contract.profile']
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
attribute = {}
client = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', gwcontract['occi.contract.profile'], attribute)
addressgw = client.GetElement(client.GetElement_pathuuid(gwcontract['occi.contract.provider'])['uuid'])
gwprivateaddr = addressgw['occi.'+ gwcontract['occi.contract.profile'] +'.privateaddr']
gwpublicaddr = addressgw['occi.'+ gwcontract['occi.contract.profile'] +'.publicaddr']
print('The private address of the gateway is: '+gwprivateaddr)
print('The public address of the gateway is: '+gwpublicaddr)
#---------------------------------------------------gw-------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle gw category """
attributePUB= {
'what': 'gw'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
tempgw={
'name':gw.name,
'publicaddr':gw.publicaddr,
'privateaddr':gw.privateaddr,
'ethername':gw.ethername,
'intercloudGW': gw.intercloudGW,
'contract':gw.contract,
'provider_type':gw.provider_type,
'provider_platform':gw.provider_platform,
'connection':gw.connection,
'account':gw.account,
'state':gw.state,
}
attribute = {
'publicaddr': gwpublicaddr,
'privateaddr': gwprivateaddr,
'state': '1',
}
gwOCCI = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', 'gw', tempgw)
uuidgw=gwOCCI.Get()
gwOCCI.attributes=attribute
gwOCCI.Put(gwOCCI.GetElement_pathuuid(uuidgw[0])['uuid'])
print('updating the gateway category with the public and private address')
return response
def gw_stop(gw):
response=respAction("200","ok")
"""Implement here your function"""
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print(' Receiving action STOP gw ')
#---------------------------------------------------gw-------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle gw category """
attributePUB= {
'what': 'gw'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
tempgw={
'name':gw.name,
'publicaddr':gw.publicaddr,
'privateaddr':gw.privateaddr,
'ethername':gw.ethername,
'intercloudGW': gw.intercloudGW,
'contract':gw.contract,
'provider_type':gw.provider_type,
'provider_platform':gw.provider_platform,
'connection':gw.connection,
'account':gw.account,
'state':gw.state,
}
gwOCCI = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', 'gw', tempgw)
uuidgw=gwOCCI.Get()
uuidgw=gwOCCI.GetElement_pathuuid(uuidgw[0])['uuid']
nbconnection = gwOCCI.GetElement(uuidgw)['occi.gw.connection']
if int(nbconnection) > 0:
attributes = {
'connection': str(int(nbconnection)-1)
}
gwOCCI.attributes = attributes
gwOCCI.Put(uuidgw)
        print('decrementing the number of connections to: '+attributes['connection'])
if int(nbconnection) == 1:
#-------------------------------------------------contract---------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
""" Determine server which handle contract category """
attributePUB= {
'what': 'contract'
}
client = OCCIclient('127.0.0.1', '8086', 'CO-PUB', 'publication', attributePUB)
publication = client.GetElement_pathuuid(client.Get()[0])
client.host = publication['host']
client.port = publication['port']
publication = client.GetElement_pathuuid(client.GetElement(publication['uuid'])['occi.publication.why'])
attribute = {}
client = OCCIclient(publication['host'], publication['port'], 'CO-PARSER', 'contract', attribute)
#----------------------------------------------stop Node----------------------------------------------------------#
gwcontract = client.GetElement_pathuuid(gw.contract)
print('the number of connection = 0 ==> stopping the gateway ')
client.action(gwcontract['uuid'], 'stop')
gwcontract = client.GetElement(gwcontract['uuid'])
attribute = {
'publicaddr': '',
'privateaddr': '',
'state': '0',
}
gwOCCI.attributes=attribute
gwOCCI.Put(uuidgw)
print('Changing the state of the gateway to 0')
return response
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
CONTEXT structure for amd64.
"""
__revision__ = "$Id: context_amd64.py 1299 2013-12-20 09:30:55Z qvasimodo $"
from defines import *
from version import ARCH_AMD64
import context_i386
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- CONTEXT structures and constants -----------------------------------------
# The following values specify the type of access in the first parameter
# of the exception record when the exception code specifies an access
# violation.
EXCEPTION_READ_FAULT = 0 # exception caused by a read
EXCEPTION_WRITE_FAULT = 1 # exception caused by a write
EXCEPTION_EXECUTE_FAULT = 8 # exception caused by an instruction fetch
CONTEXT_AMD64 = 0x00100000
CONTEXT_CONTROL = (CONTEXT_AMD64 | 0x1L)
CONTEXT_INTEGER = (CONTEXT_AMD64 | 0x2L)
CONTEXT_SEGMENTS = (CONTEXT_AMD64 | 0x4L)
CONTEXT_FLOATING_POINT = (CONTEXT_AMD64 | 0x8L)
CONTEXT_DEBUG_REGISTERS = (CONTEXT_AMD64 | 0x10L)
CONTEXT_MMX_REGISTERS = CONTEXT_FLOATING_POINT
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
CONTEXT_ALL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | \
CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS)
CONTEXT_EXCEPTION_ACTIVE = 0x8000000
CONTEXT_SERVICE_ACTIVE = 0x10000000
CONTEXT_EXCEPTION_REQUEST = 0x40000000
CONTEXT_EXCEPTION_REPORTING = 0x80000000
INITIAL_MXCSR = 0x1f80 # initial MXCSR value
INITIAL_FPCSR = 0x027f # initial FPCSR value
# typedef struct _XMM_SAVE_AREA32 {
# WORD ControlWord;
# WORD StatusWord;
# BYTE TagWord;
# BYTE Reserved1;
# WORD ErrorOpcode;
# DWORD ErrorOffset;
# WORD ErrorSelector;
# WORD Reserved2;
# DWORD DataOffset;
# WORD DataSelector;
# WORD Reserved3;
# DWORD MxCsr;
# DWORD MxCsr_Mask;
# M128A FloatRegisters[8];
# M128A XmmRegisters[16];
# BYTE Reserved4[96];
# } XMM_SAVE_AREA32, *PXMM_SAVE_AREA32;
class XMM_SAVE_AREA32(Structure):
_pack_ = 1
_fields_ = [
('ControlWord', WORD),
('StatusWord', WORD),
('TagWord', BYTE),
('Reserved1', BYTE),
('ErrorOpcode', WORD),
('ErrorOffset', DWORD),
('ErrorSelector', WORD),
('Reserved2', WORD),
('DataOffset', DWORD),
('DataSelector', WORD),
('Reserved3', WORD),
('MxCsr', DWORD),
('MxCsr_Mask', DWORD),
('FloatRegisters', M128A * 8),
('XmmRegisters', M128A * 16),
('Reserved4', BYTE * 96),
]
def from_dict(self):
raise NotImplementedError()
def to_dict(self):
d = dict()
for name, type in self._fields_:
if name in ('FloatRegisters', 'XmmRegisters'):
                d[name] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, name) ])  # M128A members are Low/High, as used elsewhere in this module
elif name == 'Reserved4':
d[name] = tuple([ chr(x) for x in getattr(self, name) ])
else:
d[name] = getattr(self, name)
return d
LEGACY_SAVE_AREA_LENGTH = sizeof(XMM_SAVE_AREA32)
PXMM_SAVE_AREA32 = ctypes.POINTER(XMM_SAVE_AREA32)
LPXMM_SAVE_AREA32 = PXMM_SAVE_AREA32
# //
# // Context Frame
# //
# // This frame has a several purposes: 1) it is used as an argument to
# // NtContinue, 2) it is used to construct a call frame for APC delivery,
# // and 3) it is used in the user level thread creation routines.
# //
# //
# // The flags field within this record controls the contents of a CONTEXT
# // record.
# //
# // If the context record is used as an input parameter, then for each
# // portion of the context record controlled by a flag whose value is
# // set, it is assumed that that portion of the context record contains
# // valid context. If the context record is being used to modify a thread's
# // context, then only that portion of the thread's context is modified.
# //
# // If the context record is used as an output parameter to capture the
# // context of a thread, then only those portions of the thread's context
# // corresponding to set flags will be returned.
# //
# // CONTEXT_CONTROL specifies SegSs, Rsp, SegCs, Rip, and EFlags.
# //
# // CONTEXT_INTEGER specifies Rax, Rcx, Rdx, Rbx, Rbp, Rsi, Rdi, and R8-R15.
# //
# // CONTEXT_SEGMENTS specifies SegDs, SegEs, SegFs, and SegGs.
# //
# // CONTEXT_DEBUG_REGISTERS specifies Dr0-Dr3 and Dr6-Dr7.
# //
# // CONTEXT_MMX_REGISTERS specifies the floating point and extended registers
# // Mm0/St0-Mm7/St7 and Xmm0-Xmm15).
# //
#
# typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
#
# //
# // Register parameter home addresses.
# //
# // N.B. These fields are for convenience - they could be used to extend the
# // context record in the future.
# //
#
# DWORD64 P1Home;
# DWORD64 P2Home;
# DWORD64 P3Home;
# DWORD64 P4Home;
# DWORD64 P5Home;
# DWORD64 P6Home;
#
# //
# // Control flags.
# //
#
# DWORD ContextFlags;
# DWORD MxCsr;
#
# //
# // Segment Registers and processor flags.
# //
#
# WORD SegCs;
# WORD SegDs;
# WORD SegEs;
# WORD SegFs;
# WORD SegGs;
# WORD SegSs;
# DWORD EFlags;
#
# //
# // Debug registers
# //
#
# DWORD64 Dr0;
# DWORD64 Dr1;
# DWORD64 Dr2;
# DWORD64 Dr3;
# DWORD64 Dr6;
# DWORD64 Dr7;
#
# //
# // Integer registers.
# //
#
# DWORD64 Rax;
# DWORD64 Rcx;
# DWORD64 Rdx;
# DWORD64 Rbx;
# DWORD64 Rsp;
# DWORD64 Rbp;
# DWORD64 Rsi;
# DWORD64 Rdi;
# DWORD64 R8;
# DWORD64 R9;
# DWORD64 R10;
# DWORD64 R11;
# DWORD64 R12;
# DWORD64 R13;
# DWORD64 R14;
# DWORD64 R15;
#
# //
# // Program counter.
# //
#
# DWORD64 Rip;
#
# //
# // Floating point state.
# //
#
# union {
# XMM_SAVE_AREA32 FltSave;
# struct {
# M128A Header[2];
# M128A Legacy[8];
# M128A Xmm0;
# M128A Xmm1;
# M128A Xmm2;
# M128A Xmm3;
# M128A Xmm4;
# M128A Xmm5;
# M128A Xmm6;
# M128A Xmm7;
# M128A Xmm8;
# M128A Xmm9;
# M128A Xmm10;
# M128A Xmm11;
# M128A Xmm12;
# M128A Xmm13;
# M128A Xmm14;
# M128A Xmm15;
# };
# };
#
# //
# // Vector registers.
# //
#
# M128A VectorRegister[26];
# DWORD64 VectorControl;
#
# //
# // Special debug control registers.
# //
#
# DWORD64 DebugControl;
# DWORD64 LastBranchToRip;
# DWORD64 LastBranchFromRip;
# DWORD64 LastExceptionToRip;
# DWORD64 LastExceptionFromRip;
# } CONTEXT, *PCONTEXT;
class _CONTEXT_FLTSAVE_STRUCT(Structure):
_fields_ = [
('Header', M128A * 2),
('Legacy', M128A * 8),
('Xmm0', M128A),
('Xmm1', M128A),
('Xmm2', M128A),
('Xmm3', M128A),
('Xmm4', M128A),
('Xmm5', M128A),
('Xmm6', M128A),
('Xmm7', M128A),
('Xmm8', M128A),
('Xmm9', M128A),
('Xmm10', M128A),
('Xmm11', M128A),
('Xmm12', M128A),
('Xmm13', M128A),
('Xmm14', M128A),
('Xmm15', M128A),
]
def from_dict(self):
raise NotImplementedError()
def to_dict(self):
d = dict()
for name, type in self._fields_:
if name in ('Header', 'Legacy'):
d[name] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, name) ])
else:
x = getattr(self, name)
d[name] = x.Low + (x.High << 64)
return d
class _CONTEXT_FLTSAVE_UNION(Union):
_fields_ = [
('flt', XMM_SAVE_AREA32),
('xmm', _CONTEXT_FLTSAVE_STRUCT),
]
def from_dict(self):
raise NotImplementedError()
def to_dict(self):
d = dict()
d['flt'] = self.flt.to_dict()
d['xmm'] = self.xmm.to_dict()
return d
class CONTEXT(Structure):
arch = ARCH_AMD64
_pack_ = 16
_fields_ = [
# Register parameter home addresses.
('P1Home', DWORD64),
('P2Home', DWORD64),
('P3Home', DWORD64),
('P4Home', DWORD64),
('P5Home', DWORD64),
('P6Home', DWORD64),
# Control flags.
('ContextFlags', DWORD),
('MxCsr', DWORD),
# Segment Registers and processor flags.
('SegCs', WORD),
('SegDs', WORD),
('SegEs', WORD),
('SegFs', WORD),
('SegGs', WORD),
('SegSs', WORD),
('EFlags', DWORD),
# Debug registers.
('Dr0', DWORD64),
('Dr1', DWORD64),
('Dr2', DWORD64),
('Dr3', DWORD64),
('Dr6', DWORD64),
('Dr7', DWORD64),
# Integer registers.
('Rax', DWORD64),
('Rcx', DWORD64),
('Rdx', DWORD64),
('Rbx', DWORD64),
('Rsp', DWORD64),
('Rbp', DWORD64),
('Rsi', DWORD64),
('Rdi', DWORD64),
('R8', DWORD64),
('R9', DWORD64),
('R10', DWORD64),
('R11', DWORD64),
('R12', DWORD64),
('R13', DWORD64),
('R14', DWORD64),
('R15', DWORD64),
# Program counter.
('Rip', DWORD64),
# Floating point state.
('FltSave', _CONTEXT_FLTSAVE_UNION),
# Vector registers.
('VectorRegister', M128A * 26),
('VectorControl', DWORD64),
# Special debug control registers.
('DebugControl', DWORD64),
('LastBranchToRip', DWORD64),
('LastBranchFromRip', DWORD64),
('LastExceptionToRip', DWORD64),
('LastExceptionFromRip', DWORD64),
]
_others = ('P1Home', 'P2Home', 'P3Home', 'P4Home', 'P5Home', 'P6Home', \
'MxCsr', 'VectorRegister', 'VectorControl')
_control = ('SegSs', 'Rsp', 'SegCs', 'Rip', 'EFlags')
_integer = ('Rax', 'Rcx', 'Rdx', 'Rbx', 'Rsp', 'Rbp', 'Rsi', 'Rdi', \
'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15')
_segments = ('SegDs', 'SegEs', 'SegFs', 'SegGs')
_debug = ('Dr0', 'Dr1', 'Dr2', 'Dr3', 'Dr6', 'Dr7', \
'DebugControl', 'LastBranchToRip', 'LastBranchFromRip', \
'LastExceptionToRip', 'LastExceptionFromRip')
_mmx = ('Xmm0', 'Xmm1', 'Xmm2', 'Xmm3', 'Xmm4', 'Xmm5', 'Xmm6', 'Xmm7', \
'Xmm8', 'Xmm9', 'Xmm10', 'Xmm11', 'Xmm12', 'Xmm13', 'Xmm14', 'Xmm15')
# XXX TODO
# Convert VectorRegister and Xmm0-Xmm15 to pure Python types!
@classmethod
def from_dict(cls, ctx):
        'Instantiate a new structure from a Python native type.'
ctx = Context(ctx)
s = cls()
ContextFlags = ctx['ContextFlags']
s.ContextFlags = ContextFlags
for key in cls._others:
if key != 'VectorRegister':
setattr(s, key, ctx[key])
else:
w = ctx[key]
v = (M128A * len(w))()
i = 0
for x in w:
y = M128A()
                    y.High = x >> 64
                    y.Low = x & 0xFFFFFFFFFFFFFFFF  # low 64 bits, the inverse of to_dict()'s Low + (High << 64)
v[i] = y
i += 1
setattr(s, key, v)
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in cls._control:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in cls._integer:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in cls._segments:
setattr(s, key, ctx[key])
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in cls._debug:
setattr(s, key, ctx[key])
        if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
            xmm = s.FltSave.xmm
            for key in cls._mmx:
                x = ctx[key]  # was missing: 'x' silently reused the last value from the loop above
                y = M128A()
                y.High = x >> 64
                y.Low = x & 0xFFFFFFFFFFFFFFFF  # low 64 bits, the inverse of to_dict()
                setattr(xmm, key, y)
return s
def to_dict(self):
'Convert a structure into a Python dictionary.'
ctx = Context()
ContextFlags = self.ContextFlags
ctx['ContextFlags'] = ContextFlags
for key in self._others:
if key != 'VectorRegister':
ctx[key] = getattr(self, key)
else:
ctx[key] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, key) ])
if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
for key in self._control:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
for key in self._integer:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
for key in self._segments:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
for key in self._debug:
ctx[key] = getattr(self, key)
if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
xmm = self.FltSave.xmm.to_dict()
for key in self._mmx:
ctx[key] = xmm.get(key)
return ctx
PCONTEXT = ctypes.POINTER(CONTEXT)
LPCONTEXT = PCONTEXT
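# Quick sketch of the dictionary round-trip (no real thread involved): a freshly
# constructed CONTEXT has ContextFlags == 0, so to_dict() only emits ContextFlags
# plus the fields listed in CONTEXT._others, and from_dict() rebuilds an
# equivalent structure from that dictionary.
#
#   snapshot = CONTEXT().to_dict()          # Context dict with ContextFlags == 0
#   restored = CONTEXT.from_dict(snapshot)  # CONTEXT instance built back from it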
class Context(dict):
"""
Register context dictionary for the amd64 architecture.
"""
arch = CONTEXT.arch
def __get_pc(self):
return self['Rip']
def __set_pc(self, value):
self['Rip'] = value
pc = property(__get_pc, __set_pc)
def __get_sp(self):
return self['Rsp']
def __set_sp(self, value):
self['Rsp'] = value
sp = property(__get_sp, __set_sp)
def __get_fp(self):
return self['Rbp']
def __set_fp(self, value):
self['Rbp'] = value
fp = property(__get_fp, __set_fp)
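# Minimal usage sketch (register values are made up): the Context dictionary
# exposes the program counter, stack pointer and frame pointer as properties
# mapped onto the Rip/Rsp/Rbp keys.
#
#   ctx = Context({'Rip': 0x401000, 'Rsp': 0x7fff0000, 'Rbp': 0x7fff0040})
#   assert ctx.pc == ctx['Rip']
#   ctx.sp += 8    # same as ctx['Rsp'] += 8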
#--- LDT_ENTRY structure ------------------------------------------------------
# typedef struct _LDT_ENTRY {
# WORD LimitLow;
# WORD BaseLow;
# union {
# struct {
# BYTE BaseMid;
# BYTE Flags1;
# BYTE Flags2;
# BYTE BaseHi;
# } Bytes;
# struct {
# DWORD BaseMid :8;
# DWORD Type :5;
# DWORD Dpl :2;
# DWORD Pres :1;
# DWORD LimitHi :4;
# DWORD Sys :1;
# DWORD Reserved_0 :1;
# DWORD Default_Big :1;
# DWORD Granularity :1;
# DWORD BaseHi :8;
# } Bits;
# } HighWord;
# } LDT_ENTRY,
# *PLDT_ENTRY;
class _LDT_ENTRY_BYTES_(Structure):
_pack_ = 1
_fields_ = [
('BaseMid', BYTE),
('Flags1', BYTE),
('Flags2', BYTE),
('BaseHi', BYTE),
]
class _LDT_ENTRY_BITS_(Structure):
_pack_ = 1
_fields_ = [
('BaseMid', DWORD, 8),
('Type', DWORD, 5),
('Dpl', DWORD, 2),
('Pres', DWORD, 1),
('LimitHi', DWORD, 4),
('Sys', DWORD, 1),
('Reserved_0', DWORD, 1),
('Default_Big', DWORD, 1),
('Granularity', DWORD, 1),
('BaseHi', DWORD, 8),
]
class _LDT_ENTRY_HIGHWORD_(Union):
_pack_ = 1
_fields_ = [
('Bytes', _LDT_ENTRY_BYTES_),
('Bits', _LDT_ENTRY_BITS_),
]
class LDT_ENTRY(Structure):
_pack_ = 1
_fields_ = [
('LimitLow', WORD),
('BaseLow', WORD),
('HighWord', _LDT_ENTRY_HIGHWORD_),
]
PLDT_ENTRY = POINTER(LDT_ENTRY)
LPLDT_ENTRY = PLDT_ENTRY
#--- WOW64 CONTEXT structure and constants ------------------------------------
# Value of SegCs in a Wow64 thread when running in 32 bits mode
WOW64_CS32 = 0x23
WOW64_CONTEXT_i386 = 0x00010000L
WOW64_CONTEXT_i486 = 0x00010000L
WOW64_CONTEXT_CONTROL = (WOW64_CONTEXT_i386 | 0x00000001L)
WOW64_CONTEXT_INTEGER = (WOW64_CONTEXT_i386 | 0x00000002L)
WOW64_CONTEXT_SEGMENTS = (WOW64_CONTEXT_i386 | 0x00000004L)
WOW64_CONTEXT_FLOATING_POINT = (WOW64_CONTEXT_i386 | 0x00000008L)
WOW64_CONTEXT_DEBUG_REGISTERS = (WOW64_CONTEXT_i386 | 0x00000010L)
WOW64_CONTEXT_EXTENDED_REGISTERS = (WOW64_CONTEXT_i386 | 0x00000020L)
WOW64_CONTEXT_FULL = (WOW64_CONTEXT_CONTROL | WOW64_CONTEXT_INTEGER | WOW64_CONTEXT_SEGMENTS)
WOW64_CONTEXT_ALL = (WOW64_CONTEXT_CONTROL | WOW64_CONTEXT_INTEGER | WOW64_CONTEXT_SEGMENTS | WOW64_CONTEXT_FLOATING_POINT | WOW64_CONTEXT_DEBUG_REGISTERS | WOW64_CONTEXT_EXTENDED_REGISTERS)
WOW64_SIZE_OF_80387_REGISTERS = 80
WOW64_MAXIMUM_SUPPORTED_EXTENSION = 512
class WOW64_FLOATING_SAVE_AREA (context_i386.FLOATING_SAVE_AREA):
pass
class WOW64_CONTEXT (context_i386.CONTEXT):
pass
class WOW64_LDT_ENTRY (context_i386.LDT_ENTRY):
pass
PWOW64_FLOATING_SAVE_AREA = POINTER(WOW64_FLOATING_SAVE_AREA)
PWOW64_CONTEXT = POINTER(WOW64_CONTEXT)
PWOW64_LDT_ENTRY = POINTER(WOW64_LDT_ENTRY)
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Axon.Component import component, scheduler, linkage, newComponent
from Axon.Ipc import producerFinished
import Kamaelia.KamaeliaIPC
import string, os, time
class ClientProtocolHandler(component):
Inboxes = ["inbox","control", "_displayFinished"]
Outboxes = ["outbox","signal", "_filteredControlMessages"]
def __init__(self, platform, tempDir, initialsendmessage, delay):
super(ClientProtocolHandler,self).__init__()
self.requestmessage = initialsendmessage
self.platform = platform
self.tempDir = tempDir
self.delay = delay
def initialiseComponent(self):
if self.requestmessage == ".mpg":
self.requestmessage = ".jpg"
myPacketCombiner = combinePackets()
myFile = createFile(self.tempDir, self.requestmessage)
myDisplay = show(self.requestmessage, self.platform, self.delay)
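        # Pipeline assembled below (data flows left to right; shutdown messages
        # travel along the signal/control boxes of the same components):
        #   self inbox -> combinePackets -> createFile -> show -> back to self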
"Linkages for myPacketCombiner"
self.link(source=(self, "inbox"), sink=(myPacketCombiner, "inbox"), passthrough=1)
self.link(source=(self, "_filteredControlMessages"), sink=(myPacketCombiner, "control"), passthrough=0)
"Linkages for myFile"
self.link(source=(myPacketCombiner, "outbox"), sink=(myFile, "inbox"), passthrough=0)
self.link(source=(myPacketCombiner, "signal"), sink=(myFile, "control"), passthrough=0)
"Linkages for myDisplay"
self.link(source=(myFile, "outbox"), sink=(myDisplay, "inbox"), passthrough=0)
self.link(source=(myFile, "signal"), sink=(myDisplay, "control"), passthrough=0)
self.link(source=(myDisplay, "signal"), sink=(self, "_displayFinished"), passthrough=0)
self.addChildren(myPacketCombiner, myFile, myDisplay)
return newComponent(myPacketCombiner, myFile, myDisplay)
def mainBody(self):
# print "ClientProtocolHandler: Checking mail..."
if self.dataReady("_displayFinished"):
print "ClientProtocolHandler: Message received! (_displayFinished)"
self.message = self.recv("_displayFinished")
if isinstance(self.message, producerFinished):
self.send(producerFinished("Done"),"signal")
return 0
else:
print "ClientProtocolHandler: Message from _displayFinished is..."
print self.message
if self.dataReady("control"):
print "ClientProtocolHandler: Message received! (control)"
self.controlMessage = self.recv("control")
if isinstance(self.controlMessage, Kamaelia.KamaeliaIPC.socketShutdown) or self.controlMessage == "StoppedThread":
self.send(self.controlMessage, "_filteredControlMessages")
else:
print "ClientProtocolHandler: Message from control is..."
print self.controlMessage
return 1
def closeDownComponent(self):
print "ClientProtocolHandler: Shutting down"
class combinePackets(component):
"""
    Checks messages on the inbox, then on control. Data received on the inbox is
    appended to a list of buffers. Each chunk is taken from that list and added
    to a working buffer, which is searched for "\n"; the text before the "\n" is
    the number of characters the following picture should contain, and once that
    many characters have arrived they are emitted as one complete picture.
"""
def __init__(self):
super(combinePackets,self).__init__()
self.list_packets = []
self.buffer = ""
self.buffers = []
def mainBody(self):
# print "combinePackets: Checking mail..."
if self.dataReady("inbox"):
# print "combinePackets: Message received! (inbox)"
self.data = self.recv("inbox")
self.buffers.append(self.data)
while len(self.buffer)>0 or len(self.buffers)>0:
if len(self.buffers) > 0:
self.chunk = self.buffers[0]
del self.buffers[0]
else:
self.chunk = ""
self.buffer = self.buffer + self.chunk
# print "combinePackets: BUFF:",repr(self.buffer)
self.end_of_length = self.buffer.find("\n")
if self.end_of_length != -1:
# print "combinePackets: EOL:", self.end_of_length
try:
self.length_of_picture = int(self.buffer[:self.end_of_length])
except Exception,e:
print e
print "buffer:", self.buffer
print "EOL:", self.end_of_length
print "LOP:", self.length_of_picture
# print "combinePackets: LEN:", self.length_of_picture
if len(self.buffer) >= self.end_of_length + self.length_of_picture + 1:
self.picture = self.buffer[self.end_of_length + 1: self.end_of_length + self.length_of_picture + 1]
self.send(self.picture,"outbox")
# print "combinePackets: CONSUMED:", repr(self.picture)
self.buffer = self.buffer[self.end_of_length + self.length_of_picture + 1:]
else:
pass
# print "combinePackets: buffer needs data"
return 1
if self.dataReady("control"):
print "combinePackets: Message received! (control)"
self.controlMessage = self.recv("control")
print "combinePackets: Message from control is..."
print self.controlMessage
if isinstance(self.controlMessage, Kamaelia.KamaeliaIPC.socketShutdown) or self.controlMessage == "StoppedThread":
self.send(producerFinished("Done"),"signal")
return 0
return 1
def closeDownComponent(self):
print "combinePackets: Shutting down"
class createFile(component):
"""
Get file contents from inbox
    create a name for the file using the arguments tempDir and file_type
write contents to the file
close file
send filename to outbox
check control for a message that is an instance of producerFinished, shutdown if true
"""
Inboxes = ["inbox","control"]
Outboxes = ["outbox","signal"]
def __init__(self, tempDir, file_type):
super(createFile,self).__init__()
self.tempDir = tempDir
self.file_type = file_type
self.fileNumber = 0
def mainBody(self):
# print "createFile: Checking mail..."
if self.dataReady("inbox"):
print "createFile: Message received! (inbox)"
self.filecontents = self.recv("inbox")
self.fullfilename = self.create_TempFilename(self.tempDir, file_type=self.file_type)
try:
# print "createFile: creating file object"
self.file_object = open(self.fullfilename,"wb")
print "createFile: writing contents to file"
self.file_object.write(self.filecontents)
# print "createFile: closing file object"
self.file_object.close()
except Exception, e:
print "createFile: Exception..."
print e
self.send(self.fullfilename, "outbox")
if self.dataReady("control"):
print "createFile: Message received! (control)"
self.controlMessage = self.recv("control")
if isinstance(self.controlMessage, producerFinished):
self.send(producerFinished("Done"), "signal")
return 0
return 1
def closeDownComponent(self):
print "createFile: Shutting down"
def create_TempFilename(self, tempDirectory, filename_without_ext=None, file_type=".bmp"):
"""
Creates a new, unique full filename each time it is called by incrementing the digit at the
end of the filename.
"""
if filename_without_ext == None:
filename_without_ext = "TempFile"
filename = filename_without_ext + str(self.fileNumber) + file_type
self.fileNumber = self.fileNumber + 1
fullfilename = os.path.join(tempDirectory, filename)
# print "createFile: Temp file will be...", fullfilename
return fullfilename
class show(component):
Inboxes = ["inbox","control"]
Outboxes = ["outbox","signal"]
def __init__(self, file_type, platform, delay):
super(show,self).__init__()
self.file_type = file_type
self.platform = platform ##used to check if using phone or PC
# self.tempDir = tempDir ##used to check if using phone or PC
self.delay = delay
self.tempFiles = []
self.filesToDisplay = []
self.timeOfDisplay = 0
self.controlMessage = ""
self.hadAFile = False
def mainBody(self):
# print "show: Checking mail..."
if self.dataReady("inbox"):
print "show: Message received (inbox)"
self.fullfilename = self.recv("inbox")
print "show: Message from inbox is...", self.fullfilename
self.tempFiles.append(self.fullfilename)
self.filesToDisplay.append(self.fullfilename)
self.hadAFile = True
if self.dataReady("control"):
print "show: Message received (control)"
self.controlMessage = self.recv("control")
if len(self.filesToDisplay) > 0:
if (time.time() - self.timeOfDisplay) > self.delay:
self.file = self.filesToDisplay[0]
del self.filesToDisplay[0]
if self.file_type == ".txt":
print "show: preparing to display text"
self.file_object = open(self.fullfilename, "r")
self.text = self.file_object.read()
if self.platform != "symbian_s60": #self.tempDir == "C:\\ClientContent\\":
# import pygame
# pygame.init()
self.display_pygame_text_string(self.text) #display string in Pygame Window
else:
self.display_text_string(self.text)
elif self.file_type == ".jpg":
print "show: preparing to display a picture"
if self.platform != "symbian_s60": #self.tempDir == "C:\\ClientContent\\":
# import pygame
# pygame.init()
self.display_pygame_image_file(self.fullfilename) # display image using pygame
else:
self.display_nokia6600(self.fullfilename) # display image on nokia6600
self.timeOfDisplay = time.time()
return 1
if isinstance(self.controlMessage, producerFinished) and len(self.filesToDisplay) == 0:
# if self.tempDir == "C:\\ClientContent\\" and self.hadAFile == True:
if self.platform != "symbian_s60" and self.hadAFile == True:
self.pygame_eventHandler_2()
elif self.file_type == ".jpg" and self.hadAFile == True:
self.display_nokia6600_2(self.fullfilename) # display image on nokia6600
self.send(producerFinished("Done"), "signal")
return 0
return 1
def closeDownComponent(self):
for file in self.tempFiles:
try:
print "show: Removing", file
os.remove(file)
except Exception, e:
print "show: ", e
print "show: Shutting down"
def display_text_string(self, filecontents):
"""
        Takes a string in the filecontents argument, displays it in the Python shell
        and then blocks until the user presses the return key.
"""
print "display_text: the message is..."
print filecontents
raw_input()
def display_pygame_text_string(self,
filecontents,
font_size=20,
screensize=[600,600],
back_colour=(0,0,0),
font_colour=(255,255,255)):
if filecontents != "":
# print "display_pygame_text_string: Preparing display"
font = pygame.font.Font(None,font_size) #Font
text_surface = font.render(filecontents, 1, font_colour) #Surface
# print "display_pygame_text_string: creating display"
self.pygame_displayHandler(text_surface, screensize, back_colour)
self.pygame_eventHandler()
def display_pygame_image_file(self,
image_location,
screensize=[600,600],
back_colour=(0,0,0)):
# print "display_pygame_image_file: creating image surface"
image_surface = pygame.image.load(image_location)
# print "display_pygame_image_file: creating display"
self.pygame_displayHandler(image_surface, screensize, back_colour)
self.pygame_eventHandler()
def display_pygame_image_string(self,
image_string,
format,
resolution,
screensize=[600,600],
back_colour=(0,0,0)):
# print "display_pygame_image_string: creating image surface"
image_surface = pygame.image.fromstring(image_string, resolution, format)
# print "display_pygame_image_string: creating display"
self.pygame_displayHandler(image_surface, screensize, back_colour)
self.pygame_eventHandler()
def pygame_displayHandler(self,
surface,
screensize=[600,600],
back_colour=(0,0,0)):
# print "pygame_displayHandler: getting dimensions"
width = surface.get_width()
height = surface.get_height()
horizonal_to_move = (screensize[0] - width)/2
vertical_to_move = (screensize[1] - height)/2
# print "pygame_displayHandler: moving rect"
rect = surface.get_rect()
rect = rect.move([horizonal_to_move,vertical_to_move])
# print "pygame_displayHandler: creating display"
screen_surface = pygame.display.set_mode(screensize)
# print "display_pygame_image: display"
screen_surface.fill(back_colour)
screen_surface.blit(surface, rect)
pygame.display.flip()
def pygame_eventHandler(self):
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
print "pygame_eventHandler: User closed window, shutting down"
pygame.quit()
return 0
if event.type == pygame.KEYDOWN:
print "pygame_eventHandler: User pushed a button, shutting down"
pygame.quit()
return 0
return 1
def pygame_eventHandler_2(self):
print "pygame_eventHandler_2: Waiting for user response..."
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
print "pygame_eventHandler_2: User closed window, shutting down"
pygame.quit()
return 0
if event.type == pygame.KEYDOWN:
print "pygame_eventHandler_2: User pushed a button, shutting down"
pygame.quit()
return 0
def display_nokia6600(self, fullfilename):
try:
print "display_nokia6600_2: Opening file..."
appuifw.Content_handler().open(fullfilename)
print "display_nokia6600_2: opened it"
return 1
except IOError:
print "display_nokia6600_2: Could not fetch the image."
except Exception, e:
print e
print "display_nokia6600_2: Could not open data received."
def display_nokia6600_2(self, fullfilename):
try:
print "display_nokia6600: Opening file..."
lock=e32.Ao_lock()
content_handler = appuifw.Content_handler(lock.signal)
content_handler.open(fullfilename)
# Wait for the user to exit the image viewer.
lock.wait()
print "display_nokia6600: Image viewer finished."
return 0
except IOError:
print "display_nokia6600: Could not fetch the image."
except Exception, e:
print e
print "display_nokia6600: Could not open data received."
def display_nokia6600_eventHandler(self):
e32.Ao_lock().wait()
print "display_nokia6600: Image viewer finished."
return 0
if __name__ == "__main__":
from Kamaelia.Internet import ThreadedTCPClient
from Axon.Component import component, scheduler, linkage
from Axon.Ipc import newComponent
# import e32, appuifw #nokia6600 libs
class Client(component):
def __init__(self, platform, tempDir):
super(Client,self).__init__()
self.platform = platform
self.tempDir = tempDir
self.IP_toConnectTo = "132.185.133.36"
self.serverport = 1616
self.delay = 5
self.requestmessage = ".mpg"
if 0:
if self.platform != "symbian_s60": # self.tempDir == "C:\\ClientContent\\":
print "Enter Server IP (127.0.0.1)"
self.IP_toConnectTo = raw_input()
print "Enter port (1616)"
self.serverport = int(raw_input())
print "Enter delay value"
self.delay = float(raw_input())
print "Enter request (.txt .jpg or .mpg)"
self.requestmessage = raw_input()
else:
self.IP_toConnectTo = appuifw.query(u"Enter Server IP", "text", u"132.185.133.36")
self.serverport = appuifw.query(u"Enter port", "number")
self.delay = appuifw.query(u"Enter delay value.", "number")
self.requestmessage = appuifw.query(u"Enter request ('.txt', '.jpg' or '.mpg')", "text", u".mpg")
self.client = None
self.display = ClientProtocolHandler(self.platform, self.tempDir, self.requestmessage, self.delay)
def initialiseComponent(self):
self.client = ThreadedTCPClient.ThreadedTCPClient(self.IP_toConnectTo,self.serverport, delay=1, initialsendmessage=self.requestmessage)
self.addChildren(self.client, self.display)
self.link((self.client,"outbox"), (self.display,"inbox") )
self.link((self.display,"outbox"), (self.client,"inbox") )
self.link((self.client,"signal"), (self.display,"control") )
self.link((self.display,"signal"), (self,"control") )
return newComponent( self.client, self.display)
def mainBody(self):
if self.dataReady("control"):
something = self.recv("control")
return 0
return 1
def closeDownComponent(self):
print "ALL DONE! GOODBYE"
from sys import platform as sys_platform
tmpdir = "tmp"
if sys_platform == "symbian_s60":
import e32, appuifw # nokia6600 libs
tmpdir = "E:\\Ciaran's Files\\Temp" # tmpdir must be on memory card
else:
import pygame
pygame.init()
try:
os.mkdir(tmpdir)
except:
pass
t = Client(sys_platform, tmpdir)
t.activate()
scheduler.run.runThreads(slowmo=0)
|
|
"""
homeassistant.components.media_player.firetv
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides control over an Amazon Fire TV (/stick) via
python-firetv, a Python 2.x module with a helper script
that exposes a HTTP server to fetch state and perform
actions.
Steps to configure your Amazon Fire TV stick with Home Assistant:
1. Turn on ADB Debugging on your Amazon Fire TV:
a. From the main (Launcher) screen, select Settings.
b. Select System > Developer Options.
c. Select ADB Debugging.
2. Find Amazon Fire TV device IP:
a. From the main (Launcher) screen, select Settings.
b. Select System > About > Network.
3. `pip install firetv[firetv-server]` into a Python 2.x environment
4. `firetv-server -d <fire tv device IP>:5555`, background the process
5. Configure Home Assistant as follows:
media_player:
platform: firetv
# optional: where firetv-server is running (default is 'localhost:5556')
host: localhost:5556
# optional: device id (default is 'default')
device: livingroom-firetv
# optional: friendly name (default is 'Amazon Fire TV')
name: My Amazon Fire TV
Note that python-firetv has support for multiple Amazon Fire TV devices.
If you have more than one configured, be sure to specify the device id used.
Run `firetv-server -h` and/or view the source for complete capabilities.
Possible states are:
- off (TV screen is dark)
- standby (standard UI is active - not apps)
- idle (screen saver is active)
- play (video is playing)
- pause (video is paused)
- disconnected (can't communicate with device)
"""
import logging
import requests
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_IDLE, STATE_OFF,
STATE_UNKNOWN, STATE_STANDBY)
from homeassistant.components.media_player import (
MediaPlayerDevice,
SUPPORT_PAUSE, SUPPORT_VOLUME_SET,
SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK)
SUPPORT_FIRETV = SUPPORT_PAUSE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_VOLUME_SET
DOMAIN = 'firetv'
DEVICE_LIST_URL = 'http://{0}/devices/list'
DEVICE_STATE_URL = 'http://{0}/devices/state/{1}'
DEVICE_ACTION_URL = 'http://{0}/devices/action/{1}/{2}'
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the firetv platform. """
host = config.get('host', 'localhost:5556')
device_id = config.get('device', 'default')
try:
response = requests.get(DEVICE_LIST_URL.format(host)).json()
if device_id in response['devices'].keys():
add_devices([
FireTVDevice(
host,
device_id,
config.get('name', 'Amazon Fire TV')
)
])
_LOGGER.info(
'Device %s accessible and ready for control', device_id)
else:
            _LOGGER.warning(
'Device %s is not registered with firetv-server', device_id)
except requests.exceptions.RequestException:
_LOGGER.error('Could not connect to firetv-server at %s', host)
class FireTV(object):
""" firetv-server client.
Should a native Python 3 ADB module become available,
    python-firetv can support Python 3; it can then be added
    as a dependency, and this class can be dispensed with.
For now, it acts as a client to the firetv-server
HTTP server (which must be running via Python 2).
"""
def __init__(self, host, device_id):
self.host = host
self.device_id = device_id
@property
def state(self):
""" Get the device state.
An exception means UNKNOWN state.
"""
try:
response = requests.get(
DEVICE_STATE_URL.format(
self.host,
self.device_id
)
).json()
return response.get('state', STATE_UNKNOWN)
except requests.exceptions.RequestException:
_LOGGER.error(
'Could not retrieve device state for %s', self.device_id)
return STATE_UNKNOWN
def action(self, action_id):
""" Perform an action on the device.
There is no action acknowledgment, so exceptions
result in a pass.
"""
try:
requests.get(
DEVICE_ACTION_URL.format(
self.host,
self.device_id,
action_id
)
)
except requests.exceptions.RequestException:
_LOGGER.error(
'Action request for %s was not accepted for device %s',
action_id, self.device_id)
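# Standalone sketch of the client above (outside Home Assistant, with the
# default host and device id assumed):
#   tv = FireTV('localhost:5556', 'default')
#   current = tv.state          # e.g. 'play', or STATE_UNKNOWN on errors
#   tv.action('media_pause')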
class FireTVDevice(MediaPlayerDevice):
""" Represents an Amazon Fire TV device on the network. """
def __init__(self, host, device, name):
self._firetv = FireTV(host, device)
self._name = name
self._state = STATE_UNKNOWN
@property
def name(self):
""" Get the device name. """
return self._name
@property
def should_poll(self):
""" Device should be polled. """
return True
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_FIRETV
@property
def state(self):
""" State of the player. """
return self._state
def update(self):
""" Update device state. """
self._state = {
'idle': STATE_IDLE,
'off': STATE_OFF,
'play': STATE_PLAYING,
'pause': STATE_PAUSED,
'standby': STATE_STANDBY,
'disconnected': STATE_UNKNOWN,
}.get(self._firetv.state, STATE_UNKNOWN)
def turn_on(self):
""" Turns on the device. """
self._firetv.action('turn_on')
def turn_off(self):
""" Turns off the device. """
self._firetv.action('turn_off')
def media_play(self):
""" Send play commmand. """
self._firetv.action('media_play')
def media_pause(self):
""" Send pause command. """
self._firetv.action('media_pause')
def media_play_pause(self):
""" Send play/pause command. """
self._firetv.action('media_play_pause')
def volume_up(self):
""" Send volume up command. """
self._firetv.action('volume_up')
def volume_down(self):
""" Send volume down command. """
self._firetv.action('volume_down')
def media_previous_track(self):
""" Send previous track command (results in rewind). """
self._firetv.action('media_previous')
def media_next_track(self):
""" Send next track command (results in fast-forward). """
self._firetv.action('media_next')
def media_seek(self, position):
raise NotImplementedError()
def mute_volume(self, mute):
raise NotImplementedError()
def play_youtube(self, media_id):
raise NotImplementedError()
def set_volume_level(self, volume):
raise NotImplementedError()
|
|
import datetime
import json
import logging
import os
import re
import resource
import threading
import numpy
from ece2cmor3 import cmor_target, cmor_source, cmor_task, cmor_utils, grib_file, cdoapi
# Log object.
log = logging.getLogger(__name__)
gridpoint_files = {}
spectral_files = {}
ini_gridpoint_file = None
ini_spectral_file = None
temp_dir = None
accum_key = "ACCUMFLD"
accum_codes = []
varsfreq = {}
spvar = None
fxvars = []
record_keys = {}
starttimes = {}
# Initializes the module, looks up previous month files and inspects the first
# day in the input files to set up an administration of the fields.
def initialize(gpfiles, shfiles, tmpdir, ini_gpfile=None, ini_shfile=None):
global gridpoint_files, spectral_files, ini_gridpoint_file, ini_spectral_file, temp_dir, varsfreq, accum_codes, \
record_keys
grib_file.initialize()
gridpoint_files = {d: (get_prev_file(gpfiles[d]), gpfiles[d]) for d in gpfiles.keys()}
spectral_files = {d: (get_prev_file(shfiles[d]), shfiles[d]) for d in shfiles.keys()}
ini_gridpoint_file, ini_spectral_file = ini_gpfile, ini_shfile
temp_dir = tmpdir
accum_codes = load_accum_codes(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources", "grib_codes.json"))
gpdate = sorted(gridpoint_files.keys())[0] if any(gridpoint_files) else None
shdate = sorted(spectral_files.keys())[0] if any(spectral_files) else None
gpfile = gridpoint_files[gpdate][1] if any(gridpoint_files) else None
shfile = spectral_files[shdate][1] if any(spectral_files) else None
if gpfile is not None:
with open(gpfile) as gpf:
freqs, records = inspect_day(grib_file.create_grib_file(gpf), grid=cmor_source.ifs_grid.point)
varsfreq.update(freqs)
record_keys[cmor_source.ifs_grid.point] = records
update_sp_key(gpfile)
if shfile is not None:
with open(shfile) as shf:
freqs, records = inspect_day(grib_file.create_grib_file(shf), grid=cmor_source.ifs_grid.spec)
varsfreq.update(freqs)
record_keys[cmor_source.ifs_grid.spec] = records
update_sp_key(shfile)
if ini_gpfile is not None:
with open(ini_gpfile) as gpf:
fxvars.extend(inspect_hr(grib_file.create_grib_file(gpf), grid=cmor_source.ifs_grid.point))
if ini_shfile is not None:
with open(ini_shfile) as shf:
fxvars.extend(inspect_hr(grib_file.create_grib_file(shf), grid=cmor_source.ifs_grid.spec))
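# Typical call (sketch; the date keys and IFS-style file names are hypothetical):
#   initialize({datetime.date(1990, 1, 1): "ICMGGexp+199001"},
#              {datetime.date(1990, 1, 1): "ICMSHexp+199001"},
#              tmpdir="/tmp/ece2cmor3")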
# Fix for finding the surface pressure, necessary to store 3d model level fields
def update_sp_key(fname):
global spvar
for key in varsfreq:
freq = varsfreq[key]
if key[0] == 154:
if spvar is None or spvar[1] >= freq:
spvar = (154, freq, fname)
if key[0] == 134:
if spvar is None or spvar[1] > freq:
spvar = (134, freq, fname)
# Function reading the file with grib-codes of accumulated fields
def load_accum_codes(path):
global accum_key
data = json.loads(open(path).read())
if accum_key in data:
return map(grib_tuple_from_string, data[accum_key])
else:
return []
# Utility to make grib tuple of codes from string
def grib_tuple_from_string(s):
codes = s.split('.')
return int(codes[0]), 128 if len(codes) < 2 else int(codes[1])
# Utility to make grib tuple of codes from the packed integer representation
def grib_tuple_from_ints(i, j):
if i < 10 ** 3:
return i, j
return i % 10 ** 3, i / 10 ** 3
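# Minimal sketch of the grib code parsing above (hypothetical code strings): the
# string "130.128" denotes parameter 130 of table 128, a bare "34" defaults to
# table 128, and the packed integer 228129 splits into parameter 129 of table 228.
def _example_grib_code_parsing():
    return (grib_tuple_from_string("130.128"),  # -> (130, 128)
            grib_tuple_from_string("34"),       # -> (34, 128)
            grib_tuple_from_ints(228129, 0))    # -> (129, 228)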
# Inspects a single time point in the initial file
def inspect_hr(gribfile, grid):
result = []
while gribfile.read_next(headers_only=True):
result.append(get_record_key(gribfile, grid) + (grid,))
return result
# Inspects the first 24 hours in the input gridpoint and spectral files.
def inspect_day(gribfile, grid):
inidate, initime = -99, -1
records = {}
keylist = []
while gribfile.read_next(headers_only=True):
date = gribfile.get_field(grib_file.date_key)
time = gribfile.get_field(grib_file.time_key) / 100
if date == inidate + 1 and time == initime:
gribfile.release()
break
if inidate < 0:
inidate = date
if initime < 0:
initime = time
short_key = get_record_key(gribfile, grid)
if short_key[1] == 0:
log.error("Invalid key at first day inspection: %s" % str(short_key))
keylist.append((time,) + short_key)
key = short_key + (grid,)
if key in records:
if time not in records[key]:
records[key].append(time)
else:
records[key] = [time]
gribfile.release()
result = {}
for key, val in records.iteritems():
hrs = numpy.array(val)
if len(hrs) == 1:
log.warning("Variable %d.%d on level %d of type %d has been detected once in first day "
"of file %s... Assuming daily frequency" % (key[0], key[1], key[3], key[2],
gribfile.file_object.name))
frqs = numpy.array([24])
else:
frqs = numpy.mod(hrs[1:] - hrs[:-1], numpy.repeat(24, len(hrs) - 1))
frq = frqs[0]
if any(frqs != frq):
log.error("Variable %d.%d on level %d of type %d is not output on regular "
"intervals in first day in file %s" % (key[0], key[1], key[3], key[2], gribfile.file_object.name))
else:
result[key] = frq
return result, keylist
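# Minimal sketch of the frequency detection performed in inspect_day above
# (hypothetical output hours): a field found at hours 0, 6, 12 and 18 of the
# first day is reported with a 6-hourly output frequency.
def _example_detect_frequency(hours=(0, 6, 12, 18)):
    hrs = numpy.array(hours)
    frqs = numpy.mod(hrs[1:] - hrs[:-1], numpy.repeat(24, len(hrs) - 1))
    return int(frqs[0])  # -> 6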
# TODO: Merge the 2 functions below into one matching function:
# Creates a key (code + table + level type + level) for a grib message iterator
def get_record_key(gribfile, gridtype):
codevar, codetab = grib_tuple_from_ints(gribfile.get_field(grib_file.param_key),
gribfile.get_field(grib_file.table_key))
levtype, level = gribfile.get_field(grib_file.levtype_key), gribfile.get_field(grib_file.level_key)
if levtype == grib_file.pressure_level_hPa_code:
level *= 100
levtype = grib_file.pressure_level_Pa_code
if levtype == 112 or levtype == grib_file.depth_level_code or \
(codetab == 128 and codevar in [35, 36, 37, 38, 39, 40, 41, 42, 139, 170, 183, 236]):
level = 0
levtype = grib_file.depth_level_code
if codevar in [49, 165, 166]:
level = 10
levtype = grib_file.height_level_code
if codevar in [167, 168, 201, 202]:
level = 2
levtype = grib_file.height_level_code
if codevar == 9:
level = 0
levtype = grib_file.surface_level_code
if levtype == grib_file.pv_level_code: # Mapping pv-levels to surface: we don't support more than one pv-level
level = 0
levtype = grib_file.surface_level_code
cosp_levels = {40: 84000, 41: 56000, 42: 22000}
if codetab == 126 and codevar in cosp_levels.keys():
level = cosp_levels[codevar]
levtype = grib_file.pressure_level_Pa_code
# Fix for spectral height level fields in gridpoint file:
if cmor_source.grib_code(codevar) in cmor_source.ifs_source.grib_codes_sh and \
gridtype != cmor_source.ifs_grid.spec and levtype == grib_file.hybrid_level_code:
levtype = grib_file.height_level_code
return codevar, codetab, levtype, level
# Used to distribute keys created above over cmor tasks
def soft_match_key(varid, tabid, levtype, level, gridtype, keys):
if (varid, tabid, levtype, level, gridtype) in keys:
return varid, tabid, levtype, level, gridtype
# Fix for orog and ps: find them in either GG or SH file
if varid in [134, 129] and tabid == 128 and levtype == grib_file.surface_level_code and level == 0:
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.surface_level_code]
if any(matches):
return matches[0]
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.hybrid_level_code and
k[3] == 1]
if any(matches):
return matches[0]
# Fix for depth levels variables
if levtype == grib_file.depth_level_code:
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] in
(grib_file.depth_level_code, grib_file.surface_level_code)]
if any(matches):
return matches[0]
if levtype == grib_file.hybrid_level_code and level == -1:
matches = [k for k in keys if k[0] == varid and k[1] == tabid and k[2] == grib_file.hybrid_level_code and
k[4] == gridtype]
if any(matches):
return matches[0]
# Fix for spectral fields at height levels being written as model level fields in GG file
if levtype == grib_file.height_level_code and gridtype == cmor_source.ifs_grid.spec:
matches = [k for k in keys if k[:4] == (varid, tabid, grib_file.height_level_code, level)]
if any(matches):
return matches[0]
return None
# Converts cmor-levels to grib levels code
def get_levels(task, code):
global log
# Special cases
if code.tab_id == 128:
gc = code.var_id
if gc in [9, 134]:
return grib_file.surface_level_code, [0]
if gc in [35, 36, 37, 38, 39, 40, 41, 42, 139, 170, 183, 236]:
return grib_file.depth_level_code, [0]
if gc in [49, 165, 166]:
return grib_file.height_level_code, [10]
if gc in [167, 168, 201, 202]:
return grib_file.height_level_code, [2]
# Normal cases
zaxis, levels = cmor_target.get_z_axis(task.target)
if zaxis is None:
return grib_file.surface_level_code, [0]
if zaxis in ["sdepth"]:
return grib_file.depth_level_code, [0]
if zaxis in ["alevel", "alevhalf"]:
return grib_file.hybrid_level_code, [-1]
if zaxis == "air_pressure":
return grib_file.pressure_level_Pa_code, [int(float(level)) for level in levels]
if zaxis in ["height", "altitude"]:
return grib_file.height_level_code, [int(float(level)) for level in levels] # TODO: What about decimal places?
log.error("Could not convert vertical axis type %s to grib vertical coordinate "
"code for %s" % (zaxis, task.target.variable))
return -1, []
# Searches the file system for the previous month file, necessary for the 0-hour
# fields.
def get_prev_file(grb_file):
fname = os.path.basename(grb_file)
exp, year, mon = fname[5:9], int(fname[10:14]), int(fname[14:16])
if mon == 1:
prev_year, prev_mon = year - 1, 12
else:
prev_year, prev_mon = year, mon - 1
output_dir = os.path.abspath(os.path.join(os.path.dirname(grb_file), ".."))
output_files = cmor_utils.find_ifs_output(output_dir, exp)
ini_path = None
for output_path in output_files:
output_name = os.path.basename(output_path)
if output_name == fname[:9] + "+000000":
ini_path = output_path
if output_name[:10] == fname[:10] and int(output_name[10:14]) == prev_year and \
int(output_name[14:]) == prev_mon:
log.info("Found previous month file for %s: %s" % (grb_file, output_path))
return output_path
ece_leg = os.path.split(os.path.dirname(grb_file))[-1]
if re.match(r"^0*\d1$", ece_leg): # First leg
if ini_path is None:
log.error("Previous month file for %s could not be found because the initial state file hasn't been found"
% grb_file)
else:
log.info("Assumed previous month file for %s: %s" % (grb_file, ini_path))
else:
if ini_path is None:
log.error("Previous month file for %s could not be found" % grb_file)
else:
log.error("Assumed previous month file for %s: %s, this is probably not correct!" % (grb_file, ini_path))
return ini_path
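# Minimal sketch of the filename convention assumed above (hypothetical
# experiment name "ECE3"): "ICMGGECE3+199001" encodes experiment "ECE3",
# year 1990, month 1, so the previous month file would be "ICMGGECE3+198912"
# and the initial state file "ICMGGECE3+000000".
def _example_prev_month_name(fname="ICMGGECE3+199001"):
    exp, year, mon = fname[5:9], int(fname[10:14]), int(fname[14:16])
    prev_year, prev_mon = (year - 1, 12) if mon == 1 else (year, mon - 1)
    return exp, fname[:10] + "%04d%02d" % (prev_year, prev_mon)  # -> ("ECE3", "ICMGGECE3+198912")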
# Splits the grib file for the given set of tasks
def mkfname(key):
return '.'.join([str(key[0]), str(key[1]), str(key[2])])
# Construct files for keys and tasks
def cluster_files(valid_tasks, varstasks):
task2files, task2freqs = {}, {}
varsfx = set()
for task in valid_tasks:
task2files[task] = set()
task2freqs[task] = set()
for key, tsklist in varstasks.iteritems():
if task in tsklist:
task2files[task].add('.'.join([str(key[0]), str(key[1]), str(key[2])]))
if key[3] == -1:
task2freqs[task].update([varsfreq[k] for k in varsfreq.keys() if
(k[0], k[1], k[2]) == (key[0], key[1], key[2])])
else:
if key in varsfreq:
task2freqs[task].add(varsfreq[key])
elif key in fxvars:
varsfx.add(key)
for task, fnames in task2files.iteritems():
codes = {(int(f.split('.')[0]), int(f.split('.')[1])): f for f in sorted(list(fnames))}
cum_file = '_'.join([codes[k] for k in codes if k in accum_codes])
inst_file = '_'.join([codes[k] for k in codes if k not in accum_codes])
task2files[task] = filter(None, [cum_file, inst_file])
for task, freqset in task2freqs.iteritems():
maxfreq = max(freqset) if len(freqset) > 0 else 0
if any([f for f in freqset if maxfreq % f != 0]):
log.error("Task depends on input fields with incompatible time steps")
task.status = cmor_task.status_failed
task2files.pop(task, None)
task2freqs[task] = maxfreq
task2files[task] = ['.'.join([p, str(maxfreq)]) for p in task2files[task]]
varsfiles = {key: set() for key in varstasks}
for key in varsfiles:
for t in varstasks[key]:
f = task2files[t][0]
if len(task2files[t]) == 2 and (key[0], key[1]) not in accum_codes:
f = task2files[t][1]
varsfiles[key].add((f, task2freqs[t]))
return task2files, task2freqs, varsfx, varsfiles
# Main execution loop
def execute(tasks, filter_files=True, multi_threaded=False):
valid_fx_tasks = execute_tasks([t for t in tasks if cmor_target.get_freq(t.target) == 0], filter_files,
multi_threaded=False, once=True)
valid_other_tasks = execute_tasks([t for t in tasks if cmor_target.get_freq(t.target) != 0], filter_files,
multi_threaded=multi_threaded, once=False)
return valid_fx_tasks + valid_other_tasks
def filter_fx_variables(gribfile, keys2files, gridtype, startdate, handles=None):
timestamp = -1
keys = set()
while gribfile.read_next() and (handles is None or any(handles.keys())):
t = gribfile.get_field(grib_file.time_key)
key = get_record_key(gribfile, gridtype)
if t == timestamp and key in keys:
continue # Prevent double grib messages
if t != timestamp:
keys = set()
timestamp = t
# This file may be processed twice: once for the fx-fields and once for the dynamic fields.
# We add only the written fx-fields to the key set here.
if any([k[0:4] == key for k in keys2files.keys()]):
keys.add(key)
write_record(gribfile, key + (gridtype,), keys2files, shift=0, handles=handles, once=True, setdate=startdate)
gribfile.release()
return keys, timestamp
def execute_tasks(tasks, filter_files=True, multi_threaded=False, once=False):
valid_tasks, varstasks = validate_tasks(tasks)
if not any(valid_tasks):
return []
task2files, task2freqs, fxkeys, keys2files = cluster_files(valid_tasks, varstasks)
grids = [cmor_source.ifs_grid.point, cmor_source.ifs_grid.spec]
if filter_files:
keys_gp, timestamp_gp = set(), -1
keys_sp, timestamp_sp = set(), -1
filehandles = open_files(keys2files)
fxkeys2files = {k: keys2files[k] for k in fxkeys}
if any(gridpoint_files):
gridpoint_start_date = sorted(gridpoint_files.keys())[0]
first_gridpoint_file = gridpoint_files[gridpoint_start_date][0]
if ini_gridpoint_file != first_gridpoint_file and ini_gridpoint_file is not None:
with open(str(ini_gridpoint_file), 'r') as fin:
keys_gp, timestamp_gp = filter_fx_variables(grib_file.create_grib_file(fin), fxkeys2files, grids[0], gridpoint_start_date,
filehandles)
elif ini_gridpoint_file is not None:
with open(str(ini_gridpoint_file), 'r') as fin:
keys_gp, timestamp_gp = filter_fx_variables(grib_file.create_grib_file(fin), fxkeys2files, grids[0], None, filehandles)
if any(spectral_files):
spectral_start_date = sorted(spectral_files.keys())[0]
first_spectral_file = spectral_files[spectral_start_date][0]
if ini_spectral_file != first_spectral_file and ini_spectral_file is not None:
with open(str(ini_spectral_file), 'r') as fin:
keys_sp, timestamp_sp = filter_fx_variables(grib_file.create_grib_file(fin), fxkeys2files, grids[1], spectral_start_date,
filehandles)
elif ini_spectral_file is not None:
with open(str(ini_spectral_file), 'r') as fin:
keys_sp, timestamp_sp = filter_fx_variables(grib_file.create_grib_file(fin), fxkeys2files, grids[1], None, filehandles)
if multi_threaded:
threads = []
for file_list, grid, keys, timestamp in zip([gridpoint_files, spectral_files], grids, [keys_gp, keys_sp], [timestamp_gp, timestamp_sp]):
thread = threading.Thread(target=filter_grib_files,
args=(file_list, keys2files, grid, filehandles, 0, 0, once, keys, timestamp))
threads.append(thread)
thread.start()
threads[0].join()
threads[1].join()
else:
for file_list, grid, keys, timestamp in zip([gridpoint_files, spectral_files], grids, [keys_gp, keys_sp], [timestamp_gp, timestamp_sp]):
filter_grib_files(file_list, keys2files, grid, filehandles, month=0, year=0, once=once, prev_keys=keys, prev_timestamp=timestamp)
for handle in filehandles.values():
handle.close()
for task in task2files:
if task.status != cmor_task.status_failed:
file_list = task2files[task]
filter_output = os.path.join(temp_dir, file_list[0])
if len(file_list) > 1:
filter_output = os.path.join(temp_dir, '_'.join(file_list))
if not os.path.isfile(filter_output):
cdoapi.cdo_command().merge([os.path.join(temp_dir, f) for f in file_list], filter_output)
setattr(task, cmor_task.filter_output_key, [filter_output])
for task in task2freqs:
if task.status != cmor_task.status_failed:
setattr(task, cmor_task.output_frequency_key, task2freqs[task])
return valid_tasks
# Checks tasks that are compatible with the variables listed in grib_vars and
# returns those that are compatible.
def validate_tasks(tasks):
varstasks = {}
valid_tasks = []
for task in tasks:
if task.status == cmor_task.status_failed or not isinstance(task.source, cmor_source.ifs_source):
continue
codes = task.source.get_root_codes()
target_freq = cmor_target.get_freq(task.target)
matched_keys = []
matched_grid = None
for c in codes:
if task.status == cmor_task.status_failed:
break
levtype, levels = get_levels(task, c)
for level in levels:
if task.status == cmor_task.status_failed:
break
match_key = soft_match_key(c.var_id, c.tab_id, levtype, level, task.source.grid_, varsfreq.keys())
if match_key is None:
if 0 != target_freq and c in cmor_source.ifs_source.grib_codes_fx:
match_key = soft_match_key(c.var_id, c.tab_id, levtype, level, task.source.grid_, fxvars)
if match_key is None:
log.error("Field missing in the initial state files: "
"code %d.%d, level type %d, level %d. Dismissing task %s in table %s" %
(c.var_id, c.tab_id, levtype, level, task.target.variable, task.target.table))
else:
log.error("Field missing in the first day of file: "
"code %d.%d, level type %d, level %d. Dismissing task %s in table %s" %
(c.var_id, c.tab_id, levtype, level, task.target.variable, task.target.table))
elif 0 < target_freq < varsfreq[match_key]:
log.error("Field has too low frequency for target %s: "
"code %d.%d, level type %d, level %d. Dismissing task %s in table %s" %
(task.target.variable, c.var_id, c.tab_id, levtype, level, task.target.variable,
task.target.table))
task.set_failed()
break
if match_key is None:
task.set_failed()
break
if matched_grid is None:
matched_grid = match_key[4]
else:
if match_key[4] != matched_grid:
log.warning("Task %s in table %s depends on both gridpoint and spectral fields" %
(task.target.variable, task.target.table))
if match_key[2] == grib_file.hybrid_level_code:
matched_keys.append((match_key[0], match_key[1], match_key[2], -1, match_key[4]))
else:
matched_keys.append(match_key)
if task.status != cmor_task.status_failed:
# Fix for zg and ps on gridpoints and spectral fields on height levels:
task.source.grid_ = matched_grid
for key in matched_keys:
if key in varstasks:
varstasks[key].append(task)
else:
varstasks[key] = [task]
valid_tasks.append(task)
return valid_tasks, varstasks
def open_files(vars2files):
files = set()
for fileset in vars2files.values():
files.update(set([t[0] for t in fileset]))
numreq = len(files)
softlim = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
if numreq > softlim + 1:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (numreq + 1, -1))
except ValueError:
return {}
return {f: open(os.path.join(temp_dir, f), 'w') for f in files}
def build_fast_forward_cache(keys2files, grid):
result = {}
i = 0
prev_key = (-1, -1, -1, -1, -1)
if grid not in record_keys:
return {}
for key in record_keys[grid]:
if key[:4] != prev_key[:4]: # flush
if i > 1:
result[prev_key] = i
prev_key = key
i = 0
if key[3] == grib_file.hybrid_level_code:
comp_key = key[1:4] + (-1, grid,)
if comp_key not in keys2files:
i += 1
else:
i = 0
else:
i = 0
return result
# Processes a month of grib data, including the 0-hour fields taken from the previous month's file.
def filter_grib_files(file_list, keys2files, grid, handles=None, month=0, year=0, once=False, prev_keys=(), prev_timestamp=-1):
dates = sorted(file_list.keys())
cache = None if once else build_fast_forward_cache(keys2files, grid)
keys, timestamp = prev_keys, prev_timestamp
for i in range(len(dates)):
date = dates[i]
if month != 0 and year != 0 and (date.month, date.year) != (month, year):
continue
prev_grib_file, cur_grib_file = file_list[date]
prev_chained = i > 0 and (os.path.realpath(prev_grib_file) == os.path.realpath(file_list[dates[i - 1]][1]))
if prev_grib_file is not None and not prev_chained:
with open(prev_grib_file, 'r') as fin:
log.info("Filtering grib file %s..." % os.path.abspath(prev_grib_file))
keys, timestamp = proc_initial_month(date.month, grib_file.create_grib_file(fin), keys2files,
grid, handles, keys, timestamp, once)
next_chained = i < len(dates) - 1 and (os.path.realpath(cur_grib_file) ==
os.path.realpath(file_list[dates[i + 1]][0]))
with open(cur_grib_file, 'r') as fin:
log.info("Filtering grib file %s..." % os.path.abspath(cur_grib_file))
if next_chained:
keys, timestamp = proc_grib_file(grib_file.create_grib_file(fin), keys2files, grid, handles, keys,
timestamp, once, cache)
else:
proc_final_month(date.month, grib_file.create_grib_file(fin), keys2files, grid, handles, keys,
timestamp, once, cache)
# Function writing data from previous monthly file, writing the 0-hour fields
def proc_initial_month(month, gribfile, keys2files, gridtype, handles, prev_keys=(), prev_timestamp=-1, once=False, ff_cache=None):
timestamp = prev_timestamp
keys = prev_keys
fast_forward_count = 0
while gribfile.read_next(headers_only=(fast_forward_count > 0)) and (handles is None or any(handles.keys())):
key, fast_forward_count, cycle, timestamp = next_record(gribfile, fast_forward_count, timestamp, gridtype,
ff_cache, keys)
if cycle:
gribfile.release()
continue
date = gribfile.get_field(grib_file.date_key)
if (date % 10 ** 4) / 10 ** 2 == month:
if (key[0], key[1]) not in accum_codes:
write_record(gribfile, key + (gridtype,), keys2files, handles=handles, once=once, setdate=None)
gribfile.release()
return keys, timestamp
# Function writing all records of a monthly file that is chained to the next one; accumulated fields are shifted back one output interval
def proc_grib_file(gribfile, keys2files, gridtype, handles, prev_keys=(), prev_timestamp=-1, once=False, ff_cache=None):
timestamp = prev_timestamp
keys = prev_keys
fast_forward_count = 0
while gribfile.read_next(headers_only=(fast_forward_count > 0)) and (handles is None or any(handles.keys())):
key, fast_forward_count, cycle, timestamp = next_record(gribfile, fast_forward_count, timestamp, gridtype,
ff_cache, keys)
if cycle:
gribfile.release()
continue
write_record(gribfile, key + (gridtype,), keys2files, shift=-1 if (key[0], key[1]) in accum_codes else 0,
handles=handles, once=once, setdate=None)
gribfile.release()
return keys, timestamp
# Function writing data from the final monthly file of a chain, also writing the shifted accumulated fields that spill into the next month
def proc_final_month(month, gribfile, keys2files, gridtype, handles, prev_keys=(), prev_timestamp=-1, once=False,
ff_cache=None):
timestamp = prev_timestamp
keys = prev_keys
fast_forward_count = 0
while gribfile.read_next(headers_only=(fast_forward_count > 0)) and (handles is None or any(handles.keys())):
key, fast_forward_count, cycle, timestamp = next_record(gribfile, fast_forward_count, timestamp, gridtype,
ff_cache, keys)
if cycle:
gribfile.release()
continue
date = gribfile.get_field(grib_file.date_key)
mon = (date % 10 ** 4) / 10 ** 2
if mon == month:
write_record(gribfile, key + (gridtype,), keys2files, shift=-1 if (key[0], key[1]) in accum_codes else 0,
handles=handles, once=once, setdate=None)
elif mon == month % 12 + 1:
if (key[0], key[1]) in accum_codes:
write_record(gribfile, key + (gridtype,), keys2files, shift=-1, handles=handles, once=once,
setdate=None)
gribfile.release()
return keys, timestamp
def next_record(gribfile, ffwd_count, prev_time, gridtype, ffwd_cache, keys_cache):
if ffwd_count > 0:
return None, ffwd_count - 1, True, -1
key = get_record_key(gribfile, gridtype)
t = gribfile.get_field(grib_file.time_key)
new_ffwd_count = ffwd_cache.get((t,) + key, 0) if ffwd_cache is not None else 0
if new_ffwd_count > 0:
return key, new_ffwd_count - 1, True, t
if t == prev_time and key in keys_cache:
return key, new_ffwd_count, True, t
if t != prev_time:
keys_cache.clear()
keys_cache.add(key)
return key, new_ffwd_count, False, t
# Writes the grib messages
def write_record(gribfile, key, keys2files, shift=0, handles=None, once=False, setdate=None):
global starttimes
var_infos = set()
if key[2] == grib_file.hybrid_level_code:
for k, v in keys2files.items():
if k[:3] == key[:3]:
var_infos.update(v)
else:
f = keys2files.get(key, None)
if f is not None:
var_infos.update(f)
if not any(var_infos):
return
if setdate is not None:
gribfile.set_field(grib_file.date_key, int(cmor_utils.date2str(setdate)))
gribfile.set_field(grib_file.time_key, 0)
timestamp = gribfile.get_field(grib_file.time_key)
if shift != 0 and setdate is None:
freq = varsfreq.get(key, 0)
shifttime = timestamp + shift * freq * 100
if shifttime < 0 or shifttime >= 2400:
newdate, hours = fix_date_time(gribfile.get_field(grib_file.date_key), shifttime / 100)
gribfile.set_field(grib_file.date_key, newdate)
shifttime = 100 * hours
timestamp = int(shifttime)
gribfile.set_field(grib_file.time_key, timestamp)
if key[1] == 126 and key[0] in [40, 41, 42]:
gribfile.set_field(grib_file.levtype_key, grib_file.pressure_level_hPa_code)
gribfile.set_field(grib_file.level_key, key[3]/100)
elif gribfile.get_field(grib_file.levtype_key) == grib_file.pressure_level_Pa_code:
gribfile.set_field(grib_file.levtype_key, 99)
if gribfile not in starttimes:
starttimes[gribfile] = timestamp
for var_info in var_infos:
if var_info[1] < 24 and timestamp / 100 % var_info[1] != 0:
log.warning("Skipping irregular GRIB record for %s with frequency %s at timestamp %s" %
(str(var_info[0]), str(var_info[1]), str(timestamp)))
continue
handle = handles.get(var_info[0], None) if handles else None
if handle:
gribfile.write(handle)
if once and handles is not None and timestamp != starttimes[gribfile]:
handle.close()
del handles[var_info[0]]
else:
if handles is None:
with open(os.path.join(temp_dir, var_info[0]), 'a') as ofile:
gribfile.write(ofile)
else:
if not once:
log.error("Unexpected missing file handle encountered for code %s" % str(var_info[0]))
# Converts an out-of-range hour (e.g. 24 or -1) into a shifted date and an in-range hour
def fix_date_time(date, time):
timestamp = datetime.datetime(year=date / 10 ** 4, month=(date % 10 ** 4) / 10 ** 2,
day=date % 10 ** 2) + datetime.timedelta(hours=time)
return timestamp.year * 10 ** 4 + timestamp.month * 10 ** 2 + timestamp.day, timestamp.hour
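# Minimal sketch of the roll-over handled above (hypothetical timestamp):
# shifting 2000-01-31 by 25 hours rolls over to the date 20000201 at hour 1.
def _example_fix_date_time():
    return fix_date_time(20000131, 25)  # -> (20000201, 1)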
|
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
from smach import StateMachine
import actionlib
import time
import threading
from smach_ros import SimpleActionState
from smach_ros import ActionServerWrapper
from std_msgs.msg import String
from std_msgs.msg import UInt8
from wm_interpreter.msg import *
TIMEOUT_LENGTH = 10
# define state WaitingQuestion
class WaitingQuestion(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['NotUnderstood', 'Question', 'Timeout'],
input_keys=[],
output_keys=['WQ_question_out'])
self.RecoString = []
self.state = "WaitingQuestion"
self.QUESTIONS = []
self.QUESTIONS.append(["Who are the inventors of the C programming language",
"Who is the inventor of the Python programming language",
"Which robot was the star in the movie Wall-E",
"Where does the term computer bug come from",
"What is the name of the round robot in the new Star Wars movie",
"How many curry sausages are eaten in Germany each year",
"Who is president of the galaxy in The Hitchhiker Guide to the Galaxy",
"Which robot is the love interest in Wall-E",
"Which company makes ASIMO",
"What company makes Big Dog",
"What is the funny clumsy character of the Star Wars prequels",
"How many people live in the Germany",
"What are the colours of the German flag",
"What city is the capital of the Germany",
"How many arms do you have",
"What is the heaviest element",
"what did Alan Turing create",
"Who is the helicopter pilot in the A-Team",
"What Apollo was the last to land on the moon",
"Who was the last man to step on the moon",
"In which county is the play of Hamlet set",
"What are names of Donald Duck nephews",
"How many metres are in a mile",
"Name a dragon in The Lord of the Rings",
"Who is the Chancellor of Germany",
"Who developed the first industrial robot",
"What's the difference between a cyborg and an android",
"Do you know any cyborg",
"In which city is this year's RoboCup hosted",
"Which city hosted last year's RoboCup",
"In which city will next year's RoboCup be hosted",
"Name the main rivers surrounding Leipzig",
"Where is the zoo of this city located",
"Where did the peaceful revolution of 1989 start",
"Where is the world's oldest trade fair hosted",
"Where is one of the world's largest dark music festivals hosted",
"Where is Europe's oldest continuous coffee shop hosted",
"Name one of the greatest German composers",
"Where is Johann Sebastian Bach buried",
"Do you have dreams",
"Hey what's up",
"There are seven days in a week. True or false",
"There are eleven days in a week. True or false",
"January has 31 days. True or false",
"January has 28 days. True or false",
"February has 28 days. True or false",
"February has 31 days. True or false",
"What city are you from",
"Who used first the word Robot",
"What origin has the word Robot"])
self.QUESTIONS.append([0] * len(self.QUESTIONS[0]))
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
self.face_cmd = rospy.Publisher('/face_mode', UInt8, queue_size=1, latch=True)
self.sub = rospy.Subscriber("/recognizer_1/output", String, self.callback, queue_size=1)
def execute(self, userdata):
rospy.loginfo('Executing state WaitingQuestion')
self.face_cmd.publish(3)
userdata.WQ_question_out = self.state
timeout = time.time() + TIMEOUT_LENGTH # 10 sec
while True:
if max(self.QUESTIONS[1]) > 1:
userdata.WQ_question_out = self.QUESTIONS[0][self.QUESTIONS[1].index(max(self.QUESTIONS[1]))]
return 'Question'
if time.time() > timeout:
return 'Timeout'
def callback(self, data):
self.RecoString = data.data.split()
for idx in range(len(self.QUESTIONS[1])):
self.QUESTIONS[1][idx] = 0
for RecoWord in self.RecoString:
for idx in range(len(self.QUESTIONS[1])):
if self.QUESTIONS[0][idx].lower().find(RecoWord) != -1:
self.QUESTIONS[1][idx] += 1
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# define state AnswerQuestion
class AnswerQuestion(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'],
input_keys=['AQ_question_in'])
self.ANSWERS = {"Who are the inventors of the C programming language": "The inventors of the C programming language is Ken Thompson and Dennis Ritchie",
"Who is the inventor of the Python programming language": "The inventors of the Python programming language Guido van Rossum",
"Which robot was the star in the movie Wall-E": "The robot star int he movie Wall-E was Wall-E",
"Where does the term computer bug come from": "The term computer bug come from a moth trapped in a relay",
"What is the name of the round robot in the new Star Wars movie": " The name of the round robot in the new Star Wars movie is B B 8",
"How many curry sausages are eaten in Germany each year": "About 800 million currywurst every year",
"Who is president of the galaxy in The Hitchhiker Guide to the Galaxy": "The president of the galaxy in The Hitchhiker's Guide to the Galaxy is Zaphod Beeblebrox",
"Which robot is the love interest in Wall-E": "The robot that is the love interest in Wall-E is Eve",
"Which company makes ASIMO": "The company that makes ASIMO is Honda",
"What company makes Big Dog": "The company that makes Big Dog is Boston Dynamics",
"What is the funny clumsy character of the Star Wars prequels": "The funny clumsy character of the Star Wars prequels is Jar-Jar Binks",
"How many people live in the Germany": "A little over 80 million people live in the Germany",
"What are the colours of the German flag": "The colours of the German flag are black red and yellow",
"What city is the capital of the Germany": "The capital of the Germany is Berlin",
"How many arms do you have": "I only have one arm for now. Ask me again next year",
"What is the heaviest element": "the heaviest element is plutonium when measured by the mass of the element but Osmium is densest",
"what did Alan Turing create": "Alan Turing created many things like Turing machines and the Turing test",
"Who is the helicopter pilot in the A-Team": "The helicopter pilot in the A-Team is Captain Howling Mad Murdock",
"What Apollo was the last to land on the moon": "The last to land on the moon was Apollo 17",
"Who was the last man to step on the moon": "The last man to step on the moon was Gene Cernan",
"In which county is the play of Hamlet set": "The play of Hamlet set is in Denmark",
"What are names of Donald Duck nephews": "The names of Donald Duck's nephews is Huey Dewey and Louie Duck",
"How many metres are in a mile": "There is about 1609 metres metres are in a mile",
"Name a dragon in The Lord of the Rings": "A dragon name in The Lord of the Rings is Smaug",
"Who is the Chancellor of Germany": "The Chancellor of Germany is Angela Merkel",
"Who developed the first industrial robot": "The first to develope a industrial robot are the American physicist Joseph Engelberg. He is also considered the father of robotics.",
"What's the difference between a cyborg and an android": "The difference between a cyborg and an android",
"Do you know any cyborg": "Professor Kevin Warwick. He implanted a chip in in his left arm to remotely operate doors an artificial hand and an electronic wheelchair",
"In which city is this year's RoboCup hosted": "Robocup 2016 is hosted in Leipzig Germany",
"Which city hosted last year's RoboCup": "robocup 2015 was hosted in Hefei China",
"In which city will next year's RoboCup be hosted": "robocup 2017 will be in Nagoya in Japan",
"Name the main rivers surrounding Leipzig": "he Parthe Pleisse and the White Elster",
"Where is the zoo of this city located": "the zoo is located Near the central station",
"Where did the peaceful revolution of 1989 start": "The peaceful revolution started in September 4 1989 in Leipzig at the Saint Nicholas Church",
"Where is the worlds oldest trade fair hosted": "The worlds oldest trade fair is in Leipzig",
"Where is one of the worlds largest dark music festivals hosted": "Leipzig hosts one of the worlds largest dark music festivals",
"Where is Europes oldest continuous coffee shop hosted": "Europes oldest continuous coffee shop is in Leipzig",
"Name one of the greatest German composers": "Johann Sebastian Bach",
"Where is Johann Sebastian Bach buried": "Johann Sebastian Bach is buried in Saint Thomas Church here in Leipzig",
"Do you have dreams": "I dream of Electric Sheeps",
"Hey what's up": "I don't know since I've never been there",
"There are seven days in a week. True or false": "True there are seven days in a week",
"There are eleven days in a week. True or false": "False there are seven days in a week not eleven",
"January has 31 days. True or false": "True January has 31 days",
"January has 28 days. True or false": "False January has 31 days not 28",
"February has 28 days. True or false": "True but in leap-years has 29",
"February has 31 days. True or false": "False February has either 28 or 29 days. Depend on the year",
"What city are you from": "I am from Montreal",
"Who used first the word Robot": "The word robot was first used by tchek writer Karel Capek",
"What origin has the word Robot": "The tchek word robota that means forced work or labour"}
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, userdata):
rospy.loginfo('-- Executing state AnswerQuestion --')
self.SayX(self.ANSWERS[userdata.AQ_question_in])
return 'Done'
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# define state AskToRepeat
class AskToRepeat(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['Done'])
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, userdata):
rospy.loginfo('-- Executing state AskRepeat --')
self.SayX("Can you repeat the question please?")
def SayX(self, ToSay_str):
rospy.loginfo(ToSay_str)
self.tts_pub.publish(ToSay_str)
def request_preempt(self):
"""Overload the preempt request method just to spew an error."""
smach.State.request_preempt(self)
rospy.logwarn("Preempted!")
# main
def main():
outcomes = ""
rospy.init_node('interpreter')
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['success', 'aborted', 'preempted'],
output_keys=['result'])
with sm:
# Add states to the container
smach.StateMachine.add('WaitingQuestion', WaitingQuestion(),
transitions={'Question': 'AnswerQuestion',
'NotUnderstood': 'AskToRepeat',
'Timeout': 'WaitingQuestion'},
remapping={'WQ_question_out': 'question'})
smach.StateMachine.add('AnswerQuestion', AnswerQuestion(),
transitions={'Done': 'WaitingQuestion'},
remapping={'AQ_question_in': 'question'})
smach.StateMachine.add('AskToRepeat', AskToRepeat(),
transitions={'Done': 'WaitingQuestion'},
)
'''sis = smach_ros.IntrospectionServer('server_name', asw.wrapped_container, '/ASW_ROOT')'''
# Execute SMACH plan
sm.execute()
rospy.spin()
# Request the container to preempt
sm.request_preempt()
if __name__ == '__main__':
main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions (file reading, simple IDL parsing by regexes) for IDL build.
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
import os
import cPickle as pickle
import re
import string
import subprocess
KNOWN_COMPONENTS = frozenset(['core', 'modules'])
class IdlBadFilenameError(Exception):
"""Raised if an IDL filename disagrees with the interface name in the file."""
pass
def idl_filename_to_interface_name(idl_filename):
# interface name is the root of the basename: InterfaceName.idl
return os.path.splitext(os.path.basename(idl_filename))[0]
def idl_filename_to_component(idl_filename):
path = os.path.dirname(os.path.realpath(idl_filename))
while path:
dirname, basename = os.path.split(path)
if basename.lower() in KNOWN_COMPONENTS:
return basename.lower()
path = dirname
raise Exception('Unknown component type for %s' % idl_filename)
################################################################################
# Basic file reading/writing
################################################################################
def get_file_contents(filename):
with open(filename) as f:
return f.read()
def read_file_to_list(filename):
"""Returns a list of (stripped) lines for a given filename."""
with open(filename) as f:
return [line.rstrip('\n') for line in f]
def resolve_cygpath(cygdrive_names):
if not cygdrive_names:
return []
cmd = ['cygpath', '-f', '-', '-wa']
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
idl_file_names = []
for file_name in cygdrive_names:
process.stdin.write('%s\n' % file_name)
process.stdin.flush()
idl_file_names.append(process.stdout.readline().rstrip())
process.stdin.close()
process.wait()
return idl_file_names
def read_idl_files_list_from_file(filename):
"""Similar to read_file_to_list, but also resolves cygpath."""
with open(filename) as input_file:
file_names = sorted([os.path.realpath(line.rstrip('\n'))
for line in input_file])
idl_file_names = [file_name for file_name in file_names
if not file_name.startswith('/cygdrive')]
cygdrive_names = [file_name for file_name in file_names
if file_name.startswith('/cygdrive')]
idl_file_names.extend(resolve_cygpath(cygdrive_names))
return idl_file_names
def read_pickle_files(pickle_filenames):
for pickle_filename in pickle_filenames:
with open(pickle_filename) as pickle_file:
yield pickle.load(pickle_file)
def write_file(new_text, destination_filename, only_if_changed):
if only_if_changed and os.path.isfile(destination_filename):
with open(destination_filename) as destination_file:
if destination_file.read() == new_text:
return
destination_dirname = os.path.dirname(destination_filename)
if not os.path.exists(destination_dirname):
os.makedirs(destination_dirname)
with open(destination_filename, 'w') as destination_file:
destination_file.write(new_text)
def write_pickle_file(pickle_filename, data, only_if_changed):
if only_if_changed and os.path.isfile(pickle_filename):
with open(pickle_filename) as pickle_file:
try:
if pickle.load(pickle_file) == data:
return
except (EOFError, pickle.UnpicklingError):
# If trouble unpickling, overwrite
pass
with open(pickle_filename, 'w') as pickle_file:
pickle.dump(data, pickle_file)
################################################################################
# IDL parsing
#
# We use regular expressions for parsing; this is incorrect (Web IDL is not a
# regular language), but simple and sufficient in practice.
# Leading and trailing context (e.g. following '{') used to avoid false matches.
################################################################################
def get_partial_interface_name_from_idl(file_contents):
match = re.search(r'partial\s+interface\s+(\w+)\s*{', file_contents)
return match and match.group(1)
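# Minimal sketch of the regex above on a toy IDL fragment (hypothetical
# interface name, not from the Chromium tree):
def _example_partial_interface_name():
    return get_partial_interface_name_from_idl('partial interface Node { };')  # -> 'Node'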
def get_implements_from_idl(file_contents, interface_name):
"""Returns lists of implementing and implemented interfaces.
Rule is: identifier-A implements identifier-B;
i.e., implement*ing* implements implement*ed*;
http://www.w3.org/TR/WebIDL/#idl-implements-statements
Returns two lists of interfaces: identifier-As and identifier-Bs.
An 'implements' statements can be present in the IDL file for either the
implementing or the implemented interface, but not other files.
"""
implements_re = (r'^\s*'
r'(\w+)\s+'
r'implements\s+'
r'(\w+)\s*'
r';')
implements_matches = re.finditer(implements_re, file_contents, re.MULTILINE)
implements_pairs = [match.groups() for match in implements_matches]
foreign_implements = [pair for pair in implements_pairs
if interface_name not in pair]
if foreign_implements:
left, right = foreign_implements.pop()
raise IdlBadFilenameError(
'implements statement found in unrelated IDL file.\n'
'Statement is:\n'
' %s implements %s;\n'
'but filename is unrelated "%s.idl"' %
(left, right, interface_name))
return (
[left for left, right in implements_pairs if right == interface_name],
[right for left, right in implements_pairs if left == interface_name])
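# Minimal sketch of the implements parsing above (hypothetical interfaces):
# for the file "A.idl" containing "A implements B;", nothing implements A and
# A implements B.
def _example_implements():
    return get_implements_from_idl('A implements B;\n', 'A')  # -> ([], ['B'])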
def is_callback_interface_from_idl(file_contents):
match = re.search(r'callback\s+interface\s+\w+\s*{', file_contents)
return bool(match)
def is_dictionary_from_idl(file_contents):
match = re.search(r'dictionary\s+\w+\s*{', file_contents)
return bool(match)
def get_parent_interface(file_contents):
match = re.search(r'interface\s+'
r'\w+\s*'
r':\s*(\w+)\s*'
r'{',
file_contents)
return match and match.group(1)
def get_interface_extended_attributes_from_idl(file_contents):
# Strip comments
# re.compile needed b/c Python 2.6 doesn't support flags in re.sub
single_line_comment_re = re.compile(r'//.*$', flags=re.MULTILINE)
block_comment_re = re.compile(r'/\*.*?\*/', flags=re.MULTILINE | re.DOTALL)
file_contents = re.sub(single_line_comment_re, '', file_contents)
file_contents = re.sub(block_comment_re, '', file_contents)
match = re.search(r'\[(.*)\]\s*'
r'((callback|partial)\s+)?'
r'(interface|exception)\s+'
r'\w+\s*'
r'(:\s*\w+\s*)?'
r'{',
file_contents, flags=re.DOTALL)
if not match:
return {}
extended_attributes_string = match.group(1)
extended_attributes = {}
# FIXME: this splitting is WRONG: it fails on extended attributes where lists of
# multiple values are used, which are separated by a comma and a space.
parts = [extended_attribute.strip()
for extended_attribute in re.split(',\s+', extended_attributes_string)
# Discard empty parts, which may exist due to trailing comma
if extended_attribute.strip()]
for part in parts:
name, _, value = map(string.strip, part.partition('='))
extended_attributes[name] = value
return extended_attributes
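# Minimal sketch of the extended-attribute parsing above (hypothetical IDL
# fragment): valueless attributes map to the empty string.
def _example_extended_attributes():
    idl = '[Constructor, RuntimeEnabled=FeatureName] interface Widget { };'
    return get_interface_extended_attributes_from_idl(idl)
    # -> {'Constructor': '', 'RuntimeEnabled': 'FeatureName'}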
def get_put_forward_interfaces_from_idl(file_contents):
put_forwards_pattern = (r'\[[^\]]*PutForwards=[^\]]*\]\s+'
r'readonly\s+'
r'attribute\s+'
r'(\w+)')
return sorted(set(match.group(1)
for match in re.finditer(put_forwards_pattern,
file_contents,
flags=re.DOTALL)))
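# Minimal sketch of the PutForwards extraction above (hypothetical attribute):
def _example_put_forwards():
    idl = '[PutForwards=style] readonly attribute CSSStyleDeclaration style;'
    return get_put_forward_interfaces_from_idl(idl)  # -> ['CSSStyleDeclaration']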
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
#from ibeis.viz.interact.interact_matches import ishow_matches
from six.moves import range
import functools
import six
from collections import OrderedDict as odict
import utool as ut
import vtool as vt
from plottool import interact_helpers as ih
from plottool import plot_helpers as ph
import matplotlib as mpl
import plottool.draw_func2 as df2
from ibeis.other import ibsfuncs
from ibeis.viz import viz_helpers as vh
from ibeis.viz import viz_matches
from ibeis.viz.interact.interact_sver import ishow_sver
(print, rrr, profile) = ut.inject2(__name__, '[interact_qres2]')
BREAK_MATCH_PREF = 'break match'
NEW_MATCH_PREF = 'new match'
RENAME1_PREF = 'rename query: '
RENAME2_PREF = 'rename result: '
def default_interact_qres_params():
params = {
'fnum' : 512,
'nPerPage' : 6,
'ranks_top' : 3,
'on_change_callback' : None
}
return params
class Interact_QueryResult(object):
def __init__(self, ibs, qaid2_qres, **kwargs):
# Initialize variables. No logic
self.fnum = None
self.nPerPage = None
self.ranks_top = None
self.on_change_callback = None
self.ibs = None
self.nCands = 0 # number of candidate matches
self.qaid2_qres = {}
self.cand_match_list = []
self.start_index = 0
self.current_pagenum = -1
self.current_match_aids = None
self.current_qres = None
self.scope = [] # for keeping those widgets alive!
self.nPages = 0
self.stop_index = -1
self.interactkw = {
'draw_fmatches': False,
'draw_ell': True,
'draw_rect': True,
'draw_lines': True,
'in_image': False,
'draw_lbl': True,
'show_timedelta': False,
}
self.toggleable_kws = odict([
('TOG: fmatch', 'draw_fmatches'),
('TOG: in_image', 'in_image'),
('TOG: timedelta', 'show_timedelta'),
('TOG: lbl', 'draw_lbl'),
])
# Initialize Logic
# main data
self.ibs = ibs
self.qaid2_qres = qaid2_qres
# update keyword args
params = default_interact_qres_params()
ut.updateif_haskey(params, kwargs)
self.__dict__.update(**params)
# initialize matches
self.init_candidates(qaid2_qres)
# show first page
self.show_page(0)
def get_default_params(self):
return default_interact_qres_params()
def init_candidates(self, qaid2_qres):
self.qaid2_qres = qaid2_qres
from ibeis.gui import inspect_gui
review_cfg = dict(ranks_top=self.ranks_top, directed=False)
self.cand_match_list = inspect_gui.get_automatch_candidates(self.qaid2_qres, review_cfg=review_cfg)
(qaids, aids, scores, ranks) = self.cand_match_list
self.qaids = qaids
self.aids = aids
self.nCands = len(self.qaids)
self.nPages = vt.iceil(self.nCands / self.nPerPage)
#if self.nCands > 0:
# index = 0
# self.select_candidate_match(index)
def select_candidate_match(self, index):
#if not ut.isiterable(index_list):
# index = index_list
#if index < 0 or index >= len(self.cand_match_list): raise AssertionError('no results')
#return None
(qaid, aid, score, rank) = [list_[index] for list_ in self.cand_match_list]
self.current_match_aids = (self.qaids[index], self.aids[index])
self.current_qres = self.qaid2_qres[qaid]
def append_button(self, text, divider=None, rect=None, callback=None,
size='9%', **kwargs):
""" Adds a button to the current page """
if divider is not None:
new_ax = divider.append_axes('bottom', size='9%', pad=.05)
if rect is not None:
new_ax = df2.plt.axes(rect)
new_but = mpl.widgets.Button(new_ax, text)
if callback is not None:
new_but.on_clicked(callback)
ph.set_plotdat(new_ax, 'viztype', 'button')
ph.set_plotdat(new_ax, 'text', text)
for key, val in six.iteritems(kwargs):
ph.set_plotdat(new_ax, key, val)
# Keep buttons from losing scope
self.scope.append((new_but, new_ax))
def clean_scope(self):
""" Removes any widgets saved in the interaction scope """
#for (but, ax) in self.scope:
# but.disconnect_events()
# ax.set_visible(False)
# assert len(ax.callbacks.callbacks) == 0
self.scope = []
def prepare_page(self, pagenum):
""" Gets indexes for the pagenum ready to be displayed """
# Set the start index
self.start_index = pagenum * self.nPerPage
# Clip based on nCands
self.nDisplay = min(self.nCands - self.start_index, self.nPerPage)
nRows, nCols = ph.get_square_row_cols(self.nDisplay)
# Create a grid to hold nPerPage
self.pnum_ = df2.get_pnum_func(nRows, nCols)
# Adjust stop index
self.stop_index = self.start_index + self.nDisplay
# Clear current figure
self.clean_scope()
self.fig = df2.figure(fnum=self.fnum, pnum=self.pnum_(0), doclf=True, docla=True)
ih.disconnect_callback(self.fig, 'button_press_event')
ih.connect_callback(self.fig, 'button_press_event', self.on_figure_clicked)
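# Example of the paging arithmetic in prepare_page above (hypothetical numbers):
# with nCands=14 and nPerPage=6, pagenum 2 gives start_index 12,
# nDisplay = min(14 - 12, 6) = 2 and stop_index 14, i.e. the last page shows
# only the two remaining candidate matches.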
def show_page(self, pagenum=None):
""" Displays a page of matches """
if pagenum is None:
pagenum = self.current_pagenum
print('[iqr2] show page: %r' % pagenum)
self.current_pagenum = pagenum
self.prepare_page(pagenum)
# Begin showing matches
index = self.start_index
for index in range(self.start_index, self.stop_index):
self.plot_annotationmatch(index, draw=False)
self.make_hud()
self.draw()
def plot_annotationmatch(self, index, draw=True, make_buttons=True):
self.select_candidate_match(index)
# Get index relative to the page
px = index - self.start_index
pnum = self.pnum_(px)
# Setup figure
fnum = self.fnum
fig = df2.figure(fnum=fnum, pnum=pnum, docla=True, doclf=False)
fig
#self.ax = ax = df2.gca()
# Get viz params
qres = self.current_qres
aid1, aid2 = self.current_match_aids
ibs = self.ibs
kwargs = self.interactkw
# Vizualize
ax = viz_matches.show_matches(ibs, qres, aid2, self_fm=[], fnum=fnum,
pnum=pnum, **kwargs)[0]
divider = df2.ensure_divider(ax)
name1, name2 = ibs.get_annot_names([aid1, aid2])
#truth = self.ibs.get_match_truth(aid1, aid2)
if make_buttons:
butkw = {
'divider': divider,
'callback': self.match_reviewed,
'index': index,
}
if name1 == name2 and not name1.startswith('____'):
self.append_button(BREAK_MATCH_PREF, **butkw)
else:
if not name1.startswith('____'):
self.append_button(RENAME2_PREF + name1, **butkw)
if not name2.startswith('____'):
self.append_button(RENAME1_PREF + name2, **butkw)
if name1.startswith('____') and name2.startswith('____'):
self.append_button(NEW_MATCH_PREF, **butkw)
if draw:
vh.draw()
def make_hud(self):
""" Creates heads up display """
# Button positioning
nToggle = len(self.toggleable_kws)
# horizontal left, horizontal right
hl_slot, hr_slot = df2.make_bbox_positioners(y=.02, w=.08, h=.04,
xpad=.05, startx=0, stopx=1)
prev_rect = hl_slot(0) # left button
next_rect = hr_slot(0) # right button
tw = df2.width_from(nToggle, pad=.05, start=.13, stop=.87)
hlt_slot, hrt_slot = df2.make_bbox_positioners(y=.02, w=tw, h=.04,
xpad=.05, startx=.13,
stopx=.87)
# Create buttons
if self.current_pagenum != 0:
self.append_button('prev', callback=self.prev_page, rect=prev_rect)
if self.current_pagenum != self.nPages - 1:
self.append_button('next', callback=self.next_page, rect=next_rect)
for count, (text, keyword) in enumerate(six.iteritems(self.toggleable_kws)):
callback = functools.partial(self.toggle_kw, keyword=keyword)
rect = hlt_slot(count)
self.append_button(text, callback=callback, rect=rect)
figtitle_fmt = '''
Match Candidates ({start_index}-{stop_index}) / {nCands}
page {current_pagenum} / {nPages}
'''
# sexy: using object dict as format keywords
figtitle = figtitle_fmt.format(**self.__dict__)
df2.set_figtitle(figtitle)
def next_page(self, event):
print('next')
self.show_page(self.current_pagenum + 1)
pass
def prev_page(self, event):
self.show_page(self.current_pagenum - 1)
pass
def toggle_kw(self, event, keyword=None):
print('toggle %r' % keyword)
self.interactkw[keyword] = not self.interactkw[keyword]
self.show_page()
def match_reviewed(self, event):
ax = event.inaxes
viztype = ph.get_plotdat(ax, 'viztype', '')
assert viztype == 'button', 'bad mpl button slot'
# The change name button was clicked
index = ph.get_plotdat(ax, 'index', -1)
text = ph.get_plotdat(ax, 'text', -1)
self.select_candidate_match(index)
aid1, aid2 = self.current_match_aids
print(index)
print(text)
ibs = self.ibs
if text.startswith(BREAK_MATCH_PREF):
ibs.set_annot_names([aid1, aid2], ['____', '____'])
elif text.startswith(NEW_MATCH_PREF):
next_name = ibsfuncs.make_next_name(ibs)
ibs.set_annot_names([aid1, aid2], [next_name, next_name])
elif text.startswith(RENAME1_PREF):
name2 = ibs.get_annot_names(aid2)
ibs.set_annot_names([aid1], [name2])
elif text.startswith(RENAME2_PREF):
name1 = ibs.get_annot_names(aid1)
ibs.set_annot_names([aid2], [name1])
# Emit that something has changed
self.on_change_callback()
self.show_page()
def on_figure_clicked(self, event):
""" Clicked a match between query annotation and result annotation:
parses the type of click it was and executes the correct
visualization
"""
print('[viz] clicked result')
if ih.clicked_outside_axis(event):
#self.toggle_fmatch()
pass
else:
ax = event.inaxes
viztype = ph.get_plotdat(ax, 'viztype', '')
# Clicked a specific match
if viztype == 'matches':
aid1 = ph.get_plotdat(ax, 'aid1', None)
aid2 = ph.get_plotdat(ax, 'aid2', None)
# Ctrl-Click
key = '' if event.key is None else event.key
print('key = %r' % key)
if key.find('control') == 0:
print('[viz] result control clicked')
self.on_ctrl_clicked_match(aid1, aid2)
# Left-Click
else:
print('[viz] result clicked')
self.on_clicked_match(aid1, aid2)
def on_ctrl_clicked_match(self, aid1, aid2):
""" HELPER: Executed when a result ANNOTATION is control-clicked """
fnum_ = df2.next_fnum()
ishow_sver(self.ibs, aid1, aid2, fnum=fnum_)
fig = df2.gcf()
fig.canvas.draw()
df2.bring_to_front(fig)
def on_clicked_match(self, aid1, aid2):
""" HELPER: Executed when a result ANNOTATION is clicked """
fnum_ = df2.next_fnum()
qres = self.qaid2_qres[aid1]
qres.ishow_matches(self.ibs, aid2, fnum=fnum_)
fig = df2.gcf()
fig.canvas.draw()
df2.bring_to_front(fig)
#self.draw()
#self.bring_to_front()
def bring_to_front(self):
df2.bring_to_front(self.fig)
def draw(self):
self.fig.canvas.draw()
def show(self):
self.draw()
self.bring_to_front()
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.viz.interact.interact_qres2
python -m ibeis.viz.interact.interact_qres2 --allexamples
python -m ibeis.viz.interact.interact_qres2 --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from multiprocessing import Process, Queue as MPQueue
import subprocess
import Queue
import os
import sys
import time
import urllib2
import signal
import json
import re
import argparse
from bs4 import BeautifulSoup
MAP_FILEEXT = {'video/mp4': 'mp4'}
BURST_SIZE = 40960
PROGRESS_BAR_SIZE = 30
worker_running = True
def sig_handler(signum, frame):
global worker_running
worker_running = False
def worker_main(queue):
# return
signal.signal(signal.SIGINT, sig_handler)
while worker_running:
try:
(dn_url, filename) = queue.get(timeout=1)
if dn_url.startswith('https://www.youtube.com'):
try:
ret = subprocess.call('youtube-dl -o "' + filename + '" "%s"' % dn_url, shell=True)
except Exception, e:
print e
print "Error - downloading from Youtube. (%s)" % dn_url
else:
download_file(dn_url, filename)
except Queue.Empty:
time.sleep(1)
except Exception, e:
print e
def download_file(url, filename):
retry = 0
while retry < 3:
try:
data = urllib2.urlopen(url)
file_size = int(data.headers['Content-Length'])
if os.path.exists(filename) and os.path.getsize(filename) >= file_size:
data.close()
return
print "Downloading - {0} ({1:,} bytes)".format(filename, file_size)
fp = open(filename, "wb")
complete = False
dn_size = 0
check_time = 0
while not complete:
ret = data.read(BURST_SIZE)
fp.write(ret)
dn_size += len(ret)
if BURST_SIZE != len(ret):
fp.flush()
fp.seek(0, os.SEEK_END)
if fp.tell() != file_size:
raise Exception("Download Error")
complete = True
print "Complete - {0} ({1:} / {2:,} bytes)".format(filename, dn_size , file_size)
fp.close()
break
except Exception, e:
print e, url
print "try again..."
os.remove(filename)
retry += 1
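# Minimal sketch of the blog-name extraction used by TumblrCrawler below
# (hypothetical blog URL): "http://example.tumblr.com/archive" yields the
# download directory "example" and the normalised base url
# "http://example.tumblr.com".
def _example_blog_name(url="http://example.tumblr.com/archive"):
    return re.search(r"^http://(?P<name>.+)\.tumblr\.com.*", url).group('name')  # -> "example"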
class TumblrCrawler(object):
def __init__(self, config):
self.config = config
try:
self.dest_path = re.search("^http://(?P<name>.+)\.tumblr\.com.*", config.url.strip()).group('name')
self.url = 'http://%s.tumblr.com' % self.dest_path
except Exception, e:
raise Exception("Invalid URL - %s" % self.url)
self.queue = MPQueue()
self.dup_cache = []
if not os.path.exists(self.dest_path):
os.mkdir(self.dest_path, 0755)
def add_download_queue(self, url, filename=None):
if url not in self.dup_cache:
if not filename:
filename = "%s/%s" % (self.dest_path, url.rpartition('/')[-1])
self.dup_cache.append(url)
self.queue.put((url, filename))
def _load_page(self, url):
retry = 0
while retry < 3:
try:
page = urllib2.urlopen(url)
return BeautifulSoup(page.read(), "html.parser")
except Exception, e:
print e, url
retry += 1
raise e
def process_photo_link(self, node):
def _get_file_from_img_tag(img):
if img.has_attr('src'):
return img['src']
if node.name == 'img':
self.add_download_queue(_get_file_from_img_tag(node))
else:
for img in node.find_all('img'):
self.add_download_queue(_get_file_from_img_tag(img))
def process_video_link(self, node):
for data in node.find_all('iframe'):
vid_src = data['src']
if vid_src.startswith('https://www.youtube.com'):
filename = self.dest_path + '/%(title)s.%(ext)s'
self.add_download_queue(vid_src, filename)
else:
contents = self._load_page(vid_src)
for obj in contents.find_all(['source']):
meta = json.loads(obj.parent['data-crt-options'])
file_type = obj['type']
if meta['hdUrl'] != False and isinstance(meta['hdUrl'], (str, unicode)):
#print meta['hdUrl']
file_url = meta['hdUrl']
else:
file_url = obj['src']
# Check one more time
if str(file_url.rpartition('/')[-1]).isdigit():
file_url = file_url.rpartition('/')[0]
filename = "%s/%s.%s" % (self.dest_path, file_url.rpartition('/')[-1], MAP_FILEEXT.get(file_type, 'unknown'))
self.add_download_queue(file_url, filename)
def process_photoset_link(self, node):
self.process_photo_link(node)
for data in node.find_all('iframe'):
contents = self._load_page(data['src'])
for img in contents.find_all('a', class_='photoset_photo'):
self.add_download_queue(img['href'])
def crawler_page(self, page):
for container in page.find_all(class_=['photo', 'image', 'photoset', 'video']):
try:
if 'video' in container['class']:
self.process_video_link(container)
elif 'photoset' in container['class']:
self.process_photoset_link(container)
else:
self.process_photo_link(container)
except Exception, e:
print e, container
def do_crawling(self):
page_link = 1
worker_list = []
for idx in range(self.config.worker):
w = Process(target=worker_main, args=(self.queue, ))
worker_list.append(w)
map(lambda x: x.start(), worker_list)
try:
while True:
print "## Crawling : ", self.url + '/page/%d' % page_link
try:
self.dup_cache = []
soup = self._load_page(self.url + '/page/%d' % page_link)
except Exception, e:
print e, self.url + page_link
time.sleep(1)
continue
container = soup.find('body').find_all(class_=['photo', 'image', 'photoset', 'video'])
for content in container:
# print content
# raw_input()
try:
if 'video' in content['class']:
self.process_video_link(content)
elif 'photoset' in content['class']:
self.process_photoset_link(content)
else:
self.process_photo_link(content)
except Exception, e:
print e, content
if len(container) == 0:
# No more data.
break
page_link += 1
while not self.queue.empty():
time.sleep(1)
map(lambda x: os.kill(x.pid, signal.SIGINT), worker_list)
map(lambda x: x.join(), worker_list)
except KeyboardInterrupt:
map(lambda x: x.terminate(), worker_list)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Crawling Tumblr images and videos')
parser.add_argument('-d', '--debug', action='store_true', help='debug mode')
parser.add_argument('-w', '--worker', metavar='number of worker', default=4, type=int, help='use multiple downloads')
parser.add_argument('url', help='tumblr url')
config = parser.parse_args()
TumblrCrawler(config).do_crawling()
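# Illustrative invocation (not part of the original script; the script filename
# and blog name are assumptions). With the argparse options defined above, e.g.:
#
#   python tumblr_crawler.py -w 8 http://example.tumblr.com
#
# the crawler walks http://example.tumblr.com/page/N, queues every photo,
# photoset and video link it finds, and downloads them into ./example using
# eight worker processes.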
|
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import jmespath
from .common import BaseTest
from c7n.utils import local_session
from unittest.mock import MagicMock
class CloudFrontWaf(BaseTest):
def test_waf(self):
factory = self.replay_flight_data("test_distribution_waf")
p = self.load_policy(
{
"name": "waf-cfront",
"resource": "distribution",
"filters": [{"type": "waf-enabled", "web-acl": "test", "state": False}],
"actions": [{"type": "set-waf", "web-acl": "test", "state": True}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
p = self.load_policy(
{
"name": "waf-cfront",
"resource": "distribution",
"filters": [{"type": "waf-enabled", "web-acl": "test", "state": False}],
},
session_factory=factory,
)
self.assertEqual(p.run(), [])
class CloudFront(BaseTest):
def test_shield_metric_filter(self):
factory = self.replay_flight_data("test_distribution_shield_metrics")
p = self.load_policy(
{
"name": "ddos-filter",
"resource": "distribution",
"filters": [
{
"type": "shield-metrics",
"name": "DDoSDetected",
"value": 1,
"op": "ge",
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_distribution_metric_filter(self):
factory = self.replay_flight_data("test_distribution_metric_filter")
p = self.load_policy(
{
"name": "requests-filter",
"resource": "distribution",
"filters": [
{"type": "metrics", "name": "Requests", "value": 3, "op": "ge"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(resources[0]["DomainName"], "d32plmcrnvwzrd.cloudfront.net")
def test_distribution_set_ssl(self):
factory = self.replay_flight_data("test_distrbution_set_ssl")
k = "DefaultCacheBehavior.ViewerProtocolPolicy"
p = self.load_policy(
{
"name": "distribution-set-ssl",
"resource": "distribution",
"filters": [
{"type": "value", "key": k, "value": "allow-all", "op": "contains"}
],
"actions": [
{"type": "set-protocols", "ViewerProtocolPolicy": "https-only"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
expr = jmespath.compile(k)
r = expr.search(resources[0])
self.assertTrue("allow-all" in r)
client = local_session(factory).client("cloudfront")
resp = client.list_distributions()
self.assertEqual(
resp["DistributionList"]["Items"][0]["DefaultCacheBehavior"][
"ViewerProtocolPolicy"
],
"https-only",
)
def test_distribution_custom_origin(self):
factory = self.replay_flight_data("test_distrbution_custom_origin")
k = "Origins.Items[].CustomOriginConfig.OriginSslProtocols.Items[]"
p = self.load_policy(
{
"name": "distribution-set-ssl",
"resource": "distribution",
"filters": [
{"type": "value", "key": k, "value": "TLSv1", "op": "contains"}
],
"actions": [
{
"type": "set-protocols",
"OriginSslProtocols": ["TLSv1.1", "TLSv1.2"],
"OriginProtocolPolicy": "https-only",
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
expr = jmespath.compile(k)
r = expr.search(resources[0])
self.assertTrue("TLSv1" in r)
client = local_session(factory).client("cloudfront")
resp = client.list_distributions()
self.assertEqual(
resp["DistributionList"]["Items"][0]["Origins"]["Items"][0][
"CustomOriginConfig"
][
"OriginProtocolPolicy"
],
"https-only",
)
self.assertTrue(
"TLSv1.2" in resp["DistributionList"]["Items"][0]["Origins"]["Items"][0][
"CustomOriginConfig"
][
"OriginSslProtocols"
][
"Items"
]
)
self.assertFalse(
"TLSv1" in resp["DistributionList"]["Items"][0]["Origins"]["Items"][0][
"CustomOriginConfig"
][
"OriginSslProtocols"
][
"Items"
]
)
def test_distribution_disable(self):
factory = self.replay_flight_data("test_distrbution_disable")
p = self.load_policy(
{
"name": "distribution-disable",
"resource": "distribution",
"filters": [
{
"type": "value",
"key": "DefaultCacheBehavior.ViewerProtocolPolicy",
"value": "allow-all",
"op": "contains",
}
],
"actions": [{"type": "disable"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["Enabled"], True)
client = local_session(factory).client("cloudfront")
resp = client.list_distributions()
self.assertEqual(resp["DistributionList"]["Items"][0]["Enabled"], False)
def test_distribution_check_s3_origin_missing_bucket(self):
factory = self.replay_flight_data("test_distribution_check_s3_origin_missing_bucket")
p = self.load_policy(
{
"name": "test_distribution_check_s3_origin_missing_bucket",
"resource": "distribution",
"filters": [
{
"type": "mismatch-s3-origin",
}
]
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['c7n:mismatched-s3-origin'][0], 'c7n-idontexist')
def test_distribution_check_logging_enabled(self):
factory = self.replay_flight_data("test_distribution_check_logging_enabled")
p = self.load_policy(
{
"name": "test_distribution_logging_enabled",
"resource": "distribution",
"filters": [
{
"type": "distribution-config",
"key": "Logging.Enabled",
"value": True
}
]
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['c7n:distribution-config']['Logging']['Enabled'], True)
def test_distribution_check_logging_enabled_error(self):
factory = self.replay_flight_data("test_distribution_check_logging_enabled")
client = factory().client("cloudfront")
mock_factory = MagicMock()
mock_factory.region = 'us-east-1'
mock_factory().client(
'cloudfront').exceptions.NoSuchDistribution = (
client.exceptions.NoSuchDistribution)
mock_factory().client('cloudfront').get_distribution_config.side_effect = (
client.exceptions.NoSuchDistribution(
{'Error': {'Code': 'xyz'}},
operation_name='get_distribution_config'))
p = self.load_policy(
{
"name": "test_distribution_logging_enabled",
"resource": "distribution",
"filters": [
{
"type": "distribution-config",
"key": "Logging.Enabled",
"value": True
}
]
},
session_factory=mock_factory,
)
try:
p.resource_manager.filters[0].process(
[{'Id': 'abc'}])
except client.exceptions.NoSuchDistribution:
self.fail('should not raise')
mock_factory().client('cloudfront').get_distribution_config.assert_called_once()
def test_streaming_distribution_check_logging_enabled(self):
factory = self.replay_flight_data("test_streaming_distribution_check_logging_enabled")
p = self.load_policy(
{
"name": "test_streaming_distribution_logging_enabled",
"resource": "streaming-distribution",
"filters": [
{
"type": "distribution-config",
"key": "Logging.Enabled",
"value": True
}
]
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['c7n:distribution-config']['Logging']['Enabled'],
True)
def test_streaming_distribution_check_logging_enabled_error(self):
factory = self.replay_flight_data("test_streaming_distribution_check_logging_enabled")
client = factory().client("cloudfront")
mock_factory = MagicMock()
mock_factory.region = 'us-east-1'
mock_factory().client(
'cloudfront').exceptions.NoSuchStreamingDistribution = (
client.exceptions.NoSuchStreamingDistribution)
mock_factory().client('cloudfront').get_streaming_distribution_config.side_effect = (
client.exceptions.NoSuchStreamingDistribution(
{'Error': {'Code': 'xyz'}},
operation_name='get_streaming_distribution_config'))
p = self.load_policy(
{
"name": "test_streaming_distribution_logging_enabled",
"resource": "streaming-distribution",
"filters": [
{
"type": "distribution-config",
"key": "Logging.Enabled",
"value": True
}
]
},
session_factory=mock_factory,
)
try:
p.resource_manager.filters[0].process(
[{'Id': 'abc'}])
except client.exceptions.NoSuchDistribution:
self.fail('should not raise')
mock_factory().client('cloudfront').get_streaming_distribution_config.assert_called_once()
def test_distribution_tag(self):
factory = self.replay_flight_data("test_distrbution_tag")
p = self.load_policy(
{
"name": "distribution-tag",
"resource": "distribution",
"filters": [{"tag:abc": "present"}],
"actions": [{"type": "tag", "key": "123", "value": "456"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client("cloudfront")
resp = client.list_tags_for_resource(Resource=resources[0]["ARN"])
self.assertEqual(len(resp["Tags"]["Items"]), 2)
def test_streaming_distribution_disable(self):
factory = self.replay_flight_data("test_streaming_distrbution_disable")
p = self.load_policy(
{
"name": "streaming-distribution-disable",
"resource": "streaming-distribution",
"filters": [
{
"type": "value",
"key": "S3Origin.OriginAccessIdentity",
"value": "",
}
],
"actions": [{"type": "disable"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["Enabled"], True)
client = local_session(factory).client("cloudfront")
resp = client.list_streaming_distributions()
self.assertEqual(
resp["StreamingDistributionList"]["Items"][0]["Enabled"], False
)
def test_streaming_distribution_tag(self):
factory = self.replay_flight_data("test_streaming_distrbution_tag")
p = self.load_policy(
{
"name": "streaming-distribution-tag",
"resource": "streaming-distribution",
"filters": [{"tag:123": "present"}],
"actions": [{"type": "tag", "key": "abc", "value": "123"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client("cloudfront")
resp = client.list_tags_for_resource(Resource=resources[0]["ARN"])
self.assertEqual(len(resp["Tags"]["Items"]), 2)
def test_cloudfront_tagging_multi_region(self):
factory = self.replay_flight_data("test_cloudfront_multi_region")
east_p = self.load_policy(
{
"name": "cloudfront-tagging-us-east-1",
"resource": "distribution",
"filters": [{"tag:tag": "present"}]
},
config=dict(region='us-east-1'),
session_factory=factory,
)
west_p = self.load_policy(
{
"name": "cloudfront-tagging-us-west-2",
"resource": "distribution",
"filters": [{"tag:tag": "present"}]
},
config=dict(region='us-west-2'),
session_factory=factory,
)
east_resources = east_p.run()
west_resources = west_p.run()
self.assertEqual(east_resources, west_resources)
def test_cloudfront_update_distribution(self):
factory = self.replay_flight_data("test_distribution_update_distribution")
p = self.load_policy(
{
"name": "cloudfront-tagging-us-east-1",
"resource": "distribution",
"filters": [
{
"type": "distribution-config",
"key": "Logging.Enabled",
"value": False,
}
],
"actions": [
{
"type": "set-attributes",
"attributes": {
"Comment": "",
"Enabled": True,
"Logging": {
"Enabled": True,
"IncludeCookies": False,
"Bucket": 'test-logging.s3.amazonaws.com',
"Prefix": '',
}
}
}
],
},
config=dict(region='us-east-1'),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client("cloudfront")
dist_id = resources[0]['Id']
resp = client.get_distribution_config(Id=dist_id)
self.assertEqual(
resp['DistributionConfig']['Logging']['Enabled'], True
)
def test_cloudfront_update_streaming_distribution(self):
factory = self.replay_flight_data("test_distribution_update_streaming_distribution")
p = self.load_policy(
{
"name": "cloudfront-tagging-us-east-1",
"resource": "streaming-distribution",
"filters": [
{
"type": "distribution-config",
"key": "Logging.Enabled",
"value": False,
}
],
"actions": [
{
"type": "set-attributes",
"attributes": {
"Logging": {
"Enabled": True,
"Bucket": 'test-streaming-distribution-logging.s3.amazonaws.com',
"Prefix": '',
}
}
}
],
},
config=dict(region='us-east-1'),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client("cloudfront")
dist_id = resources[0]['Id']
resp = client.get_streaming_distribution_config(Id=dist_id)
self.assertEqual(
resp['StreamingDistributionConfig']['Logging']['Enabled'], True
)
|
|
# -*- test-case-name: twisted.conch.test.test_recvline -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os
from twisted.conch.insults import insults
from twisted.conch import recvline
from twisted.python import reflect, components
from twisted.internet import defer, error
from twisted.trial import unittest
from twisted.cred import portal
from twisted.test.proto_helpers import StringTransport
class Arrows(unittest.TestCase):
def setUp(self):
self.underlyingTransport = StringTransport()
self.pt = insults.ServerProtocol()
self.p = recvline.HistoricRecvLine()
self.pt.protocolFactory = lambda: self.p
self.pt.factory = self
self.pt.makeConnection(self.underlyingTransport)
# self.p.makeConnection(self.pt)
def testPrintableCharacters(self):
self.p.keystrokeReceived('x', None)
self.p.keystrokeReceived('y', None)
self.p.keystrokeReceived('z', None)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
def testHorizontalArrows(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.RIGHT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.LEFT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('xy', 'z'))
kR(self.pt.LEFT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('x', 'yz'))
kR(self.pt.LEFT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('', 'xyz'))
kR(self.pt.LEFT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('', 'xyz'))
kR(self.pt.RIGHT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('x', 'yz'))
kR(self.pt.RIGHT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('xy', 'z'))
kR(self.pt.RIGHT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.RIGHT_ARROW)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
def testNewline(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz\nabc\n123\n':
kR(ch)
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
kR('c')
kR('b')
kR('a')
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
kR('\n')
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123', 'cba'), ()))
def testVerticalArrows(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz\nabc\n123\n':
kR(ch)
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
self.assertEquals(self.p.currentLineBuffer(), ('', ''))
kR(self.pt.UP_ARROW)
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz', 'abc'), ('123',)))
self.assertEquals(self.p.currentLineBuffer(), ('123', ''))
kR(self.pt.UP_ARROW)
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz',), ('abc', '123')))
self.assertEquals(self.p.currentLineBuffer(), ('abc', ''))
kR(self.pt.UP_ARROW)
self.assertEquals(self.p.currentHistoryBuffer(),
((), ('xyz', 'abc', '123')))
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.UP_ARROW)
self.assertEquals(self.p.currentHistoryBuffer(),
((), ('xyz', 'abc', '123')))
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
for i in range(4):
kR(self.pt.DOWN_ARROW)
self.assertEquals(self.p.currentHistoryBuffer(),
(('xyz', 'abc', '123'), ()))
def testHome(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'hello, world':
kR(ch)
self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))
kR(self.pt.HOME)
self.assertEquals(self.p.currentLineBuffer(), ('', 'hello, world'))
def testEnd(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'hello, world':
kR(ch)
self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))
kR(self.pt.HOME)
kR(self.pt.END)
self.assertEquals(self.p.currentLineBuffer(), ('hello, world', ''))
def testBackspace(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.BACKSPACE)
self.assertEquals(self.p.currentLineBuffer(), ('xy', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.BACKSPACE)
self.assertEquals(self.p.currentLineBuffer(), ('', 'y'))
kR(self.pt.BACKSPACE)
self.assertEquals(self.p.currentLineBuffer(), ('', 'y'))
def testDelete(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.DELETE)
self.assertEquals(self.p.currentLineBuffer(), ('xyz', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEquals(self.p.currentLineBuffer(), ('xy', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEquals(self.p.currentLineBuffer(), ('x', ''))
kR(self.pt.LEFT_ARROW)
kR(self.pt.DELETE)
self.assertEquals(self.p.currentLineBuffer(), ('', ''))
kR(self.pt.DELETE)
self.assertEquals(self.p.currentLineBuffer(), ('', ''))
def testInsert(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
# kR(self.pt.INSERT)
kR(self.pt.LEFT_ARROW)
kR('A')
self.assertEquals(self.p.currentLineBuffer(), ('xyA', 'z'))
kR(self.pt.LEFT_ARROW)
kR('B')
self.assertEquals(self.p.currentLineBuffer(), ('xyB', 'Az'))
def testTypeover(self):
kR = lambda ch: self.p.keystrokeReceived(ch, None)
for ch in 'xyz':
kR(ch)
kR(self.pt.INSERT)
kR(self.pt.LEFT_ARROW)
kR('A')
self.assertEquals(self.p.currentLineBuffer(), ('xyA', ''))
kR(self.pt.LEFT_ARROW)
kR('B')
self.assertEquals(self.p.currentLineBuffer(), ('xyB', ''))
from twisted.conch import telnet
from twisted.conch.insults import helper
from twisted.protocols import loopback
class EchoServer(recvline.HistoricRecvLine):
def lineReceived(self, line):
self.terminal.write(line + '\n' + self.ps[self.pn])
# An insults API for this would be nice.
left = "\x1b[D"
right = "\x1b[C"
up = "\x1b[A"
down = "\x1b[B"
insert = "\x1b[2~"
home = "\x1b[1~"
delete = "\x1b[3~"
end = "\x1b[4~"
backspace = "\x7f"
from twisted.cred import checkers
try:
from twisted.conch.ssh import userauth, transport, channel, connection, session
from twisted.conch.manhole_ssh import TerminalUser, TerminalSession, TerminalRealm, TerminalSessionTransport, ConchFactory
except ImportError:
ssh = False
else:
ssh = True
class SessionChannel(channel.SSHChannel):
name = 'session'
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
channel.SSHChannel.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def channelOpen(self, data):
term = session.packRequest_pty_req("vt102", (self.height, self.width, 0, 0), '')
self.conn.sendRequest(self, 'pty-req', term)
self.conn.sendRequest(self, 'shell', '')
self._protocolInstance = self.protocolFactory(*self.protocolArgs, **self.protocolKwArgs)
self._protocolInstance.factory = self
self._protocolInstance.makeConnection(self)
def closed(self):
self._protocolInstance.connectionLost(error.ConnectionDone())
def dataReceived(self, data):
self._protocolInstance.dataReceived(data)
class TestConnection(connection.SSHConnection):
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, width, height, *a, **kw):
connection.SSHConnection.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.width = width
self.height = height
def serviceStarted(self):
self.__channel = SessionChannel(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
self.openChannel(self.__channel)
def write(self, bytes):
return self.__channel.write(bytes)
class TestAuth(userauth.SSHUserAuthClient):
def __init__(self, username, password, *a, **kw):
userauth.SSHUserAuthClient.__init__(self, username, *a, **kw)
self.password = password
def getPassword(self):
return defer.succeed(self.password)
class TestTransport(transport.SSHClientTransport):
def __init__(self, protocolFactory, protocolArgs, protocolKwArgs, username, password, width, height, *a, **kw):
# transport.SSHClientTransport.__init__(self, *a, **kw)
self.protocolFactory = protocolFactory
self.protocolArgs = protocolArgs
self.protocolKwArgs = protocolKwArgs
self.username = username
self.password = password
self.width = width
self.height = height
def verifyHostKey(self, hostKey, fingerprint):
return defer.succeed(True)
def connectionSecure(self):
self.__connection = TestConnection(self.protocolFactory, self.protocolArgs, self.protocolKwArgs, self.width, self.height)
self.requestService(
TestAuth(self.username, self.password, self.__connection))
def write(self, bytes):
return self.__connection.write(bytes)
class TestSessionTransport(TerminalSessionTransport):
def protocolFactory(self):
return self.avatar.conn.transport.factory.serverProtocol()
class TestSession(TerminalSession):
transportFactory = TestSessionTransport
class TestUser(TerminalUser):
pass
components.registerAdapter(TestSession, TestUser, session.ISession)
class LoopbackRelay(loopback.LoopbackRelay):
clearCall = None
def logPrefix(self):
return "LoopbackRelay(%r)" % (self.target.__class__.__name__,)
def write(self, bytes):
loopback.LoopbackRelay.write(self, bytes)
if self.clearCall is not None:
self.clearCall.cancel()
from twisted.internet import reactor
self.clearCall = reactor.callLater(0, self._clearBuffer)
def _clearBuffer(self):
self.clearCall = None
loopback.LoopbackRelay.clearBuffer(self)
class NotifyingExpectableBuffer(helper.ExpectableBuffer):
def __init__(self):
self.onConnection = defer.Deferred()
self.onDisconnection = defer.Deferred()
def connectionMade(self):
helper.ExpectableBuffer.connectionMade(self)
self.onConnection.callback(self)
def connectionLost(self, reason):
self.onDisconnection.errback(reason)
class _BaseMixin:
WIDTH = 80
HEIGHT = 24
def _assertBuffer(self, lines):
receivedLines = str(self.recvlineClient).splitlines()
expectedLines = lines + ([''] * (self.HEIGHT - len(lines) - 1))
self.assertEquals(len(receivedLines), len(expectedLines))
for i in range(len(receivedLines)):
self.assertEquals(
receivedLines[i], expectedLines[i],
str(receivedLines[max(0, i-1):i+1]) +
" != " +
str(expectedLines[max(0, i-1):i+1]))
def _trivialTest(self, input, output):
done = self.recvlineClient.expect("done")
self._testwrite(input)
def finished(ign):
self._assertBuffer(output)
return done.addCallback(finished)
class _SSHMixin(_BaseMixin):
def setUp(self):
if not ssh:
raise unittest.SkipTest("Crypto requirements missing, can't run historic recvline tests over ssh")
u, p = 'testuser', 'testpass'
rlm = TerminalRealm()
rlm.userFactory = TestUser
rlm.chainedProtocolFactory = lambda: insultsServer
ptl = portal.Portal(
rlm,
[checkers.InMemoryUsernamePasswordDatabaseDontUse(**{u: p})])
sshFactory = ConchFactory(ptl)
sshFactory.serverProtocol = self.serverProtocol
sshFactory.startFactory()
recvlineServer = self.serverProtocol()
insultsServer = insults.ServerProtocol(lambda: recvlineServer)
sshServer = sshFactory.buildProtocol(None)
clientTransport = LoopbackRelay(sshServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = insults.ClientProtocol(lambda: recvlineClient)
sshClient = TestTransport(lambda: insultsClient, (), {}, u, p, self.WIDTH, self.HEIGHT)
serverTransport = LoopbackRelay(sshClient)
sshClient.makeConnection(clientTransport)
sshServer.makeConnection(serverTransport)
self.recvlineClient = recvlineClient
self.sshClient = sshClient
self.sshServer = sshServer
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, bytes):
self.sshClient.write(bytes)
from twisted.conch.test import test_telnet
class TestInsultsClientProtocol(insults.ClientProtocol,
test_telnet.TestProtocol):
pass
class TestInsultsServerProtocol(insults.ServerProtocol,
test_telnet.TestProtocol):
pass
class _TelnetMixin(_BaseMixin):
def setUp(self):
recvlineServer = self.serverProtocol()
insultsServer = TestInsultsServerProtocol(lambda: recvlineServer)
telnetServer = telnet.TelnetTransport(lambda: insultsServer)
clientTransport = LoopbackRelay(telnetServer)
recvlineClient = NotifyingExpectableBuffer()
insultsClient = TestInsultsClientProtocol(lambda: recvlineClient)
telnetClient = telnet.TelnetTransport(lambda: insultsClient)
serverTransport = LoopbackRelay(telnetClient)
telnetClient.makeConnection(clientTransport)
telnetServer.makeConnection(serverTransport)
serverTransport.clearBuffer()
clientTransport.clearBuffer()
self.recvlineClient = recvlineClient
self.telnetClient = telnetClient
self.clientTransport = clientTransport
self.serverTransport = serverTransport
return recvlineClient.onConnection
def _testwrite(self, bytes):
self.telnetClient.write(bytes)
try:
from twisted.conch import stdio
except ImportError:
stdio = None
from twisted.test.test_process import SignalMixin
class _StdioMixin(_BaseMixin, SignalMixin):
def setUp(self):
# A memory-only terminal emulator, into which the server will
# write things and make other state changes. What ends up
# here is basically what a user would have seen on their
# screen.
testTerminal = NotifyingExpectableBuffer()
# An insults client protocol which will translate bytes
# received from the child process into keystroke commands for
# an ITerminalProtocol.
insultsClient = insults.ClientProtocol(lambda: testTerminal)
# A process protocol which will translate stdout and stderr
# received from the child process to dataReceived calls and
# error reporting on an insults client protocol.
processClient = stdio.TerminalProcessProtocol(insultsClient)
# Run twisted/conch/stdio.py with the name of a class
# implementing ITerminalProtocol. This class will be used to
# handle bytes we send to the child process.
exe = sys.executable
module = stdio.__file__
args = ["python2.3", module, reflect.qual(self.serverProtocol)]
env = {"PYTHONPATH": os.pathsep.join(sys.path)}
from twisted.internet import reactor
clientTransport = reactor.spawnProcess(processClient, exe, args,
env=env, usePTY=True)
self.recvlineClient = self.testTerminal = testTerminal
self.processClient = processClient
self.clientTransport = clientTransport
# Wait for the process protocol and test terminal to become
# connected before proceeding. The former should always
# happen first, but it doesn't hurt to be safe.
return defer.gatherResults(filter(None, [
processClient.onConnection,
testTerminal.expect(">>> ")]))
def tearDown(self):
# Kill the child process. We're done with it.
try:
self.clientTransport.signalProcess("KILL")
except OSError:
pass
def trap(failure):
failure.trap(error.ProcessTerminated)
self.assertEquals(failure.value.exitCode, None)
self.assertEquals(failure.value.status, 9)
return self.testTerminal.onDisconnection.addErrback(trap)
def _testwrite(self, bytes):
self.clientTransport.write(bytes)
class RecvlineLoopbackMixin:
serverProtocol = EchoServer
def testSimple(self):
return self._trivialTest(
"first line\ndone",
[">>> first line",
"first line",
">>> done"])
def testLeftArrow(self):
return self._trivialTest(
insert + 'first line' + left * 4 + "xxxx\ndone",
[">>> first xxxx",
"first xxxx",
">>> done"])
def testRightArrow(self):
return self._trivialTest(
insert + 'right line' + left * 4 + right * 2 + "xx\ndone",
[">>> right lixx",
"right lixx",
">>> done"])
def testBackspace(self):
return self._trivialTest(
"second line" + backspace * 4 + "xxxx\ndone",
[">>> second xxxx",
"second xxxx",
">>> done"])
def testDelete(self):
return self._trivialTest(
"delete xxxx" + left * 4 + delete * 4 + "line\ndone",
[">>> delete line",
"delete line",
">>> done"])
def testInsert(self):
return self._trivialTest(
"third ine" + left * 3 + "l\ndone",
[">>> third line",
"third line",
">>> done"])
def testTypeover(self):
return self._trivialTest(
"fourth xine" + left * 4 + insert + "l\ndone",
[">>> fourth line",
"fourth line",
">>> done"])
def testHome(self):
return self._trivialTest(
insert + "blah line" + home + "home\ndone",
[">>> home line",
"home line",
">>> done"])
def testEnd(self):
return self._trivialTest(
"end " + left * 4 + end + "line\ndone",
[">>> end line",
"end line",
">>> done"])
class RecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, RecvlineLoopbackMixin):
pass
class RecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, RecvlineLoopbackMixin):
if stdio is None:
skip = "Terminal requirements missing, can't run recvline tests over stdio"
class HistoricRecvlineLoopbackMixin:
serverProtocol = EchoServer
def testUpArrow(self):
return self._trivialTest(
"first line\n" + up + "\ndone",
[">>> first line",
"first line",
">>> first line",
"first line",
">>> done"])
def testDownArrow(self):
return self._trivialTest(
"first line\nsecond line\n" + up * 2 + down + "\ndone",
[">>> first line",
"first line",
">>> second line",
"second line",
">>> second line",
"second line",
">>> done"])
class HistoricRecvlineLoopbackTelnet(_TelnetMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackSSH(_SSHMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
pass
class HistoricRecvlineLoopbackStdio(_StdioMixin, unittest.TestCase, HistoricRecvlineLoopbackMixin):
if stdio is None:
skip = "Terminal requirements missing, can't run historic recvline tests over stdio"
|
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
from asn1crypto import pem, x509
from ._errors import pretty_message
from ._types import byte_cls, type_name
from .errors import DuplicateCertificateError
class ValidationPath():
"""
Represents a path going towards an end-entity certificate
"""
# A list of asn1crypto.x509.Certificate objects, starting with a trust root
# and chaining to an end-entity certificate
_certs = None
# A set of asn1crypto.x509.Certificate.issuer_serial byte strings of
# certificates that are already in ._certs
_cert_hashes = None
def __init__(self, end_entity_cert=None):
"""
:param end_entity_cert:
An asn1crypto.x509.Certificate object for the end-entity certificate
"""
self._certs = []
self._cert_hashes = set()
if end_entity_cert:
self.prepend(end_entity_cert)
@property
def first(self):
"""
Returns the current beginning of the path - for a path to be complete,
this certificate should be a trust root
:return:
The first asn1crypto.x509.Certificate object in the path
"""
return self._certs[0]
def find_issuer(self, cert):
"""
Return the issuer of the cert specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to get the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
An asn1crypto.x509.Certificate object of the issuer
"""
for entry in self:
if entry.subject == cert.issuer:
if entry.key_identifier and cert.authority_key_identifier:
if entry.key_identifier == cert.authority_key_identifier:
return entry
else:
return entry
raise LookupError('Unable to find the issuer of the certificate specified')
def truncate_to(self, cert):
"""
Remove all certificates in the path after the cert specified
:param cert:
An asn1crypto.x509.Certificate object to find
:raises:
LookupError - when the certificate could not be found
:return:
The current ValidationPath object, for chaining
"""
cert_index = None
for index, entry in enumerate(self):
if entry.issuer_serial == cert.issuer_serial:
cert_index = index
break
if cert_index is None:
raise LookupError('Unable to find the certificate specified')
while len(self) > cert_index + 1:
self.pop()
return self
def truncate_to_issuer(self, cert):
"""
Remove all certificates in the path after the issuer of the cert
specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
The current ValidationPath object, for chaining
"""
issuer_index = None
for index, entry in enumerate(self):
if entry.subject == cert.issuer:
if entry.key_identifier and cert.authority_key_identifier:
if entry.key_identifier == cert.authority_key_identifier:
issuer_index = index
break
else:
issuer_index = index
break
if issuer_index is None:
raise LookupError('Unable to find the issuer of the certificate specified')
while len(self) > issuer_index + 1:
self.pop()
return self
def copy(self):
"""
Creates a copy of this path
:return:
A ValidationPath object
"""
copy = self.__class__()
copy._certs = self._certs[:]
copy._cert_hashes = self._cert_hashes.copy()
return copy
def pop(self):
"""
Removes the last certificate from the path
:return:
The current ValidationPath object, for chaining
"""
last_cert = self._certs.pop()
self._cert_hashes.remove(last_cert.issuer_serial)
return self
def append(self, cert):
"""
Appends a cert to the path. This should be a cert issued by the last
cert in the path.
:param cert:
An asn1crypto.x509.Certificate object
:return:
The current ValidationPath object, for chaining
"""
if not isinstance(cert, x509.Certificate):
if not isinstance(cert, byte_cls):
raise TypeError(pretty_message(
'''
cert must be a byte string or an
asn1crypto.x509.Certificate object, not %s
''',
type_name(cert)
))
if pem.detect(cert):
_, _, cert = pem.unarmor(cert)
cert = x509.Certificate.load(cert)
if cert.issuer_serial in self._cert_hashes:
raise DuplicateCertificateError()
self._cert_hashes.add(cert.issuer_serial)
self._certs.append(cert)
return self
def prepend(self, cert):
"""
Prepends a cert to the path. This should be the issuer of the previously
prepended cert.
:param cert:
An asn1crypto.x509.Certificate object or a byte string
:return:
The current ValidationPath object, for chaining
"""
if not isinstance(cert, x509.Certificate):
if not isinstance(cert, byte_cls):
raise TypeError(pretty_message(
'''
cert must be a byte string or an
asn1crypto.x509.Certificate object, not %s
''',
type_name(cert)
))
if pem.detect(cert):
_, _, cert = pem.unarmor(cert)
cert = x509.Certificate.load(cert)
if cert.issuer_serial in self._cert_hashes:
raise DuplicateCertificateError()
self._cert_hashes.add(cert.issuer_serial)
self._certs.insert(0, cert)
return self
def __len__(self):
return len(self._certs)
def __getitem__(self, key):
return self._certs[key]
def __iter__(self):
return iter(self._certs)
def __eq__(self, other):
return self._certs == other._certs
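# Illustrative usage sketch (not part of the original module). It assumes the
# caller already holds asn1crypto.x509.Certificate objects named root_cert,
# intermediate_cert and leaf_cert; only the ValidationPath API defined above
# is exercised.
#
#   path = ValidationPath(end_entity_cert=leaf_cert)
#   path.prepend(intermediate_cert)      # issuer of the previously prepended cert
#   path.prepend(root_cert)              # trust root becomes path.first
#   assert path.first is root_cert and len(path) == 3
#   issuer = path.find_issuer(leaf_cert)             # -> intermediate_cert
#   truncated = path.copy().truncate_to_issuer(leaf_cert)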
|
|
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
import sys
# For unix the prefix 'lib' is not considered.
if find_library('linear'):
liblinear = CDLL(find_library('linear'))
elif find_library('liblinear'):
liblinear = CDLL(find_library('liblinear'))
else:
if sys.platform == 'win32':
liblinear = CDLL('../windows/liblinear.dll')
else:
liblinear = CDLL('../liblinear.so.1')
# Construct constants
SOLVER_TYPE = ['L2R_LR', 'L2R_L2LOSS_SVC_DUAL', 'L2R_L2LOSS_SVC', 'L2R_L1LOSS_SVC_DUAL',\
'MCSVM_CS', 'L1R_L2LOSS_SVC', 'L1R_LR']
for i, s in enumerate(SOLVER_TYPE): exec("%s = %d" % (s , i))
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class feature_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def gen_feature_nodearray(xi, feature_max=None, issparse=True):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
        xi = [0] + list(xi)  # idx should start from 1; list() also accepts tuples
index_range = range(1, len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if issparse:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (feature_node * (len(index_range)+2))()
ret[-1].index = -1 # for bias term
ret[-2].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range :
max_idx = index_range[-1]
return ret, max_idx
class problem(Structure):
_names = ["l", "n", "y", "x", "bias"]
_types = [c_int, c_int, POINTER(c_int), POINTER(POINTER(feature_node)), c_double]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, bias = -1):
if len(y) != len(x) :
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
self.bias = -1
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_feature_nodearray(xi)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_int * l)()
for i, yi in enumerate(y): self.y[i] = y[i]
self.x = (POINTER(feature_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
self.set_bias(bias)
def set_bias(self, bias):
if self.bias == bias:
return
if bias >= 0 and self.bias < 0:
self.n += 1
node = feature_node(self.n, bias)
if bias < 0 and self.bias >= 0:
self.n -= 1
node = feature_node(-1, bias)
for xi in self.x_space:
xi[-2] = node
self.bias = bias
class parameter(Structure):
_names = ["solver_type", "eps", "C", "nr_weight", "weight_label", "weight"]
_types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double)]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def show(self):
attrs = parameter._names + self.__dict__.keys()
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
print(' %s: %s' % (attr, val))
def set_to_default_values(self):
self.solver_type = L2R_L2LOSS_SVC_DUAL
self.eps = float('inf')
self.C = 1
self.nr_weight = 0
self.weight_label = (c_int * 0)()
self.weight = (c_double * 0)()
self.bias = -1
self.cross_validation = False
self.nr_fold = 0
self.print_func = None
def parse_options(self, options):
argv = options.split()
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv) :
if argv[i] == "-s":
i = i + 1
self.solver_type = int(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-B":
i = i + 1
self.bias = float(argv[i])
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2 :
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
else :
raise ValueError("Wrong options")
i += 1
liblinear.set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
if self.eps == float('inf'):
if self.solver_type in [L2R_LR, L2R_L2LOSS_SVC]:
self.eps = 0.01
elif self.solver_type in [L2R_L2LOSS_SVC_DUAL, L2R_L1LOSS_SVC_DUAL, MCSVM_CS]:
self.eps = 0.1
elif self.solver_type in [L1R_L2LOSS_SVC, L1R_LR]:
self.eps = 0.01
class model(Structure):
_names = ["param", "nr_class", "nr_feature", "w", "label", "bias"]
_types = [parameter, c_int, c_int, POINTER(c_double), POINTER(c_int), c_double]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
liblinear.free_and_destroy_model(pointer(self))
def get_nr_feature(self):
return liblinear.get_nr_feature(self)
def get_nr_class(self):
return liblinear.get_nr_class(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
liblinear.get_labels(self, labels)
return labels[:nr_class]
def is_probability_model(self):
return (liblinear.check_probability_model(self) == 1)
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> model
Convert a ctypes POINTER(model) to a Python model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(liblinear.train, POINTER(model), [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.cross_validation, None, [POINTER(problem), POINTER(parameter), c_int, POINTER(c_int)])
fillprototype(liblinear.predict_values, c_int, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.predict, c_int, [POINTER(model), POINTER(feature_node)])
fillprototype(liblinear.predict_probability, c_int, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.save_model, c_int, [c_char_p, POINTER(model)])
fillprototype(liblinear.load_model, POINTER(model), [c_char_p])
fillprototype(liblinear.get_nr_feature, c_int, [POINTER(model)])
fillprototype(liblinear.get_nr_class, c_int, [POINTER(model)])
fillprototype(liblinear.get_labels, None, [POINTER(model), POINTER(c_int)])
fillprototype(liblinear.free_model_content, None, [POINTER(model)])
fillprototype(liblinear.free_and_destroy_model, None, [POINTER(POINTER(model))])
fillprototype(liblinear.destroy_param, None, [POINTER(parameter)])
fillprototype(liblinear.check_parameter, c_char_p, [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.check_probability_model, c_int, [POINTER(model)])
fillprototype(liblinear.set_print_string_function, None, [CFUNCTYPE(None, c_char_p)])
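# Illustrative usage sketch (not from the original interface file). The sample
# labels, features and option string below are made up; only the wrappers and
# prototypes declared above are used.
#
#   y = [1, -1, 1]
#   x = [{1: 1.0, 3: 2.5}, [0.5, 1.0], {2: -1.0}]
#   prob = problem(y, x, bias=1)            # builds feature_node arrays internally
#   param = parameter('-s 0 -c 4 -q')       # L2R_LR solver, C=4, quiet mode
#   err = liblinear.check_parameter(prob, param)
#   if not err:
#       m = toPyModel(liblinear.train(prob, param))
#       liblinear.save_model('example.model', m)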
|
|
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import handler
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib.mac import haddr_to_bin
from ryu.controller import dpset
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.ids import ids_utils
from ryu.lib import dpid as dpid_lib
from ryu.lib.ids import ids_monitor
from array import *
import MySQLdb as mdb
import collections
class SimpleSwitch13(app_manager.RyuApp):
_CONTEXTS = {
'dpset': dpset.DPSet,
'ids_monitor': ids_monitor.IDSMonitor
}
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
global src_ip
src_ip = '0.0.0.0'
global dst_ip
dst_ip = '1.1.1.0'
global bad_pkt_limit
bad_pkt_limit = 5
global threshold
threshold = 115
global packet_threshold
packet_threshold = 10
global allow_host
allow_host= [True,True,True,True,True,True,True]
global flow_count
flow_count = [0,0,0,0,0,0,0,0,0,0,0]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.dpset = kwargs['dpset']
self.ids_monitor = kwargs['ids_monitor']
global hosts
hosts={}
global newhosts
newhosts={}
global oldhosts
oldhosts={}
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
print " ~~~~ Inside simple switch 1.3"
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
def del_flow(self, datapath, match):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
mod = parser.OFPFlowMod(datapath=datapath,
command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
datapath.send_msg(mod)
def chk_pkt_value(self):
global good_pkt
good_pkt= True
f= open('/home/rashmi/RYU295/ryu/lib/ids/pkt_value.txt', 'r')
for line in f:
if "False" in line:
print " Packet type is : Bad packet "
good_pkt=False
break
return good_pkt
def get_host_name(self,src_ip):
global host_name
if(src_ip[-2]== '.'):
host_name = src_ip[-1]
else:
host_name = src_ip[-2:]
return host_name
def validate_host(self,src_ip,host_name,bad_pkt_limit):
        bad_pkt_count = 0
global is_good_host
is_good_host = True
global allow_host
f= open('/home/rashmi/RYU295/ryu/lib/ids/hosts_log/host%s.txt' % host_name, 'r')
for line in f:
if "bad" in line:
print " Host is : Bad "
bad_pkt_count += 1
if(bad_pkt_count > bad_pkt_limit):
is_good_host = False
else:
is_good_host = True
for j in hosts:
self.logger.info("Host IP= %s,packet count in current interval=%d",j,hosts[j])
global packet_threshold
hoststuple = collections.namedtuple('hoststuple', 'packets hostname')
best = sorted([hoststuple(v,k) for (k,v) in hosts.items()], reverse=True)
if best:
hoststuple = best[0]
self.logger.info("Host with max Packets in cur interval is %s The Packet Count is %s",hoststuple.hostname,hoststuple.packets)
if(hoststuple.packets >= packet_threshold):
if(src_ip == hoststuple.hostname):
allow_host[int(host_name)] = False
else:
allow_host[int(host_name)] = True
return ((is_good_host),(allow_host[int(host_name)]))
def flow_consistency_check(self,datapath,match,actions,out_port,host_name):
f= open('/home/rashmi/RYU295/ryu/lib/switch_flows.txt', 'r')
d = dict()
for line in f:
if line in d:
print ".... Flow rule already exists .... "
d[line] += 1
print "checking user input"
                usr_input = open('/home/rashmi/RYU295/ryu/lib/flow_decision.txt').read()
                option = str(usr_input)
                print "The option you entered is: ", option
                if "yes" in option:
print " ~~ Replacing the flow .. "
self.add_flow(datapath, 1, match, actions)
else:
print " ~~ Flow ignored .. "
else:
d[line] = 1
self.add_flow(datapath, 1, match, actions)
def packetParser(self, msg, packettype,actiontaken):
my_array = array('B', msg.data)
pkt = packet.Packet(my_array)
for p in pkt.protocols:
if hasattr(p, 'protocol_name') is True:
if p.protocol_name == 'ethernet':
#print 'ethernet src = ', p.src
#print 'ethernet dst = ', p.dst
# print 'ethernet type = ', p.ethertype
src_mac = p.src
dst_mac = p.dst
if p.protocol_name == 'ipv4':
# print 'ipv4 id = ', p.identification
#print 'ipv4 src ip = ', p.src
#print 'ipv4 dst ip = ', p.dst
#print 'ipv4 flags = ', p.flags
global src_ip #--sn
global dst_ip #--sn
src_ip = p.src
dst_ip = p.dst
#print "In ipv4 src ip: ", src_ip
#print "In ipv4 dst ip: ", dst_ip
if p.flags is not None:
ip_flags = 'IP flags = '+ str(p.flags)
else:
ip_flags = p.flags
self.writeToDB('IP', src_mac, dst_mac, src_ip, dst_ip, None, None, ip_flags, packettype,actiontaken)
if p.protocol_name == 'icmp':
# print 'icmp type = ', p.type
# print 'icmp code = ', p.code
# print 'icmp data = ', p.data
global src_ip #--sn
global dst_ip #--sn
if p.type is not None:
icmp_type = 'ICMP TYPE = '+ str(p.type)
else:
icmp_type = p.type
self.writeToDB('ICMP', src_mac, dst_mac, src_ip, dst_ip, None, None, icmp_type, packettype,actiontaken)
if p.protocol_name == 'tcp':
#print 'tcp src port = ', p.src_port
#print 'tcp dst port = ', p.dst_port
#print 'tcp options = ', p.option
global src_ip #--sn
global dst_ip #--sn
if p.option is not None:
tcp_options = 'TCP OPTIONS = '+ str(p.option)
else:
tcp_options = p.option
#print 'In SimplePacket Parser Before WriteToDB Call'
self.writeToDB('TCP', src_mac, dst_mac, src_ip, dst_ip, p.src_port, p.dst_port,tcp_options, packettype,actiontaken)
if p.protocol_name == 'udp':
global src_ip #--sn
global dst_ip #--sn
self.writeToDB('UDP', src_mac, dst_mac, src_ip, dst_ip,p.src_port,p.dst_port,None, packettype,actiontaken)
@handler.set_ev_cls(dpset.EventDP)
def dp_handler(self, ev):
if not ev.enter:
return
dp = ev.dp
match = dp.ofproto_parser.OFPMatch()
self.del_flow(dp,match)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
reason = msg.reason
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
global good_pkt
good_pkt = True
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
if reason == ofproto_v1_3.OFPR_ACTION:
self.ids_monitor.check_packet(msg)
self.logger.info(" ~~~~ packet in %s %s %s %s %s %s", dpid, src, dst, in_port,src_ip,dst_ip) #--Rash
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
good_pkt = self.chk_pkt_value()
#Initialize the file
f = open('/home/rashmi/RYU295/ryu/lib/ids/pkt_value.txt', 'w').close()
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
print "Actions taken: "
if good_pkt:
global src_ip
global dst_ip
global host_name
# Initialize the host log file
print " ~~~~ Packet is good" # set the actions accordingly
actiontaken = "Packet forwarded"
packettype = "Good packet"
self.packetParser(msg,packettype,actiontaken)
self.get_host_name(src_ip)
f = open('/home/rashmi/RYU295/ryu/lib/ids/hosts_log/host%s.txt' % host_name, 'w').close()
#Validate host before deciding the actions based on its history no of pkts , put the condition after that bfr actions
is_good_host,allow_host[int(host_name)] = self.validate_host(src_ip, host_name,bad_pkt_limit)
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ipv4_src = src_ip, ipv4_dst=dst_ip )
actions = [parser.OFPActionOutput(out_port)]
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
m = str(match)
f = open('/home/rashmi/RYU295/ryu/lib/switch_flows.txt', 'a')
f.write("\n")
f.write(m)
f.close()
if(allow_host[int(host_name)] == True):
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
self.flow_consistency_check(datapath,match,actions,out_port,host_name)
else:
self.logger.info("Cumulative packet count exceed threshold for host %s *** Blocking this host ***",host_name)
actions = [parser.OFPActionOutput(ofproto.OFPC_FRAG_DROP)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=1,
match=match, instructions=inst,hard_timeout = 60)
datapath.send_msg(mod)
else:
global src_ip
global dst_ip
global host_name
global bad_pkt_limit
global allow_host
print " ~~~ Packet is bad"
actiontaken = "Packet dropped"
packettype = "Malicious packet"
self.packetParser(msg,packettype,actiontaken)
self.get_host_name(src_ip)
f = open('/home/rashmi/RYU295/ryu/lib/ids/hosts_log/host%s.txt' % host_name, 'a')
f.write("\n")
f.write("bad")
f.close()
#Validate host before deciding the actions based on its history of pkts , put the condition after that bfr actions
is_good_host,allow_host[int(host_name)] = self.validate_host(src_ip,host_name,bad_pkt_limit)
if (is_good_host == False):
print "Host is malicious "
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ipv4_src = src_ip, ipv4_dst=dst_ip )
actions = [parser.OFPActionOutput(ofproto.OFPC_FRAG_DROP)]
'''m = str(match)
f = open('/home/rashmi/RYU295/ryu/lib/switch_flows.txt', 'a')
f.write("\n")
f.write(m)
f.close() '''
self.flow_consistency_check(datapath,match,actions,out_port,host_name)
else:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ipv4_src = src_ip, ipv4_dst=dst_ip )
'''m = str(match)
f = open('/home/rashmi/RYU295/ryu/lib/switch_flows.txt', 'a')
f.write("\n")
f.write(m)
f.close() '''
if(allow_host[int(host_name)] == True):
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
self.flow_consistency_check(datapath,match,actions,out_port,host_name)
else:
self.logger.info("Cumulative packet count exceed threshold for host %s *** Blocking this host ***",host_name)
actions = [parser.OFPActionOutput(ofproto.OFPC_FRAG_DROP)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=1,
match=match, instructions=inst, hard_timeout = 60)
datapath.send_msg(mod)
data = None
actions = " "
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
if reason != ofproto_v1_3.OFPR_ACTION:
self.ids_monitor.check_packet(msg)
self.logger.info(" ~~~~ packet in %s %s %s %s %s %s", dpid, src, dst, in_port, src_ip,dst_ip)
good_pkt = self.chk_pkt_value()
#Initialize the file
f = open('/home/rashmi/RYU295/ryu/lib/ids/pkt_value.txt', 'w').close()
if good_pkt:
print " ~~~ Packet is good"
actiontaken = "Packet forwarded"
packettype = "Good packet"
self.packetParser(msg,packettype,actiontaken)
global host_name
global allow_host
self.get_host_name(src_ip)
f = open('/home/rashmi/RYU295/ryu/lib/ids/hosts_log/host%s.txt' % host_name, 'w').close()
actions = [parser.OFPActionOutput(out_port)]
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ipv4_src = src_ip, ipv4_dst=dst_ip )
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
if(allow_host[int(host_name)] == True):
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
self.flow_consistency_check(datapath,match,actions,out_port,host_name)
else:
self.logger.info("Cumulative packet count exceed threshold for host %s *** Blocking this host ***",host_name)
actions = [parser.OFPActionOutput(ofproto.OFPC_FRAG_DROP)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=1,
match=match, instructions=inst,hard_timeout = 60)
datapath.send_msg(mod)
else:
global src_ip
global dst_ip
global host_name
global bad_pkt_limit
print " ~~~ Packet is bad" # set the actions accordingly
actiontaken = "Packet dropped"
packettype = "Malicious packet"
self.packetParser(msg,packettype,actiontaken)
self.get_host_name(src_ip)
f = open('/home/rashmi/RYU295/ryu/lib/ids/hosts_log/host%s.txt' % host_name, 'a')
f.write("\n")
f.write("bad")
f.close()
#Validate host before deciding the actions based on its history of pkts , put the condition after that bfr actions
                is_good_host, allow_host[int(host_name)] = self.validate_host(src_ip, host_name, bad_pkt_limit)
if (is_good_host == False): #host will be permanently blocked
print "Host is malicious "
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ipv4_src = src_ip, ipv4_dst=dst_ip )
actions = [parser.OFPActionOutput(ofproto.OFPC_FRAG_DROP)]
self.flow_consistency_check(datapath,match,actions,out_port,host_name)
else:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ipv4_src = src_ip, ipv4_dst=dst_ip )
if(allow_host[int(host_name)] == True):
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
self.flow_consistency_check(datapath,match,actions,out_port,host_name)
else:
self.logger.info("Cumulative packet count exceed threshold for host %s *** Blocking this host ***",host_name)
actions = [parser.OFPActionOutput(ofproto.OFPC_FRAG_DROP)]
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=1,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ids_monitor.AttackAlert)
def _dump_alert(self, ev):
alertmsg = ev.alertmsg
msg = ev.data
print '---------------alertmsg:', ''.join(alertmsg)
def writeToDB(self, protocol, srcmac, dstmac, srcip, dstip, srcport, dstport, options, packettype,actiontaken):
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
try:
#print 'Inside Try Block'
#print 'Protocol:',protocol
#print 'srcmac:',srcmac
#print 'dstmac:',dstmac
#print 'srcip:',srcip
#print 'dstip:',dstip
#print 'srcport:',srcport
#print 'dstport:',dstport
#print 'options:',options
cursor.execute("INSERT INTO packets(protocol,sourcemac, destmac, sourceip, destip, sourceport, destport, options,packettype,actiontaken)VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(protocol, srcmac, dstmac, srcip, dstip, srcport, dstport, options,packettype,actiontaken))
dbcon.commit()
        except Exception:
            #print 'Inside Exception block'
            dbcon.rollback()
        finally:
            dbcon.close()
|
|
import macmodel
import numpy as np
from numpy import sin, cos, tan
class Model(macmodel.Model):
    def make_A(self):
        '''
        Creates the A matrix (M*l*x = A*x)
        m: azimuthal fourier mode to compute
        '''
        Nk = self.Nk
        Nl = self.Nl
        E = self.E
        Pm = self.Pm
        N = self.N
        th = self.th
        Br = self.Br
        Bth = self.Bth
        Bph = self.Bph
################################
# Momentum Equation ############
################################
# R-momentum
self.add_gov_equation('rmom', 'ur')
self.rmom.add_drP('p', C= -1)
self.rmom.add_term('r_disp', -N**2)
# self.rmom.add_term('uph', 2.0*sin(th))
self.rmom.add_d2_b0('ur', C= E)
self.rmom.add_d2r_th('uth', C= E)
self.rmom.add_d2r_ph('uph', C= E)
# self.rmom.add_dr_bd0('br', C= E/Pm*Br)
# self.rmom.add_dr_b0('bth', C= -E/Pm*Bth)
# self.rmom.add_dr_b0('bph', C= -E/Pm*Bph)
# self.rmom.add_dth('br', C= E/Pm*Bth)
# self.rmom.add_dth('bth', C= E/Pm*Br)
# self.rmom.add_dph('bph', C= E/Pm*Br)
# self.rmom.add_dph('br', C= E/Pm*Bph)
self.A_rows = self.rmom.rows
self.A_cols = self.rmom.cols
self.A_vals = self.rmom.vals
del self.rmom
# Theta-Momentum
self.add_gov_equation('tmom', 'uth')
self.tmom.add_dthP('p', C= -1)
self.tmom.add_term('uph', 2.0*cos(th))
self.tmom.add_d2_bd0('uth', C= E)
self.tmom.add_d2th_r('ur', C= E)
self.tmom.add_d2th_ph('uph', C= E)
self.tmom.add_dr_b0('bth', C= E/Pm*Br)
# self.tmom.add_dr_bd0('br', C= -E/Pm*Bth)
# self.tmom.add_dth('bth', C= E/Pm*Bth)
self.tmom.add_dth('br', C= -E/Pm*Br)
# self.tmom.add_dth('bph', C= -E/Pm*Bph)
# self.tmom.add_dph('bph', C= E/Pm*Bth)
# self.tmom.add_dph('bth', C= E/Pm*Bph)
self.A_rows += self.tmom.rows
self.A_cols += self.tmom.cols
self.A_vals += self.tmom.vals
del self.tmom
# Phi-Momentum
self.add_gov_equation('pmom', 'uph')
self.pmom.add_dphP('p', C= -1)
self.pmom.add_term('uth', -2.0*cos(th))
# self.pmom.add_term('ur', 2.0*sin(th))
self.pmom.add_d2_bd0('uph', C= E)
self.pmom.add_d2ph_r('ur', C= E)
self.pmom.add_d2ph_th('uth', C= E)
self.pmom.add_dr_b0('bph', C= E/Pm*Br)
# self.pmom.add_dr_bd0('br', C= -E/Pm*Bph)
# self.pmom.add_dth('bph', C= E/Pm*Bth)
# self.pmom.add_dth('bth', C= E/Pm*Bph)
# self.pmom.add_dph('bph', C= E/Pm*Bph)
self.pmom.add_dph('br', C= -E/Pm*Br)
# self.pmom.add_dph('bth', C= -E/Pm*Bth)
self.A_rows += self.pmom.rows
self.A_cols += self.pmom.cols
self.A_vals += self.pmom.vals
del self.pmom
################################
# Lorentz Equation ##########
################################
# r-Lorentz
self.add_gov_equation('rlorentz', 'br')
# self.rlorentz.add_dth('ur', C= Bth)
self.rlorentz.add_dth('uth', C= -Br)
# self.rlorentz.add_dph('ur', C= Bph)
self.rlorentz.add_dph('uph', C= -Br)
self.rlorentz.add_d2_bd0('br', C= E/Pm)
self.rlorentz.add_d2r_th('bth', C= E/Pm)
self.rlorentz.add_d2r_ph('bph', C= E/Pm)
self.A_rows += self.rlorentz.rows
self.A_cols += self.rlorentz.cols
self.A_vals += self.rlorentz.vals
del self.rlorentz
# theta-Lorentz
self.add_gov_equation('thlorentz', 'bth')
self.thlorentz.add_dr_bd0('uth', C= Br)
# self.thlorentz.add_dr_b0('ur', C= -Bth)
# self.thlorentz.add_dph('uth', C= Bph)
# self.thlorentz.add_dph('uph', C= -Bth)
self.thlorentz.add_d2_b0('bth', C= E/Pm)
self.thlorentz.add_d2th_r('br', C= E/Pm)
self.thlorentz.add_d2th_ph('bph', C= E/Pm)
self.A_rows += self.thlorentz.rows
self.A_cols += self.thlorentz.cols
self.A_vals += self.thlorentz.vals
del self.thlorentz
# phi-Lorentz
self.add_gov_equation('phlorentz', 'bph')
self.phlorentz.add_dr_bd0('uph', C= Br)
# self.phlorentz.add_dr_b0('ur', C= -Bph)
# self.phlorentz.add_dth('uph', C= Bth)
# self.phlorentz.add_dth('uth', C= -Bph)
self.phlorentz.add_d2_b0('bph', C= E/Pm)
self.phlorentz.add_d2ph_r('br', C= E/Pm)
self.phlorentz.add_d2ph_th('bth', C= E/Pm)
self.A_rows += self.phlorentz.rows
self.A_cols += self.phlorentz.cols
self.A_vals += self.phlorentz.vals
del self.phlorentz
# Divergence (Mass Conservation) #########
self.add_gov_equation('div', 'p')
self.div.add_dr_b0('ur')
self.div.add_dth('uth')
self.div.add_dph('uph')
self.A_rows += self.div.rows
self.A_cols += self.div.cols
self.A_vals += self.div.vals
del self.div
# Displacement Equation #########
self.add_gov_equation('rdisp', 'r_disp')
self.rdisp.add_term('ur', np.ones((Nk,Nl)))
self.A_rows += self.rdisp.rows
self.A_cols += self.rdisp.cols
self.A_vals += self.rdisp.vals
del self.rdisp
self.A = macmodel.coo_matrix((self.A_vals, (self.A_rows, self.A_cols)),
shape=(self.SizeM, self.SizeM))
del self.A_vals, self.A_rows, self.A_cols
return self.A
def make_B(self):
'''
Creates the B matrix (B*l*x = A*x)
m: azimuthal fourier mode to compute
'''
ones = np.ones((self.Nk, self.Nl))
self.B_rows = []
self.B_cols = []
self.B_vals = []
self.add_gov_equation('B_uth', 'uth')
self.B_uth.add_term('uth', ones)
self.B_rows = self.B_uth.rows
self.B_cols = self.B_uth.cols
self.B_vals = self.B_uth.vals
del self.B_uth
self.add_gov_equation('B_uph', 'uph')
self.B_uph.add_term('uph', ones)
self.B_rows += self.B_uph.rows
self.B_cols += self.B_uph.cols
self.B_vals += self.B_uph.vals
del self.B_uph
self.add_gov_equation('B_rlorentz', 'br')
self.B_rlorentz.add_term('br', ones)
self.B_rows += self.B_rlorentz.rows
self.B_cols += self.B_rlorentz.cols
self.B_vals += self.B_rlorentz.vals
del self.B_rlorentz
self.add_gov_equation('B_thlorentz', 'bth')
self.B_thlorentz.add_term('bth', ones)
self.B_rows += self.B_thlorentz.rows
self.B_cols += self.B_thlorentz.cols
self.B_vals += self.B_thlorentz.vals
del self.B_thlorentz
self.add_gov_equation('B_phlorentz', 'bph')
self.B_phlorentz.add_term('bph', ones)
self.B_rows += self.B_phlorentz.rows
self.B_cols += self.B_phlorentz.cols
self.B_vals += self.B_phlorentz.vals
del self.B_phlorentz
self.add_gov_equation('B_rdisp', 'r_disp')
self.B_rdisp.add_term('r_disp', ones)
self.B_rows += self.B_rdisp.rows
self.B_cols += self.B_rdisp.cols
self.B_vals += self.B_rdisp.vals
del self.B_rdisp
self.B = macmodel.coo_matrix((self.B_vals, (self.B_rows, self.B_cols)),
shape=(self.SizeM, self.SizeM))
del self.B_vals, self.B_rows, self.B_cols
return self.B
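# Illustrative usage sketch (not part of the original module): make_A() and
# make_B() assemble the sparse matrices of the generalized eigenproblem
# A*x = l*B*x, which could then be handed to an eigensolver, e.g.:
#
#   from scipy.sparse import linalg as spla
#   model = Model(...)              # constructor arguments per macmodel.Model
#   A = model.make_A()
#   B = model.make_B()
#   vals, vecs = spla.eigs(A.tocsc(), k=5, M=B.tocsc(), sigma=0)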
|
|
"""aiobfd: BFD Session with an individual remote"""
# pylint: disable=I0011,R0902,R0913
# pylint: disable=I0011,E1101
# socket.IPPROTO_IPV6 missing on Windows
import asyncio
import random
import socket
import time
import logging
import bitstring
from .transport import Client
from .packet import PACKET_FORMAT, PACKET_DEBUG_MSG
log = logging.getLogger(__name__) # pylint: disable=I0011,C0103
SOURCE_PORT_MIN = 49152
SOURCE_PORT_MAX = 65535
CONTROL_PORT = 3784
VERSION = 1
DIAG_NONE = 0 # No Diagnostic
DIAG_CONTROL_DETECTION_EXPIRED = 1 # Control Detection Time Expired
DIAG_ECHO_FAILED = 2 # Echo Function Failed
DIAG_NEIGHBOR_SIGNAL_DOWN = 3 # Neighbor Signaled Session Down
DIAG_FORWARD_PLANE_RESET = 4 # Forwarding Plane Reset
DIAG_PATH_DOWN = 5 # Path Down
DIAG_CONCAT_PATH_DOWN = 6 # Concatenated Path Down
DIAG_ADMIN_DOWN = 7 # Administratively Down
DIAG_REV_CONCAT_PATH_DOWN = 8 # Reverse Concatenated Path Down
STATE_ADMIN_DOWN = 0 # AdminDown
STATE_DOWN = 1 # Down
STATE_INIT = 2 # Init
STATE_UP = 3 # Up
CONTROL_PLANE_INDEPENDENT = False # Control Plane Independent
# Default timers
DESIRED_MIN_TX_INTERVAL = 1000000 # Minimum initial value
# Keep these fields statically disabled as they're not implemented
AUTH_TYPE = None # Authentication disabled
DEMAND_MODE = False # Demand Mode
MULTIPOINT = False # Multipoint
REQUIRED_MIN_ECHO_RX_INTERVAL = 0 # Do not support echo packet
class Session:
"""BFD session with a remote"""
def __init__(self, local, remote, family=socket.AF_UNSPEC, passive=False,
tx_interval=1000000, rx_interval=1000000, detect_mult=3):
# Argument variables
self.local = local
self.remote = remote
self.family = family
self.passive = passive
self.loop = asyncio.get_event_loop()
self.rx_interval = rx_interval # User selectable value
self.tx_interval = tx_interval # User selectable value
# As per 6.8.1. State Variables
self.state = STATE_DOWN
self.remote_state = STATE_DOWN
self.local_discr = random.randint(0, 4294967295) # 32-bit value
self.remote_discr = 0
self.local_diag = DIAG_NONE
self._desired_min_tx_interval = DESIRED_MIN_TX_INTERVAL
self._required_min_rx_interval = rx_interval
self._remote_min_rx_interval = 1
self.demand_mode = DEMAND_MODE
self.remote_demand_mode = False
self.detect_mult = detect_mult
self.auth_type = AUTH_TYPE
self.rcv_auth_seq = 0
self.xmit_auth_seq = random.randint(0, 4294967295) # 32-bit value
self.auth_seq_known = False
# State Variables beyond those defined in RFC 5880
self._async_tx_interval = 1000000
self._final_async_tx_interval = None # Used to delay timer changes
self.last_rx_packet_time = None
self._async_detect_time = None
self._final_async_detect_time = None # Used to delay timer changes
self.poll_sequence = False
self._remote_detect_mult = None
self._remote_min_tx_interval = None
self._tx_packets = None
# Create the local client and run it once to grab a port
log.debug('Setting up UDP client for %s:%s.', remote, CONTROL_PORT)
src_port = random.randint(SOURCE_PORT_MIN, SOURCE_PORT_MAX)
fam, _, _, _, addr = socket.getaddrinfo(self.local, src_port)[0]
sock = socket.socket(family=fam, type=socket.SOCK_DGRAM)
if fam == socket.AF_INET:
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 255)
elif fam == socket.AF_INET6:
# Under Windows the IPv6 socket constant is somehow missing
# https://bugs.python.org/issue29515
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_UNICAST_HOPS, 255)
sock.bind(addr)
task = self.loop.create_datagram_endpoint(Client, sock=sock)
self.client, _ = self.loop.run_until_complete(task)
log.info('Sourcing traffic for %s:%s from %s:%s.',
remote, CONTROL_PORT,
self.client.get_extra_info('sockname')[0],
self.client.get_extra_info('sockname')[1])
# Schedule the coroutines to transmit packets and detect failures
self._tx_packets = asyncio.ensure_future(self.async_tx_packets())
asyncio.ensure_future(self.detect_async_failure())
# The transmit interval MUST be recalculated whenever
# bfd.DesiredMinTxInterval changes, or whenever bfd.RemoteMinRxInterval
# changes, and is equal to the greater of those two values.
# If either bfd.DesiredMinTxInterval is changed or
# bfd.RequiredMinRxInterval is changed, a Poll Sequence MUST be
# initiated (see section 6.5)
@property
def desired_min_tx_interval(self):
"""bfd.DesiredMinTxInterval"""
return self._desired_min_tx_interval
@desired_min_tx_interval.setter
def desired_min_tx_interval(self, value):
if value == self._desired_min_tx_interval:
return
log.info('bfd.DesiredMinTxInterval changed from %d to %d, starting '
'Poll Sequence.', self._desired_min_tx_interval, value)
# If bfd.DesiredMinTxInterval is increased and bfd.SessionState is Up,
# the actual transmission interval used MUST NOT change until the Poll
# Sequence described above has terminated.
tx_interval = max(value, self.remote_min_rx_interval)
if value > self._desired_min_tx_interval and self.state == STATE_UP:
self._final_async_tx_interval = tx_interval
log.info('Delaying increase in Tx Interval from %d to %d ...',
self._async_tx_interval, self._final_async_tx_interval)
else:
self._async_tx_interval = tx_interval
self._desired_min_tx_interval = value
self.poll_sequence = True
@property
def required_min_rx_interval(self):
"""bfd.RequiredMinRxInterval"""
return self._required_min_rx_interval
@required_min_rx_interval.setter
def required_min_rx_interval(self, value):
if value == self._required_min_rx_interval:
return
log.info('bfd.RequiredMinRxInterval changed from %d to %d, starting '
'Poll Sequence.', self._required_min_rx_interval, value)
detect_time = self.calc_detect_time(self.remote_detect_mult,
value,
self.remote_min_tx_interval)
if value < self._required_min_rx_interval and self.state == STATE_UP:
self._final_async_detect_time = detect_time
log.info('Delaying decrease in Detect Time from %d to %d ...',
self._async_detect_time, self._final_async_detect_time)
else:
self._async_detect_time = detect_time
self._required_min_rx_interval = value
self.poll_sequence = True
@property
def remote_min_rx_interval(self):
"""Property for remote_min_rx_interval so we can re-calculate
the async_tx_interval whenever this value changes"""
return self._remote_min_rx_interval
@remote_min_rx_interval.setter
def remote_min_rx_interval(self, value):
if value == self._remote_min_rx_interval:
return
# If the local system reduces its transmit interval due to
# bfd.RemoteMinRxInterval being reduced (the remote system has
# advertised a reduced value in Required Min RX Interval), and the
# remote system is not in Demand mode, the local system MUST honor
# the new interval immediately.
# We should cancel the tx_packets coro to do this.
old_tx_interval = self._async_tx_interval
self._async_tx_interval = max(value, self.desired_min_tx_interval)
if self._async_tx_interval < old_tx_interval:
log.info('Remote triggered decrease in the Tx Interval, forcing '
'change by restarting the Tx Packets process.')
self._restart_tx_packets()
self._remote_min_rx_interval = value
@property
def remote_min_tx_interval(self):
"""Property for remote_min_tx_interval so we can re-calculate
the detect_time whenever this value changes"""
return self._remote_min_tx_interval
@remote_min_tx_interval.setter
def remote_min_tx_interval(self, value):
if value == self._remote_min_tx_interval:
return
self._async_detect_time = \
self.calc_detect_time(self.remote_detect_mult,
self.required_min_rx_interval, value)
self._remote_min_tx_interval = value
@property
def remote_detect_mult(self):
"""Property for remote_detect_mult so we can re-calculate
the detect_time whenever this value changes"""
return self._remote_detect_mult
@remote_detect_mult.setter
def remote_detect_mult(self, value):
if value == self._remote_detect_mult:
return
self._async_detect_time = \
self.calc_detect_time(value, self.required_min_rx_interval,
self.remote_min_tx_interval)
self._remote_detect_mult = value
@staticmethod
def calc_detect_time(detect_mult, rx_interval, tx_interval):
"""Calculate the BFD Detection Time"""
# In Asynchronous mode, the Detection Time calculated in the local
# system is equal to the value of Detect Mult received from the remote
# system, multiplied by the agreed transmit interval of the remote
# system (the greater of bfd.RequiredMinRxInterval and the last
# received Desired Min TX Interval).
if not (detect_mult and rx_interval and tx_interval):
log.debug('BFD Detection Time calculation not possible with '
'values detect_mult: %s rx_interval: %s tx_interval: %s',
detect_mult, rx_interval, tx_interval)
return None
log.debug('BFD Detection Time calculated using '
'detect_mult: %s rx_interval: %s tx_interval: %s',
detect_mult, rx_interval, tx_interval)
return detect_mult * max(rx_interval, tx_interval)
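    # Illustrative arithmetic: with detect_mult=3, rx_interval=1000000 and
    # tx_interval=300000 (microseconds), the Detection Time is
    # 3 * max(1000000, 300000) = 3000000 us, i.e. 3 seconds.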
def encode_packet(self, final=False):
"""Encode a single BFD Control packet"""
# A system MUST NOT set the Demand (D) bit unless bfd.DemandMode is 1,
# bfd.SessionState is Up, and bfd.RemoteSessionState is Up.
demand = (self.demand_mode and self.state == STATE_UP and
self.remote_state == STATE_UP)
# A BFD Control packet MUST NOT have both the Poll (P) and Final (F)
# bits set. We'll give the F bit priority, the P bit will still be set
# in the next outgoing packet if needed.
poll = self.poll_sequence if not final else False
data = {
'version': VERSION,
'diag': self.local_diag,
'state': self.state,
'poll': poll,
'final': final,
'control_plane_independent': CONTROL_PLANE_INDEPENDENT,
'authentication_present': bool(self.auth_type),
'demand_mode': demand,
'multipoint': MULTIPOINT,
'detect_mult': self.detect_mult,
'length': 24,
'my_discr': self.local_discr,
'your_discr': self.remote_discr,
'desired_min_tx_interval': self.desired_min_tx_interval,
'required_min_rx_interval': self.required_min_rx_interval,
'required_min_echo_rx_interval': REQUIRED_MIN_ECHO_RX_INTERVAL
}
log.debug(PACKET_DEBUG_MSG, VERSION, self.local_diag, self.state,
poll, final, CONTROL_PLANE_INDEPENDENT, bool(self.auth_type),
demand, MULTIPOINT, self.detect_mult, 24, self.local_discr,
self.remote_discr, self.desired_min_tx_interval,
self.required_min_rx_interval, REQUIRED_MIN_ECHO_RX_INTERVAL)
return bitstring.pack(PACKET_FORMAT, **data).bytes
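    # Note: with authentication disabled, the packed result is the 24-byte
    # mandatory section of a BFD Control packet (RFC 5880, section 4.1).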
def tx_packet(self, final=False):
"""Transmit a single BFD packet to the remote peer"""
log.debug('Transmitting BFD packet to %s:%s',
self.remote, CONTROL_PORT)
self.client.sendto(
self.encode_packet(final), (self.remote, CONTROL_PORT))
async def async_tx_packets(self):
"""Asynchronously transmit control packet"""
try:
while True:
# A system MUST NOT transmit BFD Control packets if
# bfd.RemoteDiscr is zero and the system is taking the Passive
# role. A system MUST NOT periodically transmit BFD Control
# packets if bfd.RemoteMinRxInterval is zero.
# A system MUST NOT periodically transmit BFD Control packets
# if Demand mode is active on the remote system
                # (bfd.RemoteDemandMode is 1, bfd.SessionState is Up, and
# bfd.RemoteSessionState is Up) and a Poll Sequence is not
# being transmitted.
if not((self.remote_discr == 0 and self.passive) or
(self.remote_min_rx_interval == 0) or
(not self.poll_sequence and
(self.remote_demand_mode == 1 and
self.state == STATE_UP and
self.remote_state == STATE_UP))):
self.tx_packet()
# The periodic transmission of BFD Control packets MUST be
# jittered on a per-packet basis by up to 25%
# If bfd.DetectMult is equal to 1, the interval between
# transmitted BFD Control packets MUST be no more than 90% of
# the negotiated transmission interval, and MUST be no less
# than 75% of the negotiated transmission interval.
if self.detect_mult == 1:
interval = \
self._async_tx_interval * random.uniform(0.75, 0.90)
else:
interval = \
self._async_tx_interval * (1 - random.uniform(0, 0.25))
await asyncio.sleep(interval/1000000)
except asyncio.CancelledError: # pragma: no cover
log.info('tx_packets() was cancelled ...')
def _restart_tx_packets(self):
"""Allow other co-routines to request a restart of tx_packets()
when needed, i.e. due to a timer change"""
log.info('Attempting to cancel tx_packets() ...')
self._tx_packets.cancel()
log.info('Restarting tx_packets() ...')
self._tx_packets = asyncio.ensure_future(self.async_tx_packets())
def rx_packet(self, packet): # pylint: disable=I0011,R0912,R0915
"""Receive packet"""
# If the A bit is set and no authentication is in use (bfd.AuthType
# is zero), the packet MUST be discarded.
if packet.authentication_present and not self.auth_type:
raise IOError('Received packet with authentication while no '
'authentication is configured locally.')
# If the A bit is clear and authentication is in use (bfd.AuthType
# is nonzero), the packet MUST be discarded.
if (not packet.authentication_present) and self.auth_type:
raise IOError('Received packet without authentication while '
'authentication is configured locally.')
# If the A bit is set authenticate the packet under the rules of
# section 6.7.
if packet.authentication_present:
log.critical('Authenticated packet received, not supported!')
return
# Set bfd.RemoteDiscr to the value of My Discriminator.
self.remote_discr = packet.my_discr
# Set bfd.RemoteState to the value of the State (Sta) field.
self.remote_state = packet.state
# Set bfd.RemoteDemandMode to the value of the Demand (D) bit.
self.remote_demand_mode = packet.demand_mode
# Set bfd.RemoteMinRxInterval to the value of Required Min RX Interval.
self.remote_min_rx_interval = packet.required_min_rx_interval
# Non-RFC defined session state that we track anyway
self.remote_detect_mult = packet.detect_mult
self.remote_min_tx_interval = packet.desired_min_tx_interval
# Implementation of the FSM in section 6.8.6
if self.state == STATE_ADMIN_DOWN:
log.warning('Received packet from %s while in Admin Down state.',
self.remote)
return
if packet.state == STATE_ADMIN_DOWN:
if self.state != STATE_DOWN:
self.local_diag = DIAG_NEIGHBOR_SIGNAL_DOWN
self.state = STATE_DOWN
self.desired_min_tx_interval = DESIRED_MIN_TX_INTERVAL
log.error('BFD remote %s signaled going ADMIN_DOWN.',
self.remote)
else:
if self.state == STATE_DOWN:
if packet.state == STATE_DOWN:
self.state = STATE_INIT
log.error('BFD session with %s going to INIT state.',
self.remote)
elif packet.state == STATE_INIT:
self.state = STATE_UP
self.desired_min_tx_interval = self.tx_interval
log.error('BFD session with %s going to UP state.',
self.remote)
elif self.state == STATE_INIT:
if packet.state in (STATE_INIT, STATE_UP):
self.state = STATE_UP
self.desired_min_tx_interval = self.tx_interval
log.error('BFD session with %s going to UP state.',
self.remote)
else:
if packet.state == STATE_DOWN:
self.local_diag = DIAG_NEIGHBOR_SIGNAL_DOWN
self.state = STATE_DOWN
log.error('BFD remote %s signaled going DOWN.',
self.remote)
# If a BFD Control packet is received with the Poll (P) bit set to 1,
# the receiving system MUST transmit a BFD Control packet with the Poll
# (P) bit clear and the Final (F) bit set as soon as practicable, ...
if packet.poll:
log.info('Received packet with Poll (P) bit set from %s, '
'sending packet with Final (F) bit set.', self.remote)
self.tx_packet(final=True)
# When the system sending the Poll sequence receives a packet with
# Final, the Poll Sequence is terminated
if packet.final:
log.info('Received packet with Final (F) bit set from %s, '
'ending Poll Sequence.', self.remote)
self.poll_sequence = False
if self._final_async_tx_interval:
log.info('Increasing Tx Interval from %d to %d now that Poll '
'Sequence has ended.', self._async_tx_interval,
self._final_async_tx_interval)
self._async_tx_interval = self._final_async_tx_interval
self._final_async_tx_interval = None
if self._final_async_detect_time:
                log.info('Decreasing Detect Time from %d to %d now that Poll '
                         'Sequence has ended.', self._async_detect_time,
                         self._final_async_detect_time)
self._async_detect_time = self._final_async_detect_time
self._final_async_detect_time = None
# Set the time a packet was received to right now
self.last_rx_packet_time = time.time()
log.debug('Valid packet received from %s, updating last packet time.',
self.remote)
async def detect_async_failure(self):
"""Detect if a session has failed in asynchronous mode"""
while True:
if not (self.demand_mode or self._async_detect_time is None):
# If Demand mode is not active, and a period of time equal to
# the Detection Time passes without receiving a BFD Control
# packet from the remote system, and bfd.SessionState is Init
# or Up, the session has gone down -- the local system MUST set
# bfd.SessionState to Down and bfd.LocalDiag to 1.
if self.state in (STATE_INIT, STATE_UP) and \
((time.time() - self.last_rx_packet_time) >
(self._async_detect_time/1000000)):
self.state = STATE_DOWN
self.local_diag = DIAG_CONTROL_DETECTION_EXPIRED
self.desired_min_tx_interval = DESIRED_MIN_TX_INTERVAL
log.critical('Detected BFD remote %s going DOWN!',
self.remote)
log.info('Time since last packet: %d ms; '
'Detect Time: %d ms',
(time.time() - self.last_rx_packet_time) * 1000,
self._async_detect_time/1000)
await asyncio.sleep(1/1000)
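# Minimal usage sketch (illustrative only; assumes the module is importable as
# aiobfd.session and that the addresses below are placeholders for a local and
# a remote endpoint):
#
#   from aiobfd.session import Session
#   session = Session('192.0.2.1', '192.0.2.2',
#                     tx_interval=300000, rx_interval=300000, detect_mult=3)
#   session.loop.run_forever()   # packets flow via async_tx_packets()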
|
|
from __future__ import division
from itertools import chain
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd
from fisher import pvalue
import re
import collections
from nltk.stem.porter import PorterStemmer
import math
from percept.tasks.base import Task
from percept.fields.base import Complex, List, Dict, Float
from inputs.inputs import SimpsonsFormats
from percept.utils.models import RegistryCategories, get_namespace
from percept.conf.base import settings
import os
from percept.tasks.train import Train
from sklearn.ensemble import RandomForestClassifier
import pickle
import random
import logging
log = logging.getLogger(__name__)
MAX_FEATURES = 500
DISTANCE_MIN=1
CHARACTER_DISTANCE_MIN = .2
RESET_SCENE_EVERY = 5
def make_df(datalist, labels, name_prefix=""):
df = pd.DataFrame(datalist).T
if name_prefix!="":
labels = [name_prefix + "_" + l for l in labels]
labels = [l.replace(" ", "_").lower() for l in labels]
df.columns = labels
df.index = range(df.shape[0])
return df
def return_one():
return 1
class SpellCorrector(object):
"""
    Taken and slightly adapted from Peter Norvig's post at http://norvig.com/spell-correct.html
"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
punctuation = [".", "!", "?", ","]
def __init__(self):
self.NWORDS = self.train(self.words(file(os.path.join(settings.PROJECT_PATH,'data/big.txt')).read()))
self.cache = {}
def words(self, text):
return re.findall('[a-z]+', text.lower())
def train(self, features):
model = collections.defaultdict(return_one)
for f in features:
model[f] += 1
return model
def edits1(self, word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
inserts = [a + c + b for a, b in splits for c in self.alphabet]
return set(deletes + transposes + replaces + inserts)
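    # Illustrative example: edits1('ot') includes deletions ('t', 'o'), the
    # transposition 'to', replacements such as 'at' and 'ox', and insertions
    # such as 'oat' and 'oth'.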
def known_edits2(self, word):
return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS)
def known(self, words): return set(w for w in words if w in self.NWORDS)
def correct(self, word):
if word in self.cache:
return self.cache[word]
suffix = ""
for p in self.punctuation:
if word.endswith(p):
suffix = p
word = word[:-1]
candidates = self.known([word]) or self.known(self.edits1(word)) or self.known_edits2(word) or [word]
newword = max(candidates, key=self.NWORDS.get) + suffix
self.cache.update({word : newword})
return newword
class Vectorizer(object):
def __init__(self):
self.fit_done = False
def fit(self, input_text, input_scores, max_features=100, min_features=3):
self.spell_corrector = SpellCorrector()
self.stemmer = PorterStemmer()
new_text = self.batch_generate_new_text(input_text)
input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))]
self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english")
self.vectorizer1.fit(input_text)
self.vocab = self.get_vocab(input_text, input_scores, max_features)
self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab)
self.fit_done = True
self.input_text = input_text
def spell_correct_text(self, text):
text = text.lower()
split = text.split(" ")
corrected = [self.spell_corrector.correct(w) for w in split]
return corrected
def batch_apply(self, all_tokens, applied_func):
for key in all_tokens:
cor = applied_func(all_tokens[key])
all_tokens[key] = cor
return all_tokens
def batch_generate_new_text(self, text):
text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text]
text = [re.sub("\s+", " ", t) for t in text]
t_tokens = [t.split(" ") for t in text]
all_token_list = list(set(chain.from_iterable(t_tokens)))
all_token_dict = {}
for t in all_token_list:
all_token_dict.update({t : t})
        all_token_dict = self.batch_apply(all_token_dict, self.spell_corrector.correct)
        all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
for i in xrange(0,len(t_tokens)):
for j in xrange(0,len(t_tokens[i])):
t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j])
new_text = [" ".join(t) for t in t_tokens]
return new_text
def generate_new_text(self, text):
no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower())
no_punctuation = re.sub("\s+", " ", no_punctuation)
corrected = self.spell_correct_text(no_punctuation)
corrected = [self.stemmer.stem(w) for w in corrected]
new = " ".join(corrected)
return new
def get_vocab(self, input_text, input_scores, max_features):
train_mat = self.vectorizer1.transform(input_text)
input_score_med = np.median(input_scores)
new_scores = [0 if i<=input_score_med else 1 for i in input_scores]
ind_max_features = math.floor(max_features/max(input_scores))
all_vocab = []
all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in xrange(0,train_mat.shape[1])]
for s in xrange(0,max(input_scores)):
sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s]
out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s]
pvalues = []
for i in xrange(0,len(all_cols)):
lcol = all_cols[i]
good_lcol = lcol[sel_inds]
bad_lcol = lcol[out_inds]
good_lcol_present = len(good_lcol[good_lcol > 0])
good_lcol_missing = len(good_lcol[good_lcol == 0])
bad_lcol_present = len(bad_lcol[bad_lcol > 0])
bad_lcol_missing = len(bad_lcol[bad_lcol == 0])
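                # 2x2 contingency table: term present vs. absent, documents in
                # vs. out of score class s; Fisher's exact test scores how
                # strongly the term separates the class.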
pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing)
pvalues.append(pval.two_tail)
col_inds = list(xrange(0,train_mat.shape[1]))
p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"])
p_frame = p_frame.sort(['pvalues'], ascending=True)
getVar = lambda searchList, ind: [searchList[int(i)] for i in ind]
            vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:int(ind_max_features)+2])
all_vocab.append(vocab)
return list(set(list(chain.from_iterable(all_vocab))))
def batch_get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
new_text = self.batch_generate_new_text(text)
text = [text[i] + new_text[i] for i in xrange(0,len(text))]
return (self.vectorizer.transform(text).todense())
def get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
itext=text
if isinstance(text, list):
itext = text[0]
new_text = self.generate_new_text(itext)
if isinstance(text, list):
text = [text[0] + new_text]
else:
text = [text + new_text]
return (self.vectorizer.transform(text).todense())
class FeatureExtractor(Task):
data = Complex()
row_data = List()
speaker_code_dict = Dict()
speaker_codes = List()
vectorizer = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
help_text = "Cleanup simpsons scripts."
args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"))}
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
scriptfile = kwargs.get('scriptfile')
script_data = pickle.load(open(scriptfile))
script = script_data.tasks[2].voice_lines.value
speakers = []
lines = []
for s in script:
for (i,l) in enumerate(s):
if i>0:
previous_line = s[i-1]['line']
previous_speaker = s[i-1]['speaker']
else:
previous_line = ""
previous_speaker = ""
if i>1:
two_back_speaker = s[i-2]['speaker']
else:
two_back_speaker = ""
if len(s)>i+1:
next_line = s[i+1]['line']
else:
next_line = ""
current_line = s[i]['line']
current_speaker = s[i]['speaker']
lines.append(current_line)
speakers.append(current_speaker)
row_data = {
'previous_line' : previous_line,
'previous_speaker' : previous_speaker,
'next_line' : next_line,
'current_line' : current_line,
'current_speaker' : current_speaker,
'two_back_speaker' : two_back_speaker
}
self.row_data.append(row_data)
self.speaker_code_dict = {k:i for (i,k) in enumerate(list(set(speakers)))}
self.speaker_codes = [self.speaker_code_dict[s] for s in speakers]
        self.max_features = math.floor(MAX_FEATURES / 3)
self.vectorizer = Vectorizer()
self.vectorizer.fit(lines, self.speaker_codes, self.max_features)
prev_features = self.vectorizer.batch_get_features([rd['previous_line'] for rd in self.row_data])
cur_features = self.vectorizer.batch_get_features([rd['current_line'] for rd in self.row_data])
next_features = self.vectorizer.batch_get_features([rd['next_line'] for rd in self.row_data])
self.speaker_code_dict.update({'' : -1})
meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], [self.speaker_code_dict[s['previous_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "previous_speaker", "current_speaker"])
#meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "current_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features),meta_features],axis=1)
train_frame.index = range(train_frame.shape[0])
data = {
'vectorizer' : self.vectorizer,
'speaker_code_dict' : self.speaker_code_dict,
'train_frame' : train_frame,
'speakers' : make_df([speakers,self.speaker_codes, lines], ["speaker", "speaker_code", "line"]),
'data' : data,
'current_features' : cur_features,
}
return data
class RandomForestTrain(Train):
"""
A class to train a random forest
"""
colnames = List()
clf = Complex()
category = RegistryCategories.algorithms
namespace = get_namespace(__module__)
algorithm = RandomForestClassifier
args = {'n_estimators' : 300, 'min_samples_leaf' : 4, 'compute_importances' : True}
help_text = "Train and predict with Random Forest."
class KNNRF(Task):
data = Complex()
predictions = Complex()
importances = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
args = {'algo' : RandomForestTrain}
help_text = "Cleanup simpsons scripts."
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
from preprocess import CHARACTERS
vec_length = math.floor(MAX_FEATURES/3)
algo = kwargs.get('algo')
alg = algo()
train_data = data['train_frame'].iloc[:,:-1]
target = data['train_frame']['current_speaker']
clf = alg.train(train_data,target, **algo.args)
self.importances=clf.feature_importances_
test_data = data['data']
match_data = data['current_features']
reverse_speaker_code_dict = {data['speaker_code_dict'][k] : k for k in data['speaker_code_dict']}
speaker_list = []
speaker_codes = reverse_speaker_code_dict.keys()
for i in xrange(0,len(speaker_codes)):
s_text = "\n".join(list(data['speakers'][data['speakers']['speaker']==reverse_speaker_code_dict[speaker_codes[i]]]['line']))
speaker_list.append(s_text)
speaker_features = data['vectorizer'].batch_get_features(speaker_list)
self.predictions = []
counter = 0
for script in test_data['voice_script']:
counter+=1
log.info("On script {0} out of {1}".format(counter,len(test_data['voice_script'])))
lines = script.split("\n")
speaker_code = [-1 for i in xrange(0,len(lines))]
for (i,line) in enumerate(lines):
if i>0 and i%RESET_SCENE_EVERY!=0:
previous_line = lines[i-1]
previous_speaker = speaker_code[i-1]
else:
previous_line = ""
previous_speaker= -1
if i>1 and i%RESET_SCENE_EVERY!=0:
two_back_speaker = speaker_code[i-2]
else:
two_back_speaker = -1
if i<(len(lines)-1):
next_line = lines[i+1]
else:
next_line = ""
prev_features = data['vectorizer'].get_features(previous_line)
cur_features = data['vectorizer'].get_features(line)
next_features = data['vectorizer'].get_features(next_line)
meta_features = make_df([[two_back_speaker], [previous_speaker]],["two_back_speaker", "previous_speaker"])
#meta_features = make_df([[two_back_speaker]],["two_back_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features), meta_features],axis=1)
speaker_code[i] = alg.predict(train_frame)[0]
nearest_match, distance = self.find_nearest_match(cur_features, speaker_features)
if distance<CHARACTER_DISTANCE_MIN:
sc = speaker_codes[nearest_match]
speaker_code[i] = sc
continue
for k in CHARACTERS:
for c in CHARACTERS[k]:
if c in previous_line:
speaker_code[i] = data['speaker_code_dict'][k]
nearest_match, distance = self.find_nearest_match(cur_features,match_data)
if distance<DISTANCE_MIN:
sc = data['speakers']['speaker_code'][nearest_match]
speaker_code[i] = sc
continue
df = make_df([lines,speaker_code,[reverse_speaker_code_dict[round(s)] for s in speaker_code]],["line","speaker_code","speaker"])
self.predictions.append(df)
return data
def find_nearest_match(self, features, matrix):
features = np.asarray(features)
distances = [self.euclidean(u, features) for u in matrix]
nearest_match = distances.index(min(distances))
return nearest_match, min(distances)
def euclidean(self, v1, v2):
return np.sqrt(np.sum(np.square(np.subtract(v1,v2))))
"""
p = tasks[3].predictions.value
speakers = []
lines = []
for pr in p:
speakers.append(list(pr['speaker']))
lines.append(list(pr['line']))
from itertools import chain
speakers = list(chain.from_iterable(speakers))
lines = list(chain.from_iterable(lines))
rows = []
for (s,l) in zip(speakers, lines):
rows.append({
'speaker' : s,
'line': l,
})
import json
json.dump(rows,open("/home/vik/vikparuchuri/simpsons-scripts/data/final_voice.json","w+"))
"""
|
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
from pyversion import is_python3
from ctypes import WinDLL, get_last_error, FormatError, WinError, addressof
from ctypes import c_buffer
from ctypes.wintypes import BOOL, BOOLEAN, LPCWSTR, DWORD, HANDLE
from ctypes.wintypes import WCHAR, USHORT, LPVOID, ULONG
if is_python3():
from ctypes import c_ubyte, Structure, Union, byref
from ctypes.wintypes import LPDWORD
else:
# For legacy Python2 different imports are needed.
from ctypes.wintypes import POINTER, c_ubyte, Structure, Union, byref
LPDWORD = POINTER(DWORD)
kernel32 = WinDLL('kernel32', use_last_error=True)
UCHAR = c_ubyte
# Win32 error codes
ERROR_SUCCESS = 0
ERROR_NOT_SUPPORTED = 50
ERROR_PRIVILEGE_NOT_HELD = 1314
# Win32 API entry points
CreateSymbolicLinkW = kernel32.CreateSymbolicLinkW
CreateSymbolicLinkW.restype = BOOLEAN
CreateSymbolicLinkW.argtypes = (LPCWSTR, # lpSymlinkFileName In
LPCWSTR, # lpTargetFileName In
DWORD) # dwFlags In
# Symbolic link creation flags
SYMBOLIC_LINK_FLAG_FILE = 0x00
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x01
# symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972)
SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x02
GetFileAttributesW = kernel32.GetFileAttributesW
GetFileAttributesW.restype = DWORD
GetFileAttributesW.argtypes = (LPCWSTR,) # lpFileName In
INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
FILE_ATTRIBUTE_REPARSE_POINT = 0x00400
CreateFileW = kernel32.CreateFileW
CreateFileW.restype = HANDLE
CreateFileW.argtypes = (LPCWSTR, # lpFileName In
DWORD, # dwDesiredAccess In
DWORD, # dwShareMode In
LPVOID, # lpSecurityAttributes In_opt
DWORD, # dwCreationDisposition In
DWORD, # dwFlagsAndAttributes In
HANDLE) # hTemplateFile In_opt
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = (HANDLE,) # hObject In
INVALID_HANDLE_VALUE = HANDLE(-1).value
OPEN_EXISTING = 3
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
DeviceIoControl = kernel32.DeviceIoControl
DeviceIoControl.restype = BOOL
DeviceIoControl.argtypes = (HANDLE, # hDevice In
DWORD, # dwIoControlCode In
LPVOID, # lpInBuffer In_opt
DWORD, # nInBufferSize In
LPVOID, # lpOutBuffer Out_opt
DWORD, # nOutBufferSize In
LPDWORD, # lpBytesReturned Out_opt
LPVOID) # lpOverlapped Inout_opt
# Device I/O control flags and options
FSCTL_GET_REPARSE_POINT = 0x000900A8
IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
IO_REPARSE_TAG_SYMLINK = 0xA000000C
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 0x4000
class GENERIC_REPARSE_BUFFER(Structure):
_fields_ = (('DataBuffer', UCHAR * 1),)
class SYMBOLIC_LINK_REPARSE_BUFFER(Structure):
_fields_ = (('SubstituteNameOffset', USHORT),
('SubstituteNameLength', USHORT),
('PrintNameOffset', USHORT),
('PrintNameLength', USHORT),
('Flags', ULONG),
('PathBuffer', WCHAR * 1))
@property
def PrintName(self):
arrayt = WCHAR * (self.PrintNameLength // 2)
offset = type(self).PathBuffer.offset + self.PrintNameOffset
return arrayt.from_address(addressof(self) + offset).value
class MOUNT_POINT_REPARSE_BUFFER(Structure):
_fields_ = (('SubstituteNameOffset', USHORT),
('SubstituteNameLength', USHORT),
('PrintNameOffset', USHORT),
('PrintNameLength', USHORT),
('PathBuffer', WCHAR * 1))
@property
def PrintName(self):
arrayt = WCHAR * (self.PrintNameLength // 2)
offset = type(self).PathBuffer.offset + self.PrintNameOffset
return arrayt.from_address(addressof(self) + offset).value
class REPARSE_DATA_BUFFER(Structure):
class REPARSE_BUFFER(Union):
_fields_ = (('SymbolicLinkReparseBuffer', SYMBOLIC_LINK_REPARSE_BUFFER),
('MountPointReparseBuffer', MOUNT_POINT_REPARSE_BUFFER),
('GenericReparseBuffer', GENERIC_REPARSE_BUFFER))
_fields_ = (('ReparseTag', ULONG),
('ReparseDataLength', USHORT),
('Reserved', USHORT),
('ReparseBuffer', REPARSE_BUFFER))
_anonymous_ = ('ReparseBuffer',)
def create_filesymlink(source, link_name):
"""Creates a Windows file symbolic link source pointing to link_name."""
_create_symlink(source, link_name, SYMBOLIC_LINK_FLAG_FILE)
def create_dirsymlink(source, link_name):
"""Creates a Windows directory symbolic link source pointing to link_name.
"""
_create_symlink(source, link_name, SYMBOLIC_LINK_FLAG_DIRECTORY)
def _create_symlink(source, link_name, dwFlags):
if not CreateSymbolicLinkW(link_name, source,
dwFlags | SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE):
# See https://github.com/golang/go/pull/24307/files#diff-b87bc12e4da2497308f9ef746086e4f0
# "the unprivileged create flag is unsupported below Windows 10 (1703, v10.0.14972).
# retry without it."
if not CreateSymbolicLinkW(link_name, source, dwFlags):
code = get_last_error()
error_desc = FormatError(code).strip()
if code == ERROR_PRIVILEGE_NOT_HELD:
raise OSError(errno.EPERM, error_desc, link_name)
      _raise_winerror(
          code,
          'Error creating symbolic link "{}"'.format(link_name))
def islink(path):
result = GetFileAttributesW(path)
if result == INVALID_FILE_ATTRIBUTES:
return False
return bool(result & FILE_ATTRIBUTE_REPARSE_POINT)
def readlink(path):
reparse_point_handle = CreateFileW(path,
0,
0,
None,
OPEN_EXISTING,
FILE_FLAG_OPEN_REPARSE_POINT |
FILE_FLAG_BACKUP_SEMANTICS,
None)
if reparse_point_handle == INVALID_HANDLE_VALUE:
    _raise_winerror(
        get_last_error(),
        'Error opening symbolic link "{}"'.format(path))
target_buffer = c_buffer(MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
n_bytes_returned = DWORD()
io_result = DeviceIoControl(reparse_point_handle,
FSCTL_GET_REPARSE_POINT,
None,
0,
target_buffer,
len(target_buffer),
byref(n_bytes_returned),
None)
CloseHandle(reparse_point_handle)
if not io_result:
    _raise_winerror(
        get_last_error(),
        'Error reading symbolic link "{}"'.format(path))
rdb = REPARSE_DATA_BUFFER.from_buffer(target_buffer)
if rdb.ReparseTag == IO_REPARSE_TAG_SYMLINK:
return _preserve_encoding(path, rdb.SymbolicLinkReparseBuffer.PrintName)
elif rdb.ReparseTag == IO_REPARSE_TAG_MOUNT_POINT:
return _preserve_encoding(path, rdb.MountPointReparseBuffer.PrintName)
# Unsupported reparse point type
  _raise_winerror(
      ERROR_NOT_SUPPORTED,
      'Error reading symbolic link "{}"'.format(path))
def _preserve_encoding(source, target):
"""Ensures target is the same string type (i.e. unicode or str) as source."""
if is_python3():
return target
if isinstance(source, unicode): # noqa: F821
return unicode(target) # noqa: F821
return str(target)
def _raise_winerror(code, error_desc):
win_error_desc = FormatError(code).strip()
error_desc = "%s: %s".format(error_desc, win_error_desc)
raise WinError(code, error_desc)
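# Illustrative usage sketch (not part of the original module; the paths are
# placeholders and creating symlinks may require Developer Mode or elevation):
#
#   create_dirsymlink(r'C:\repo\real_dir', r'C:\repo\link_dir')
#   if islink(r'C:\repo\link_dir'):
#       target = readlink(r'C:\repo\link_dir')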
|
|
import abc
from timeseries.lazy import *
import reprlib
class TimeSeriesInterface(abc.ABC):
"""
Interface for TimeSeries class which inherits from ABC
"""
@abc.abstractmethod
def __iter__(self):
"""Iterate over values."""
@abc.abstractmethod
def itertimes(self):
"""Iterate over times."""
@abc.abstractmethod
def iteritems(self):
"""Iterate over (time, value) pairs."""
@abc.abstractmethod
def itervalues(self):
"""Iterate over values."""
@abc.abstractmethod
def __repr__(self):
"""
All TimeSeries must support a repr function
"""
@abc.abstractmethod
def __str__(self):
"""
All TimeSeries must support a str function
"""
@lazy
def identity(self):
# lazy implementation of the identity function
return self
@property
def lazy(self):
"""
Lazy identity property.
self.lazy returns a LazyOperation instance of self.identity(), so that
self.lazy.eval() is self.
Returns
-------
self.identity() : a LazyOperation instance
"""
return self.identity()
@abc.abstractmethod
def mean(self, chunk=None):
"""
Require ability to calculate the mean of values within a
TimeSeriesInterface instance.
Optional `chunk` argument to be used for subclass instances
with no storage.
"""
@abc.abstractmethod
def std(self, chunk=None):
"""
Require ability to calculate the standard deviation of values within a
TimeSeriesInterface instance.
Optional `chunk` argument to be used for subclass instances
with no storage.
"""
class SizedContainerTimeSeriesInterface(TimeSeriesInterface):
"""
Interface for sized-container based TimeSeries.
Inherits from TimeSeriesInterface.
Times for TimeSeries stored in _times
Values for TimeSeries stored in _values
"""
def __iter__(self):
# iterate over values
for i in self._values:
yield i
def itertimes(self):
for i in self._times:
yield i
def iteritems(self):
for i,j in zip(self._times,self._values):
yield i,j
def itervalues(self):
for j in self._values:
yield j
def __contains__(self,item):
"""Returns boolean of whether given 'item' is contained in _.values"""
return item in self._values
def __repr__(self):
"""
Returns a string representation of a SizedContainerTimeSeriesInterface
instance, of the form
"Class_name(Length: 'n', Times: 't', Values: 'v')"
where n is the length of `self`
t displays the first three elements of _times
v displays the first three elements of _values
"""
r = reprlib.Repr()
r.maxlist = 3 # max elements displayed for lists
cls = type(self).__name__
timesStr = r.repr(self._times)
valuesStr = r.repr(self._values)
return "{}(Length: {}, Times: {}, Values: {})".format(cls, len(self._values), timesStr, valuesStr)
def __str__(self):
"""
Returns a string representation of a SizedContainerTimeSeriesInterface
instance, of the form
"Class_name with 'n' elements (Times: 't', Values: 'v')"
where n is the length of `self`
t displays the first three elements of _times
v displays the first three elements of _values
"""
r = reprlib.Repr()
r.maxlist = 3 # max elements displayed for lists
cls = type(self).__name__
timesStr = r.repr(self._times)
valuesStr = r.repr(self._values)
return "{} with {} elements (Times: {}, Values: {})".format(cls, len(self._values), timesStr, valuesStr)
def items(self):
"""Returns a list of (time, value) pairs"""
return list(zip(self._times,self._values))
def __pos__(self):
"""Returns: TimeSeries instance with no change to the values or times"""
return self
def __sub__(self, rhs):
"""
Description
-------------
If rhs is Real, subtract it from all elements of `_values`.
If rhs is a SizedContainerTimeSeriesInterface instance with the same
times, subtract the values element-wise.
Returns:
--------
A new instance of type(self) with the same times but updated values"""
return self + (-rhs)
@abc.abstractmethod
def __getitem__(self):
"""
Require indexing for sized-container based TimeSeries.
"""
@abc.abstractmethod
def __setitem__(self):
"""
Require assignment for sized-container based TimeSeries.
"""
@abc.abstractmethod
def __len__(self):
"""
Require notion of size for sized-container based TimeSeries.
"""
@abc.abstractmethod
def values(self):
"""
Require ability to return stored values for sized-container based TimeSeries.
"""
@abc.abstractmethod
def times(self):
"""
        Require ability to return stored times for sized-container based TimeSeries.
"""
@abc.abstractmethod
def interpolate(self):
"""
Require notion of value interpolation for times not present originally
for sized-container based TimeSeries.
"""
@abc.abstractmethod
def __neg__(self):
"""
Require ability to negate values for sized-container based TimeSeries.
"""
@abc.abstractmethod
def __abs__(self):
"""
Require notion of 2-norm over values for sized-container based TimeSeries.
"""
@abc.abstractmethod
def __bool__(self):
"""
Require ability to test if self._values is all zeros
"""
@abc.abstractmethod
def __add__(self):
"""
Require ability to add together two SizedContainerTimeSeriesInterface
instances, assuming that their times are equivalent pairwise.
"""
@abc.abstractmethod
def __mul__(self):
"""
Require ability to multiply two SizedContainerTimeSeriesInterface
instances, assuming that their times are equivalent pairwise.
"""
@abc.abstractmethod
def __eq__(self,rhs):
"""
Require notion of equality between two SizedContainerTimeSeriesInterface
instances.
"""
class StreamTimeSeriesInterface(TimeSeriesInterface):
"""
Abstract Base Class for timeseries data
that arrive streaming.
"""
@abc.abstractmethod
def produce(self,chunk=1)->list:
"""
Output a list of (time,value) tuples of length chunk
"""
def __repr__(self):
cls = type(self)
return "Instance of a {} with streaming input".format(cls.__name__)
def __str__(self):
return repr(self)
@abc.abstractmethod
def online_mean(self):
"""Return a SimulatedTimeSeries of the running mean."""
@abc.abstractmethod
def online_std(self):
"""Return a SimulatedTimeSeries of the running standard deviation."""
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
from datetime import datetime
from unittest import mock
from urllib.parse import parse_qs, urlparse
from google.cloud.logging.resource import Resource
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.providers.google.cloud.log.stackdriver_task_handler import StackdriverTaskHandler
from airflow.utils.state import State
def _create_list_response(messages, token):
page = [mock.MagicMock(payload={"message": message}) for message in messages]
return mock.MagicMock(pages=(n for n in [page]), next_page_token=token)
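# The mock above mimics the google-cloud-logging pager: a single page of
# entries whose payloads carry {"message": ...}, plus the next_page_token the
# handler reads to decide whether more pages remain.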
class TestStackdriverLoggingHandlerStandalone(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_pass_message_to_client(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
transport_type = mock.MagicMock()
stackdriver_task_handler = StackdriverTaskHandler(transport=transport_type, labels={"key": 'value'})
logger = logging.getLogger("logger")
logger.addHandler(stackdriver_task_handler)
logger.info("test-message")
stackdriver_task_handler.flush()
transport_type.assert_called_once_with(mock_client.return_value, 'airflow')
transport_type.return_value.send.assert_called_once_with(
mock.ANY, 'test-message', labels={"key": 'value'}, resource=Resource(type='global', labels={})
)
mock_client.assert_called_once_with(credentials='creds', client_info=mock.ANY, project="project_id")
class TestStackdriverLoggingHandlerTask(unittest.TestCase):
def setUp(self) -> None:
self.transport_mock = mock.MagicMock()
self.stackdriver_task_handler = StackdriverTaskHandler(transport=self.transport_mock)
self.logger = logging.getLogger("logger")
date = datetime(2016, 1, 1)
self.dag = DAG('dag_for_testing_file_task_handler', start_date=date)
task = DummyOperator(task_id='task_for_testing_file_log_handler', dag=self.dag)
self.ti = TaskInstance(task=task, execution_date=date)
self.ti.try_number = 1
self.ti.state = State.RUNNING
self.addCleanup(self.dag.clear)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_set_labels(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
self.stackdriver_task_handler.set_context(self.ti)
self.logger.addHandler(self.stackdriver_task_handler)
self.logger.info("test-message")
self.stackdriver_task_handler.flush()
labels = {
'task_id': 'task_for_testing_file_log_handler',
'dag_id': 'dag_for_testing_file_task_handler',
'execution_date': '2016-01-01T00:00:00+00:00',
'try_number': '1',
}
resource = Resource(type='global', labels={})
self.transport_mock.return_value.send.assert_called_once_with(
mock.ANY, 'test-message', labels=labels, resource=resource
)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_append_labels(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
self.stackdriver_task_handler = StackdriverTaskHandler(
transport=self.transport_mock, labels={"product.googleapis.com/task_id": "test-value"}
)
self.stackdriver_task_handler.set_context(self.ti)
self.logger.addHandler(self.stackdriver_task_handler)
self.logger.info("test-message")
self.stackdriver_task_handler.flush()
labels = {
'task_id': 'task_for_testing_file_log_handler',
'dag_id': 'dag_for_testing_file_task_handler',
'execution_date': '2016-01-01T00:00:00+00:00',
'try_number': '1',
'product.googleapis.com/task_id': 'test-value',
}
resource = Resource(type='global', labels={})
self.transport_mock.return_value.send.assert_called_once_with(
mock.ANY, 'test-message', labels=labels, resource=resource
)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch(
'airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client',
**{'return_value.project': 'asf-project'}, # type: ignore
)
def test_should_read_logs_for_all_try(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_entries.return_value = _create_list_response(["MSG1", "MSG2"], None)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata = self.stackdriver_task_handler.read(self.ti)
mock_client.return_value.list_entries.assert_called_once_with(
filter_='resource.type="global"\n'
'logName="projects/asf-project/logs/airflow"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"',
page_token=None,
)
self.assertEqual(['MSG1\nMSG2'], logs)
self.assertEqual([{'end_of_log': True}], metadata)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch(
'airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client',
**{'return_value.project': 'asf-project'}, # type: ignore
)
def test_should_read_logs_for_task_with_quote(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_entries.return_value = _create_list_response(["MSG1", "MSG2"], None)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
self.ti.task_id = "K\"OT"
logs, metadata = self.stackdriver_task_handler.read(self.ti)
mock_client.return_value.list_entries.assert_called_once_with(
filter_='resource.type="global"\n'
'logName="projects/asf-project/logs/airflow"\n'
'labels.task_id="K\\"OT"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"',
page_token=None,
)
self.assertEqual(['MSG1\nMSG2'], logs)
self.assertEqual([{'end_of_log': True}], metadata)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch(
'airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client',
**{'return_value.project': 'asf-project'}, # type: ignore
)
def test_should_read_logs_for_single_try(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_entries.return_value = _create_list_response(["MSG1", "MSG2"], None)
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata = self.stackdriver_task_handler.read(self.ti, 3)
mock_client.return_value.list_entries.assert_called_once_with(
filter_='resource.type="global"\n'
'logName="projects/asf-project/logs/airflow"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"\n'
'labels.try_number="3"',
page_token=None,
)
self.assertEqual(['MSG1\nMSG2'], logs)
self.assertEqual([{'end_of_log': True}], metadata)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_read_logs_with_pagination(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_entries.side_effect = [
_create_list_response(["MSG1", "MSG2"], "TOKEN1"),
_create_list_response(["MSG3", "MSG4"], None),
]
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata1 = self.stackdriver_task_handler.read(self.ti, 3)
mock_client.return_value.list_entries.assert_called_once_with(filter_=mock.ANY, page_token=None)
self.assertEqual(['MSG1\nMSG2'], logs)
self.assertEqual([{'end_of_log': False, 'next_page_token': 'TOKEN1'}], metadata1)
mock_client.return_value.list_entries.return_value.next_page_token = None
logs, metadata2 = self.stackdriver_task_handler.read(self.ti, 3, metadata1[0])
mock_client.return_value.list_entries.assert_called_with(filter_=mock.ANY, page_token="TOKEN1")
self.assertEqual(['MSG3\nMSG4'], logs)
self.assertEqual([{'end_of_log': True}], metadata2)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_read_logs_with_download(self, mock_client, mock_get_creds_and_project_id):
mock_client.return_value.list_entries.side_effect = [
_create_list_response(["MSG1", "MSG2"], "TOKEN1"),
_create_list_response(["MSG3", "MSG4"], None),
]
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
logs, metadata1 = self.stackdriver_task_handler.read(self.ti, 3, {'download_logs': True})
self.assertEqual(['MSG1\nMSG2\nMSG3\nMSG4'], logs)
self.assertEqual([{'end_of_log': True}], metadata1)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch(
'airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client',
**{'return_value.project': 'asf-project'}, # type: ignore
)
def test_should_read_logs_with_custom_resources(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
resource = Resource(
type="cloud_composer_environment",
labels={
"environment.name": 'test-instancce',
"location": 'europpe-west-3',
"project_id": "asf-project",
},
)
self.stackdriver_task_handler = StackdriverTaskHandler(
transport=self.transport_mock, resource=resource
)
entry = mock.MagicMock(payload={"message": "TEXT"})
page = [entry, entry]
mock_client.return_value.list_entries.return_value.pages = (n for n in [page])
mock_client.return_value.list_entries.return_value.next_page_token = None
logs, metadata = self.stackdriver_task_handler.read(self.ti)
mock_client.return_value.list_entries.assert_called_once_with(
filter_='resource.type="cloud_composer_environment"\n'
'logName="projects/asf-project/logs/airflow"\n'
'resource.labels."environment.name"="test-instancce"\n'
'resource.labels.location="europpe-west-3"\n'
'resource.labels.project_id="asf-project"\n'
'labels.task_id="task_for_testing_file_log_handler"\n'
'labels.dag_id="dag_for_testing_file_task_handler"\n'
'labels.execution_date="2016-01-01T00:00:00+00:00"',
page_token=None,
)
self.assertEqual(['TEXT\nTEXT'], logs)
self.assertEqual([{'end_of_log': True}], metadata)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_use_credentials(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
stackdriver_task_handler = StackdriverTaskHandler(
gcp_key_path="KEY_PATH",
)
client = stackdriver_task_handler._client
mock_get_creds_and_project_id.assert_called_once_with(
disable_logging=True,
key_path='KEY_PATH',
scopes=frozenset(
{
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/logging.read',
}
),
)
mock_client.assert_called_once_with(credentials='creds', client_info=mock.ANY, project="project_id")
self.assertEqual(mock_client.return_value, client)
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.get_credentials_and_project_id')
@mock.patch('airflow.providers.google.cloud.log.stackdriver_task_handler.gcp_logging.Client')
def test_should_return_valid_external_url(self, mock_client, mock_get_creds_and_project_id):
mock_get_creds_and_project_id.return_value = ('creds', 'project_id')
mock_client.return_value.project = 'project_id'
stackdriver_task_handler = StackdriverTaskHandler(
gcp_key_path="KEY_PATH",
)
url = stackdriver_task_handler.get_external_log_url(self.ti, self.ti.try_number)
parsed_url = urlparse(url)
parsed_qs = parse_qs(parsed_url.query)
self.assertEqual('https', parsed_url.scheme)
self.assertEqual('console.cloud.google.com', parsed_url.netloc)
self.assertEqual('/logs/viewer', parsed_url.path)
self.assertCountEqual(['project', 'interval', 'resource', 'advancedFilter'], parsed_qs.keys())
self.assertIn('global', parsed_qs['resource'])
filter_params = parsed_qs['advancedFilter'][0].split('\n')
expected_filter = [
'resource.type="global"',
'logName="projects/project_id/logs/airflow"',
f'labels.task_id="{self.ti.task_id}"',
f'labels.dag_id="{self.dag.dag_id}"',
f'labels.execution_date="{self.ti.execution_date.isoformat()}"',
f'labels.try_number="{self.ti.try_number}"',
]
self.assertCountEqual(expected_filter, filter_params)
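# A minimal sketch of the list-response fixture these tests rely on; `_example_list_response` is a
# hypothetical stand-in (the real `_create_list_response` is defined elsewhere in this module).
# Judging by how the mocks above are consumed, the response object needs a `.pages` iterable whose
# entries expose payload["message"], plus a `.next_page_token` attribute.
def _example_list_response(messages, token=None):
    # One page containing one mock log entry per message.
    page = [mock.MagicMock(payload={"message": message}) for message in messages]
    response = mock.MagicMock()
    response.pages = (p for p in [page])
    response.next_page_token = token
    return response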
|
|
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import datetime
import clawpack.visclaw.colormaps as colormap
import clawpack.visclaw.gaugetools as gaugetools
import clawpack.clawutil.data as clawdata
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
import clawpack.geoclaw.topotools as topotools
import clawpack.geoclaw.util as geoutil
import clawpack.geoclaw.surge.plot as surgeplot
# For comparison against observed gauge data:
import csv
from clawpack.geoclaw.util import fetch_noaa_tide_data
try:
from setplotfg import setplotfg
except:
setplotfg = None
def days2seconds(days):
return days * 60.0**2 * 24.0
def setplot(plotdata):
r"""Setplot function for surge plotting"""
    plotdata.clearfigures()  # clear any old figures, axes, items data
plotdata.format = 'ascii'
# Load data from output
claw_data = clawdata.ClawInputData(2)
claw_data.read(os.path.join(plotdata.outdir, 'claw.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))
# Load storm track
track = surgeplot.track_data(os.path.join(plotdata.outdir,'fort.track'))
# Set afteraxes function
surge_afteraxes = lambda cd: surgeplot.surge_afteraxes(cd, track,
plot_direction=False)
def plot_coastline(cd):
"""Load fine coastline for plotting around NYC"""
try:
            # Assumes there's a fine-resolution topography file in NetCDF format at this path
path = "/Users/mandli/Dropbox/research/data/topography/atlantic/sandy_bathy/ny_area.nc"
topo_file = topotools.Topography(path, topo_type=4)
topo_file.read(nc_params={"x_var":"lon",
"y_var":"lat",
"z_var": "Band1"})
axes = plt.gca()
axes.contour(topo_file.X, topo_file.Y, topo_file.Z,
levels=[-0.001, 0.001],
colors='k', linestyles='-')
        except:
            # Quietly skip the coastline overlay if the topography file is unavailable.
            pass
surge_afteraxes(cd)
# Color limits
surface_range = 4.5
speed_range = 1.0
# speed_range = 1.e-2
eta = physics.sea_level
    if not isinstance(eta, list):
        eta = [eta]
    surface_limits = [eta[0] - surface_range, eta[0] + surface_range]
    speed_limits = [0.0, speed_range]
wind_limits = [0, 55]
pressure_limits = [966, 1013]
friction_bounds = [0.01, 0.04]
vorticity_limits = [-1.e-2, 1.e-2]
land_bounds = [-10, 50]
# ==========================================================================
# Plot specifications
# ==========================================================================
# Limits for plots
regions = {'Full Domain': {"xlimits": [claw_data.lower[0], claw_data.upper[0]],
"ylimits": [claw_data.lower[1], claw_data.upper[1]],
"shrink": 1.0,
"figsize": [6.4, 4.8]},
'Tri-State Region': {"xlimits": [-74.5,-71.0],
"ylimits": [40.0,41.5],
"shrink": 1.0,
"figsize": [6.4, 4.8]},
'NYC': {"xlimits": [-74.2,-73.7],
"ylimits": [40.4,40.85],
"shrink": 1.0,
"figsize": [6.4, 4.8]}
}
def gauge_location_afteraxes(cd):
plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
surge_afteraxes(cd)
gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all',
format_string='ko', add_labels=True)
for (name, region_dict) in regions.items():
# ========================================================================
# Surface Elevations
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Surface - %s' % name)
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.xlimits = region_dict['xlimits']
plotaxes.ylimits = region_dict['ylimits']
plotaxes.afteraxes = plot_coastline
# plotaxes.afteraxes = surge_afteraxes
# plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits,
shrink=region_dict['shrink'])
surgeplot.add_land(plotaxes, bounds=land_bounds)
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0,0,0]
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0,0,0]
# ========================================================================
# Water Speed
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Currents - %s' % name)
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents'
plotaxes.scaled = True
plotaxes.xlimits = region_dict['xlimits']
plotaxes.ylimits = region_dict['ylimits']
plotaxes.afteraxes = plot_coastline
surgeplot.add_speed(plotaxes, bounds=speed_limits,
shrink=region_dict['shrink'])
surgeplot.add_land(plotaxes, bounds=land_bounds)
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0,0,0]
plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0,0,0]
# ========================================================================
# Hurricane forcing - Entire Atlantic
# ========================================================================
# Friction field
plotfigure = plotdata.new_plotfigure(name='Friction')
plotfigure.show = False
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Full Domain']['xlimits']
plotaxes.ylimits = regions['Full Domain']['ylimits']
plotaxes.title = "Manning's N Coefficients"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_friction(plotaxes,bounds=friction_bounds)
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure')
plotfigure.show = True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Full Domain']['xlimits']
plotaxes.ylimits = regions['Full Domain']['ylimits']
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_pressure(plotaxes,bounds=pressure_limits)
surgeplot.add_land(plotaxes, bounds=[-10, 500])
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed')
plotfigure.show = True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Full Domain']['xlimits']
plotaxes.ylimits = regions['Full Domain']['ylimits']
plotaxes.title = "Wind Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_wind(plotaxes,bounds=wind_limits,plot_type='imshow')
surgeplot.add_land(plotaxes, bounds=[-10, 500])
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True
stations = [('8518750', 'The Battery, NY'),
('8516945', 'Kings Point, NY'),
('8519483', 'Bergen Point West Reach, NY')]
#('8531680','Sandy Hook, NY'),
#('n03020','Narrows,NY')]
landfall_time = np.datetime64('2012-10-29T23:30')
begin_date = datetime.datetime(2012, 10, 28)
    end_date = datetime.datetime(2012, 10, 31)
def get_actual_water_levels(station_id):
# Fetch water levels and tide predictions for given station
date_time, water_level, tide = fetch_noaa_tide_data(station_id,
begin_date, end_date)
# Calculate times relative to landfall
seconds_rel_landfall = (date_time - landfall_time) / np.timedelta64(1, 's')
# Subtract tide predictions from measured water levels
water_level -= tide
return seconds_rel_landfall, water_level
def gauge_afteraxes(cd):
station_id, station_name = stations[cd.gaugeno-1]
seconds_rel_landfall, actual_level = get_actual_water_levels(station_id)
axes = plt.gca()
#surgeplot.plot_landfall_gauge(cd.gaugesoln, axes, landfall=landfall)
axes.plot(seconds_rel_landfall, actual_level, 'g')
# Fix up plot - in particular fix time labels
axes.set_title(station_name)
axes.set_xlabel('Seconds relative to landfall')
axes.set_ylabel('Surface (m)')
axes.set_xlim([days2seconds(-2), days2seconds(1)])
axes.set_ylim([0, 4])
axes.set_xticks([ days2seconds(-2), days2seconds(-1), 0, days2seconds(1)])
#axes.set_xticklabels([r"$-3$", r"$-2$", r"$-1$", r"$0$", r"$1$"])
#axes.grid(True)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = gauge_afteraxes
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
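# Hedged usage sketch (the output directory is an assumption, not part of this script): setplot()
# is normally picked up by visclaw via "make .plots", but the same plot data can be built by hand:
#
#     from clawpack.visclaw.data import ClawPlotData
#     plotdata = ClawPlotData()
#     plotdata.outdir = '_output'      # assumed GeoClaw output directory
#     plotdata = setplot(plotdata)     # attaches the figures/axes/items defined above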
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import unittest
import hamcrest as hc
import apache_beam as beam
from apache_beam.io.restriction_trackers import OffsetRange
from apache_beam.io.restriction_trackers import OffsetRestrictionTracker
from apache_beam.io.watermark_estimators import ManualWatermarkEstimator
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.runners.common import DoFnSignature
from apache_beam.runners.common import PerWindowInvoker
from apache_beam.runners.sdf_utils import SplitResultPrimary
from apache_beam.runners.sdf_utils import SplitResultResidual
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.transforms import trigger
from apache_beam.transforms import window
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import RestrictionProvider
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
class DoFnSignatureTest(unittest.TestCase):
def test_dofn_validate_process_error(self):
class MyDoFn(DoFn):
def process(self, element, w1=DoFn.WindowParam, w2=DoFn.WindowParam):
pass
with self.assertRaises(ValueError):
DoFnSignature(MyDoFn())
def test_dofn_validate_start_bundle_error(self):
class MyDoFn(DoFn):
def process(self, element):
pass
def start_bundle(self, w1=DoFn.WindowParam):
pass
with self.assertRaises(ValueError):
DoFnSignature(MyDoFn())
def test_dofn_validate_finish_bundle_error(self):
class MyDoFn(DoFn):
def process(self, element):
pass
def finish_bundle(self, w1=DoFn.WindowParam):
pass
with self.assertRaises(ValueError):
DoFnSignature(MyDoFn())
def test_unbounded_element_process_fn(self):
class UnboundedDoFn(DoFn):
@DoFn.unbounded_per_element()
def process(self, element):
pass
class BoundedDoFn(DoFn):
def process(self, element):
pass
signature = DoFnSignature(UnboundedDoFn())
self.assertTrue(signature.is_unbounded_per_element())
signature = DoFnSignature(BoundedDoFn())
self.assertFalse(signature.is_unbounded_per_element())
class DoFnProcessTest(unittest.TestCase):
# pylint: disable=expression-not-assigned
all_records = None
def setUp(self):
DoFnProcessTest.all_records = []
def record_dofn(self):
class RecordDoFn(DoFn):
def process(self, element):
DoFnProcessTest.all_records.append(element)
return RecordDoFn()
def test_dofn_process_keyparam(self):
class DoFnProcessWithKeyparam(DoFn):
def process(self, element, mykey=DoFn.KeyParam):
yield "{key}-verify".format(key=mykey)
pipeline_options = PipelineOptions()
with TestPipeline(options=pipeline_options) as p:
test_stream = (TestStream().advance_watermark_to(10).add_elements([1, 2]))
(
p
| test_stream
| beam.Map(lambda x: (x, "some-value"))
| "window_into" >> beam.WindowInto(
window.FixedWindows(5),
accumulation_mode=trigger.AccumulationMode.DISCARDING)
| beam.ParDo(DoFnProcessWithKeyparam())
| beam.ParDo(self.record_dofn()))
self.assertEqual(['1-verify', '2-verify'],
sorted(DoFnProcessTest.all_records))
def test_dofn_process_keyparam_error_no_key(self):
class DoFnProcessWithKeyparam(DoFn):
def process(self, element, mykey=DoFn.KeyParam):
yield "{key}-verify".format(key=mykey)
pipeline_options = PipelineOptions()
with self.assertRaises(ValueError),\
TestPipeline(options=pipeline_options) as p:
test_stream = (TestStream().advance_watermark_to(10).add_elements([1, 2]))
(p | test_stream | beam.ParDo(DoFnProcessWithKeyparam()))
class TestOffsetRestrictionProvider(RestrictionProvider):
def restriction_size(self, element, restriction):
return restriction.size()
class PerWindowInvokerSplitTest(unittest.TestCase):
def setUp(self):
self.window1 = IntervalWindow(0, 10)
self.window2 = IntervalWindow(10, 20)
self.window3 = IntervalWindow(20, 30)
self.windowed_value = WindowedValue(
'a', 57, (self.window1, self.window2, self.window3))
self.restriction = OffsetRange(0, 100)
self.watermark_estimator_state = Timestamp(21)
self.restriction_provider = TestOffsetRestrictionProvider()
self.watermark_estimator = ManualWatermarkEstimator(Timestamp(42))
self.maxDiff = None
def create_split_in_window(self, offset_index, windows):
return (
SplitResultPrimary(
primary_value=WindowedValue(((
'a',
(OffsetRange(0, offset_index), self.watermark_estimator_state)),
offset_index),
57,
windows)),
SplitResultResidual(
residual_value=WindowedValue(((
'a',
(
OffsetRange(offset_index, 100),
self.watermark_estimator.get_estimator_state())),
100 - offset_index),
57,
windows),
current_watermark=self.watermark_estimator.current_watermark(),
deferred_timestamp=None))
def create_split_across_windows(self, primary_windows, residual_windows):
primary = SplitResultPrimary(
primary_value=WindowedValue(
(('a', (OffsetRange(0, 100), self.watermark_estimator_state)), 100),
57,
primary_windows)) if primary_windows else None
residual = SplitResultResidual(
residual_value=WindowedValue(
(('a', (OffsetRange(0, 100), self.watermark_estimator_state)), 100),
57,
residual_windows),
current_watermark=None,
deferred_timestamp=None) if residual_windows else None
return primary, residual
def test_non_window_observing_checkpoint(self):
# test checkpoint
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.0,
None,
None,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
expected_primary_split, expected_residual_split = (
self.create_split_in_window(31, self.windowed_value.windows))
self.assertEqual([expected_primary_split], primaries)
self.assertEqual([expected_residual_split], residuals)
# We don't expect the stop index to be set for non window observing splits
self.assertIsNone(stop_index)
def test_non_window_observing_split(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.1,
None,
None,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
expected_primary_split, expected_residual_split = (
self.create_split_in_window(37, self.windowed_value.windows))
self.assertEqual([expected_primary_split], primaries)
self.assertEqual([expected_residual_split], residuals)
# We don't expect the stop index to be set for non window observing splits
self.assertIsNone(stop_index)
def test_non_window_observing_split_when_restriction_is_done(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(100)
self.assertIsNone(
PerWindowInvoker._try_split(
0.1,
None,
None,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator))
def test_window_observing_checkpoint_on_first_window(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.0,
0,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
expected_primary_split, expected_residual_split = (
self.create_split_in_window(31, (self.window1, )))
_, expected_residual_windows = (
self.create_split_across_windows(None, (self.window2, self.window3,)))
hc.assert_that(primaries, hc.contains_inanyorder(expected_primary_split))
hc.assert_that(
residuals,
hc.contains_inanyorder(
expected_residual_split,
expected_residual_windows,
))
self.assertEqual(stop_index, 1)
def test_window_observing_checkpoint_on_first_window_after_prior_split(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.0,
0,
        2,  # stop index < len(windows): a prior split has already occurred
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
expected_primary_split, expected_residual_split = (
self.create_split_in_window(31, (self.window1, )))
_, expected_residual_windows = (
self.create_split_across_windows(None, (self.window2, )))
hc.assert_that(primaries, hc.contains_inanyorder(expected_primary_split))
hc.assert_that(
residuals,
hc.contains_inanyorder(
expected_residual_split,
expected_residual_windows,
))
self.assertEqual(stop_index, 1)
def test_window_observing_split_on_first_window(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.2,
0,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
    # 20% of 2.7 windows = 20% of 270 offset left = 54 offset
    # 30 + 54 = 84 split offset (see the _example_split_offset sketch after this class)
expected_primary_split, expected_residual_split = (
self.create_split_in_window(84, (self.window1, )))
_, expected_residual_windows = (
self.create_split_across_windows(None, (self.window2, self.window3, )))
hc.assert_that(primaries, hc.contains_inanyorder(expected_primary_split))
hc.assert_that(
residuals,
hc.contains_inanyorder(
expected_residual_split,
expected_residual_windows,
))
self.assertEqual(stop_index, 1)
def test_window_observing_split_on_middle_window(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.2,
1,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
# 20% of 1.7 windows = 20% of 170 offset left = 34 offset
# 30 + 34 = 64 split offset
expected_primary_split, expected_residual_split = (
self.create_split_in_window(64, (self.window2, )))
expected_primary_windows, expected_residual_windows = (
self.create_split_across_windows((self.window1, ), (self.window3, )))
hc.assert_that(
primaries,
hc.contains_inanyorder(
expected_primary_split,
expected_primary_windows,
))
hc.assert_that(
residuals,
hc.contains_inanyorder(
expected_residual_split,
expected_residual_windows,
))
self.assertEqual(stop_index, 2)
def test_window_observing_split_on_last_window(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.2,
2,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
# 20% of 0.7 windows = 20% of 70 offset left = 14 offset
# 30 + 14 = 44 split offset
expected_primary_split, expected_residual_split = (
self.create_split_in_window(44, (self.window3, )))
expected_primary_windows, _ = (
self.create_split_across_windows((self.window1, self.window2, ), None))
hc.assert_that(
primaries,
hc.contains_inanyorder(
expected_primary_split,
expected_primary_windows,
))
hc.assert_that(residuals, hc.contains_inanyorder(expected_residual_split, ))
self.assertEqual(stop_index, 3)
def test_window_observing_split_on_first_window_fallback(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(100)
# We assume that we can't split this fully claimed restriction
self.assertIsNone(restriction_tracker.try_split(0))
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.0,
0,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
expected_primary_windows, expected_residual_windows = (
self.create_split_across_windows(
(self.window1, ), (self.window2, self.window3, )))
hc.assert_that(
primaries, hc.contains_inanyorder(
expected_primary_windows,
))
hc.assert_that(
residuals, hc.contains_inanyorder(
expected_residual_windows,
))
self.assertEqual(stop_index, 1)
def test_window_observing_split_on_middle_window_fallback(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(100)
# We assume that we can't split this fully claimed restriction
self.assertIsNone(restriction_tracker.try_split(0))
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.0,
1,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
expected_primary_windows, expected_residual_windows = (
self.create_split_across_windows(
(self.window1, self.window2, ), (self.window3, )))
hc.assert_that(
primaries, hc.contains_inanyorder(
expected_primary_windows,
))
hc.assert_that(
residuals, hc.contains_inanyorder(
expected_residual_windows,
))
self.assertEqual(stop_index, 2)
def test_window_observing_split_on_last_window_when_split_not_possible(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(100)
# We assume that we can't split this fully claimed restriction
self.assertIsNone(restriction_tracker.try_split(0))
self.assertIsNone(
PerWindowInvoker._try_split(
0.0,
2,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator))
def test_window_observing_split_on_window_boundary_round_up(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.6,
0,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
# 60% of 2.7 windows = 60% of 270 offset left = 162 offset
# 30 + 162 = 192 offset --> round to end of window 2
expected_primary_windows, expected_residual_windows = (
self.create_split_across_windows(
(self.window1, self.window2, ), (self.window3, )))
hc.assert_that(
primaries, hc.contains_inanyorder(
expected_primary_windows,
))
hc.assert_that(
residuals, hc.contains_inanyorder(
expected_residual_windows,
))
self.assertEqual(stop_index, 2)
def test_window_observing_split_on_window_boundary_round_down(self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.3,
0,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
# 30% of 2.7 windows = 30% of 270 offset left = 81 offset
# 30 + 81 = 111 offset --> round to end of window 1
expected_primary_windows, expected_residual_windows = (
self.create_split_across_windows(
(self.window1, ), (self.window2, self.window3, )))
hc.assert_that(
primaries, hc.contains_inanyorder(
expected_primary_windows,
))
hc.assert_that(
residuals, hc.contains_inanyorder(
expected_residual_windows,
))
self.assertEqual(stop_index, 1)
def test_window_observing_split_on_window_boundary_round_down_on_last_window(
self):
restriction_tracker = OffsetRestrictionTracker(self.restriction)
restriction_tracker.try_claim(30)
(primaries, residuals, stop_index) = PerWindowInvoker._try_split(
0.9,
0,
3,
self.windowed_value,
self.restriction,
self.watermark_estimator_state,
self.restriction_provider,
restriction_tracker,
self.watermark_estimator)
# 90% of 2.7 windows = 90% of 270 offset left = 243 offset
# 30 + 243 = 273 offset --> prefer a split so round to end of window 2
# instead of no split
expected_primary_windows, expected_residual_windows = (
self.create_split_across_windows(
(self.window1, self.window2, ), (self.window3, )))
hc.assert_that(
primaries, hc.contains_inanyorder(
expected_primary_windows,
))
hc.assert_that(
residuals, hc.contains_inanyorder(
expected_residual_windows,
))
self.assertEqual(stop_index, 2)
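# A small illustrative helper, not part of PerWindowInvoker, reproducing the arithmetic in the
# window-observing split comments above under the simplifying assumption that every window carries
# an identical restriction of `restriction_size` offsets.
def _example_split_offset(fraction, current_offset, restriction_size, windows_remaining):
  """Return the raw split offset before any rounding to window boundaries."""
  # Remaining work: what is left of the current window's restriction plus all unstarted windows.
  remaining = (restriction_size - current_offset) + restriction_size * windows_remaining
  return current_offset + fraction * remaining
# Example: fraction=0.2, current_offset=30, restriction_size=100, windows_remaining=2 gives
# 30 + 0.2 * 270 = 84, matching test_window_observing_split_on_first_window.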
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
from tempest.lib.api_schema.response.compute.v2_26 import servers as schemav226
from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class ServersClient(base_compute_client.BaseComputeClient):
schema_versions_info = [
{'min': None, 'max': '2.2', 'schema': schema},
{'min': '2.3', 'max': '2.8', 'schema': schemav23},
{'min': '2.9', 'max': '2.15', 'schema': schemav29},
{'min': '2.16', 'max': '2.18', 'schema': schemav216},
{'min': '2.19', 'max': '2.25', 'schema': schemav219},
{'min': '2.26', 'max': None, 'schema': schemav226}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
super(ServersClient, self).__init__(
auth_provider, service, region, **kwargs)
self.enable_instance_password = enable_instance_password
def create_server(self, **kwargs):
"""Create server.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/compute/#create-server
:param name: Server name
:param imageRef: Image reference (UUID)
:param flavorRef: Flavor reference (UUID or full URL)
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
:param scheduler_hints: The name is changed to os:scheduler_hints and
the parameter is set in the same level as the parameter 'server'.
"""
body = copy.deepcopy(kwargs)
if body.get('disk_config'):
body['OS-DCF:diskConfig'] = body.pop('disk_config')
hints = None
if body.get('scheduler_hints'):
hints = {'os:scheduler_hints': body.pop('scheduler_hints')}
post_body = {'server': body}
if hints:
post_body.update(hints)
post_body = json.dumps(post_body)
resp, body = self.post('servers', post_body)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'reservation_id' in body:
return rest_client.ResponseBody(resp, body)
if self.enable_instance_password:
create_schema = schema.create_server_with_admin_pass
else:
create_schema = schema.create_server
self.validate_response(create_schema, resp, body)
return rest_client.ResponseBody(resp, body)
def update_server(self, server_id, **kwargs):
"""Update server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#updateServer
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
if kwargs.get('disk_config'):
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
post_body = json.dumps({'server': kwargs})
resp, body = self.put("servers/%s" % server_id, post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.update_server, resp, body)
return rest_client.ResponseBody(resp, body)
def show_server(self, server_id):
"""Get server details.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#showServer
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_server, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server(self, server_id):
"""Delete server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#deleteServer
"""
resp, body = self.delete("servers/%s" % server_id)
self.validate_response(schema.delete_server, resp, body)
return rest_client.ResponseBody(resp, body)
def list_servers(self, detail=False, **params):
"""List servers.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#listServers
and http://developer.openstack.org/
api-ref-compute-v2.1.html#listDetailServers
"""
url = 'servers'
schema = self.get_schema(self.schema_versions_info)
_schema = schema.list_servers
if detail:
url += '/detail'
_schema = schema.list_servers_detail
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(_schema, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses(self, server_id):
"""Lists all addresses for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#list-ips
"""
resp, body = self.get("servers/%s/ips" % server_id)
body = json.loads(body)
self.validate_response(schema.list_addresses, resp, body)
return rest_client.ResponseBody(resp, body)
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(server_id, network_id))
body = json.loads(body)
self.validate_response(schema.list_addresses_by_network, resp, body)
return rest_client.ResponseBody(resp, body)
def action(self, server_id, action_name,
schema=schema.server_actions_common_schema,
**kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % server_id,
post_body)
if body:
body = json.loads(body)
self.validate_response(schema, resp, body)
return rest_client.ResponseBody(resp, body)
def create_backup(self, server_id, **kwargs):
"""Backup a server instance.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createBackup
"""
return self.action(server_id, "createBackup", **kwargs)
def change_password(self, server_id, **kwargs):
"""Change the root password for the server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#changePassword
"""
return self.action(server_id, 'changePassword', **kwargs)
def show_password(self, server_id):
resp, body = self.get("servers/%s/os-server-password" %
server_id)
body = json.loads(body)
self.validate_response(schema.show_password, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_password(self, server_id):
"""Removes the encrypted server password from the metadata server
Note that this does not actually change the instance server
password.
"""
resp, body = self.delete("servers/%s/os-server-password" %
server_id)
self.validate_response(schema.server_actions_delete_password,
resp, body)
return rest_client.ResponseBody(resp, body)
def reboot_server(self, server_id, **kwargs):
"""Reboot a server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#reboot
"""
return self.action(server_id, 'reboot', **kwargs)
def rebuild_server(self, server_id, image_ref, **kwargs):
"""Rebuild a server with a new image.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#rebuild
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
kwargs['imageRef'] = image_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
schema = self.get_schema(self.schema_versions_info)
if self.enable_instance_password:
rebuild_schema = schema.rebuild_server_with_admin_pass
else:
rebuild_schema = schema.rebuild_server
return self.action(server_id, 'rebuild',
rebuild_schema, **kwargs)
def resize_server(self, server_id, flavor_ref, **kwargs):
"""Change the flavor of a server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#resize
Most parameters except the following are passed to the API without
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
kwargs['flavorRef'] = flavor_ref
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
return self.action(server_id, 'resize', **kwargs)
def confirm_resize_server(self, server_id, **kwargs):
"""Confirm the flavor change for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#confirmResize
"""
return self.action(server_id, 'confirmResize',
schema.server_actions_confirm_resize,
**kwargs)
def revert_resize_server(self, server_id, **kwargs):
"""Revert a server back to its original flavor.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#revertResize
"""
return self.action(server_id, 'revertResize', **kwargs)
def list_server_metadata(self, server_id):
"""Lists all metadata for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#listServerMetadata
"""
resp, body = self.get("servers/%s/metadata" % server_id)
body = json.loads(body)
self.validate_response(schema.list_server_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
"""Sets one or more metadata items for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createServerMetadata
"""
if no_metadata_field:
post_body = ""
else:
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.set_server_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def update_server_metadata(self, server_id, meta):
"""Updates one or more metadata items for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#updateServerMetadata
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.update_server_metadata,
resp, body)
return rest_client.ResponseBody(resp, body)
def show_server_metadata_item(self, server_id, key):
"""Shows details for a metadata item, by key, for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#showServerMetadataItem
"""
resp, body = self.get("servers/%s/metadata/%s" % (server_id, key))
body = json.loads(body)
self.validate_response(schema.set_show_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def set_server_metadata_item(self, server_id, key, meta):
"""Sets a metadata item, by key, for a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#setServerMetadataItem
"""
post_body = json.dumps({'meta': meta})
resp, body = self.put('servers/%s/metadata/%s' % (server_id, key),
post_body)
body = json.loads(body)
self.validate_response(schema.set_show_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def delete_server_metadata_item(self, server_id, key):
"""Deletes a metadata item, by key, from a server.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#deleteServerMetadataItem
"""
resp, body = self.delete("servers/%s/metadata/%s" %
(server_id, key))
self.validate_response(schema.delete_server_metadata_item,
resp, body)
return rest_client.ResponseBody(resp, body)
def stop_server(self, server_id, **kwargs):
"""Stops a running server and changes its status to SHUTOFF.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#stop
"""
return self.action(server_id, 'os-stop', **kwargs)
def start_server(self, server_id, **kwargs):
"""Starts a stopped server and changes its status to ACTIVE.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#start
"""
return self.action(server_id, 'os-start', **kwargs)
def attach_volume(self, server_id, **kwargs):
"""Attaches a volume to a server instance.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#attachVolume
"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.attach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def update_attached_volume(self, server_id, attachment_id, **kwargs):
"""Swaps a volume attached to an instance for another volume"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.put('servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id),
post_body)
self.validate_response(schema.update_attached_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def detach_volume(self, server_id, volume_id): # noqa
"""Detaches a volume from a server instance.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#deleteVolumeAttachment
"""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
self.validate_response(schema.detach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_attachment(self, server_id, volume_id):
"""Return details about the given volume attachment.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#
getVolumeAttachmentDetails
"""
resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
server_id, volume_id))
body = json.loads(body)
self.validate_response(schema.show_volume_attachment, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_attachments(self, server_id):
"""Returns the list of volume attachments for a given instance.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#listVolumeAttachments
"""
resp, body = self.get('servers/%s/os-volume_attachments' % (
server_id))
body = json.loads(body)
self.validate_response(schema.list_volume_attachments, resp, body)
return rest_client.ResponseBody(resp, body)
def add_security_group(self, server_id, **kwargs):
"""Add a security group to the server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#addSecurityGroup
"""
# TODO(oomichi): The api-site doesn't contain this API description.
# So the above should be changed to the api-site link after
# adding the description on the api-site.
# LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524199
return self.action(server_id, 'addSecurityGroup', **kwargs)
def remove_security_group(self, server_id, **kwargs):
"""Remove a security group from the server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#removeSecurityGroup
"""
# TODO(oomichi): The api-site doesn't contain this API description.
# So the above should be changed to the api-site link after
# adding the description on the api-site.
# LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524199
return self.action(server_id, 'removeSecurityGroup', **kwargs)
def live_migrate_server(self, server_id, **kwargs):
"""This should be called with administrator privileges.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#migrateLive
"""
return self.action(server_id, 'os-migrateLive', **kwargs)
def migrate_server(self, server_id, **kwargs):
"""Migrate a server to a new host.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#migrate
"""
return self.action(server_id, 'migrate', **kwargs)
def lock_server(self, server_id, **kwargs):
"""Lock the given server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#lock
"""
return self.action(server_id, 'lock', **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlock the given server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#unlock
"""
return self.action(server_id, 'unlock', **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspend the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#suspend
"""
return self.action(server_id, 'suspend', **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspend the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#resume
"""
return self.action(server_id, 'resume', **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pause the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#pause
"""
return self.action(server_id, 'pause', **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pause the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#unpause
"""
return self.action(server_id, 'unpause', **kwargs)
def reset_state(self, server_id, **kwargs):
"""Reset the state of a server to active/error.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#resetState
"""
return self.action(server_id, 'os-resetState', **kwargs)
def shelve_server(self, server_id, **kwargs):
"""Shelve the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#shelve
"""
return self.action(server_id, 'shelve', **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelve the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#unshelve
"""
return self.action(server_id, 'unshelve', **kwargs)
def shelve_offload_server(self, server_id, **kwargs):
"""Shelve-offload the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#shelveOffload
"""
return self.action(server_id, 'shelveOffload', **kwargs)
def get_console_output(self, server_id, **kwargs):
"""Get console output.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#getConsoleOutput
"""
return self.action(server_id, 'os-getConsoleOutput',
schema.get_console_output, **kwargs)
def list_virtual_interfaces(self, server_id):
"""List the virtual interfaces used in an instance."""
resp, body = self.get('/'.join(['servers', server_id,
'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#rescue
"""
return self.action(server_id, 'rescue', schema.rescue_server, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#unrescue
"""
return self.action(server_id, 'unrescue')
def show_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/diagnostics" % server_id)
return rest_client.ResponseBody(resp, json.loads(body))
def list_instance_actions(self, server_id):
"""List the provided server action."""
resp, body = self.get("servers/%s/os-instance-actions" %
server_id)
body = json.loads(body)
self.validate_response(schema.list_instance_actions, resp, body)
return rest_client.ResponseBody(resp, body)
def show_instance_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(server_id, request_id))
body = json.loads(body)
self.validate_response(schema.show_instance_action, resp, body)
return rest_client.ResponseBody(resp, body)
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#forceDelete
"""
return self.action(server_id, 'forceDelete', **kwargs)
def restore_soft_deleted_server(self, server_id, **kwargs):
"""Restore a soft-deleted server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#restore
"""
return self.action(server_id, 'restore', **kwargs)
def reset_network(self, server_id, **kwargs):
"""Reset the Network of a server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#resetNetwork
"""
return self.action(server_id, 'resetNetwork', **kwargs)
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#injectNetworkInfo
"""
return self.action(server_id, 'injectNetworkInfo', **kwargs)
def get_vnc_console(self, server_id, **kwargs):
"""Get URL of VNC console.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#getVNCConsole
"""
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
def add_fixed_ip(self, server_id, **kwargs):
"""Add a fixed IP to server instance.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#addFixedIp
"""
return self.action(server_id, 'addFixedIp', **kwargs)
def remove_fixed_ip(self, server_id, **kwargs):
"""Remove input fixed IP from input server instance.
Available params: http://developer.openstack.org/
api-ref-compute-v2.1.html#removeFixedIp
"""
return self.action(server_id, 'removeFixedIp', **kwargs)
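# Hedged usage sketch (the auth provider and ID values are placeholders, not part of this module):
# how the keyword remapping documented in create_server() plays out on the request body.
#
#     client = ServersClient(auth_provider, 'compute', 'RegionOne')
#     client.create_server(name='vm-1',
#                          imageRef='<image-uuid>',
#                          flavorRef='<flavor-id>',
#                          disk_config='AUTO',                   # sent as OS-DCF:diskConfig
#                          scheduler_hints={'group': '<uuid>'})  # sent as os:scheduler_hints,
#                                                                # a sibling of the 'server' dict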
|
|
import datetime
from decimal import Decimal
from apps.mchanga.models import MpesaFundRaiser
from bluebottle.utils.utils import StatusDefinition
from bluebottle.bb_projects.models import BaseProject, ProjectPhase, BaseProjectPhaseLog
from django.db import models
from django.db.models import Q
from django.db.models.aggregates import Count, Sum
from django.dispatch import receiver
from django.utils.http import urlquote
from django.utils.translation import ugettext as _
from django.conf import settings
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from sorl.thumbnail import ImageField
from django.template.defaultfilters import slugify
from django.utils import timezone
from .mails import mail_project_funded_internal
from .signals import project_funded
class ProjectPhaseLog(BaseProjectPhaseLog):
pass
class ProjectManager(models.Manager):
def search(self, query):
qs = super(ProjectManager, self).get_query_set()
# Apply filters
status = query.getlist(u'status[]', None)
if status:
qs = qs.filter(status_id__in=status)
else:
status = query.get('status', None)
if status:
qs = qs.filter(status_id=status)
country = query.get('country', None)
if country:
qs = qs.filter(country=country)
theme = query.get('theme', None)
if theme:
qs = qs.filter(theme_id=theme)
text = query.get('text', None)
if text:
qs = qs.filter(Q(title__icontains=text) |
Q(pitch__icontains=text) |
Q(description__icontains=text))
return self._ordering(query.get('ordering', None), qs, status)
def _ordering(self, ordering, queryset, status):
if ordering == 'amount_asked':
queryset = queryset.order_by('amount_asked')
elif ordering == 'deadline':
queryset = queryset.order_by('deadline')
elif ordering == 'amount_needed':
queryset = queryset.order_by('amount_needed')
queryset = queryset.filter(amount_needed__gt=0)
elif ordering == 'newest':
queryset = queryset.order_by('-campaign_started')
queryset = queryset.filter(amount_needed__gt=0)
elif ordering == 'popularity':
queryset = queryset.order_by('-popularity')
if status == 5:
queryset = queryset.filter(amount_needed__gt=0)
elif ordering:
queryset = queryset.order_by(ordering)
return queryset
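# Hedged usage sketch (the query string values are illustrative): search() expects a QueryDict-like
# object, so multiple statuses arrive through the "status[]" key and ordering through "ordering".
#
#     from django.http import QueryDict
#     query = QueryDict('status[]=5&status[]=6&theme=2&text=water&ordering=popularity')
#     projects = Project.objects.search(query)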
class Project(BaseProject):
partner_organization = models.ForeignKey('projects.PartnerOrganization', null=True, blank=True)
latitude = models.DecimalField(
_('latitude'), max_digits=21, decimal_places=18, null=True, blank=True)
longitude = models.DecimalField(
_('longitude'), max_digits=21, decimal_places=18, null=True, blank=True)
reach = models.PositiveIntegerField(
_('Reach'), help_text=_('How many people do you expect to reach?'),
blank=True, null=True)
video_url = models.URLField(
_('video'), max_length=100, blank=True, null=True, default='',
help_text=_("Do you have a video pitch or a short movie that "
"explains your project? Cool! We can't wait to see it! "
"You can paste the link to YouTube or Vimeo video here"))
popularity = models.FloatField(null=False, default=0)
is_campaign = models.BooleanField(default=False, help_text=_("Project is part of a campaign and gets special promotion."))
skip_monthly = models.BooleanField(_("Skip monthly"),
help_text=_("Skip this project when running monthly donations"),
default=False)
allow_overfunding = models.BooleanField(default=True)
story = models.TextField(_("story"), help_text=_("This is the help text for the story field"), blank=True,
null=True)
# TODO: Remove these fields?
effects = models.TextField(_("effects"), help_text=_("What will be the Impact? How will your Smart Idea change the lives of people?"), blank=True, null=True)
for_who = models.TextField(_("for who"), help_text=_("Describe your target group"), blank=True, null=True)
future = models.TextField(_("future"), help_text=_("How will this project be self-sufficient and sustainable in the long term?"), blank=True, null=True)
date_submitted = models.DateTimeField(_('Campaign Submitted'), null=True, blank=True)
campaign_started = models.DateTimeField(_('Campaign Started'), null=True, blank=True)
campaign_ended = models.DateTimeField(_('Campaign Ended'), null=True, blank=True)
campaign_funded = models.DateTimeField(_('Campaign Funded'), null=True, blank=True)
mchanga_account = models.CharField(_('M-Changa account'), help_text=_('Id or keyword for the M-Changa fundraiser'), max_length=100, null=True, blank=True)
@property
def mchanga_fundraiser(self):
"""
        Return an M-Changa fundraiser, if there is one.
"""
if self.mchanga_account:
frs = MpesaFundRaiser.objects.filter(account=self.mchanga_account).all()
if len(frs):
return frs[0]
return None
objects = ProjectManager()
def __unicode__(self):
if self.title:
return self.title
return self.slug
def update_popularity(self, save=True):
from bluebottle.donations.models import Donation
last_month = timezone.now() - timezone.timedelta(days=30)
donations = Donation.objects.filter(order__status__in=[StatusDefinition.PENDING, StatusDefinition.SUCCESS])
donations = donations.filter(created__gte=last_month)
donations = donations.exclude(order__order_type='recurring')
# For all projects.
total_recent_donors = len(donations)
total_recent_donations = donations.aggregate(sum=Sum('amount'))['sum']
# For this project
donations = donations.filter(project=self)
recent_donors = len(donations)
recent_donations = donations.aggregate(sum=Sum('amount'))['sum']
if recent_donors and recent_donations:
self.popularity = 50 * (float(recent_donors) / float(total_recent_donors)) + 50 * (float(recent_donations) / float(total_recent_donations))
else:
self.popularity = 0
if save:
self.save()
def update_status_after_donation(self):
if not self.campaign_funded and not self.campaign_ended and \
self.status not in ProjectPhase.objects.filter(Q(slug="done-complete") |
Q(slug="done-incomplete") |
Q(slug="done-stopped")) and self.amount_needed <= 0:
self.campaign_funded = timezone.now()
if not self.allow_overfunding:
self.status = ProjectPhase.objects.get(slug="done-complete")
self.campaign_ended = self.campaign_funded
self.save()
def update_amounts(self, save=True):
""" Update amount based on paid and pending donations. """
self.amount_donated = self.get_money_total([StatusDefinition.PENDING, StatusDefinition.SUCCESS])
if self.mchanga_fundraiser:
kes = self.mchanga_fundraiser.current_amount
            # Convert KES to EUR with a fixed rate; a string literal keeps the Decimal exact.
            euro = Decimal(kes) / Decimal('114.651')
self.amount_donated += euro
self.amount_needed = self.amount_asked - self.amount_donated
if self.amount_needed < 0:
# Should never be less than zero
self.amount_needed = 0
self.update_popularity(False)
if save:
self.save()
def get_money_total(self, status_in=None):
"""
Calculate the total (realtime) amount of money for donations,
optionally filtered by status.
"""
if self.amount_asked == 0:
# No money asked, return 0
return 0
donations = self.donation_set.all()
if status_in:
donations = donations.filter(order__status__in=status_in)
total = donations.aggregate(sum=Sum('amount'))
if not total['sum']:
# No donations, manually set amount
return 0
return total['sum']
@property
def is_realised(self):
return self.status in ProjectPhase.objects.filter(slug__in=['done-complete', 'done-incomplete', 'realised']).all()
def supporters_count(self, with_guests=True):
# TODO: Replace this with a proper Supporters API
# something like /projects/<slug>/donations
donations = self.donation_set
donations = donations.filter(order__status__in=[StatusDefinition.PENDING, StatusDefinition.SUCCESS])
donations = donations.filter(order__user__isnull=False)
donations = donations.annotate(Count('order__user'))
count = len(donations.all())
if with_guests:
donations = self.donation_set
donations = donations.filter(order__status__in=[StatusDefinition.PENDING, StatusDefinition.SUCCESS])
donations = donations.filter(order__user__isnull=True)
count += len(donations.all())
return count
@property
def task_count(self):
from bluebottle.utils.model_dispatcher import get_task_model
TASK_MODEL = get_task_model()
return len(self.task_set.filter(status=TASK_MODEL.TaskStatuses.open).all())
@property
def get_open_tasks(self):
from bluebottle.utils.model_dispatcher import get_task_model
TASK_MODEL = get_task_model()
return self.task_set.filter(status=TASK_MODEL.TaskStatuses.open).all()
@property
def date_funded(self):
return self.campaign_funded
@property
def amount_pending(self):
return self.get_money_total([StatusDefinition.PENDING])
@property
def amount_safe(self):
return self.get_money_total([StatusDefinition.SUCCESS])
@property
def donated_percentage(self):
if not self.amount_asked:
return 0
elif self.amount_donated > self.amount_asked:
return 100
return int(100 * self.amount_donated / self.amount_asked)
@models.permalink
def get_absolute_url(self):
""" Get the URL for the current project. """
return 'project-detail', (), {'slug': self.slug}
def get_absolute_frontend_url(self):
url = self.get_absolute_url()
# insert the hashbang, after the language string
bits = url.split('/')
url = "/".join(bits[:2] + ['#!'] + bits[2:])
return url
def get_meta_title(self, **kwargs):
return u"%(name_project)s | %(theme)s | %(country)s" % {
'name_project': self.title,
'theme': self.theme.name if self.theme else '',
'country': self.country.name if self.country else '',
}
def get_fb_title(self, **kwargs):
title = _(u"{name_project} in {country}").format(
name_project = self.title,
country = self.country.name if self.country else '',
)
return title
def get_tweet(self, **kwargs):
""" Build the tweet text for the meta data """
request = kwargs.get('request')
if request:
lang_code = request.LANGUAGE_CODE
else:
lang_code = 'en'
twitter_handle = settings.TWITTER_HANDLES.get(lang_code, settings.DEFAULT_TWITTER_HANDLE)
title = urlquote(self.get_fb_title())
# {URL} is replaced in Ember to fill in the page url, avoiding the
# need to provide front-end urls in our Django code.
tweet = _(u"{title} {{URL}} via @{twitter_handle}").format(
title=title, twitter_handle=twitter_handle
)
return tweet
class Meta(BaseProject.Meta):
ordering = ['title']
default_serializer = 'apps.projects.serializers.ProjectSerializer'
preview_serializer = 'apps.projects.serializers.ProjectPreviewSerializer'
manage_serializer = 'apps.projects.serializers.ManageProjectSerializer'
def save(self, *args, **kwargs):
        if not self.slug:
            base_slug = slugify(self.title)
            original_slug = base_slug
            counter = 2
            qs = Project.objects
            while qs.filter(slug=original_slug).exists():
                # Suffix the base slug so collisions yield 'slug-2', 'slug-3', ...
                # rather than compounding into 'slug-2-3'.
                original_slug = '%s-%d' % (base_slug, counter)
                counter += 1
            self.slug = original_slug
#There are 9 ProjectPhase objects: 1. Plan - New, 2. Plan - Submitted, 3. Plan - Needs Work, 4. Plan - Rejected,
#5. Campaign, 6. Stopped, 7. Done - Complete, 8. Done - Incomplete, 9. Done - Stopped.
if not self.status:
self.status = ProjectPhase.objects.get(slug="plan-new")
#If the project status is moved to New or Needs Work, clear the date_submitted field
if self.status in ProjectPhase.objects.filter(Q(slug="plan-new")|Q(slug="plan-needs-work")):
self.date_submitted = None
#Set the submitted date
if self.status == ProjectPhase.objects.get(slug="plan-submitted") and not self.date_submitted:
self.date_submitted = timezone.now()
#Set the campaign started date
if self.status == ProjectPhase.objects.get(slug="campaign") and not self.campaign_started:
self.campaign_started = timezone.now()
#Set a default deadline of 30 days
if not self.deadline:
self.deadline = timezone.now() + datetime.timedelta(days=30)
#Project is not ended, complete, funded or stopped and its deadline has expired.
if not self.campaign_ended and self.status not in ProjectPhase.objects.filter(Q(slug="done-complete") |
Q(slug="done-incomplete") |
Q(slug="done-stopped")) and self.deadline < timezone.now():
self.status = ProjectPhase.objects.get(slug="done-incomplete")
self.campaign_ended = self.deadline
if self.status in ProjectPhase.objects.filter(Q(slug="done-complete") |
Q(slug="done-incomplete") |
Q(slug="done-stopped")) and not self.campaign_ended:
self.campaign_ended = timezone.now()
if self.amount_asked:
self.update_amounts(False)
super(Project, self).save(*args, **kwargs)
class ProjectBudgetLine(models.Model):
"""
BudgetLine: Entries to the Project Budget sheet.
This is the budget for the amount asked from this
website.
"""
project = models.ForeignKey(settings.PROJECTS_PROJECT_MODEL)
description = models.CharField(_('description'), max_length=255, default='')
currency = models.CharField(max_length=3, default='EUR')
amount = models.PositiveIntegerField(_('amount (in cents)'))
created = CreationDateTimeField()
updated = ModificationDateTimeField()
class Meta:
verbose_name = _('budget line')
verbose_name_plural = _('budget lines')
def __unicode__(self):
return u'{0} - {1}'.format(self.description, self.amount / 100.0)
class PartnerOrganization(models.Model):
"""
Some projects are run in cooperation with a partner
organization like EarthCharter & MacroMicro
"""
name = models.CharField(_("name"), max_length=255, unique=True)
slug = models.SlugField(_("slug"), max_length=100, unique=True)
description = models.TextField(_("description"))
image = ImageField(_("image"), max_length=255, blank=True, null=True, upload_to='partner_images/', help_text=_("Main partner picture"))
@property
def projects(self):
return self.project_set.order_by('-favorite', '-popularity').filter(status__in=[ProjectPhase.objects.get(slug="campaign"),
ProjectPhase.objects.get(slug="done-complete"),
ProjectPhase.objects.get(slug="done-incomplete")])
class Meta:
db_table = 'projects_partnerorganization'
verbose_name = _("partner organization")
verbose_name_plural = _("partner organizations")
def __unicode__(self):
if self.name:
return self.name
return self.slug
@receiver(project_funded, weak=False, sender=Project, dispatch_uid="email-project-team-project-funded")
def email_project_team_project_funded(sender, instance, first_time_funded, **kwargs):
mail_project_funded_internal(instance)
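# Hedged worked example (illustrative helper, not part of the original models
# module): Project.update_popularity() above weights a project's share of
# recent donors and its share of recent donation volume equally, 50 points each.
def _example_popularity(recent_donors, total_recent_donors,
                        recent_donations, total_recent_donations):
    """Mirror the update_popularity() arithmetic on plain numbers."""
    if not (recent_donors and recent_donations):
        return 0.0
    return (50 * (float(recent_donors) / float(total_recent_donors)) +
            50 * (float(recent_donations) / float(total_recent_donations)))

# For example, 5 of 100 recent donors and EUR 200 of EUR 1000 recently donated:
# _example_popularity(5, 100, 200, 1000) == 50 * 0.05 + 50 * 0.2 == 12.5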
|
|
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, BrokenUnicodeMethod, Department, Event, Model1, Model2, Model3,
NonAutoPK, Party, Worker,
)
class ModelTests(TestCase):
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as Unicode
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
self.assertIs(type(a.misc_data), six.text_type)
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
# TextFields can hold more than 4000 bytes also when they are
# less than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
# when we have usecs values stored on the database
#
# It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeField supports usecs, but DateTimeField.to_python
# didn't recognize them. (Note that
# Model._get_next_or_previous_by_FIELD coerces values to strings)
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(six.text_type(w), "Full-time")
def test_broken_unicode(self):
# Models with broken unicode methods should still have a printable repr
b = BrokenUnicodeMethod.objects.create(name="Jerry")
self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
        # Saving and updating with timezone-aware datetime Python objects.
# Regression test for #10443.
# The idea is that all these creations and saving should work without
# crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
with self.assertRaises(ValidationError):
again.validate_unique()
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
Ensures that you can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
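# Hedged illustration (not part of the original test file): get_fixed_timezone()
# takes an offset in minutes, so the tzinfo used in test_timezones above is
# UTC+10:00.
def _example_fixed_timezone_offset():
    """Show the UTC offset of the tzinfo used in test_timezones."""
    tz = get_fixed_timezone(600)
    return datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tz).utcoffset()  # 10 hours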
|
|
# Copyright (c) 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron_lib.agent import topics
from neutron_lib import context
from oslo_utils import uuidutils
from oslo_versionedobjects import fields as obj_fields
import testtools
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.callbacks import version_manager
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects import base as objects_base
from neutron.objects import common_types
from neutron.tests import base
from neutron.tests.unit.objects import test_base as objects_test_base
TEST_EVENT = 'test_event'
TEST_VERSION = '1.0'
def _create_test_dict(uuid=None):
return {'id': uuid or uuidutils.generate_uuid(),
'field': 'foo'}
def _create_test_resource(context=None, resource_cls=None):
resource_cls = resource_cls or FakeResource
resource_dict = _create_test_dict()
resource = resource_cls(context, **resource_dict)
resource.obj_reset_changes()
return resource
class BaseFakeResource(objects_base.NeutronObject):
@classmethod
def get_objects(cls, context, **kwargs):
return list()
class FakeResource(BaseFakeResource):
VERSION = TEST_VERSION
fields = {
'id': common_types.UUIDField(),
'field': obj_fields.StringField()
}
class FakeResource2(BaseFakeResource):
VERSION = TEST_VERSION
fields = {
'id': common_types.UUIDField(),
'field': obj_fields.StringField()
}
class ResourcesRpcBaseTestCase(base.BaseTestCase):
def setUp(self):
super(ResourcesRpcBaseTestCase, self).setUp()
self.obj_registry = self.useFixture(
objects_test_base.NeutronObjectRegistryFixture())
self.context = context.get_admin_context()
mock.patch.object(resources_rpc.resources,
'is_valid_resource_type').start()
mock.patch.object(resources_rpc.resources, 'get_resource_cls',
side_effect=self._get_resource_cls).start()
self.resource_objs = [_create_test_resource(self.context)
for _ in range(2)]
self.resource_objs2 = [_create_test_resource(self.context,
FakeResource2)
for _ in range(2)]
@staticmethod
def _get_resource_cls(resource_type):
return {FakeResource.obj_name(): FakeResource,
FakeResource2.obj_name(): FakeResource2}.get(resource_type)
class _ValidateResourceTypeTestCase(base.BaseTestCase):
def setUp(self):
super(_ValidateResourceTypeTestCase, self).setUp()
self.is_valid_mock = mock.patch.object(
resources_rpc.resources, 'is_valid_resource_type').start()
def test_valid_type(self):
self.is_valid_mock.return_value = True
resources_rpc._validate_resource_type('foo')
def test_invalid_type(self):
self.is_valid_mock.return_value = False
with testtools.ExpectedException(
resources_rpc.InvalidResourceTypeClass):
resources_rpc._validate_resource_type('foo')
class _ResourceTypeVersionedTopicTestCase(base.BaseTestCase):
@mock.patch.object(resources_rpc, '_validate_resource_type')
def test_resource_type_versioned_topic(self, validate_mock):
obj_name = FakeResource.obj_name()
expected = topics.RESOURCE_TOPIC_PATTERN % {
'resource_type': 'FakeResource', 'version': '1.0'}
with mock.patch.object(resources_rpc.resources, 'get_resource_cls',
return_value=FakeResource):
observed = resources_rpc.resource_type_versioned_topic(obj_name)
self.assertEqual(expected, observed)
class ResourcesPullRpcApiTestCase(ResourcesRpcBaseTestCase):
def setUp(self):
super(ResourcesPullRpcApiTestCase, self).setUp()
self.rpc = resources_rpc.ResourcesPullRpcApi()
mock.patch.object(self.rpc, 'client').start()
self.cctxt_mock = self.rpc.client.prepare.return_value
def test_is_singleton(self):
self.assertIs(self.rpc, resources_rpc.ResourcesPullRpcApi())
def test_pull(self):
self.obj_registry.register(FakeResource)
expected_obj = _create_test_resource(self.context)
resource_id = expected_obj.id
self.cctxt_mock.call.return_value = expected_obj.obj_to_primitive()
result = self.rpc.pull(
self.context, FakeResource.obj_name(), resource_id)
self.cctxt_mock.call.assert_called_once_with(
self.context, 'pull', resource_type='FakeResource',
version=TEST_VERSION, resource_id=resource_id)
self.assertEqual(expected_obj, result)
def test_bulk_pull(self):
self.obj_registry.register(FakeResource)
expected_objs = [_create_test_resource(self.context),
_create_test_resource(self.context)]
self.cctxt_mock.call.return_value = [
e.obj_to_primitive() for e in expected_objs]
filter_kwargs = {'a': 'b', 'c': 'd'}
result = self.rpc.bulk_pull(
self.context, FakeResource.obj_name(),
filter_kwargs=filter_kwargs)
self.cctxt_mock.call.assert_called_once_with(
self.context, 'bulk_pull', resource_type='FakeResource',
version=TEST_VERSION, filter_kwargs=filter_kwargs)
self.assertEqual(expected_objs, result)
def test_pull_resource_not_found(self):
resource_dict = _create_test_dict()
resource_id = resource_dict['id']
self.cctxt_mock.call.return_value = None
with testtools.ExpectedException(resources_rpc.ResourceNotFound):
self.rpc.pull(self.context, FakeResource.obj_name(),
resource_id)
class ResourcesPushToServerRpcCallbackTestCase(ResourcesRpcBaseTestCase):
def test_report_versions(self):
callbacks = resources_rpc.ResourcesPushToServerRpcCallback()
with mock.patch('neutron.api.rpc.callbacks.version_manager'
'.update_versions') as update_versions:
version_map = {'A': '1.0'}
callbacks.report_agent_resource_versions(context=mock.ANY,
agent_type='DHCP Agent',
agent_host='fake-host',
version_map=version_map)
update_versions.assert_called_once_with(mock.ANY,
version_map)
class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase):
def setUp(self):
super(ResourcesPullRpcCallbackTestCase, self).setUp()
self.obj_registry.register(FakeResource)
self.callbacks = resources_rpc.ResourcesPullRpcCallback()
self.resource_obj = _create_test_resource(self.context)
def test_pull(self):
resource_dict = _create_test_dict(uuid=self.resource_obj.id)
with mock.patch.object(
resources_rpc.prod_registry, 'pull',
return_value=self.resource_obj) as registry_mock:
primitive = self.callbacks.pull(
self.context, resource_type=FakeResource.obj_name(),
version=TEST_VERSION,
resource_id=self.resource_obj.id)
registry_mock.assert_called_once_with(
'FakeResource', self.resource_obj.id, context=self.context)
self.assertEqual(resource_dict,
primitive['versioned_object.data'])
self.assertEqual(self.resource_obj.obj_to_primitive(), primitive)
def test_bulk_pull(self):
r1 = self.resource_obj
r2 = _create_test_resource(self.context)
@classmethod
def get_objs(*args, **kwargs):
if 'id' not in kwargs:
return [r1, r2]
return [r for r in [r1, r2] if r.id == kwargs['id']]
# the bulk interface currently retrieves directly from the registry
with mock.patch.object(FakeResource, 'get_objects', new=get_objs):
objs = self.callbacks.bulk_pull(
self.context, resource_type=FakeResource.obj_name(),
version=TEST_VERSION)
self.assertItemsEqual([r1.obj_to_primitive(),
r2.obj_to_primitive()],
objs)
objs = self.callbacks.bulk_pull(
self.context, resource_type=FakeResource.obj_name(),
version=TEST_VERSION, filter_kwargs={'id': r1.id})
self.assertEqual([r1.obj_to_primitive()], objs)
@mock.patch.object(FakeResource, 'obj_to_primitive')
def test_pull_backports_to_older_version(self, to_prim_mock):
with mock.patch.object(resources_rpc.prod_registry, 'pull',
return_value=self.resource_obj):
self.callbacks.pull(
self.context, resource_type=FakeResource.obj_name(),
version='0.9', # less than initial version 1.0
resource_id=self.resource_obj.id)
to_prim_mock.assert_called_with(target_version='0.9')
class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase):
"""Tests the neutron server side of the RPC interface."""
def setUp(self):
super(ResourcesPushRpcApiTestCase, self).setUp()
mock.patch.object(resources_rpc.n_rpc, 'get_client').start()
self.rpc = resources_rpc.ResourcesPushRpcApi()
self.cctxt_mock = self.rpc.client.prepare.return_value
mock.patch.object(version_manager, 'get_resource_versions',
return_value=set([TEST_VERSION])).start()
def test__prepare_object_fanout_context(self):
expected_topic = topics.RESOURCE_TOPIC_PATTERN % {
'resource_type': resources.get_resource_type(
self.resource_objs[0]),
'version': TEST_VERSION}
observed = self.rpc._prepare_object_fanout_context(
self.resource_objs[0], self.resource_objs[0].VERSION, '1.0')
self.rpc.client.prepare.assert_called_once_with(
fanout=True, topic=expected_topic, version='1.0')
self.assertEqual(self.cctxt_mock, observed)
def test_push_single_type(self):
self.rpc.push(
self.context, self.resource_objs, TEST_EVENT)
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'push',
resource_list=[resource.obj_to_primitive()
for resource in self.resource_objs],
event_type=TEST_EVENT)
def test_push_mixed(self):
self.rpc.push(
self.context, self.resource_objs + self.resource_objs2,
event_type=TEST_EVENT)
self.cctxt_mock.cast.assert_any_call(
self.context, 'push',
resource_list=[resource.obj_to_primitive()
for resource in self.resource_objs],
event_type=TEST_EVENT)
self.cctxt_mock.cast.assert_any_call(
self.context, 'push',
resource_list=[resource.obj_to_primitive()
for resource in self.resource_objs2],
event_type=TEST_EVENT)
class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase):
"""Tests the agent-side of the RPC interface."""
def setUp(self):
super(ResourcesPushRpcCallbackTestCase, self).setUp()
self.callbacks = resources_rpc.ResourcesPushRpcCallback()
@mock.patch.object(resources_rpc.cons_registry, 'push')
def test_push(self, reg_push_mock):
self.obj_registry.register(FakeResource)
self.callbacks.push(self.context,
resource_list=[resource.obj_to_primitive()
for resource in self.resource_objs],
event_type=TEST_EVENT)
reg_push_mock.assert_called_once_with(self.context,
self.resource_objs[0].obj_name(),
self.resource_objs,
TEST_EVENT)
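# Hedged illustration (not part of the original tests): the pull/push RPC
# interfaces exercised above exchange oslo.versionedobjects primitives. A round
# trip for the FakeResource class defined above looks roughly like this,
# assuming FakeResource has been registered as in the test setUp methods.
def _example_primitive_round_trip(ctx):
    """Serialize a FakeResource as it would travel over RPC, then rebuild it."""
    resource = _create_test_resource(ctx)
    primitive = resource.obj_to_primitive()            # wire format (dict)
    restored = FakeResource.obj_from_primitive(primitive)
    return resource, restored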
|
|
# -*- coding: utf-8 -*-
import numpy
import pvl
from six import string_types
from six.moves import range
from .specialpixels import SPECIAL_PIXELS
class CubeFile(object):
"""A Isis Cube file reader."""
PIXEL_TYPES = {
'UnsignedByte': numpy.dtype('uint8'),
'SignedByte': numpy.dtype('int8'),
'UnsignedWord': numpy.dtype('uint16'),
'SignedWord': numpy.dtype('int16'),
'UnsignedInteger': numpy.dtype('uint32'),
'SignedInteger': numpy.dtype('int32'),
'Real': numpy.dtype('float32'),
'Double': numpy.dtype('float64')
}
BYTE_ORDERS = {
'NoByteOrder': '=', # system
'Lsb': '<', # little-endian
'Msb': '>' # big-endian
}
SPECIAL_PIXELS = SPECIAL_PIXELS
@classmethod
def open(cls, filename):
"""Read an Isis Cube file from disk.
:param filename: name of file to read as an isis file
"""
with open(filename, 'rb') as fp:
return cls(fp, filename)
def __init__(self, stream_or_fname, filename=None):
"""Create an Isis Cube file.
        :param stream_or_fname: file object or filename to read as an Isis cube file
:param filename: an optional filename to attach to the object
"""
if isinstance(stream_or_fname, string_types):
self.filename = stream_or_fname
stream = open(stream_or_fname, 'rb')
else:
#: The filename if given, otherwise none.
self.filename = filename
stream = stream_or_fname
#: The parsed label header in dictionary form.
self.label = self._parse_label(stream)
#: A numpy array representing the image data.
self.data = self._parse_data(stream)
def apply_scaling(self, copy=True):
"""Scale pixel values to there true DN.
:param copy: whether to apply the scalling to a copy of the pixel data
and leave the orginial unaffected
:returns: a scalled version of the pixel data
"""
if copy:
return self.multiplier * self.data + self.base
if self.multiplier != 1:
self.data *= self.multiplier
if self.base != 0:
self.data += self.base
return self.data
def apply_numpy_specials(self, copy=True):
"""Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
:param copy: whether to apply the new special values to a copy of the
            pixel data and leave the original unaffected
:returns: a numpy array with special values converted to numpy's nan,
inf and -inf
"""
if copy:
data = self.data.astype(numpy.float64)
elif self.data.dtype != numpy.float64:
data = self.data = self.data.astype(numpy.float64)
else:
data = self.data
data[data == self.specials['Null']] = numpy.nan
with numpy.errstate(invalid='ignore'):
            # Comparing against numpy.nan always yields False, so the boolean
            # indexing below leaves the NaN entries untouched, which is what we want.
data[data < self.specials['Min']] = numpy.NINF
data[data > self.specials['Max']] = numpy.inf
return data
def specials_mask(self):
"""Create a pixel map for special pixels.
:returns: an array where the value is `False` if the pixel is special
and `True` otherwise
"""
mask = self.data >= self.specials['Min']
mask &= self.data <= self.specials['Max']
return mask
def get_image_array(self):
"""Create an array for use in making an image.
Creates a linear stretch of the image and scales it to between `0` and
`255`. `Null`, `Lis` and `Lrs` pixels are set to `0`. `His` and `Hrs`
pixels are set to `255`.
Usage::
from pysis import CubeFile
from PIL import Image
# Read in the image and create the image data
image = CubeFile.open('test.cub')
data = image.get_image_array()
# Save the first band to a new file
Image.fromarray(data[0]).save('test.png')
:returns:
A uint8 array of pixel values.
"""
specials_mask = self.specials_mask()
data = self.data.copy()
data[specials_mask] -= data[specials_mask].min()
data[specials_mask] *= 255 / data[specials_mask].max()
data[data == self.specials['His']] = 255
data[data == self.specials['Hrs']] = 255
return data.astype(numpy.uint8)
@property
def bands(self):
"""Number of image bands."""
return self.label['IsisCube']['Core']['Dimensions']['Bands']
@property
def lines(self):
"""Number of lines per band."""
return self.label['IsisCube']['Core']['Dimensions']['Lines']
@property
def samples(self):
"""Number of samples per line."""
return self.label['IsisCube']['Core']['Dimensions']['Samples']
@property
def tile_lines(self):
"""Number of lines per tile."""
if self.format != 'Tile':
return None
return self.label['IsisCube']['Core']['TileLines']
@property
def tile_samples(self):
"""Number of samples per tile."""
if self.format != 'Tile':
return None
return self.label['IsisCube']['Core']['TileSamples']
@property
def format(self):
return self.label['IsisCube']['Core']['Format']
@property
def dtype(self):
"""Pixel data type."""
pixels_group = self.label['IsisCube']['Core']['Pixels']
byte_order = self.BYTE_ORDERS[pixels_group['ByteOrder']]
pixel_type = self.PIXEL_TYPES[pixels_group['Type']]
return pixel_type.newbyteorder(byte_order)
@property
def specials(self):
pixel_type = self.label['IsisCube']['Core']['Pixels']['Type']
return self.SPECIAL_PIXELS[pixel_type]
@property
def base(self):
"""An additive factor by which to offset pixel DN."""
return self.label['IsisCube']['Core']['Pixels']['Base']
@property
def multiplier(self):
"""A multiplicative factor by which to scale pixel DN."""
return self.label['IsisCube']['Core']['Pixels']['Multiplier']
@property
def start_byte(self):
"""Index of the start of the image data (zero indexed)."""
return self.label['IsisCube']['Core']['StartByte'] - 1
@property
def shape(self):
"""Tuple of images bands, lines and samples."""
return (self.bands, self.lines, self.samples)
@property
def size(self):
"""Total number of pixels."""
return self.bands * self.lines * self.samples
def _parse_label(self, stream):
return pvl.load(stream)
def _parse_data(self, stream):
stream.seek(self.start_byte)
if self.format == 'BandSequential':
return self._parse_band_sequential_data(stream)
if self.format == 'Tile':
return self._parse_tile_data(stream)
        raise Exception('Unknown Isis Cube format (%s)' % self.format)
def _parse_band_sequential_data(self, stream):
data = numpy.fromfile(stream, self.dtype, self.size)
return data.reshape(self.shape)
def _parse_tile_data(self, stream):
tile_lines = self.tile_lines
tile_samples = self.tile_samples
tile_size = tile_lines * tile_samples
lines = range(0, self.lines, self.tile_lines)
samples = range(0, self.samples, self.tile_samples)
dtype = self.dtype
data = numpy.empty(self.shape, dtype=dtype)
for band in data:
for line in lines:
for sample in samples:
sample_end = sample + tile_samples
line_end = line + tile_lines
chunk = band[line:line_end, sample:sample_end]
tile = numpy.fromfile(stream, dtype, tile_size)
tile = tile.reshape((tile_lines, tile_samples))
chunk_lines, chunk_samples = chunk.shape
chunk[:] = tile[:chunk_lines, :chunk_samples]
return data
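# Hedged usage sketch (assumes a local file 'example.cub' exists; not part of
# the original module): the typical read/scale workflow with the class above.
if __name__ == '__main__':
    cube = CubeFile.open('example.cub')              # parse label and pixel data
    print(cube.shape)                                # (bands, lines, samples)
    dns = cube.apply_scaling()                       # multiplier * data + base, on a copy
    floats = cube.apply_numpy_specials(copy=True)    # Null -> nan, Lrs/Lis -> -inf, His/Hrs -> inf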
|
|
import os
import time
import math
import curses
import pickle
import operator
from textwrap import wrap
from functools import reduce
from colorclass import Color
from pueue.client.factories import command_factory
from pueue.daemon.files import get_descriptor_output
from terminaltables import AsciiTable
from terminaltables.terminal_io import terminal_size
def execute_status(args, root_dir=None):
"""Print the status of the daemon.
This function displays the current status of the daemon as well
as the whole queue and all available information about every entry
in the queue.
`terminaltables` is used to format and display the queue contents.
`colorclass` is used to color format the various items in the queue.
Args:
root_dir (string): The path to the root directory the daemon is running in.
"""
status = command_factory('status')({}, root_dir=root_dir)
# First rows, showing daemon status
if status['status'] == 'running':
status['status'] = Color('{autogreen}' + '{}'.format(status['status']) + '{/autogreen}')
elif status['status'] in ['paused']:
status['status'] = Color('{autoyellow}' + '{}'.format(status['status']) + '{/autoyellow}')
print('Daemon: {}\n'.format(status['status']))
# Handle queue data
data = status['data']
if isinstance(data, str):
print(data)
elif isinstance(data, dict):
        # Format incoming data to be compatible with terminaltables
formatted_data = []
formatted_data.append(['Index', 'Status', 'Code',
'Command', 'Path', 'Start', 'End'])
for key, entry in sorted(data.items(), key=operator.itemgetter(0)):
formatted_data.append(
[
'#{}'.format(key),
entry['status'],
'{}'.format(entry['returncode']),
entry['command'],
entry['path'],
entry['start'],
entry['end']
]
)
# Create AsciiTable instance and define style
table = AsciiTable(formatted_data)
table.outer_border = False
table.inner_column_border = False
terminal_width = terminal_size()
customWidth = table.column_widths
# If the text is wider than the actual terminal size, we
# compute a new size for the Command and Path column.
if (reduce(lambda a, b: a+b, table.column_widths) + 10) > terminal_width[0]:
# We have to subtract 14 because of table paddings
left_space = math.floor((terminal_width[0] - customWidth[0] - customWidth[1] - customWidth[2] - customWidth[5] - customWidth[6] - 14)/2)
if customWidth[3] < left_space:
customWidth[4] = 2*left_space - customWidth[3]
elif customWidth[4] < left_space:
customWidth[3] = 2*left_space - customWidth[4]
else:
customWidth[3] = left_space
customWidth[4] = left_space
# Format long strings to match the console width
for i, entry in enumerate(table.table_data):
for j, string in enumerate(entry):
max_width = customWidth[j]
wrapped_string = '\n'.join(wrap(string, max_width))
if j == 1:
                    if wrapped_string in ['done', 'running', 'paused']:
wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
elif wrapped_string in ['queued', 'stashed']:
wrapped_string = Color('{autoyellow}' + '{}'.format(wrapped_string) + '{/autoyellow}')
elif wrapped_string in ['failed', 'stopping', 'killing']:
wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
elif j == 2:
if wrapped_string == '0' and wrapped_string != 'Code':
wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
elif wrapped_string != '0' and wrapped_string != 'Code':
wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
table.table_data[i][j] = wrapped_string
print(table.table)
print('')
def execute_log(args, root_dir):
"""Print the current log file.
Args:
args['keys'] (int): If given, we only look at the specified processes.
root_dir (string): The path to the root directory the daemon is running in.
"""
# Print the logs of all specified processes
if args.get('keys'):
config_dir = os.path.join(root_dir, '.config/pueue')
queue_path = os.path.join(config_dir, 'queue')
if os.path.exists(queue_path):
queue_file = open(queue_path, 'rb')
try:
queue = pickle.load(queue_file)
except Exception:
print('Queue log file seems to be corrupted. Aborting.')
return
queue_file.close()
else:
print('There is no queue log file. Aborting.')
return
for key in args.get('keys'):
# Check if there is an entry with this key
if queue.get(key) and queue[key]['status'] in ['failed', 'done']:
entry = queue[key]
print('Log of entry: {}'.format(key))
print('Returncode: {}'.format(entry['returncode']))
print('Command: {}'.format(entry['command']))
print('Path: {}'.format(entry['path']))
print('Start: {}, End: {} \n'.format(entry['start'], entry['end']))
# Write STDERR
if len(entry['stderr']) > 0:
print(Color('{autored}Stderr output: {/autored}\n ') + entry['stderr'])
# Write STDOUT
if len(entry['stdout']) > 0:
print(Color('{autogreen}Stdout output: {/autogreen}\n ') + entry['stdout'])
else:
print('No finished process with key {}.'.format(key))
# Print the log of all processes
else:
log_path = os.path.join(root_dir, '.local/share/pueue/queue.log')
log_file = open(log_path, 'r')
print(log_file.read())
def execute_show(args, root_dir):
"""Print stderr and stdout of the current running process.
Args:
args['watch'] (bool): If True, we open a curses session and tail
the output live in the console.
root_dir (string): The path to the root directory the daemon is running in.
"""
key = None
if args.get('key'):
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
if key not in status['data'] or status['data'][key]['status'] != 'running':
print('No running process with this key, use `log` to show finished processes.')
return
# In case no key provided, we take the oldest running process
else:
status = command_factory('status')({}, root_dir=root_dir)
if isinstance(status['data'], str):
print(status['data'])
return
for k in sorted(status['data'].keys()):
if status['data'][k]['status'] == 'running':
key = k
break
if key is None:
print('No running process, use `log` to show finished processes.')
return
config_dir = os.path.join(root_dir, '.config/pueue')
    # Paths of the stdout/stderr files for the currently running process
stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))
stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))
stdoutDescriptor = open(stdoutFile, 'r')
stderrDescriptor = open(stderrFile, 'r')
running = True
# Continually print output with curses or just print once
if args['watch']:
# Initialize curses
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(2)
stdscr.keypad(True)
stdscr.refresh()
try:
# Update output every two seconds
while running:
stdscr.clear()
stdoutDescriptor.seek(0)
message = stdoutDescriptor.read()
stdscr.addstr(0, 0, message)
stdscr.refresh()
time.sleep(2)
except Exception:
# Curses cleanup
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
else:
print('Stdout output:\n')
stdoutDescriptor.seek(0)
print(get_descriptor_output(stdoutDescriptor, key))
print('\n\nStderr output:\n')
stderrDescriptor.seek(0)
print(get_descriptor_output(stderrDescriptor, key))
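# Hedged usage sketch (illustrative; in the real client these functions are
# dispatched from pueue's argument parser): print the daemon status for a
# daemon whose root directory is the current user's home directory.
if __name__ == '__main__':
    execute_status({}, root_dir=os.path.expanduser('~'))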
|