| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# coding=utf-8
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.views import password_reset_confirm
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.views.base import BaseAdminPlugin, BaseAdminView, csrf_protect_m
from xadmin.views.website import LoginView
class ResetPasswordSendView(BaseAdminView):
need_site_permission = False
password_reset_form = PasswordResetForm
password_reset_template = 'xadmin/auth/password_reset/form.html'
password_reset_done_template = 'xadmin/auth/password_reset/done.html'
password_reset_token_generator = default_token_generator
password_reset_from_email = None
password_reset_email_template = 'xadmin/auth/password_reset/email.html'
password_reset_subject_template = None
def get(self, request, *args, **kwargs):
context = super(ResetPasswordSendView, self).get_context()
context['form'] = kwargs.get('form', self.password_reset_form())
return TemplateResponse(request, self.password_reset_template, context,
current_app=self.admin_site.name)
@csrf_protect_m
def post(self, request, *args, **kwargs):
form = self.password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': self.password_reset_token_generator,
'email_template_name': self.password_reset_email_template,
'request': request,
'domain_override': request.get_host()
}
if self.password_reset_from_email:
opts['from_email'] = self.password_reset_from_email
if self.password_reset_subject_template:
opts['subject_template_name'] = self.password_reset_subject_template
form.save(**opts)
context = super(ResetPasswordSendView, self).get_context()
return TemplateResponse(request, self.password_reset_done_template, context,
current_app=self.admin_site.name)
else:
return self.get(request, form=form)
site.register_view(r'^xadmin/password_reset/$', ResetPasswordSendView, name='xadmin_password_reset')
class ResetLinkPlugin(BaseAdminPlugin):
def block_form_bottom(self, context, nodes):
reset_link = self.get_admin_url('xadmin_password_reset')
return '<div class="text-info" style="margin-top:15px;"><a href="%s"><i class="fa fa-question-sign"></i> %s</a></div>' % (reset_link, _('Forgotten your password or username?'))
site.register_plugin(ResetLinkPlugin, LoginView)
class ResetPasswordComfirmView(BaseAdminView):
need_site_permission = False
password_reset_set_form = SetPasswordForm
password_reset_confirm_template = 'xadmin/auth/password_reset/confirm.html'
password_reset_token_generator = default_token_generator
def do_view(self, request, uidb36, token, *args, **kwargs):
context = super(ResetPasswordComfirmView, self).get_context()
return password_reset_confirm(request, uidb36, token,
template_name=self.password_reset_confirm_template,
token_generator=self.password_reset_token_generator,
set_password_form=self.password_reset_set_form,
post_reset_redirect=self.get_admin_url('xadmin_password_reset_complete'),
current_app=self.admin_site.name, extra_context=context)
def get(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def post(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def get_media(self):
return super(ResetPasswordComfirmView, self).get_media() + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
site.register_view(r'^xadmin/password_reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
ResetPasswordComfirmView, name='xadmin_password_reset_confirm')
class ResetPasswordCompleteView(BaseAdminView):
need_site_permission = False
password_reset_complete_template = 'xadmin/auth/password_reset/complete.html'
def get(self, request, *args, **kwargs):
context = super(ResetPasswordCompleteView, self).get_context()
context['login_url'] = self.get_admin_url('index')
return TemplateResponse(request, self.password_reset_complete_template, context,
current_app=self.admin_site.name)
site.register_view(r'^xadmin/password_reset/complete/$', ResetPasswordCompleteView, name='xadmin_password_reset_complete')
| LennonChin/Django-Practices | MxOnline/extra_apps/xadmin/plugins/passwords.py | Python | apache-2.0 | 4,992 |
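The password-reset view above exposes its templates, token generator, and sender address as class attributes, so a project can customize the flow by subclassing it and registering the subclass. A minimal sketch, assuming the module path `xadmin.plugins.passwords` implied by the path column; the template path, from-address, and URL below are hypothetical placeholders:

```python
# Sketch: customize the password-reset form by overriding class attributes.
# The template path, from-address and URL below are placeholders.
from xadmin.sites import site
from xadmin.plugins.passwords import ResetPasswordSendView


class MyResetPasswordSendView(ResetPasswordSendView):
    password_reset_template = 'myapp/password_reset/form.html'
    password_reset_from_email = 'no-reply@example.com'


# Register under a separate URL so the stock view stays untouched.
site.register_view(r'^my_password_reset/$', MyResetPasswordSendView,
                   name='my_password_reset')
```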
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import atexit
import datetime
import json
import os
import threading
import time
import logging
from functools import wraps
import elasticsearch as es7
import elasticsearch6 as es6
import pytz
from elasticsearch import helpers as helpers7
from elasticsearch6 import helpers as helpers6
from .common import Config, INDEX_NAME, INDEX_TYPE, get_es_template
from . import fl_logging
class Handler(object):
def __init__(self, name):
self._name = name
def emit(self, name, value, tags=None, index_type='metrics'):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def get_name(self):
return self._name
def flush(self):
pass
class LoggingHandler(Handler):
def __init__(self):
super(LoggingHandler, self).__init__('logging')
def emit(self, name, value, tags=None, index_type='metrics'):
fl_logging.debug('[metrics] name[%s] value[%s] tags[%s]',
name, value, str(tags))
class ElasticSearchHandler(Handler):
"""
Emit documents to ElasticSearch
"""
def __init__(self, ip, port):
super(ElasticSearchHandler, self).__init__('elasticsearch')
self._es = es7.Elasticsearch([ip], port=port,
http_auth=(Config.ES_USERNAME,
Config.ES_PASSWORD))
self._helpers = helpers7
self._version = int(self._es.info()['version']['number'].split('.')[0])
# ES 6.8 has API differences compared to ES 7.6.
# These `put_template` calls are supposed to be done during deployment;
# they are kept here for old clients.
if self._version == 6:
self._es = es6.Elasticsearch([ip], port=port)
self._helpers = helpers6
for index_type, index_name in INDEX_NAME.items():
if not self._es.indices.exists_template(
'{}-template'.format(index_name)
):
self._create_template_and_index(index_type)
# suppress ES logger
logging.getLogger('elasticsearch').setLevel(logging.CRITICAL)
self._emit_batch = []
self._batch_size = Config.ES_BATCH_SIZE
self._lock = threading.RLock()
def emit(self, name, value, tags=None, index_type='metrics'):
assert index_type in INDEX_TYPE
if tags is None:
tags = {}
document = self._produce_document(name, value, tags, index_type)
if not Config.METRICS_TO_STDOUT:
# if filebeat not yet refurbished, directly emit to ES
action = {'_index': INDEX_NAME[index_type],
'_source': document}
if self._version == 6:
action['_type'] = '_doc'
with self._lock:
# emit when there are enough documents
self._emit_batch.append(action)
if len(self._emit_batch) >= self._batch_size:
self.flush()
else:
# if filebeat refurbished,
# print to std out and use filebeat to ship to ES
document['index_type__'] = index_type
print(json.dumps(document))
def flush(self):
emit_batch = []
with self._lock:
if self._emit_batch:
emit_batch = self._emit_batch
self._emit_batch = []
if emit_batch:
fl_logging.info('Emitting %d documents to ES', len(emit_batch))
self._helpers.bulk(self._es, emit_batch)
@staticmethod
def _produce_document(name, value, tags, index_type):
application_id = os.environ.get('APPLICATION_ID', '')
if application_id:
tags['application_id'] = str(application_id)
if index_type == 'metrics':
tags['process_time'] = datetime.datetime.now(tz=pytz.utc) \
.isoformat(timespec='microseconds')
document = {
"name": name,
"value": value,
"tags": tags
}
else:
document = {
"tags": tags
}
return document
def _create_template_and_index(self, index_type):
"""
Args:
index_type: ES index type.
Creates a template and an index on ES.
"""
assert index_type in INDEX_TYPE
self._es.indices.put_template(
name='{}-template'.format(INDEX_NAME[index_type]),
body=get_es_template(index_type, self._version)
)
try:
self._es.indices.create(index=INDEX_NAME[index_type])
return
# index may have been created by other jobs
except (es6.exceptions.RequestError, es7.exceptions.RequestError) as e:
# if due to other reasons, re-raise exception
if e.info['error']['type'] != 'resource_already_exists_exception':
raise e
class Metrics(object):
def __init__(self):
self.handlers = []
self._lock = threading.RLock()
self.handler_initialized = False
atexit.register(self.flush_handler)
def init_handlers(self):
with self._lock:
if self.handler_initialized:
return
logging_handler = LoggingHandler()
self.add_handler(logging_handler)
es_host = os.environ.get('ES_HOST', '')
es_port = os.environ.get('ES_PORT', '')
if es_host and es_port:
es_handler = ElasticSearchHandler(es_host, es_port)
self.add_handler(es_handler)
self.handler_initialized = True
def add_handler(self, hdlr):
"""
Add the specified handler to this logger.
"""
with self._lock:
if hdlr not in self.handlers:
self.handlers.append(hdlr)
def remove_handler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
with self._lock:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
def emit(self, name, value, tags=None, index_type='metrics'):
self.init_handlers()
if not self.handlers:
fl_logging.info('No handlers. Not emitting.')
return
for hdlr in self.handlers:
try:
hdlr.emit(name, value, tags, index_type)
except Exception as e: # pylint: disable=broad-except
fl_logging.warning('Handler [%s] emit failed. Error repr: [%s]',
hdlr.get_name(), repr(e))
def flush_handler(self):
for hdlr in self.handlers:
try:
hdlr.flush()
except Exception as e: # pylint: disable=broad-except
fl_logging.warning('Handler [%s] flush failed. '
'Some metrics might not be emitted. '
'Error repr: %s',
hdlr.get_name(), repr(e))
_metrics_client = Metrics()
def emit(name, value, tags=None, index_type='metrics'):
_metrics_client.emit(name, value, tags, index_type)
# Currently no actual differences among the methods below
emit_counter = emit
emit_store = emit
emit_timer = emit
def timer(func_name, tags=None):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
time_start = time.time()
result = func(*args, **kwargs)
time_end = time.time()
time_spend = time_end - time_start
emit(func_name, time_spend, tags)
return result
return wrapper
return decorator
| bytedance/fedlearner | fedlearner/common/metrics.py | Python | apache-2.0 | 8,599 |
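The metrics module above is consumed through its module-level helpers: handlers are initialized lazily on the first emit, and an `ElasticSearchHandler` is only attached when the `ES_HOST` and `ES_PORT` environment variables are set. A small usage sketch; the metric names and tag values are invented for illustration:

```python
# Illustrative only: the metric names and tags are placeholders.
from fedlearner.common import metrics

# emit_counter / emit_store / emit_timer are currently aliases of emit().
metrics.emit_counter('data_block_loaded', 1, tags={'role': 'leader'})
metrics.emit_store('queue_size', 42, tags={'role': 'leader'})


# Wrap a function so its wall-clock duration is emitted under the given name.
@metrics.timer('train_step_seconds', tags={'role': 'leader'})
def train_step():
    pass
```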
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neural_compressor.conf.config import Quantization_Conf
from neural_compressor.experimental import Quantization, common
from neural_compressor.experimental.common import Metric
from .metric import METRICS
class QuantizationINC(Quantization):
def __init__(self,
framework: str,
conf='',
approach='post_training_static_quant',
tuning_strategy='bayesian',
accuracy_criterion: dict = None,
timeout=0,
max_trials=1,
inputs=None,
outputs=None
):
"""
Create an Intel Neural Compressor Quantization object. To understand INC quantization,
please refer to https://github.com/intel/neural-compressor/blob/master/docs/Quantization.md.
:param framework: 'tensorflow', 'pytorch', 'pytorch_fx', 'pytorch_ipex', 'onnxrt_integer',
'onnxrt_qlinear' or 'mxnet'; allow new framework backend extension.
Default: 'pytorch_fx'. Consistent with Intel Neural Compressor
Quantization.
:param conf: A path to conf yaml file for quantization.
Default: None, using default config.
:param approach: 'post_training_static_quant', 'post_training_dynamic_quant',
'quant_aware_training'.
Default: 'post_training_static_quant'.
:param tuning_strategy: 'bayesian', 'basic', 'mse', 'sigopt'. Default: 'bayesian'.
:param accuracy_criterion: Tolerable accuracy drop.
accuracy_criterion = {'relative': 0.1, 'higher_is_better': True}
allows a relative accuracy loss of 10%. accuracy_criterion = {
'absolute': 0.99, 'higher_is_better': False} means the condition
accuracy < 0.99 must be satisfied.
:param timeout: Tuning timeout (seconds). Default: 0, which means early stop.
Combined with the max_trials field to decide when to exit.
:param max_trials: Max tune times. Default: 1.
Combine with timeout field to decide when to exit.
:param inputs: For tensorflow to specify names of inputs. e.g. inputs=['img',]
:param outputs: For tensorflow to specify names of outputs. e.g. outputs=['logits',]
"""
qconf = Quantization_Conf(conf)
cfg = qconf.usr_cfg
# Override default config
cfg.model.framework = framework
cfg.quantization.approach = approach
cfg.tuning.strategy.name = tuning_strategy
if accuracy_criterion:
cfg.tuning.accuracy_criterion = accuracy_criterion
cfg.tuning.exit_policy.timeout = timeout
cfg.tuning.exit_policy.max_trials = max_trials
cfg.model.inputs = inputs
cfg.model.outputs = outputs
super().__init__(qconf)
def post_training_quantize(self, model, calib_dataloader=None, val_dataloader=None,
metric=None):
self.check(calib_dataloader, val_dataloader, metric)
self.model = common.Model(model)
def func(data):
# TODO: only x, y are supported here for onnx quantization
import torch
x, y = zip(*data)
if isinstance(x[0], torch.Tensor):
x = torch.stack(x, dim=0).numpy()
if isinstance(y[0], torch.Tensor):
y = torch.stack(y, dim=0).numpy()
return x, y
if calib_dataloader:
if "pytorch" in self.cfg.model.framework or "tensorflow" in self.cfg.model.framework:
self.calib_dataloader = calib_dataloader
if "onnx" in self.cfg.model.framework:
import torch
assert isinstance(calib_dataloader, torch.utils.data.DataLoader), \
"Only torch dataloader is supported for onnx quantization."
# add a collate_fn to transform torch dataloader to a numpy dataloader
calib_dataloader.collate_fn = func
self.calib_dataloader = calib_dataloader
if val_dataloader:
if "pytorch" in self.cfg.model.framework or "tensorflow" in self.cfg.model.framework:
self.eval_dataloader = val_dataloader
if "onnx" in self.cfg.model.framework:
import torch
assert isinstance(val_dataloader, torch.utils.data.DataLoader), \
"Only torch dataloader is supported for onnx quantization."
# add a collate_fn to transform torch dataloader to a numpy dataloader
val_dataloader.collate_fn = func
self.eval_dataloader = val_dataloader
if metric:
framework = self.cfg.model.framework
if 'pytorch' in framework:
framework_metric = METRICS['pytorch']
elif 'onnx' in framework:
framework_metric = METRICS['onnx']
else:
framework_metric = METRICS[framework]
class MyMetric(framework_metric):
def __init__(self):
"""
This local class resolves a dumping issue in TensorFlow:
INC tries to dump the metric to YAML, which somehow causes an
unexpected error, so the metric assignment is moved into this
local class to avoid that.
"""
self.metric = metric
self.metric = Metric(
MyMetric,
name=f"{framework}_{type(metric).__name__}_"
f"{framework_metric.get_next_metric_id()}"
)
quantized = self()
# unset the collate_fn and set back to default_collate
# TODO: use users' original collate function
if "onnx" in self.cfg.model.framework:
from torch.utils.data.dataloader import default_collate
if calib_dataloader:
calib_dataloader.collate_fn = default_collate
if val_dataloader:
val_dataloader.collate_fn = default_collate
if quantized:
return quantized
else:
raise RuntimeError("Found no quantized model satisfying accuracy criterion.")
def check(self, calib_dataloader, val_dataloader, metric):
"""
Call before self.__call__() to check if the object is well-initialized
for quantization.
"""
if self.cfg.quantization.approach == 'post_training_static_quant':
assert calib_dataloader, \
"calib_calib_dataloader must not be None when approach is " \
"post-training static quantization."
if self.cfg.quantization.approach == 'post_training_dynamic_quant':
assert calib_dataloader is None, \
"calib_calib_dataloader must be None when approach is " \
"post-training dynamic quantization."
if metric and not val_dataloader:
raise RuntimeError("val_dataloader must be specified when metric is not None.")
| intel-analytics/BigDL | python/nano/src/bigdl/nano/quantization/neural_compressor/quantization.py | Python | apache-2.0 | 7,996 |
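Going by the constructor and `post_training_quantize` signatures above, a post-training static quantization run could look like the sketch below. The model, dataloaders, and metric are caller-provided placeholders, and the import path is inferred from the file's location, so treat this as an illustration rather than the project's documented API.

```python
# Sketch: `model`, `calib_loader`, `val_loader` and `my_metric` are assumed
# to be a torch.nn.Module, torch DataLoaders and a metric object supplied
# by the caller; they are not defined here.
from bigdl.nano.quantization.neural_compressor.quantization import QuantizationINC

quantizer = QuantizationINC(
    framework='pytorch_fx',
    approach='post_training_static_quant',
    tuning_strategy='bayesian',
    accuracy_criterion={'relative': 0.01, 'higher_is_better': True},
    timeout=0,
    max_trials=10,
)
quantized_model = quantizer.post_training_quantize(
    model, calib_dataloader=calib_loader,
    val_dataloader=val_loader, metric=my_metric)
```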
"""An HTTP proxy that supports IPv6 as well as the HTTP CONNECT method, among
other things."""
# Standard library imports
import socket
import thread
import select
__version__ = '0.1.0 Draft 1'
BUFFER_LENGTH = 8192
VERSION = 'Python Proxy/{}'.format(__version__)
HTTP_VERSION = 'HTTP/1.1'
class ConnectionHandler(object):
"""Handles connections between the HTTP client and HTTP server."""
def __init__(self, connection, _, timeout):
self.client = connection
self.client_buffer = ''
self.timeout = timeout
self.target = None
method, path, protocol = self.get_base_header()
if method == 'CONNECT':
self.method_connect(path)
else:
self.method_others(method, path, protocol)
def get_base_header(self):
"""Return a tuple of (method, path, protocol) from the recieved
message."""
while 1:
self.client_buffer += self.client.recv(BUFFER_LENGTH)
end = self.client_buffer.find('\n')
if end != -1:
break
print '{}'.format(self.client_buffer[:end])
data = (self.client_buffer[:end+1]).split()
self.client_buffer = self.client_buffer[end+1:]
return data
def method_connect(self, path):
"""Handle HTTP CONNECT messages."""
self._connect_target(path)
self.client.send('{http_version} 200 Connection established\n'
'Proxy-agent: {version}\n\n'.format(
http_version=HTTP_VERSION,
version=VERSION))
self.client_buffer = ''
self._read_write()
def method_others(self, method, path, protocol):
"""Handle all non-HTTP CONNECT messages."""
path = path[7:]
i = path.find('/')
host = path[:i]
path = path[i:]
self._connect_target(host)
self.target.send('{method} {path} {protocol}\n{client_buffer}'.format(
method=method,
path=path,
protocol=protocol,
client_buffer=self.client_buffer))
self.client_buffer = ''
self._read_write()
def _connect_target(self, host):
"""Create a connection to the HTTP server specified by *host*."""
i = host.find(':')
if i != -1:
port = int(host[i+1:])
host = host[:i]
else:
port = 80
(soc_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]
self.target = socket.socket(soc_family)
self.target.connect(address)
def _read_write(self):
"""Read data from client connection and forward to server
connection."""
time_out_max = self.timeout/3
socs = [self.client, self.target]
count = 0
while 1:
count += 1
(recv, _, error) = select.select(socs, [], socs, 3)
if error:
break
if recv:
for in_ in recv:
data = in_.recv(BUFFER_LENGTH)
if in_ is self.client:
out = self.target
else:
out = self.client
if data:
out.send(data)
count = 0
if count == time_out_max:
break
self.client.close()
self.target.close()
def start_server(host='localhost', port=8080, ipv_6=False, timeout=60,
handler=ConnectionHandler):
"""Start the HTTP proxy server."""
if ipv_6:
soc_type = socket.AF_INET6
else:
soc_type = socket.AF_INET
soc = socket.socket(soc_type)
soc.bind((host, port))
print 'Serving on {0}:{1}.'.format(host, port)
soc.listen(0)
while 1:
thread.start_new_thread(handler, soc.accept()+(timeout,))
if __name__ == '__main__':
start_server()
| jeffknupp/kickstarter_video_two | proxy.py | Python | apache-2.0 | 3,925 |
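The proxy above is Python 2 code (`thread` module, `print` statements), so it runs unmodified only under a Python 2 interpreter. Listening address, port, IPv6 support, and the idle timeout are all controlled through `start_server`'s keyword arguments; a minimal sketch, with an arbitrary port choice:

```python
# Python 2 sketch: listen on the IPv6 wildcard address, port 3128.
import proxy

proxy.start_server(host='::', port=3128, ipv_6=True, timeout=60)
```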
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import os
import multiprocessing as mp
import subprocess as sp
import tempfile
import shlex
import shutil
"""
Compress all netCDF files under the given directories.
compress_nc.py ~/exps/access/cm_1440x1080-1/archive/ ./
This will recursively search under those directories and compress every netCDF file found.
"""
def compress_netcdf_file(filename, compression_level=7):
"""
Use nccopy to compress a netcdf file.
"""
_, tmp = tempfile.mkstemp()
cmd = 'nccopy -d {} {} {}'.format(compression_level, filename, tmp)
print(cmd)
ret = sp.call(shlex.split(cmd))
assert(ret == 0)
# Put a file lock on 'filename'?
shutil.move(tmp, filename)
def find_netcdf_files(path):
"""
Return full path of all netcdf files under 'path'
"""
netcdf_files = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".nc"):
full_path = os.path.join(os.path.abspath(root), file)
if not os.path.islink(full_path):
netcdf_files.append(full_path)
return netcdf_files
def main():
parser = argparse.ArgumentParser()
parser.add_argument("dirs", nargs='+',
help="A list of directories to search for netcdf files.")
args = parser.parse_args()
all_netcdf_files = []
for d in args.dirs:
all_netcdf_files.extend(find_netcdf_files(d))
# Make sure there are no duplicates.
all_netcdf_files = list(set(all_netcdf_files))
pool = mp.Pool()
results = pool.map(compress_netcdf_file, all_netcdf_files)
pool.close()
pool.join()
if __name__ == "__main__":
sys.exit(main())
| CWSL/access-cm-tools | operations/compress_nc.py | Python | apache-2.0 | 1,765 |
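`pool.map` in `main()` above always invokes `compress_netcdf_file` with its default `compression_level=7`. If a different deflate level is wanted, one option (a sketch, not part of the original script; the directory path is a placeholder) is to bind the level with `functools.partial`:

```python
# Sketch: same parallel compression as main(), but with an explicit level.
import functools
import multiprocessing as mp

from compress_nc import compress_netcdf_file, find_netcdf_files

files = find_netcdf_files('/path/to/archive')  # placeholder directory
pool = mp.Pool()
pool.map(functools.partial(compress_netcdf_file, compression_level=4), files)
pool.close()
pool.join()
```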
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from posixpath import join as pjoin
import pytest
from pandas.util.testing import assert_frame_equal
import pandas as pd
from ibis.compat import unittest
from ibis.impala.compat import ImpylaError
from ibis.impala.tests.common import ImpalaE2E
from ibis.tests.util import assert_equal
import ibis
import ibis.util as util
def _tmp_name():
return 'tmp_partition_{0}'.format(util.guid())
class TestPartitioning(ImpalaE2E, unittest.TestCase):
@classmethod
def setUpClass(cls):
ImpalaE2E.setup_e2e(cls)
df = pd.DataFrame({'year': [2009, 2009, 2009, 2010, 2010, 2010],
'month': [1, 2, 3, 1, 2, 3],
'value': [1, 2, 3, 4, 5, 6]})
df = pd.concat([df] * 10, ignore_index=True)
df['id'] = df.index.values
cls.df = df
cls.db = cls.con.database(cls.tmp_db)
cls.pd_name = _tmp_name()
cls.db.create_table(cls.pd_name, df)
def test_is_partitioned(self):
schema = ibis.schema([('foo', 'string'),
('year', 'int32'),
('month', 'int16')])
name = _tmp_name()
self.db.create_table(name, schema=schema,
partition=['year', 'month'])
assert self.db.table(name).is_partitioned
def test_create_table_with_partition_column(self):
schema = ibis.schema([('year', 'int32'),
('month', 'int8'),
('day', 'int8'),
('value', 'double')])
name = _tmp_name()
self.con.create_table(name, schema=schema,
database=self.tmp_db,
partition=['year', 'month'])
self.temp_tables.append(name)
# the partition columns get put at the end of the table
ex_schema = ibis.schema([('day', 'int8'),
('value', 'double'),
('year', 'int32'),
('month', 'int8')])
table_schema = self.con.get_schema(name, database=self.tmp_db)
assert_equal(table_schema, ex_schema)
partition_schema = self.db.table(name).partition_schema()
expected = ibis.schema([('year', 'int32'),
('month', 'int8')])
assert_equal(partition_schema, expected)
def test_create_partitioned_separate_schema(self):
schema = ibis.schema([('day', 'int8'),
('value', 'double')])
part_schema = ibis.schema([('year', 'int32'),
('month', 'int8')])
name = _tmp_name()
self.con.create_table(name, schema=schema, partition=part_schema)
self.temp_tables.append(name)
# the partition columns get put at the end of the table
ex_schema = ibis.schema([('day', 'int8'),
('value', 'double'),
('year', 'int32'),
('month', 'int8')])
table_schema = self.con.get_schema(name)
assert_equal(table_schema, ex_schema)
partition_schema = self.con.table(name).partition_schema()
assert_equal(partition_schema, part_schema)
def test_unpartitioned_table_get_schema(self):
tname = 'functional_alltypes'
with self.assertRaises(ImpylaError):
self.con.table(tname).partition_schema()
def test_insert_select_partitioned_table(self):
pytest.skip('IMPALA-2750')
df = self.df
unpart_t = self.db.table(self.pd_name)
part_keys = ['year', 'month']
part_t = self._create_partitioned_table(unpart_t.schema(),
part_keys)
unique_keys = df[part_keys].drop_duplicates()
for i, (year, month) in enumerate(unique_keys.itertuples(index=False)):
select_stmt = unpart_t[(unpart_t.year == year) &
(unpart_t.month == month)]
# test both styles of insert
if i:
part = {'year': year, 'month': month}
else:
part = [year, month]
part_t.insert(select_stmt, partition=part)
self._verify_partitioned_table(part_t, df, unique_keys)
def test_insert_overwrite_partition(self):
pass
def test_dynamic_partitioning(self):
pass
def test_add_drop_partition(self):
pytest.skip('HIVE-12613')
schema = ibis.schema([('foo', 'string'),
('year', 'int32'),
('month', 'int16')])
name = _tmp_name()
self.db.create_table(name, schema=schema,
partition=['year', 'month'])
table = self.db.table(name)
part = {'year': 2007, 'month': 4}
path = '/tmp/tmp-{0}'.format(util.guid())
table.add_partition(part, location=path)
assert len(table.partitions()) == 2
table.drop_partition(part)
assert len(table.partitions()) == 1
def test_set_partition_location(self):
pass
def test_load_data_partition(self):
df = self.df
unpart_t = self.db.table(self.pd_name)
part_keys = ['year', 'month']
part_t = self._create_partitioned_table(unpart_t.schema(),
part_keys)
# trim the runtime of this test
df = df[df.month == 1].reset_index(drop=True)
unique_keys = df[part_keys].drop_duplicates()
hdfs_dir = pjoin(self.tmp_dir, 'load-data-partition')
df2 = df.drop(['year', 'month'], axis='columns')
csv_props = {
'serialization.format': ',',
'field.delim': ','
}
for i, (year, month) in enumerate(unique_keys.itertuples(index=False)):
chunk = df2[(df.year == year) & (df.month == month)]
chunk_path = pjoin(hdfs_dir, '{0}.csv'.format(i))
self.con.write_dataframe(chunk, chunk_path)
# test both styles of insert
if i:
part = {'year': year, 'month': month}
else:
part = [year, month]
part_t.add_partition(part)
part_t.alter_partition(part, format='text',
serde_properties=csv_props)
part_t.load_data(chunk_path, partition=part)
self.hdfs.rmdir(hdfs_dir)
self._verify_partitioned_table(part_t, df, unique_keys)
def _verify_partitioned_table(self, part_t, df, unique_keys):
result = (part_t.execute()
.sort_index(by='id')
.reset_index(drop=True)
[df.columns])
assert_frame_equal(result, df)
parts = part_t.partitions()
# allow for the total line
assert len(parts) == (len(unique_keys) + 1)
def _create_partitioned_table(self, schema, part_keys, location=None):
part_name = _tmp_name()
self.db.create_table(part_name,
schema=schema,
partition=part_keys)
self.temp_tables.append(part_name)
return self.db.table(part_name)
def test_drop_partition(self):
pass
def test_repartition_automated(self):
pass
| laserson/ibis | ibis/impala/tests/test_partition.py | Python | apache-2.0 | 7,977 |
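Outside the test harness, the pattern these tests exercise boils down to creating an Impala table with explicit partition columns and then managing its partitions. A sketch against the same era of the ibis Impala client used above; the connection parameters and table name are hypothetical:

```python
# Sketch using the older ibis Impala client exercised by the tests above;
# host, port and table name are placeholders.
import ibis

con = ibis.impala.connect(host='impala-host', port=21050)
schema = ibis.schema([('foo', 'string'),
                      ('year', 'int32'),
                      ('month', 'int16')])
con.create_table('events_partitioned', schema=schema,
                 partition=['year', 'month'])

table = con.table('events_partitioned')
table.add_partition({'year': 2007, 'month': 4})
assert table.is_partitioned
```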
import sys, csv, string
def generate_R_input(report_path, output_path):
confidence_values = []
report_file = open(report_path, 'r')
first_line = True
for line in report_file:
if first_line:
first_line = False
continue
line = line.strip().split(',')
if line[5] == 'Yes':
confidence_values.append(line[6])
elif line[5] == 'No':
confid = 1 - float(line[6])
confidence_values.append(confid)
output_file = open(output_path, 'w')
output_file.write('Confidence' + '\n')
for confid in confidence_values:
output_file.write(str(confid) + '\n')
output_file.close()
if __name__ == "__main__":
_report_path = sys.argv[1]
_output_path = sys.argv[2]
generate_R_input(_report_path, _output_path)
| emvecchi/mss | src/utils/crowdflower/retrieve_stats.py | Python | apache-2.0 | 925 |
from kubeflow.kubeflow.crud_backend import api, status
def pvc_status(pvc):
"""
Return the status of the PVC
"""
if pvc.metadata.deletion_timestamp is not None:
return status.create_status(status.STATUS_PHASE.TERMINATING,
"Deleting Volume...")
if pvc.status.phase == "Bound":
return status.create_status(status.STATUS_PHASE.READY, "Bound")
# The PVC is in Pending state, we check the Events to find out why
evs = api.v1_core.list_namespaced_event(
namespace=pvc.metadata.namespace,
field_selector=api.events_field_selector(
"PersistentVolumeClaim", pvc.metadata.name
),
).items
# If there are no events, then the PVC was just created
if len(evs) == 0:
return status.create_status(status.STATUS_PHASE.WAITING,
"Provisioning Volume...")
msg = f"Pending: {evs[0].message}"
state = evs[0].reason
if evs[0].reason == "WaitForFirstConsumer":
phase = status.STATUS_PHASE.UNAVAILABLE
msg = (
"Pending: This volume will be bound when its first consumer"
" is created. E.g., when you first browse its contents, or"
" attach it to a notebook server"
)
elif evs[0].reason == "Provisioning":
phase = status.STATUS_PHASE.WAITING
elif evs[0].reason == "FailedBinding":
phase = status.STATUS_PHASE.WARNING
elif evs[0].type == "Warning":
phase = status.STATUS_PHASE.WARNING
elif evs[0].type == "Normal":
phase = status.STATUS_PHASE.READY
return status.create_status(phase, msg, state)
def viewer_status(viewer):
"""
Return a string representing the status of that viewer. If a deletion
timestamp is set we want to return a `Terminating` state.
"""
try:
ready = viewer["status"]["ready"]
except KeyError:
return status.STATUS_PHASE.UNINITIALIZED
if "deletionTimestamp" in viewer["metadata"]:
return status.STATUS_PHASE.TERMINATING
if not ready:
return status.STATUS_PHASE.WAITING
return status.STATUS_PHASE.READY
| kubeflow/kubeflow | components/crud-web-apps/volumes/backend/apps/common/status.py | Python | apache-2.0 | 2,168 |
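`viewer_status` above operates on a plain dict-shaped custom resource, so its branches are easy to exercise in isolation; `pvc_status`, by contrast, needs a live Kubernetes API to list events. A small sketch for the former; the viewer dicts are hand-built stand-ins and the import path is inferred from the file's location:

```python
# Hand-built viewer objects to illustrate viewer_status(); in the real app
# these come from the Kubernetes API via the crud_backend helpers.
from apps.common.status import viewer_status

ready_viewer = {"metadata": {}, "status": {"ready": True}}
waiting_viewer = {"metadata": {}, "status": {"ready": False}}
terminating_viewer = {"metadata": {"deletionTimestamp": "2024-01-01T00:00:00Z"},
                      "status": {"ready": True}}

print(viewer_status(ready_viewer))        # STATUS_PHASE.READY
print(viewer_status(waiting_viewer))      # STATUS_PHASE.WAITING
print(viewer_status(terminating_viewer))  # STATUS_PHASE.TERMINATING
```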
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
import pickle
import time
from collections import namedtuple
from math import sqrt
from tempfile import mkstemp
import inspyred
import numpy
import pytest
from inspyred.ec import Bounder
from inspyred.ec.emo import Pareto
from ordered_set import OrderedSet
from cameo import config, fba
from cameo.core.manipulation import swap_cofactors
from cameo.parallel import SequentialView
from cameo.strain_design import OptGene
from cameo.strain_design.heuristic.evolutionary.archives import (BestSolutionArchive,
Individual)
from cameo.strain_design.heuristic.evolutionary.decoders import (GeneSetDecoder,
ReactionSetDecoder,
SetDecoder)
from cameo.strain_design.heuristic.evolutionary.evaluators import KnockoutEvaluator
from cameo.strain_design.heuristic.evolutionary.generators import (linear_set_generator,
multiple_chromosome_set_generator,
set_generator)
from cameo.strain_design.heuristic.evolutionary.genomes import MultipleChromosomeGenome
from cameo.strain_design.heuristic.evolutionary.metrics import (euclidean_distance,
manhattan_distance)
from cameo.strain_design.heuristic.evolutionary.multiprocess.migrators import MultiprocessingMigrator
from cameo.strain_design.heuristic.evolutionary.objective_functions import (MultiObjectiveFunction,
YieldFunction,
biomass_product_coupled_min_yield,
biomass_product_coupled_yield,
number_of_knockouts,
product_yield)
from cameo.strain_design.heuristic.evolutionary.optimization import (NADH_NADPH,
CofactorSwapOptimization,
EvaluatorWrapper,
HeuristicOptimization,
ReactionKnockoutOptimization,
SolutionSimplification,
TargetOptimizationResult,
set_distance_function,
GeneKnockoutOptimization)
from cameo.strain_design.heuristic.evolutionary.variators import (_do_set_n_point_crossover,
multiple_chromosome_set_indel,
multiple_chromosome_set_mutation,
set_indel,
set_mutation,
set_n_point_crossover)
from cobra.flux_analysis import find_essential_genes, find_essential_reactions
from cameo.util import RandomGenerator as Random
try:
from cameo.parallel import RedisQueue
except ImportError:
RedisQueue = None
CI = bool(os.getenv('CI', False))
if os.getenv('REDIS_PORT_6379_TCP_ADDR'):
REDIS_HOST = os.getenv('REDIS_PORT_6379_TCP_ADDR') # wercker
else:
REDIS_HOST = 'localhost'
SEED = 1234
CURRENT_PATH = os.path.dirname(__file__)
SOLUTIONS = [
[[1, 2, 3], 0.1],
[[1, 3, 2, 4], 0.1],
[[2, 3, 4], 0.45],
[[62, 51, 4], 0.2],
[[5, 3, 4, 51], 0.9],
[[5, 23, 41, 51], 0.9],
[[5, 3, 4, 51, 31], 0.9],
[[5, 3, 4, 51], 0.9],
[[44, 12, 42, 51], 0.0],
[[52, 22, 4, 11], 0.0]
]
@pytest.fixture(scope='function')
def generators(model):
mockup_evolutionary_algorithm = namedtuple("EA", ["bounder"])
args = {}
args.setdefault('representation', [r.id for r in model.reactions])
random = Random()
return args, random, mockup_evolutionary_algorithm
@pytest.fixture(scope="module")
def objectives():
single_objective_function = product_yield('product', 'substrate')
multi_objective_function = MultiObjectiveFunction([
product_yield('product', 'substrate'),
number_of_knockouts()
])
return single_objective_function, multi_objective_function
class TestMetrics:
def test_euclidean_distance(self):
distance = euclidean_distance({'a': 9}, {'a': 3})
assert distance == sqrt((9 - 3) ** 2)
def test_manhattan_distance(self):
distance = manhattan_distance({'a': 9}, {'a': 3})
assert distance == abs(9 - 3)
class TestBestSolutionArchive:
def test_solution_string(self):
sol1 = Individual(SOLUTIONS[0][0], SOLUTIONS[0][1])
sol2 = Individual(SOLUTIONS[1][0], SOLUTIONS[1][1])
sol3 = Individual(SOLUTIONS[2][0], SOLUTIONS[2][1])
assert sol1.__str__() == "[1, 2, 3] - 0.1 sense: max"
assert sol2.__str__() == "[1, 2, 3, 4] - 0.1 sense: max"
assert sol3.__str__() == "[2, 3, 4] - 0.45 sense: max"
def test_solution_comparison_maximization(self):
sol1 = Individual(SOLUTIONS[0][0], SOLUTIONS[0][1])
sol2 = Individual(SOLUTIONS[1][0], SOLUTIONS[1][1])
sol3 = Individual(SOLUTIONS[2][0], SOLUTIONS[2][1])
# test ordering
assert sol1.__cmp__(sol2) == -1
assert sol1.__cmp__(sol1) == 0
assert sol1.__cmp__(sol3) == 1
assert sol1 < sol2
assert sol1 == sol1
assert sol1 > sol3
# test gt and lt
assert sol1.__lt__(sol2)
assert sol1.__gt__(sol3)
assert not sol1.__lt__(sol1)
assert not sol1.__gt__(sol1)
assert not sol2.__lt__(sol1)
assert not sol3.__gt__(sol1)
# testing issubset
assert sol1.issubset(sol2), "Solution 1 is subset of Solution 2"
assert not sol2.issubset(sol1), "Solution 2 is not subset of Solution 1"
assert sol3.issubset(sol2), "Solution 3 is subset of Solution 2"
assert not sol2.issubset(sol3), "Solution 2 is not subset of Solution 3"
assert not sol1.issubset(sol3), "Solution 1 is not subset of Solution 3"
assert not sol2.issubset(sol3), "Solution 2 is not subset of Solution 3"
# test difference
l = len(sol2.symmetric_difference(sol1))
assert l == 1, "Difference between Solution 2 and 1 is (%s)" % sol2.symmetric_difference(sol1)
l = len(sol3.symmetric_difference(sol2))
assert l == 1, "Difference between Solution 3 and 2 is (%s)" % sol3.symmetric_difference(sol2)
l = len(sol3.symmetric_difference(sol1))
assert l == 2, "Difference between Solution 1 and 3 is (%s)" % sol3.symmetric_difference(sol1)
assert sol1.improves(sol2), "Solution 1 is better than Solution 2"
assert sol3.improves(sol2), "Solution 3 is better than Solution 2"
assert not sol3.improves(sol1), "Solution 3 does not improve Solution 1"
assert not sol2.improves(sol1), "Solution 2 does not improve Solution 1"
assert not sol2.improves(sol3), "Solution 2 does not improve Solution 3"
def test_solution_comparison_minimization(self):
sol1 = Individual(SOLUTIONS[0][0], SOLUTIONS[0][1], maximize=False)
sol2 = Individual(SOLUTIONS[1][0], SOLUTIONS[1][1], maximize=False)
sol3 = Individual(SOLUTIONS[2][0], SOLUTIONS[2][1], maximize=False)
# test ordering
assert sol1.__cmp__(sol2) == -1
assert sol1.__cmp__(sol1) == 0
assert sol1.__cmp__(sol3) == -1
assert sol3.__cmp__(sol1) == 1
assert sol1 < sol2
assert sol1 == sol1
assert sol1 < sol3
# test gt and lt
assert sol1.__lt__(sol2)
assert sol1.__lt__(sol3)
assert not sol1.__gt__(sol1)
assert not sol1.__lt__(sol1)
assert sol2.__gt__(sol1)
assert not sol3.__lt__(sol1)
# testing issubset
assert sol1.issubset(sol2), "Solution 1 is subset of Solution 2"
assert not sol2.issubset(sol1), "Solution 2 is not subset of Solution 1"
assert sol3.issubset(sol2), "Solution 3 is subset of Solution 2"
assert not sol2.issubset(sol3), "Solution 2 is not subset of Solution 3"
assert not sol1.issubset(sol3), "Solution 1 is not subset of Solution 3"
assert not sol2.issubset(sol3), "Solution 2 is not subset of Solution 3"
# test difference
l = len(sol2.symmetric_difference(sol1))
assert l == 1, "Difference between Solution 2 and 1 is (%s)" % sol2.symmetric_difference(sol1)
l = len(sol3.symmetric_difference(sol2))
assert l == 1, "Difference between Solution 3 and 2 is (%s)" % sol3.symmetric_difference(sol2)
l = len(sol3.symmetric_difference(sol1))
assert l == 2, "Difference between Solution 1 and 3 is (%s)" % sol3.symmetric_difference(sol1)
assert sol1.improves(sol2), "Solution 1 is better than Solution 2"
assert not sol3.improves(sol2), "Solution 3 is not better than Solution 2"
assert not sol3.improves(sol1), "Solution 3 does not improve Solution 1"
assert not sol2.improves(sol1), "Solution 2 does not improve Solution 1"
assert not sol2.improves(sol3), "Solution 2 does not improve Solution 3"
def test_add_greater_solution_with_same_fitness(self):
size = 1
pool = BestSolutionArchive()
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], None, True, size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
assert pool.length() == 1, "Pool must keep one solution (length=%s)" % pool.length()
best_solution = set(SOLUTIONS[0][0])
best_fitness = SOLUTIONS[0][1]
sol = pool.get(0)
assert sol.candidate == best_solution, "Best solution set must be the first"
assert sol.fitness == best_fitness, "Best solution fitness must be the first"
def test_add_smaller_solution_with_same_fitness(self):
size = 1
pool = BestSolutionArchive()
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], None, True, size)
assert pool.length() == 1, "Pool must keep one solution (length=%s)" % pool.length()
solution = set(SOLUTIONS[0][0])
fitness = SOLUTIONS[0][1]
sol = pool.get(0)
assert sol.candidate == solution, "Best solution must be the first (%s)" % sol.candidate
assert sol.fitness == fitness, "Best fitness must be the first (%s)" % sol.fitness
def test_uniqueness_of_solutions(self):
size = 2
pool = BestSolutionArchive()
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
assert pool.length() == 1, "Added repeated solution"
def test_pool_size_limit(self):
size = 1
pool = BestSolutionArchive()
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], None, True, size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
pool.add(SOLUTIONS[2][0], SOLUTIONS[2][1], None, True, size)
pool.add(SOLUTIONS[3][0], SOLUTIONS[3][1], None, True, size)
pool.add(SOLUTIONS[4][0], SOLUTIONS[4][1], None, True, size)
pool.add(SOLUTIONS[5][0], SOLUTIONS[5][1], None, True, size)
pool.add(SOLUTIONS[6][0], SOLUTIONS[6][1], None, True, size)
pool.add(SOLUTIONS[7][0], SOLUTIONS[7][1], None, True, size)
pool.add(SOLUTIONS[8][0], SOLUTIONS[8][1], None, True, size)
pool.add(SOLUTIONS[9][0], SOLUTIONS[9][1], None, True, size)
assert pool.length() <= 1, "Pool must keep one solution (length=%s)" % pool.length()
size = 2
pool = BestSolutionArchive()
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], None, True, size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
pool.add(SOLUTIONS[2][0], SOLUTIONS[2][1], None, True, size)
pool.add(SOLUTIONS[3][0], SOLUTIONS[3][1], None, True, size)
pool.add(SOLUTIONS[4][0], SOLUTIONS[4][1], None, True, size)
pool.add(SOLUTIONS[5][0], SOLUTIONS[5][1], None, True, size)
pool.add(SOLUTIONS[6][0], SOLUTIONS[6][1], None, True, size)
pool.add(SOLUTIONS[7][0], SOLUTIONS[7][1], None, True, size)
pool.add(SOLUTIONS[8][0], SOLUTIONS[8][1], None, True, size)
pool.add(SOLUTIONS[9][0], SOLUTIONS[9][1], None, True, size)
assert pool.length() <= 2, "Pool must keep at most two solutions (length=%s)" % pool.length()
size = 3
pool = BestSolutionArchive()
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], None, True, size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
pool.add(SOLUTIONS[2][0], SOLUTIONS[2][1], None, True, size)
pool.add(SOLUTIONS[3][0], SOLUTIONS[3][1], None, True, size)
pool.add(SOLUTIONS[4][0], SOLUTIONS[4][1], None, True, size)
pool.add(SOLUTIONS[5][0], SOLUTIONS[5][1], None, True, size)
pool.add(SOLUTIONS[6][0], SOLUTIONS[6][1], None, True, size)
pool.add(SOLUTIONS[7][0], SOLUTIONS[7][1], None, True, size)
pool.add(SOLUTIONS[8][0], SOLUTIONS[8][1], None, True, size)
pool.add(SOLUTIONS[9][0], SOLUTIONS[9][1], None, True, size)
assert pool.length() <= 3, "Pool must keep at most three solutions (length=%s)" % pool.length()
size = 4
pool = BestSolutionArchive()
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], None, True, size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], None, True, size)
pool.add(SOLUTIONS[2][0], SOLUTIONS[2][1], None, True, size)
pool.add(SOLUTIONS[3][0], SOLUTIONS[3][1], None, True, size)
pool.add(SOLUTIONS[4][0], SOLUTIONS[4][1], None, True, size)
pool.add(SOLUTIONS[5][0], SOLUTIONS[5][1], None, True, size)
pool.add(SOLUTIONS[6][0], SOLUTIONS[6][1], None, True, size)
pool.add(SOLUTIONS[7][0], SOLUTIONS[7][1], None, True, size)
pool.add(SOLUTIONS[8][0], SOLUTIONS[8][1], None, True, size)
pool.add(SOLUTIONS[9][0], SOLUTIONS[9][1], None, True, size)
assert pool.length() <= 4, "Pool must keep at most four solutions (length=%s)" % pool.length()
def test_callable_pool(self):
pool = BestSolutionArchive()
size = 3
args = {'max_archive_size': 3}
population = [Individual(SOLUTIONS[0][0], SOLUTIONS[0][1]),
Individual(SOLUTIONS[1][0], SOLUTIONS[1][1]),
Individual(SOLUTIONS[2][0], SOLUTIONS[2][1]),
Individual(SOLUTIONS[3][0], SOLUTIONS[3][1]),
Individual(SOLUTIONS[4][0], SOLUTIONS[4][1]),
Individual(SOLUTIONS[5][0], SOLUTIONS[5][1]),
Individual(SOLUTIONS[6][0], SOLUTIONS[6][1])]
archive = pool(None, population, [], args)
assert pool.length() == size
for sol in pool:
assert sol in archive
class TestObjectiveFunctions:
class _MockupSolution:
def __init__(self):
self._primal = {}
def set_primal(self, k, v):
self._primal[k] = v
def get_primal_by_id(self, k):
return self._primal[k]
@property
def fluxes(self):
return self._primal
def _assert_is_pickable(self, of):
assert isinstance(pickle.dumps(of), bytes)
def test_base_yield_function(self, model):
solution = self._MockupSolution()
solution.set_primal('EX_ac_lp_e_rp_', 2)
solution.set_primal('EX_glc_lp_e_rp_', -10)
of = YieldFunction(model.reactions.EX_ac_lp_e_rp_, model.reactions.EX_glc_lp_e_rp_)
self._assert_is_pickable(of)
with pytest.raises(ValueError):
YieldFunction({}, model.reactions.EX_glc_lp_e_rp_)
with pytest.raises(ValueError):
YieldFunction(None, model.reactions.EX_glc_lp_e_rp_)
with pytest.raises(ValueError):
YieldFunction([], model.reactions.EX_glc_lp_e_rp_)
with pytest.raises(ValueError):
YieldFunction(1, model.reactions.EX_glc_lp_e_rp_)
with pytest.raises(ValueError):
YieldFunction(model.reactions.EX_ac_lp_e_rp_, [])
with pytest.raises(ValueError):
YieldFunction(model.reactions.EX_ac_lp_e_rp_, 1)
with pytest.raises(ValueError):
YieldFunction(model.reactions.EX_ac_lp_e_rp_, {})
with pytest.raises(ValueError):
YieldFunction(model.reactions.EX_ac_lp_e_rp_, [])
def test_biomass_product_coupled_yield(self):
solution = self._MockupSolution()
solution.set_primal('biomass', 0.6)
solution.set_primal('product', 2)
solution.set_primal('substrate', -10)
of = biomass_product_coupled_yield("biomass", "product", "substrate")
assert of.name == "bpcy = (biomass * product) / substrate"
self._assert_is_pickable(of)
fitness = of(None, solution, None)
assert round(abs((0.6 * 2) / 10 - fitness), 7) == 0
solution.set_primal('substrate', 0)
fitness = of(None, solution, None)
assert 0 == fitness
solution.set_primal('substrate2', -5)
solution.set_primal('substrate', -5)
of2 = biomass_product_coupled_yield("biomass", "product", ["substrate", "substrate2"])
assert of2.name == "bpcy = (biomass * product) / (substrate + substrate2)"
self._assert_is_pickable(of2)
fitness = of2(None, solution, None)
assert round(abs((0.6 * 2) / 10 - fitness), 7) == 0
def test_biomass_product_coupled_min_yield(self, model):
biomass = "Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2"
product = "EX_ac_lp_e_rp_"
substrate = "EX_glc_lp_e_rp_"
solution = self._MockupSolution()
solution.set_primal(biomass, 0.263136)
solution.set_primal(product, 16.000731)
solution.set_primal(substrate, -10)
of = biomass_product_coupled_min_yield(biomass, product, substrate)
self._assert_is_pickable(of)
assert of.name == "bpcy = (%s * min(%s)) / %s" % (biomass, product, substrate)
reactions = [model.reactions.get_by_id(r) for r in ['ATPS4r', 'CO2t', 'GLUDy', 'PPS', 'PYK']]
with model:
for r in reactions:
r.knock_out()
fitness = of(model, solution, reactions)
assert round(abs(0.414851 - fitness), 5) == 0
def test_product_yield(self):
solution = self._MockupSolution()
solution.set_primal('biomass', 0.6)
solution.set_primal('product', 2)
solution.set_primal('substrate', -10)
of = product_yield("product", "substrate", carbon_yield=False)
assert of.name == "yield = (product / substrate)"
self._assert_is_pickable(of)
fitness = of(None, solution, None)
assert round(abs(2.0 / 10.0 - fitness), 7) == 0
solution.set_primal('substrate', 0)
fitness = of(None, solution, None)
assert 0 == fitness
solution.set_primal('substrate', -5)
solution.set_primal('substrate2', -5)
of2 = product_yield('product', ['substrate', 'substrate2'], carbon_yield=False)
assert of2.name == "yield = (product / (substrate + substrate2))"
self._assert_is_pickable(of2)
fitness = of2(None, solution, None)
assert round(abs(2.0 / 10.0 - fitness), 7) == 0
def test_number_of_knockouts(self):
of_max = number_of_knockouts(sense='max')
assert of_max.name == "max knockouts"
of_min = number_of_knockouts(sense='min')
assert of_min.name == "min knockouts"
f1 = of_max(None, None, ['a', 'b'])
f2 = of_max(None, None, ['a', 'b', 'c'])
assert f2 > f1
f1 = of_min(None, None, ['a', 'b'])
f2 = of_min(None, None, ['a', 'b', 'c'])
assert f1 > f2
class TestKnockoutEvaluator:
def test_initializer(self, model):
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2",
"EX_ac_lp_e_rp_",
"EX_glc_lp_e_rp_")
decoder = ReactionSetDecoder(["PGI", "PDH", "FUM", "FBA", "G6PDH2r", "FRD7", "PGL", "PPC"], model)
evaluator = KnockoutEvaluator(model, decoder, objective1, fba, {})
assert evaluator.decoder == decoder
assert evaluator.objective_function == objective1
assert hasattr(evaluator, "__call__")
objective2 = product_yield("EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
evaluator = KnockoutEvaluator(model, decoder, MultiObjectiveFunction([objective1, objective2]), fba, {})
assert evaluator.objective_function.objectives == [objective1, objective2]
def test_invalid_initializers(self, model):
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2",
"EX_ac_lp_e_rp_",
"EX_glc_lp_e_rp_")
decoder = ReactionSetDecoder(["PGI", "PDH", "FUM", "FBA", "G6PDH2r", "FRD7", "PGL", "PPC"], model)
with pytest.raises(ValueError):
KnockoutEvaluator(model, decoder, 1, fba, {})
with pytest.raises(ValueError):
KnockoutEvaluator(model, decoder, None, fba, {})
with pytest.raises(ValueError):
KnockoutEvaluator(model, decoder, [], fba, {})
with pytest.raises(ValueError):
KnockoutEvaluator(model, decoder, [2, 3], fba, {})
with pytest.raises(ValueError):
KnockoutEvaluator(model, decoder, [objective1], fba, {})
with pytest.raises(ValueError):
KnockoutEvaluator(model, None, [], fba, {})
with pytest.raises(ValueError):
KnockoutEvaluator(model, True, [], fba, {})
def test_evaluate_single_objective(self, model):
representation = ["ATPS4r", "PYK", "GLUDy", "PPS", "CO2t", "PDH",
"FUM", "FBA", "G6PDH2r", "FRD7", "PGL", "PPC"]
decoder = ReactionSetDecoder(representation, model)
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2",
"EX_ac_lp_e_rp_",
"EX_glc_lp_e_rp_")
evaluator = KnockoutEvaluator(model, decoder, objective1, fba, {})
fitness = evaluator([[0, 1, 2, 3, 4]])[0]
assert abs(fitness - 0.41) < 0.02
def test_ko_evaluate_single_objective_benchmark(self, benchmark, model):
benchmark(self.test_evaluate_single_objective, model)
def test_evaluate_multi_objective(self, model):
representation = ["ATPS4r", "PYK", "GLUDy", "PPS", "CO2t", "PDH",
"FUM", "FBA", "G6PDH2r", "FRD7", "PGL", "PPC"]
decoder = ReactionSetDecoder(representation, model)
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2",
"EX_ac_lp_e_rp_",
"EX_glc_lp_e_rp_")
objective2 = product_yield("EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_", carbon_yield=False)
objective = MultiObjectiveFunction([objective1, objective2])
evaluator = KnockoutEvaluator(model, decoder, objective, fba, {})
fitness = evaluator([[0, 1, 2, 3, 4]])[0]
assert isinstance(fitness, Pareto)
assert abs(fitness[0] - 0.41) < 0.02
assert abs(fitness[1] - 1.57) < 0.035
def test_ko_evaluate_multi_objective_benchmark(self, benchmark, model):
benchmark(self.test_evaluate_multi_objective, model)
def test_evaluate_infeasible_solution(self, model):
representation = ["ENO", "ATPS4r", "PYK", "GLUDy", "PPS", "CO2t", "PDH",
"FUM", "FBA", "G6PDH2r", "FRD7", "PGL", "PPC"]
decoder = ReactionSetDecoder(representation, model)
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2",
"EX_ac_lp_e_rp_",
"EX_glc_lp_e_rp_")
evaluator = KnockoutEvaluator(model, decoder, objective1, fba, {})
fitness = evaluator([[0]])[0]
assert fitness == 0
class TestWrappedEvaluator:
def test_initializer(self):
def evaluation_function(x):
return 1
evaluator = EvaluatorWrapper(config.default_view, evaluation_function)
assert hasattr(evaluator, '__call__')
assert hasattr(evaluator, 'view')
assert hasattr(evaluator, 'evaluator')
assert evaluator.view == config.default_view
assert evaluator.evaluator == evaluation_function
def test_invalid_initializer(self):
with pytest.raises(ValueError):
EvaluatorWrapper(config.default_view, None)
with pytest.raises(ValueError):
EvaluatorWrapper(config.default_view, 1)
with pytest.raises(ValueError):
EvaluatorWrapper(config.default_view, [1, 2, 3])
with pytest.raises(ValueError):
EvaluatorWrapper(lambda x: 1, config.default_view)
with pytest.raises(ValueError):
EvaluatorWrapper(None, lambda x: 1)
with pytest.raises(ValueError):
EvaluatorWrapper(123, lambda x: 1)
class TestSwapOptimization:
def test_swap_reaction_identification(self, model):
expected_reactions = ['ACALD', 'AKGDH', 'ALCD2x', 'G6PDH2r', 'GAPD', 'GLUDy', 'GLUSy', 'GND', 'ICDHyr',
'LDH_D', 'MDH', 'ME1', 'ME2', 'NADH16', 'PDH']
swap_pairs = ([model.metabolites.get_by_id(m) for m in NADH_NADPH[0]],
[model.metabolites.get_by_id(m) for m in NADH_NADPH[1]])
representation = CofactorSwapOptimization.find_swappable_reactions(model, swap_pairs)
assert expected_reactions == representation
assert 'PGI' not in representation
def test_evaluate_swap(self, model):
cofactors = ((model.metabolites.nad_c, model.metabolites.nadh_c),
(model.metabolites.nadp_c, model.metabolites.nadph_c))
with model:
swap_cofactors(model.reactions.ALCD2x, model, cofactors, inplace=True)
assert model.metabolites.nadp_c in model.reactions.ALCD2x.metabolites
assert model.metabolites.nad_c in model.reactions.ALCD2x.metabolites
with model:
model.reactions.Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2.lower_bound = 0.5
py = product_yield(model.reactions.EX_etoh_lp_e_rp_, model.reactions.EX_glc_lp_e_rp_)
model.objective = model.reactions.EX_etoh_lp_e_rp_
swap_cofactors(model.reactions.ALCD2x, model, cofactors, inplace=True)
reactions = ['GAPD', 'AKGDH', 'PDH', 'GLUDy', 'MDH']
optimization = CofactorSwapOptimization(model=model, objective_function=py,
candidate_reactions=reactions)
optimization_result = optimization.run(max_evaluations=10000, max_size=1, pop_size=100,
variable_size=False,
mutation_rate=0.5, seed=1485441961)
# above should not have added anything to the history
fitness = optimization_result.data_frame.fitness.max()
assert round(abs(fitness - 0.322085), 3) == 0
class TestDecoders:
def test_set_decoder(self, model):
representation = [1, 2, 'a', 'b', None, '0']
decoder = SetDecoder(representation, model)
assert decoder([])[0] == []
for i in range(len(representation)):
assert decoder([i])[0] == [representation[i]]
def test_reaction_set_decoder(self, model):
decoder = ReactionSetDecoder([r.id for r in model.reactions], model)
reactions = decoder([1, 2, 3, 4])[0]
for i in range(1, 5):
assert model.reactions[i] == reactions[i - 1]
def test_reaction_set_decoder_with_groups(self, model):
groups = [{model.reactions[1]: 1, model.reactions[11]: 1, model.reactions[12]: 5},
{model.reactions[2]: 1, model.reactions[13]: 1, model.reactions[14]: 5}]
decoder = ReactionSetDecoder([r.id for r in model.reactions[0:10]], model, groups=groups)
combinations = decoder([1, 2, 3, 4])
for reactions in combinations:
for i in range(1, 5):
reaction = reactions[i - 1]
group = next((g for g in groups if reaction in g), {reaction: 1})
assert model.reactions[i] in group
def test_gene_set_decoder(self, model):
decoder = GeneSetDecoder([g.id for g in model.genes], model)
genes = decoder([1, 2, 3, 4])[0]
for i in range(1, 5):
assert model.genes[i] == genes[i - 1]
class TestGenerators:
def test_set_generator(self):
random = Random(SEED)
representation = ["a", "b", "c", "d", "e", "f"]
max_size = 5
variable_size = False
expected = [[0, 1, 2, 4, 5],
[0, 2, 3, 4, 5],
[0, 1, 2, 3, 5],
[1, 2, 3, 4, 5],
[0, 2, 3, 4, 5]]
for i in range(len(expected)):
candidate = set_generator(random, dict(representation=representation,
max_size=max_size,
variable_size=variable_size))
assert candidate == expected[i]
def test_multiple_chromosome_set_generator(self):
random = Random(SEED)
args = dict(keys=["test_key_1", "test_key_2"],
test_key_1_representation=["a1", "a2", "a3", "a4", "a5"],
test_key_2_representation=["b1", "b2", "b3", "b4", "b5", "b6", "b7"],
test_key_1_max_size=3,
test_key_2_max_size=5,
variable_size=False)
candidate = multiple_chromosome_set_generator(random, args)
assert len(candidate['test_key_1']) == 3
assert len(candidate['test_key_2']) == 5
def test_fixed_size_set_generator(self, generators):
args, random, _ = generators
candidates_file = os.path.join(CURRENT_PATH, "data", "fix_size_candidates.pkl")
random.seed(SEED)
args.setdefault('variable_size', False)
candidates = []
args['max_size'] = 10
for _ in range(1000):
candidate = set_generator(random, args)
assert len(candidate) == 10
candidates.append(candidate)
# with open(candidates_file, 'wb') as out_file:
# pickle.dump(candidates, out_file, protocol=2)
with open(candidates_file, 'rb') as in_file:
expected_candidates = pickle.load(in_file)
assert candidates == expected_candidates
args['max_size'] = 20
for _ in range(1000):
candidate = set_generator(random, args)
assert len(candidate) == 20
def test_variable_size_set_generator(self, generators):
args, random, _ = generators
candidates_file = os.path.join(CURRENT_PATH, "data", "variable_size_candidates.pkl")
args.setdefault('variable_size', True)
random.seed(SEED)
candidates = []
args['max_size'] = 10
for _ in range(1000):
candidate = set_generator(random, args)
assert len(candidate) <= 10
candidates.append(candidate)
with open(candidates_file, 'rb') as in_file:
expected_candidates = pickle.load(in_file)
assert candidates == expected_candidates
args['max_size'] = 20
for _ in range(1000):
candidate = set_generator(random, args)
assert len(candidate) <= 20
def test_fixed_size_linear_set_generator(self, generators):
args, random, mockup_evolutionary_algorithm = generators
ec = mockup_evolutionary_algorithm(Bounder(-10, 10))
args.setdefault('variable_size', False)
args['max_size'] = 10
args['_ec'] = ec
for _ in range(1000):
candidate = linear_set_generator(random, args)
for i, v in candidate.items():
assert isinstance(i, (int, numpy.int64, numpy.int32))
assert isinstance(v, float)
assert len(candidate) <= 10
class TestHeuristicOptimization:
def test_default_initializer(self, model, objectives):
single_objective_function, multi_objective_function = objectives
heuristic_optimization = HeuristicOptimization(
model=model,
objective_function=single_objective_function
)
assert heuristic_optimization.model == model
assert heuristic_optimization.objective_function == single_objective_function
heuristic_optimization = HeuristicOptimization(
model=model,
objective_function=single_objective_function,
)
assert heuristic_optimization.model == model
assert heuristic_optimization.objective_function == single_objective_function
def test_multi_objective_initializer(self, model, objectives):
single_objective_function, multi_objective_function = objectives
heuristic_optimization = HeuristicOptimization(
model=model,
objective_function=multi_objective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
assert heuristic_optimization.model == model
assert len(heuristic_optimization.objective_function) == 2
heuristic_optimization = HeuristicOptimization(
model=model,
objective_function=multi_objective_function,
heuristic_method=inspyred.ec.emo.NSGA2,
)
assert heuristic_optimization.model == model
assert len(heuristic_optimization.objective_function) == 2
def test_invalid_initializer(self, model, objectives):
single_objective_function, multi_objective_function = objectives
with pytest.raises(TypeError):
HeuristicOptimization(model=model,
objective_function=multi_objective_function,
heuristic_method=inspyred.ec.GA)
def test_single_objective_function_with_multiobjective_initializer(self, model, objectives):
single_objective_function, multi_objective_function = objectives
heuristic_optimization = HeuristicOptimization(
model=model,
objective_function=single_objective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
assert len(heuristic_optimization.objective_function) == 1
def test_change_objective_function(self, model, objectives):
single_objective_function, multi_objective_function = objectives
single_objective_heuristic = HeuristicOptimization(
model=model,
objective_function=single_objective_function,
)
nok = number_of_knockouts()
single_objective_heuristic.objective_function = nok
assert nok == single_objective_heuristic.objective_function
with pytest.raises(TypeError):
single_objective_heuristic.objective_function(multi_objective_function)
with pytest.raises(TypeError):
single_objective_heuristic.objective_function(multi_objective_function)
multiobjective_heuristic = HeuristicOptimization(
model=model,
objective_function=multi_objective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
multiobjective_heuristic.objective_function = nok
assert len(multiobjective_heuristic.objective_function) == 1
assert multiobjective_heuristic.objective_function == nok
def test_change_heuristic_method(self, model, objectives):
single_objective_function, multi_objective_function = objectives
single_objective_heuristic = HeuristicOptimization(
model=model,
objective_function=single_objective_function,
)
single_objective_heuristic.heuristic_method = inspyred.ec.emo.NSGA2
assert len(single_objective_heuristic.objective_function) == 1
multiobjective_heuristic = HeuristicOptimization(
model=model,
objective_function=multi_objective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
with pytest.raises(TypeError):
multiobjective_heuristic.heuristic_method(inspyred.ec.GA)
multiobjective_heuristic.objective_function = single_objective_function
multiobjective_heuristic.heuristic_method = inspyred.ec.GA
def test_set_distance_function(self):
s1 = {1, 2, 3}
s2 = {1, 2, 3, 4}
d = set_distance_function(s1, s2)
assert d == 1
s3 = {2, 3, 4}
d = set_distance_function(s1, s3)
assert d == 2
d = set_distance_function(s3, s2)
assert d == 1
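# --- Hedged note (added for clarity; not part of the original test suite) ---
# The three expectations above are consistent with set_distance_function
# returning the size of the symmetric difference of its two arguments;
# a pure-Python sketch:
#
#     def _symmetric_difference_distance(a, b):
#         return len(set(a) ^ set(b))
#
#     assert _symmetric_difference_distance({1, 2, 3}, {1, 2, 3, 4}) == 1
#     assert _symmetric_difference_distance({1, 2, 3}, {2, 3, 4}) == 2
#     assert _symmetric_difference_distance({2, 3, 4}, {1, 2, 3, 4}) == 1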
class TestMigrators:
@pytest.mark.skipif(RedisQueue is None, reason='redis not available')
def test_migrator_constructor(self):
migrator = MultiprocessingMigrator(max_migrants=1, host=REDIS_HOST)
assert isinstance(migrator.migrants, RedisQueue)
assert migrator.max_migrants == 1
migrator = MultiprocessingMigrator(max_migrants=2, host=REDIS_HOST)
assert isinstance(migrator.migrants, RedisQueue)
assert migrator.max_migrants == 2
migrator = MultiprocessingMigrator(max_migrants=3, host=REDIS_HOST)
assert isinstance(migrator.migrants, RedisQueue)
assert migrator.max_migrants == 3
@pytest.mark.skipif(RedisQueue is None, reason='redis not available')
def test_migrate_individuals_without_evaluation(self):
population = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
random = Random(SEED)
migrator = MultiprocessingMigrator(max_migrants=1, host=REDIS_HOST)
assert isinstance(migrator.migrants, RedisQueue)
assert migrator.max_migrants == 1
migrator(random, population, {})
assert len(migrator.migrants) == 1
migrator(random, population, {})
assert len(migrator.migrants) == 1
class TestOptimizationResult:
def test_reaction_result(self, model):
representation = [r.id for r in model.reactions]
random = Random(SEED)
args = {"representation": representation}
solutions = BestSolutionArchive()
for _ in range(10000):
solutions.add(set_generator(random, args), random.random(), None, True, 100)
decoder = ReactionSetDecoder(representation, model)
result = TargetOptimizationResult(
model=model,
heuristic_method=None,
simulation_method=fba,
simulation_kwargs=None,
solutions=solutions,
objective_function=None,
target_type="reaction",
decoder=decoder,
seed=SEED,
simplify=False)
assert result.target_type == "reaction"
individuals = []
for row in result:
encoded = set(representation.index(v) for v in row[0])
individual = Individual(encoded, row[1])
            assert individual not in individuals, "%s is repeated in the result" % individual
individuals.append(individual)
assert individual in solutions.archive
            assert solutions.archive.count(individual) == 1, "%s is not unique in archive" % individual
@pytest.fixture(scope="function")
def reaction_ko_single_objective(model):
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
return ReactionKnockoutOptimization(model=model, simulation_method=fba, objective_function=objective)
@pytest.fixture(scope="function")
def reaction_ko_multi_objective(model):
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
objective2 = number_of_knockouts()
objective = MultiObjectiveFunction([objective1, objective2])
return ReactionKnockoutOptimization(model=model, simulation_method=fba, objective_function=objective,
heuristic_method=inspyred.ec.emo.NSGA2)
class TestReactionKnockoutOptimization:
def test_initializer(self, model):
essential_reactions = set([r.id for r in find_essential_reactions(model)])
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
rko = ReactionKnockoutOptimization(model=model,
simulation_method=fba,
objective_function=objective)
assert sorted(essential_reactions) == sorted(rko.essential_reactions)
assert rko._target_type == "reaction"
assert isinstance(rko._decoder, ReactionSetDecoder)
def test_run_single_objective(self, reaction_ko_single_objective):
        # TODO: make optlang deterministic so these results can be permanently stored.
_, result_file = mkstemp('.pkl')
results = reaction_ko_single_objective.run(max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
assert len(results.data_frame.targets) > 0
assert len(results.data_frame.targets) == len(results.data_frame.targets.apply(tuple).unique())
# with open(result_file, 'wb') as in_file:
# print(results)
# print(in_file)
# pickle.dump(results, in_file)
# with open(result_file, 'rb') as in_file:
# expected_results = pickle.load(in_file)
# assert results.seed == expected_results.seed
def test_run_reaction_single_ko_objective_benchmark(self, benchmark, reaction_ko_single_objective):
benchmark(reaction_ko_single_objective.run, max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
def test_run_with_time_limit(self, model):
        # TODO: make optlang deterministic so these results can be permanently stored.
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
rko = ReactionKnockoutOptimization(model=model,
simulation_method=fba,
objective_function=objective)
start_time = time.time()
rko.run(max_evaluations=3000000, pop_size=10, view=SequentialView(), seed=SEED, max_time=(1, 0))
elapsed_time = time.time() - start_time
assert elapsed_time < 1.25 * 60
def test_optgene_with_time_limit(self, model):
ko = OptGene(model)
start_time = time.time()
ko.run(target="EX_ac_lp_e_rp_",
biomass="Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2",
substrate="EX_glc_lp_e_rp_", max_evaluations=3000000, seed=SEED, max_time=(1, 0))
elapsed_time = time.time() - start_time
# assert elapsed_time < 1.25 * 60
print(elapsed_time)
assert elapsed_time < 2 * 60
def test_run_multi_objective(self, model, reaction_ko_multi_objective):
        # TODO: make optlang deterministic so these results can be permanently stored.
_, result_file = mkstemp('.pkl')
results = reaction_ko_multi_objective.run(max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
assert len(results.data_frame.targets) == len(results.data_frame.targets.apply(tuple).unique())
# with open(result_file, 'wb') as in_file:
# pickle.dump(results, in_file)
# with open(result_file, 'rb') as in_file:
# expected_results = pickle.load(in_file)
# assert results.seed == expected_results.seed
def test_run_reaction_ko_multi_objective_benchmark(self, benchmark, reaction_ko_multi_objective):
benchmark(reaction_ko_multi_objective.run, max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
@pytest.fixture(scope="function")
def gene_ko_single_objective(model):
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
return GeneKnockoutOptimization(model=model, simulation_method=fba, objective_function=objective)
@pytest.fixture(scope="function")
def gene_ko_multi_objective(model):
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
objective2 = number_of_knockouts()
objective = MultiObjectiveFunction([objective1, objective2])
return GeneKnockoutOptimization(model=model, simulation_method=fba, objective_function=objective,
heuristic_method=inspyred.ec.emo.NSGA2)
class TestGeneKnockoutOptimization:
def test_initializer(self, model):
essential_genes = set([r.id for r in find_essential_genes(model)])
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
rko = GeneKnockoutOptimization(model=model,
simulation_method=fba,
objective_function=objective)
assert sorted(essential_genes) == sorted(rko.essential_genes)
assert rko._target_type == "gene"
assert isinstance(rko._decoder, GeneSetDecoder)
def test_run_single_objective(self, model, gene_ko_single_objective):
        # TODO: make optlang deterministic so these results can be permanently stored.
_, result_file = mkstemp('.pkl')
results = gene_ko_single_objective.run(max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
assert len(results.data_frame.targets) == len(results.data_frame.targets.apply(tuple).unique())
# with open(result_file, 'wb') as in_file:
# print(results)
# pickle.dump(results, in_file)
# with open(result_file, 'rb') as in_file:
# expected_results = pickle.load(in_file)
# assert results.seed == expected_results.seed
def test_run_gene_ko_single_objective_benchmark(self, gene_ko_single_objective, benchmark):
benchmark(gene_ko_single_objective.run, max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
def test_run_multi_objective(self, model, gene_ko_multi_objective):
        # TODO: make optlang deterministic so these results can be permanently stored.
_, result_file = mkstemp('.pkl')
results = gene_ko_multi_objective.run(max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
assert len(results.data_frame.targets) == len(results.data_frame.targets.apply(tuple).unique())
# with open(result_file, 'wb') as in_file:
# pickle.dump(results, in_file)
# with open(result_file, 'rb') as in_file:
# expected_results = pickle.load(in_file)
# assert results.seed == expected_results.seed
def test_run_gene_ko_multi_objective_benchmark(self, gene_ko_multi_objective, benchmark):
benchmark(gene_ko_multi_objective.run, max_evaluations=3000, pop_size=10, view=SequentialView(), seed=SEED)
def test_run_with_time_limit(self, model):
        # TODO: make optlang deterministic so these results can be permanently stored.
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2", "EX_ac_lp_e_rp_", "EX_glc_lp_e_rp_")
rko = ReactionKnockoutOptimization(model=model,
simulation_method=fba,
objective_function=objective)
start_time = time.time()
rko.run(max_evaluations=3000000, pop_size=10, view=SequentialView(), seed=SEED, max_time=(1, 0))
elapsed_time = time.time() - start_time
assert elapsed_time < 1.25 * 60
class TestVariator:
def test_set_n_point_crossover(self):
mom = OrderedSet([1, 3, 5, 9, 10])
dad = OrderedSet([2, 3, 7, 8])
args = {
"crossover_rate": 1.0,
"num_crossover_points": 1,
"candidate_size": 10
}
children = set_n_point_crossover(Random(SEED), [mom, dad], args)
bro = OrderedSet([1, 3, 5, 8])
sis = OrderedSet([2, 3, 7, 9, 10])
assert bro == children[0]
assert sis == children[1]
def test_do_not_set_n_point_crossover(self):
mom = OrderedSet([1, 3, 5, 9, 10])
dad = OrderedSet([2, 3, 7, 8])
args = {
"crossover_rate": 0.0,
"num_crossover_points": 1,
"candidate_size": 10
}
children = set_n_point_crossover(Random(SEED), [mom, dad], args)
assert mom == children[0]
assert dad == children[1]
def test_set_mutation(self):
individual = OrderedSet([1, 3, 5, 9, 10])
representation = list(range(10))
args = {
"representation": representation,
"mutation_rate": 1.0
}
new_individuals = set_mutation(Random(SEED), [individual], args)
assert len(new_individuals[0]) == len(individual)
assert new_individuals[0] != individual
assert new_individuals[0] == [0, 2, 4, 6, 7]
def test_do_not_set_mutation(self):
individual = OrderedSet([1, 3, 5, 9, 10])
representation = list(range(10))
args = {
"representation": representation,
"mutation_rate": 0.0
}
new_individuals = set_mutation(Random(SEED), [individual], args)
assert len(new_individuals[0]) == len(individual)
assert new_individuals[0] == individual
def test_set_indel(self):
individual = [1, 3, 5, 9, 10]
representation = list(range(10))
args = {
"representation": representation,
"indel_rate": 1.0
}
new_individuals = set_indel(Random(SEED), [individual], args)
assert len(new_individuals[0]) != len(individual)
assert new_individuals[0] == [1, 3, 5, 6, 9, 10]
def test_do_not_set_indel(self):
individual = [1, 3, 5, 9, 10]
representation = list(range(10))
args = {
"representation": representation,
"indel_rate": 0.0
}
new_individuals = set_indel(Random(SEED), [individual], args)
assert len(new_individuals[0]) == len(individual)
assert new_individuals[0] == individual
args = {
"representation": representation,
"indel_rate": 1.0,
"variable_size": False
}
new_individuals = set_indel(Random(SEED), [individual], args)
assert len(new_individuals[0]) == len(individual)
assert new_individuals[0] == individual
def test_do_set_n_point_crossover(self):
representation = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N"]
int_representation = [representation.index(v) for v in representation]
mom = OrderedSet([representation.index(v) for v in ["A", "B", "E", "K", "L", "M"]])
dad = OrderedSet([representation.index(v) for v in ["A", "C", "I", "J", "K", "L"]])
points = [4]
children = _do_set_n_point_crossover(int_representation, mom, dad, points, Random(), len(mom))
bro = OrderedSet([0, 1, 8, 9, 10, 11])
sis = OrderedSet([0, 2, 4, 10, 11, 12])
assert children[0] == bro
assert children[1] == sis
def test_multiple_chromosome_set_mutation(self):
genome = MultipleChromosomeGenome(["A", "B"])
genome["A"] = [1, 2, 3, 4]
genome["B"] = [1, 5, 7, 10]
representation = list(range(10))
args = {
"A_representation": representation,
"B_representation": representation,
"A_mutation_rate": 1,
"B_mutation_rate": 1
}
new_individuals = multiple_chromosome_set_mutation(Random(SEED), [genome], args)
assert new_individuals[0]["A"] == OrderedSet([0, 6, 7, 8])
assert new_individuals[0]["B"] == OrderedSet([0, 6, 8, 9])
def test_multiple_chromosome_set_indel(self):
genome = MultipleChromosomeGenome(["A", "B"])
genome["A"] = [1, 2, 3, 4]
genome["B"] = [1, 5, 7, 10]
representation = list(range(10))
args = {
"A_representation": representation,
"B_representation": representation,
"A_indel_rate": 1,
"B_indel_rate": 1
}
random = Random(SEED)
new_individuals = multiple_chromosome_set_indel(random, [genome for _ in range(5)], args)
assert new_individuals[0]["A"] == OrderedSet([1, 2, 3, 4, 7])
assert new_individuals[0]["B"] == OrderedSet([1, 5, 10])
assert new_individuals[1]["A"] == OrderedSet([2, 3, 4])
assert new_individuals[1]["B"] == OrderedSet([1, 5, 7, 8, 10])
assert new_individuals[2]["A"] == OrderedSet([1, 2, 3, 4, 6])
assert new_individuals[2]["B"] == OrderedSet([1, 5, 7])
assert new_individuals[3]["A"] == OrderedSet([1, 2, 3, 4, 8])
assert new_individuals[3]["B"] == OrderedSet([0, 1, 5, 7, 10])
assert new_individuals[4]["A"] == OrderedSet([1, 2, 3, 4, 7])
assert new_individuals[4]["B"] == OrderedSet([1, 5, 7, 8, 10])
class TestGenomes:
def test_two_chromosomes(self):
genome = MultipleChromosomeGenome(["A", "B"])
assert isinstance(genome["A"], list)
assert isinstance(genome["B"], list)
genome["A"] = [1, 2, 3, 4]
genome["B"] = ["A", "B", "C"]
assert genome["A"] == OrderedSet([1, 2, 3, 4])
assert genome["B"] == OrderedSet(["A", "B", "C"])
del genome["A"]
with pytest.raises(KeyError):
genome.__getitem__("A")
def simplify_knockout_solutions_for_succ(iaf1260):
representation = ["FUM", "SFGTHi", "DHACOAH", "ASPTRS"]
solution = [0, 1, 2, 3]
bpcy = biomass_product_coupled_min_yield("Ec_biomass_iAF1260_core_59p81M",
"EX_succ_lp_e_rp_",
"EX_glc_lp_e_rp_")
decoder = ReactionSetDecoder(representation, iaf1260)
evaluator = KnockoutEvaluator(iaf1260, decoder, bpcy, fba, {})
simplification = SolutionSimplification(evaluator)
new_solution = simplification(solution)
assert [0] == new_solution
|
biosustain/cameo
|
tests/test_strain_design_heuristics.py
|
Python
|
apache-2.0
| 55,803
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1IngressClassSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'controller': 'str',
'parameters': 'V1IngressClassParametersReference'
}
attribute_map = {
'controller': 'controller',
'parameters': 'parameters'
}
def __init__(self, controller=None, parameters=None, local_vars_configuration=None): # noqa: E501
"""V1IngressClassSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._controller = None
self._parameters = None
self.discriminator = None
if controller is not None:
self.controller = controller
if parameters is not None:
self.parameters = parameters
@property
def controller(self):
"""Gets the controller of this V1IngressClassSpec. # noqa: E501
Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
:return: The controller of this V1IngressClassSpec. # noqa: E501
:rtype: str
"""
return self._controller
@controller.setter
def controller(self, controller):
"""Sets the controller of this V1IngressClassSpec.
Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
:param controller: The controller of this V1IngressClassSpec. # noqa: E501
:type: str
"""
self._controller = controller
@property
def parameters(self):
"""Gets the parameters of this V1IngressClassSpec. # noqa: E501
:return: The parameters of this V1IngressClassSpec. # noqa: E501
:rtype: V1IngressClassParametersReference
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1IngressClassSpec.
:param parameters: The parameters of this V1IngressClassSpec. # noqa: E501
:type: V1IngressClassParametersReference
"""
self._parameters = parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressClassSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressClassSpec):
return True
return self.to_dict() != other.to_dict()
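# --- Hedged usage sketch (added for illustration; not part of the generated client) ---
# A minimal round-trip through the auto-generated model, assuming the kubernetes
# package is importable. The controller value reuses the domain-prefixed example
# from the docstring above; `parameters` is deliberately left unset.
if __name__ == '__main__':
    spec = V1IngressClassSpec(controller='acme.io/ingress-controller')
    same = V1IngressClassSpec(controller='acme.io/ingress-controller')
    assert spec == same                  # __eq__ compares to_dict() output
    assert spec.to_dict() == {'controller': 'acme.io/ingress-controller',
                              'parameters': None}
    print(spec)                          # __repr__ pretty-prints the dict via pprint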
|
kubernetes-client/python
|
kubernetes/client/models/v1_ingress_class_spec.py
|
Python
|
apache-2.0
| 5,087
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Script to generate api_docs for MLMD.
The script needs to be run under Python3.
The doc generator can be installed with:
```
$> pip3 install git+https://github.com/tensorflow/docs
```
To run from it on the mlmd pip package:
```
python3 ml_metadata/tools/documentation/build_docs.py --output_dir=/tmp/mlmd
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import ml_metadata as mlmd
from google.protobuf.reflection import GeneratedProtocolMessageType
flags.DEFINE_string('output_dir', '/tmp/mlmd_api', 'Where to output the docs')
flags.DEFINE_string(
'code_url_prefix',
'https://github.com/google/ml-metadata/tree/master/ml_metadata',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', 'ml_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
FLAGS = flags.FLAGS
def ignore_proto_method(path, parent, children):
"""Remove all the proto inherited methods.
Args:
path: A tuple of name parts forming the attribute-lookup path to this
object. For `tf.keras.layers.Dense` path is:
("tf","keras","layers","Dense")
parent: The parent object.
    children: A list of (name, value) pairs. The attributes of the parent.
Returns:
A filtered list of children `(name, value)` pairs. With all proto methods
removed.
"""
del path
if not isinstance(parent, GeneratedProtocolMessageType):
return children
new_children = []
for (name, obj) in children:
if 'function' in str(obj.__class__):
continue
new_children.append((name, obj))
return new_children
def ignore_attrs_method(path, parent, children):
"""Remove auto generated attrs methods.
Args:
path: A tuple of name parts forming the attribute-lookup path to this
object. For `tf.keras.layers.Dense` path is:
("tf","keras","layers","Dense")
parent: The parent object.
    children: A list of (name, value) pairs. The attributes of the parent.
Returns:
A filtered list of children `(name, value)` pairs. With all attrs auto
generated methods removed (e.g., __eq__, __ge__, __gt__)
"""
del path
del parent
new_children = []
for (name, obj) in children:
if name in ['__eq__', '__ge__', '__gt__', '__le__', '__lt__', '__ne__']:
continue
new_children.append((name, obj))
return new_children
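# --- Hedged illustration (added; not part of the original script) ---
# Any extra filter passed to DocGenerator's `callbacks` follows the same
# (path, parent, children) -> children contract shown above. The helper below is
# a hypothetical example that drops underscore-prefixed attributes; it is not
# wired into main().
def example_ignore_private(path, parent, children):
  """Remove children whose names start with an underscore."""
  del path
  del parent
  return [(name, obj) for (name, obj) in children if not name.startswith('_')]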
def main(args):
if args[1:]:
raise ValueError('Unrecognized command line args', args[1:])
suppress_docs_for = []
for name in ['version', 'goo'+'gle', 'metadata_store', 'pywrap']:
submodule = getattr(mlmd, name, None)
if submodule is not None:
suppress_docs_for.append(submodule)
for obj in suppress_docs_for:
doc_controls.do_not_generate_docs(obj)
doc_generator = generate_lib.DocGenerator(
root_title='ML Metadata',
py_modules=[('mlmd', mlmd)],
base_dir=os.path.dirname(mlmd.__file__),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
private_map={},
callbacks=[
# This filters out objects not defined in the current module or its
# sub-modules.
public_api.local_definitions_filter, ignore_proto_method,
ignore_attrs_method
])
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
google/ml-metadata
|
ml_metadata/tools/documentation/build_docs.py
|
Python
|
apache-2.0
| 4,411
|
import os
import pydoc
import shutil
import sys
class DocTree:
def __init__(self, src, dest):
self.basepath = os.getcwd()
sys.path.append(os.path.join(self.basepath, src))
self.src = src
self.dest = dest
self._make_dest(dest)
self._make_docs(src)
self._move_docs(dest)
def _make_dest(self, dest):
path = os.path.join(self.basepath, dest)
        if os.path.isdir(path):
            # os.rmdir() only removes empty directories; clear any previous output tree.
            shutil.rmtree(path)
os.makedirs(path)
def _make_docs(self, src):
print('making htmls for ' + src)
pydoc.writedocs(src)
print(os.listdir())
def _move_docs(self, dest):
for f in os.listdir():
if f.endswith('.html'):
_dest = os.path.join(dest, f)
os.rename(f, _dest)
def main():
dest = 'docs'
src = 'vcx/api'
src = os.path.join(os.getcwd(), src)
DocTree(src, dest)
if __name__ == '__main__':
main()
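# --- Hedged usage note (added; not part of the original wrapper script) ---
# DocTree can also be driven directly from an interactive session; the relative
# paths below are illustrative only:
#
#     DocTree('vcx/api', 'docs')   # writes pydoc HTML for the package into ./docs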
|
Artemkaaas/indy-sdk
|
vcx/wrappers/python3/generate_docs.py
|
Python
|
apache-2.0
| 952
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.coils import CoilHeatingDxVariableSpeed
log = logging.getLogger(__name__)
class TestCoilHeatingDxVariableSpeed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_coilheatingdxvariablespeed(self):
pyidf.validation_level = ValidationLevel.error
obj = CoilHeatingDxVariableSpeed()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_indoor_air_inlet_node_name = "node|Indoor Air Inlet Node Name"
obj.indoor_air_inlet_node_name = var_indoor_air_inlet_node_name
# node
var_indoor_air_outlet_node_name = "node|Indoor Air Outlet Node Name"
obj.indoor_air_outlet_node_name = var_indoor_air_outlet_node_name
# integer
var_number_of_speeds = 5
obj.number_of_speeds = var_number_of_speeds
# integer
var_nominal_speed_level = 5
obj.nominal_speed_level = var_nominal_speed_level
# real
var_rated_heating_capacity_at_selected_nominal_speed_level = 6.6
obj.rated_heating_capacity_at_selected_nominal_speed_level = var_rated_heating_capacity_at_selected_nominal_speed_level
# real
var_rated_air_flow_rate_at_selected_nominal_speed_level = 7.7
obj.rated_air_flow_rate_at_selected_nominal_speed_level = var_rated_air_flow_rate_at_selected_nominal_speed_level
# object-list
var_energy_part_load_fraction_curve_name = "object-list|Energy Part Load Fraction Curve Name"
obj.energy_part_load_fraction_curve_name = var_energy_part_load_fraction_curve_name
# object-list
var_defrost_energy_input_ratio_function_of_temperature_curve_name = "object-list|Defrost Energy Input Ratio Function of Temperature Curve Name"
obj.defrost_energy_input_ratio_function_of_temperature_curve_name = var_defrost_energy_input_ratio_function_of_temperature_curve_name
# real
var_minimum_outdoor_drybulb_temperature_for_compressor_operation = -50.0
obj.minimum_outdoor_drybulb_temperature_for_compressor_operation = var_minimum_outdoor_drybulb_temperature_for_compressor_operation
# real
var_outdoor_drybulb_temperature_to_turn_on_compressor = 11.11
obj.outdoor_drybulb_temperature_to_turn_on_compressor = var_outdoor_drybulb_temperature_to_turn_on_compressor
# real
var_maximum_outdoor_drybulb_temperature_for_defrost_operation = 3.61
obj.maximum_outdoor_drybulb_temperature_for_defrost_operation = var_maximum_outdoor_drybulb_temperature_for_defrost_operation
# real
var_crankcase_heater_capacity = 0.0
obj.crankcase_heater_capacity = var_crankcase_heater_capacity
# real
var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation = 0.0
obj.maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation = var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation
# alpha
var_defrost_strategy = "ReverseCycle"
obj.defrost_strategy = var_defrost_strategy
# alpha
var_defrost_control = "Timed"
obj.defrost_control = var_defrost_control
# real
var_defrost_time_period_fraction = 0.0
obj.defrost_time_period_fraction = var_defrost_time_period_fraction
# real
var_resistive_defrost_heater_capacity = 0.0
obj.resistive_defrost_heater_capacity = var_resistive_defrost_heater_capacity
# real
var_speed_1_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_1_reference_unit_gross_rated_heating_capacity = var_speed_1_reference_unit_gross_rated_heating_capacity
# real
var_speed_1_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_1_reference_unit_gross_rated_heating_cop = var_speed_1_reference_unit_gross_rated_heating_cop
# real
var_speed_1_reference_unit_rated_air_flow_rate = 0.0
obj.speed_1_reference_unit_rated_air_flow_rate = var_speed_1_reference_unit_rated_air_flow_rate
# object-list
var_speed_1_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 1 Heating Capacity Function of Temperature Curve Name"
obj.speed_1_heating_capacity_function_of_temperature_curve_name = var_speed_1_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 1 Total Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_1_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 1 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_1_energy_input_ratio_function_of_temperature_curve_name = var_speed_1_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 1 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_2_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_2_reference_unit_gross_rated_heating_capacity = var_speed_2_reference_unit_gross_rated_heating_capacity
# real
var_speed_2_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_2_reference_unit_gross_rated_heating_cop = var_speed_2_reference_unit_gross_rated_heating_cop
# real
var_speed_2_reference_unit_rated_air_flow_rate = 0.0
obj.speed_2_reference_unit_rated_air_flow_rate = var_speed_2_reference_unit_rated_air_flow_rate
# object-list
var_speed_2_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 2 Heating Capacity Function of Temperature Curve Name"
obj.speed_2_heating_capacity_function_of_temperature_curve_name = var_speed_2_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 2 Total Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_2_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 2 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_2_energy_input_ratio_function_of_temperature_curve_name = var_speed_2_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 2 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_3_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_3_reference_unit_gross_rated_heating_capacity = var_speed_3_reference_unit_gross_rated_heating_capacity
# real
var_speed_3_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_3_reference_unit_gross_rated_heating_cop = var_speed_3_reference_unit_gross_rated_heating_cop
# real
var_speed_3_reference_unit_rated_air_flow_rate = 0.0
obj.speed_3_reference_unit_rated_air_flow_rate = var_speed_3_reference_unit_rated_air_flow_rate
# object-list
var_speed_3_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 3 Heating Capacity Function of Temperature Curve Name"
obj.speed_3_heating_capacity_function_of_temperature_curve_name = var_speed_3_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 3 Total Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_3_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 3 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_3_energy_input_ratio_function_of_temperature_curve_name = var_speed_3_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 3 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_4_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_4_reference_unit_gross_rated_heating_capacity = var_speed_4_reference_unit_gross_rated_heating_capacity
# real
var_speed_4_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_4_reference_unit_gross_rated_heating_cop = var_speed_4_reference_unit_gross_rated_heating_cop
# real
var_speed_4_reference_unit_rated_air_flow_rate = 0.0
obj.speed_4_reference_unit_rated_air_flow_rate = var_speed_4_reference_unit_rated_air_flow_rate
# object-list
var_speed_4_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 4 Heating Capacity Function of Temperature Curve Name"
obj.speed_4_heating_capacity_function_of_temperature_curve_name = var_speed_4_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_4_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 4 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_4_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_4_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_4_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 4 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_4_energy_input_ratio_function_of_temperature_curve_name = var_speed_4_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 4 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_5_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_5_reference_unit_gross_rated_heating_capacity = var_speed_5_reference_unit_gross_rated_heating_capacity
# real
var_speed_5_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_5_reference_unit_gross_rated_heating_cop = var_speed_5_reference_unit_gross_rated_heating_cop
# real
var_speed_5_reference_unit_rated_air_flow_rate = 0.0
obj.speed_5_reference_unit_rated_air_flow_rate = var_speed_5_reference_unit_rated_air_flow_rate
# object-list
var_speed_5_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 5 Heating Capacity Function of Temperature Curve Name"
obj.speed_5_heating_capacity_function_of_temperature_curve_name = var_speed_5_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_5_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 5 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_5_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_5_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_5_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 5 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_5_energy_input_ratio_function_of_temperature_curve_name = var_speed_5_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 5 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_6_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_6_reference_unit_gross_rated_heating_capacity = var_speed_6_reference_unit_gross_rated_heating_capacity
# real
var_speed_6_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_6_reference_unit_gross_rated_heating_cop = var_speed_6_reference_unit_gross_rated_heating_cop
# real
var_speed_6_reference_unit_rated_air_flow_rate = 0.0
obj.speed_6_reference_unit_rated_air_flow_rate = var_speed_6_reference_unit_rated_air_flow_rate
# object-list
var_speed_6_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 6 Heating Capacity Function of Temperature Curve Name"
obj.speed_6_heating_capacity_function_of_temperature_curve_name = var_speed_6_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_6_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 6 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_6_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_6_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_6_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 6 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_6_energy_input_ratio_function_of_temperature_curve_name = var_speed_6_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 6 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_7_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_7_reference_unit_gross_rated_heating_capacity = var_speed_7_reference_unit_gross_rated_heating_capacity
# real
var_speed_7_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_7_reference_unit_gross_rated_heating_cop = var_speed_7_reference_unit_gross_rated_heating_cop
# real
var_speed_7_reference_unit_rated_air_flow_rate = 0.0
obj.speed_7_reference_unit_rated_air_flow_rate = var_speed_7_reference_unit_rated_air_flow_rate
# object-list
var_speed_7_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 7 Heating Capacity Function of Temperature Curve Name"
obj.speed_7_heating_capacity_function_of_temperature_curve_name = var_speed_7_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_7_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 7 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_7_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_7_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_7_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 7 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_7_energy_input_ratio_function_of_temperature_curve_name = var_speed_7_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 7 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_8_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_8_reference_unit_gross_rated_heating_capacity = var_speed_8_reference_unit_gross_rated_heating_capacity
# real
var_speed_8_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_8_reference_unit_gross_rated_heating_cop = var_speed_8_reference_unit_gross_rated_heating_cop
# real
var_speed_8_reference_unit_rated_air_flow_rate = 0.0
obj.speed_8_reference_unit_rated_air_flow_rate = var_speed_8_reference_unit_rated_air_flow_rate
# object-list
var_speed_8_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 8 Heating Capacity Function of Temperature Curve Name"
obj.speed_8_heating_capacity_function_of_temperature_curve_name = var_speed_8_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_8_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 8 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_8_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_8_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_8_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 8 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_8_energy_input_ratio_function_of_temperature_curve_name = var_speed_8_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 8 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_9_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_9_reference_unit_gross_rated_heating_capacity = var_speed_9_reference_unit_gross_rated_heating_capacity
# real
var_speed_9_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_9_reference_unit_gross_rated_heating_cop = var_speed_9_reference_unit_gross_rated_heating_cop
# real
var_speed_9_reference_unit_rated_air_flow_rate = 0.0
obj.speed_9_reference_unit_rated_air_flow_rate = var_speed_9_reference_unit_rated_air_flow_rate
# object-list
var_speed_9_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 9 Heating Capacity Function of Temperature Curve Name"
obj.speed_9_heating_capacity_function_of_temperature_curve_name = var_speed_9_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_9_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 9 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_9_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_9_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_9_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 9 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_9_energy_input_ratio_function_of_temperature_curve_name = var_speed_9_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 9 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name
# real
var_speed_10_reference_unit_gross_rated_heating_capacity = 0.0
obj.speed_10_reference_unit_gross_rated_heating_capacity = var_speed_10_reference_unit_gross_rated_heating_capacity
# real
var_speed_10_reference_unit_gross_rated_heating_cop = 0.0
obj.speed_10_reference_unit_gross_rated_heating_cop = var_speed_10_reference_unit_gross_rated_heating_cop
# real
var_speed_10_reference_unit_rated_air_flow_rate = 0.0
obj.speed_10_reference_unit_rated_air_flow_rate = var_speed_10_reference_unit_rated_air_flow_rate
# object-list
var_speed_10_heating_capacity_function_of_temperature_curve_name = "object-list|Speed 10 Heating Capacity Function of Temperature Curve Name"
obj.speed_10_heating_capacity_function_of_temperature_curve_name = var_speed_10_heating_capacity_function_of_temperature_curve_name
# object-list
var_speed_10_heating_capacity_function_of_air_flow_fraction_curve_name = "object-list|Speed 10 Heating Capacity Function of Air Flow Fraction Curve Name"
obj.speed_10_heating_capacity_function_of_air_flow_fraction_curve_name = var_speed_10_heating_capacity_function_of_air_flow_fraction_curve_name
# object-list
var_speed_10_energy_input_ratio_function_of_temperature_curve_name = "object-list|Speed 10 Energy Input Ratio Function of Temperature Curve Name"
obj.speed_10_energy_input_ratio_function_of_temperature_curve_name = var_speed_10_energy_input_ratio_function_of_temperature_curve_name
# object-list
var_speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name = "object-list|Speed 10 Energy Input Ratio Function of Air Flow Fraction Curve Name"
obj.speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name = var_speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].name, var_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].indoor_air_inlet_node_name, var_indoor_air_inlet_node_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].indoor_air_outlet_node_name, var_indoor_air_outlet_node_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].number_of_speeds, var_number_of_speeds)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].nominal_speed_level, var_nominal_speed_level)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].rated_heating_capacity_at_selected_nominal_speed_level, var_rated_heating_capacity_at_selected_nominal_speed_level)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].rated_air_flow_rate_at_selected_nominal_speed_level, var_rated_air_flow_rate_at_selected_nominal_speed_level)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].energy_part_load_fraction_curve_name, var_energy_part_load_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].defrost_energy_input_ratio_function_of_temperature_curve_name, var_defrost_energy_input_ratio_function_of_temperature_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].minimum_outdoor_drybulb_temperature_for_compressor_operation, var_minimum_outdoor_drybulb_temperature_for_compressor_operation)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].outdoor_drybulb_temperature_to_turn_on_compressor, var_outdoor_drybulb_temperature_to_turn_on_compressor)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].maximum_outdoor_drybulb_temperature_for_defrost_operation, var_maximum_outdoor_drybulb_temperature_for_defrost_operation)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].crankcase_heater_capacity, var_crankcase_heater_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation, var_maximum_outdoor_drybulb_temperature_for_crankcase_heater_operation)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].defrost_strategy, var_defrost_strategy)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].defrost_control, var_defrost_control)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].defrost_time_period_fraction, var_defrost_time_period_fraction)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].resistive_defrost_heater_capacity, var_resistive_defrost_heater_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_reference_unit_gross_rated_heating_capacity, var_speed_1_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_reference_unit_gross_rated_heating_cop, var_speed_1_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_reference_unit_rated_air_flow_rate, var_speed_1_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_heating_capacity_function_of_temperature_curve_name, var_speed_1_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_1_total_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_energy_input_ratio_function_of_temperature_curve_name, var_speed_1_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_1_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_reference_unit_gross_rated_heating_capacity, var_speed_2_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_reference_unit_gross_rated_heating_cop, var_speed_2_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_reference_unit_rated_air_flow_rate, var_speed_2_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_heating_capacity_function_of_temperature_curve_name, var_speed_2_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_2_total_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_energy_input_ratio_function_of_temperature_curve_name, var_speed_2_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_2_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_reference_unit_gross_rated_heating_capacity, var_speed_3_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_reference_unit_gross_rated_heating_cop, var_speed_3_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_reference_unit_rated_air_flow_rate, var_speed_3_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_heating_capacity_function_of_temperature_curve_name, var_speed_3_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_3_total_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_energy_input_ratio_function_of_temperature_curve_name, var_speed_3_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_3_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_reference_unit_gross_rated_heating_capacity, var_speed_4_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_reference_unit_gross_rated_heating_cop, var_speed_4_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_reference_unit_rated_air_flow_rate, var_speed_4_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_heating_capacity_function_of_temperature_curve_name, var_speed_4_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_4_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_energy_input_ratio_function_of_temperature_curve_name, var_speed_4_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_4_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_reference_unit_gross_rated_heating_capacity, var_speed_5_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_reference_unit_gross_rated_heating_cop, var_speed_5_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_reference_unit_rated_air_flow_rate, var_speed_5_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_heating_capacity_function_of_temperature_curve_name, var_speed_5_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_5_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_energy_input_ratio_function_of_temperature_curve_name, var_speed_5_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_5_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_reference_unit_gross_rated_heating_capacity, var_speed_6_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_reference_unit_gross_rated_heating_cop, var_speed_6_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_reference_unit_rated_air_flow_rate, var_speed_6_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_heating_capacity_function_of_temperature_curve_name, var_speed_6_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_6_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_energy_input_ratio_function_of_temperature_curve_name, var_speed_6_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_6_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_reference_unit_gross_rated_heating_capacity, var_speed_7_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_reference_unit_gross_rated_heating_cop, var_speed_7_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_reference_unit_rated_air_flow_rate, var_speed_7_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_heating_capacity_function_of_temperature_curve_name, var_speed_7_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_7_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_energy_input_ratio_function_of_temperature_curve_name, var_speed_7_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_7_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_reference_unit_gross_rated_heating_capacity, var_speed_8_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_reference_unit_gross_rated_heating_cop, var_speed_8_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_reference_unit_rated_air_flow_rate, var_speed_8_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_heating_capacity_function_of_temperature_curve_name, var_speed_8_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_8_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_energy_input_ratio_function_of_temperature_curve_name, var_speed_8_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_8_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_reference_unit_gross_rated_heating_capacity, var_speed_9_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_reference_unit_gross_rated_heating_cop, var_speed_9_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_reference_unit_rated_air_flow_rate, var_speed_9_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_heating_capacity_function_of_temperature_curve_name, var_speed_9_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_9_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_energy_input_ratio_function_of_temperature_curve_name, var_speed_9_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_9_energy_input_ratio_function_of_air_flow_fraction_curve_name)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_reference_unit_gross_rated_heating_capacity, var_speed_10_reference_unit_gross_rated_heating_capacity)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_reference_unit_gross_rated_heating_cop, var_speed_10_reference_unit_gross_rated_heating_cop)
self.assertAlmostEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_reference_unit_rated_air_flow_rate, var_speed_10_reference_unit_rated_air_flow_rate)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_heating_capacity_function_of_temperature_curve_name, var_speed_10_heating_capacity_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_heating_capacity_function_of_air_flow_fraction_curve_name, var_speed_10_heating_capacity_function_of_air_flow_fraction_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_energy_input_ratio_function_of_temperature_curve_name, var_speed_10_energy_input_ratio_function_of_temperature_curve_name)
self.assertEqual(idf2.coilheatingdxvariablespeeds[0].speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name, var_speed_10_energy_input_ratio_function_of_air_flow_fraction_curve_name)
|
rbuffat/pyidf
|
tests/test_coilheatingdxvariablespeed.py
|
Python
|
apache-2.0
| 38,214
|
# coding: utf-8
# C++
# http://www.e-olimp.com/articles/21
# http://hardfire.ru/Dij_sparse
# Week 5
# TODO: listen to the lecture + the lecture about heaps
"""
NAIVE:
Initialize:
- X = [S] [vert. processed so far]
- A[S] = 0 [computed shortest path distances]
[- B[S] = []] [path - help only]
- while X != V:
- among all edges (v, w) in E, with v in X, w not in X
- pick the one that minimize A[v] + l_vw [call it (v*, w*)]
- add w* to X
- set A[w*] := A[v*] + l_v*w*
[- set B[w*] := B[v*] or (v*, w*)]
WITH HEAP:
"""
|
zaqwes8811/my-courses
|
stanford1/py/shortest_path.py
|
Python
|
apache-2.0
| 533
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.conf import settings
from datetime import datetime
import uuid
User = settings.AUTH_USER_MODEL
def generate_new_uuid():
return str(uuid.uuid4())
class behaviourExperimentType_model(models.Model):
# BE CAREFUL About migrations that add unique fields !!!!!!!!!!!!! e.g. UUID
    # https://docs.djangoproject.com/en/1.9/howto/writing-migrations/#migrations-that-add-unique-fields
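    # Hedged sketch of the pattern described in that howto (comment only, not a
    # migration in this project): first add the new column as nullable and
    # non-unique, then populate it in a separate data migration, e.g.
    #     for row in behaviourExperimentType_model.objects.all():
    #         row.uuid = generate_new_uuid()
    #         row.save(update_fields=['uuid'])
    # and only in a final migration alter the field to be unique / primary key.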
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
about = models.CharField(max_length=60, blank=True)
public = models.BooleanField (default = False, blank=True)
public_set_date = models.DateTimeField (default=datetime.now)
description = models.TextField(max_length=1000, blank=True)
created = models.DateTimeField(auto_now_add=True)
creator = models.ForeignKey(User, related_name='behaviouralExperiment_own')
users_with_access = models.ManyToManyField (User, related_name='behaviouralExperiment_accessable', through = 'shareBehaviouralExperiment')
experimentDefinition = models.ForeignKey("experimentType_model")
environmentDefinition = models.ForeignKey("environmentType_model")
class Meta:
#unique_together = ("creator","experimentDefinition","environmentDefinition")
ordering = ["-created"]
def __unicode__(self):
return "id: %s" % (self.uuid, )
def save(self, *args, **kwargs):
if self.uuid is not None:
try:
orig = behaviourExperimentType_model.objects.get(uuid=self.uuid)
if orig.public != self.public:
self.public_set_date = datetime.now()
except: #If it is the first time that is being created then .get() fails and throws an exception
pass
super(behaviourExperimentType_model, self).save(*args, **kwargs)
#### ENVIRONMENT ##########
class environmentType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
wormStatus = models.ForeignKey("wormStatusType_model")
plateConfiguration = models.ForeignKey("plateConfigurationType_model")
obstacle = models.ManyToManyField("obstacleLocationType_model",blank=True)
crowding = models.ForeignKey("crowdingType_model")
envTemp = models.FloatField(('Environmental Temperature'), default=20)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class wormStatusType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
xCoordFromPlateCentre = models.FloatField(blank=False)
yCoorDFromPlateCentre = models.FloatField(blank=False)
angleRelativeXaxis = models.FloatField(validators=[MinValueValidator(0),MaxValueValidator(6.28318)],blank=False)
wormData = models.ForeignKey("wormDataType_model")
#class Meta:
#unique_together = ("xCoordFromPlateCentre","yCoorDFromPlateCentre","angleRelativeXaxis","wormData")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class wormDataType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
MALE = 'M'
FEMALEHERMAPHRODITES = 'FH'
GENDERTYPE = (
(MALE,"Male"),
(FEMALEHERMAPHRODITES,"Female Hermaphrodites"),
)
gender = models.CharField(max_length=60, blank=False,choices=GENDERTYPE, default=FEMALEHERMAPHRODITES)
age = models.PositiveIntegerField(blank=False)
stageOfLifeCycle = models.PositiveIntegerField(blank=False,validators=[MinValueValidator(1),MaxValueValidator(4)])
timeOffFood = models.PositiveIntegerField(blank=False)
#class Meta:
#unique_together = ("gender","age","stageOfLifeCycle","timeOffFood")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class crowdingType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#These parameters wormsDistributionInPlate and wormsInPlate are fo
wormsDistributionInPlate = models.CharField(max_length=60, blank=True)
wormsInPlate = models.PositiveIntegerField(validators=[MinValueValidator(1)],default=1,blank=False,)
#class Meta:
#unique_together = ("wormsDistributionInPlate","wormsInPlate")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class obstacleLocationType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
xCoordFromPlateCentre = models.FloatField(blank=False)
yCoorDFromPlateCentre = models.FloatField(blank=False)
Stiffness = models.FloatField(validators=[MinValueValidator(0)],blank=False)
CYLINDER = 'CY'
CUBE = 'CU'
HEXAGON = 'HE'
SHAPETYPE = (
(CYLINDER,"cylinder"),
(CUBE,"cube"),
(HEXAGON,"hexagon"),
)
shape = models.CharField(max_length=60, blank=False,choices=SHAPETYPE, default=CYLINDER)
Cylinder = models.ForeignKey("CylinderType_model",null=True, blank=True)
Cube = models.ForeignKey("CubeType_model",null=True, blank=True)
Hexagon = models.ForeignKey("HexagonType_model",null=True, blank=True)
#class Meta:
#unique_together = ("shape","xCoordFromPlateCentre","yCoorDFromPlateCentre","angleRelativeXaxis","Stiffness","Cylinder","Cube","Hexagon","Hair")
def __unicode__(self):
return "id: %s" % (self.uuid,)
class plateConfigurationType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
WATER = 'W'
GELATIN = 'G'
AGAR = 'A'
BOTTOMMATERIALTYPE = (
(WATER,"water"),
(GELATIN,"gelatin"),
(AGAR,"agar"),
)
lid = models.BooleanField(blank=False,default=False)
bottomMaterial = models.CharField (max_length=60, blank=False,choices=BOTTOMMATERIALTYPE, default=AGAR)
dryness = models.FloatField(blank=False,validators=[MinValueValidator(0)])
CYLINDER = 'CY'
CUBE = 'CU'
HEXAGON = 'HE'
SHAPETYPE = (
(CYLINDER,"cylinder"),
(CUBE,"cube"),
(HEXAGON,"hexagon"),
)
shape = models.CharField(max_length=60, blank=False,choices=SHAPETYPE, default=CYLINDER)
Cylinder = models.ForeignKey("CylinderType_model",null=True, blank=True)
Cube = models.ForeignKey("CubeType_model",null=True, blank=True)
Hexagon = models.ForeignKey("HexagonType_model",null=True, blank=True)
#class Meta:
#unique_together = ("lid","bottomMaterial","dryness","shape","Cylinder","Cube","Hexagon")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class CubeType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
depth = models.FloatField(validators=[MinValueValidator(0)],blank=False)
side1Length = models.FloatField(validators=[MinValueValidator(0)],blank=False)
side2Length = models.FloatField(validators=[MinValueValidator(0)],blank=False)
#class Meta:
#unique_together = ("depth", "side1Length", "side2Length")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class CylinderType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
length = models.FloatField(validators=[MinValueValidator(0)], blank=False)
radius = models.FloatField(validators=[MinValueValidator(0)], blank=False)
#class Meta:
#unique_together = ("length", "radius")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class HexagonType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
depth = models.FloatField(validators=[MinValueValidator(0)],blank=False)
sideLength = models.FloatField(validators=[MinValueValidator(0)],blank=False)
#class Meta:
#unique_together = ("depth", "sideLength")
def __unicode__(self):
return "id: %s" % (self.uuid, )
##### EXPERIMENT ####
class experimentType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#It is possible to have different elements of interaction
description = models.TextField(max_length=1000, blank=True)
experimentDuration = models.PositiveIntegerField(blank=False, default=10000)
# The following ManyToManyField relations do not have an explicit definition table since we do not see need to associate extra data to the relationship
# https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.ManyToManyField
#
#GE: Check how can we ensure that at least one of them is defined
#
interactionAtSpecificTime = models.ManyToManyField("interactionAtSpecificTimeType_model",blank=True, null=True )
interactionFromt0tot1 = models.ManyToManyField("interactionFromt0tot1Type_model",blank=True, null=True)
experimentWideConf = models.ManyToManyField("experimentWideConfType_model",blank=True, null=True)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
## Experiments at specific time
class interactionAtSpecificTimeType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
# Only one of them at each object
#name = models.CharField(max_length=60, blank=True)
description = models.TextField(max_length=1000, blank=True, default='No description provided')
eventTime = models.FloatField(blank=False, default=100)
MECHANOSENSATION = 'MS'
CHEMOTAXIS ='CT'
TERMOTAXIS ='TT'
GALVANOTAXIS = 'GT'
PHOTOTAXIS = 'PT'
EXPERIMENTCATEGORY = (
(MECHANOSENSATION,"mechanosensation"),
(CHEMOTAXIS,"chemotaxis"),
(TERMOTAXIS,"termotaxis"),
(GALVANOTAXIS,"galvanotaxis"),
(PHOTOTAXIS,"phototaxis"),
)
experimentCategory = models.CharField(max_length=60, blank=False,choices=EXPERIMENTCATEGORY, default=MECHANOSENSATION)
#GE: Revise to force the user to fill one of the followings
mechanosensation = models.ForeignKey("mechanosensationTimeEventType_model", blank=True, null=True)
chemotaxis = models.ForeignKey("chemotaxisTimeEventType_model", blank=True, null=True)
termotaxis = models.ForeignKey("termotaxisTimeEventType_model", blank=True, null=True)
galvanotaxis = models.ForeignKey("galvanotaxisTimeEventType_model", blank=True, null=True)
phototaxis = models.ForeignKey("phototaxisTimeEventType_model", blank=True, null=True)
#name = models.CharField(max_length=60, blank=True)
#class Meta:
#unique_together = ("eventTime","mechanosensation","chemotaxis","termotaxis","galvanotaxis", "phototaxis")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class mechanosensationTimeEventType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
PLATETAP = 'PT'
DIRECTWORMTOUCH = 'DWT'
INTERACTIONOPTIONS = (
(PLATETAP,"plateTap"),
(DIRECTWORMTOUCH,"directWormTouch"),
)
interactionType = models.CharField(max_length=60, blank=False,choices=INTERACTIONOPTIONS, default=DIRECTWORMTOUCH)
directTouch = models.ForeignKey("directTouchType_model", blank=True, null=True)
plateTap = models.ForeignKey("plateTapType_model", blank=True, null=True)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class directTouchType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
EYEBROW = 'EB'
VONFREYHAIR = 'VFH'
PLATINIUMWIRE = 'PW'
TOUCHINSTRUMENTTYPE = (
(EYEBROW,"Eyebrow"),
(VONFREYHAIR,"Von Frey hair"),
(PLATINIUMWIRE,"Platinium wire"),
)
directTouchInstrument = models.CharField(max_length=60, blank=False, choices=TOUCHINSTRUMENTTYPE, default=EYEBROW)
touchDistance = models.FloatField(blank=False, validators=[MinValueValidator(0),MaxValueValidator(1.0)])
touchAngle = models.FloatField(blank=False, validators=[MinValueValidator(0),MaxValueValidator(360)])
appliedForce = models.FloatField(blank=False,validators=[MinValueValidator(0),
MaxValueValidator(100)])
#class Meta:
#unique_together = ("directTouchInstrument", "appliedForce","touchDistance","touchAngle")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class plateTapType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
appliedForce = models.FloatField(blank=False,validators=[MinValueValidator(0),
MaxValueValidator(100)]) #In the GUI the max is 1 to reflect 1mN, I'll leave it to 100 to avoid breaking if we make slight changes to support a bit more
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemotaxisTimeEventType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
DYNAMICDROPTEST = 'DDT'
CHEMOTAXISOPTIONS = (
(DYNAMICDROPTEST,"Dynamic drop test"),
)
chemotaxisType = models.CharField(max_length=60, blank=False,choices=CHEMOTAXISOPTIONS, default=DYNAMICDROPTEST)
dynamicDropTestConf = models.ForeignKey("dynamicDropTestType_model", blank=True, null=True)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class staticPointSourceType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
dropQuantity = models.FloatField(blank=False,)
chemical = models.ForeignKey("chemicalType_model",blank=False)
chemicalConcentration = models.FloatField(blank=False)
xCoordFromPlateCentre = models.FloatField(blank=False)
yCoordFromPlateCentre = models.FloatField(blank=False)
#class Meta:
#unique_together = ("dropQuantity","chemical","chemicalConcentration","xCoordFromPlateCentre","yCoordFromPlateCentre")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class dynamicDropTestType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
dropQuantity = models.FloatField(blank=False,)
chemical = models.ForeignKey("chemicalType_model",blank=False)
chemicalConcentration = models.FloatField(blank=False)
xCoordFromPlateCentre = models.FloatField(blank=False)
yCoordFromPlateCentre = models.FloatField(blank=False)
#class Meta:
#unique_together = ("dropQuantity","chemical","chemicalConcentration","xCoordFromPlateCentre","yCoordFromPlateCentre")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemicalType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
'''
From NeuronsIDtable-NTU-EditV3.xlsx (Si elegans GDrive)
lysine
cAMP
biotin
Na+
Cl-
heavy metals
copper
cadmium
SDS - Sodium dodecyl sulfate
quinine
'''
NONE = 'None'
NACL = 'NaCl'
BIOTIN = 'biotin'
ETHANOL = 'ethanol'
BUTANONE = 'butanone'
COPPERSULPHATE = 'CuSO4'
SODIUMDODECYLSULFATE = 'SDS - Sodium dodecyl sulfate'
QUININE = 'quinine' # C20H24N2O2
BENZALDEHYDE='benzaldehyde'
DIACETYL='diacetyl'
SODIUMAZIDE='NaN3'
CHEMICALS = (
(NONE, 'None'),
(NACL, "Sodium chloride"),
(BIOTIN, "Biotin"),
(ETHANOL, "Ethanol"),
(BUTANONE, "Butanone"),
(COPPERSULPHATE, "Copper sulphate"),
(SODIUMDODECYLSULFATE, "Sodium dodecyl sulfate"),
(QUININE, "Quinine"),
(BENZALDEHYDE, "Benzaldehyde"),
(DIACETYL, "Diacetyl"),
(SODIUMAZIDE, "Sodium azide"),
)
diffusionCoefficient = models.FloatField (blank=False, default=0)
chemical_name = models.CharField(max_length=60, blank=False, choices=CHEMICALS, default=NONE)
isVolatile = models.BooleanField(blank=False, default=False)
    #GE: How can I add validation so that, if the chemical is not volatile, this field must be left empty?
volatilitySpeed = models.FloatField(validators=[MinValueValidator(0)],blank=True,null=True)
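    # Hedged answer to the question above (illustrative comment, not implemented
    # here): override clean() and reject inconsistent input, e.g.
    #     from django.core.exceptions import ValidationError
    #     def clean(self):
    #         if not self.isVolatile and self.volatilitySpeed is not None:
    #             raise ValidationError("volatilitySpeed must be empty for non-volatile chemicals")
    # clean() runs automatically when the model is edited through a ModelForm.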
#class Meta:
#unique_together = ("isVolatile","volatilitySpeed","chemical_name")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class termotaxisTimeEventType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid )
class pointSourceHeatAvoidanceType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
temperature = models.FloatField(blank=False) #Understood as Celsius
#We consider worm size as 1
heatPointDistance = models.FloatField(blank=False, validators=[MinValueValidator(0),MaxValueValidator(1)])
# heatPointAngle we are not considering it. We will consider that heat is exposed perpendicular to the worm and in a small distance to the worm
# heatPointAngle = models.FloatField(blank=False, validators=[MinValueValidator(0),MaxValueValidator(6.28318)])
#class Meta:
#unique_together = ("temperature","heatPointDistance","heatPointAngle")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class galvanotaxisTimeEventType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class phototaxisTimeEventType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class electricShockType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
amplitude = models.FloatField (blank=False)
shockDuration = models.PositiveIntegerField (blank = False)
shockFrequency = models.FloatField (blank = False) # Provide in shocks / sec
    #class Meta:
#unique_together = ("waveLength","intensity","lightingPointDistance","lightingPointAngle")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class pointSourceLightType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
waveLength = models.FloatField(blank=False, validators=[MinValueValidator(0), MaxValueValidator(255)])
#Ask Kofi Categorical vs Wavelength in 10nm .- 1um?
intensity = models.FloatField(blank=False, validators=[MinValueValidator(0), MaxValueValidator(255)])
#Ask Kofi
#The intensity values used by most neuroscientist range from -3 to 0; (log I/20 mW). In my simulations I have been using values from 0 to 255.
'''The values below refer to the point of the worm, considering the worm as a cylinder
Worm's size is considered as 1. Therefore, the max value of lightingPointDistance is 1'''
lightingPointDistance = models.FloatField(blank=False, validators=[MinValueValidator(0), MaxValueValidator(1)])
#lightingPointAngle we are not considering it. We will consider that light is exposed perpendicular to the plate
#lightingPointAngle = models.FloatField(blank=False, validators=[MinValueValidator(0), MaxValueValidator(6.28318)])
'''lightBeamRadius is to have width value to calculate which neurons are lighted, if width=1 all worm is covered'''
lightBeamRadius = models.FloatField(blank=False, default=0.1, validators=[MinValueValidator(0), MaxValueValidator(1)])
#class Meta:
#unique_together = ("waveLength","intensity","lightingPointDistance","lightingPointAngle")
def __unicode__(self):
return "id: %s" % (self.uuid, )
## Experiments from t0 to t1
class interactionFromt0tot1Type_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True, default='No description provided')
eventStartTime = models.FloatField(blank=False, default=100)
eventStopTime = models.FloatField(blank=False, default=1000)
MECHANOSENSATION = 'MS'
CHEMOTAXIS ='CT'
TERMOTAXIS ='TT'
GALVANOTAXIS = 'GT'
PHOTOTAXIS = 'PT'
EXPERIMENTCATEGORY = (
(MECHANOSENSATION,"mechanosensation"),
(CHEMOTAXIS,"chemotaxis"),
(TERMOTAXIS,"termotaxis"),
(GALVANOTAXIS,"galvanotaxis"),
(PHOTOTAXIS,"phototaxis"),
)
experimentCategory = models.CharField(max_length=60, blank=False,choices=EXPERIMENTCATEGORY, default=MECHANOSENSATION)
#GE: Revise to force the user to fill one of the followings
mechanosensation = models.ForeignKey("mechanosensationTimet0tot1Type_model", blank=True, null=True)
chemotaxis = models.ForeignKey("chemotaxisTimet0tot1Type_model", blank=True, null=True)
termotaxis = models.ForeignKey("termotaxisTimet0tot1Type_model", blank=True, null=True)
galvanotaxis = models.ForeignKey("galvanotaxisTimet0tot1Type_model", blank=True, null=True)
phototaxis = models.ForeignKey("phototaxisTimet0tot1Type_model", blank=True, null=True)
#class Meta:
#unique_together = ("eventStartTime","eventStopTime","mechanosensation","chemotaxis", "termotaxis","galvanotaxis", "phototaxis")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class mechanosensationTimet0tot1Type_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class termotaxisTimet0tot1Type_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
TEMPERATURECHANGEINTIME = 'TC'
POINTSOURCEHEATAVOIDANCE = 'PS'
TERMOTAXISOPTIONS = (
(TEMPERATURECHANGEINTIME,"temperatureChangeInTime"),
(POINTSOURCEHEATAVOIDANCE,"pointsourceheatavoidance"),
)
termotaxisType = models.CharField(max_length=60, blank=False,choices=TERMOTAXISOPTIONS, default=TEMPERATURECHANGEINTIME)
temperatureChangeInTime = models.ForeignKey("temperatureChangeInTimeType_model",blank=True, null=True)
pointSourceHeatAvoidance = models.ForeignKey("pointSourceHeatAvoidanceType_model",blank=True, null=True)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class temperatureChangeInTimeType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
initialTemperature = models.FloatField(blank=False,validators=[MinValueValidator(0)])
finalTemperature = models.FloatField(blank=False,validators=[MinValueValidator(0)])
#class Meta:
#unique_together = ("initialTemperature","finalTemperature")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemotaxisTimet0tot1Type_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid )
class galvanotaxisTimet0tot1Type_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True, default='')
ELECTRICSHOCK = 'ES'
GALVANOTAXISOPTIONS = (
(ELECTRICSHOCK,"Electric shocks"),
)
galvanotaxisType = models.CharField(max_length=60, blank=False,choices=GALVANOTAXISOPTIONS, default=ELECTRICSHOCK)
electricShockConf = models.ForeignKey("electricShockType_model", blank=True, null=True)
def __unicode__(self):
return "id: %s" % (self.uuid )
class phototaxisTimet0tot1Type_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
POINTSOURCELIGHT = 'PSL'
PHOTOTAXISOPTIONS = (
(POINTSOURCELIGHT,"pointsourcelight"),
)
phototaxisType = models.CharField(max_length=60, blank=False,choices=PHOTOTAXISOPTIONS, default=POINTSOURCELIGHT)
pointSourceLightConf = models.ForeignKey("pointSourceLightType_model", blank=True, null=True)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
# Experiment wide experiment type
class experimentWideConfType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True, default='No description provided')
MECHANOSENSATION ='MS'
CHEMOTAXIS = 'CT'
TERMOTAXIS = 'TT'
GALVANOTAXIS = 'GT'
PHOTOTAXIS = 'PT'
EXPERIMENTCATEGORY = (
(MECHANOSENSATION,"mechanosensation"),
(CHEMOTAXIS,"chemotaxis"),
(TERMOTAXIS,"termotaxis"),
(GALVANOTAXIS,"galvanotaxis"),
(PHOTOTAXIS,"phototaxis"),
)
experimentCategory = models.CharField(max_length=60, blank=False,choices=EXPERIMENTCATEGORY, default=MECHANOSENSATION)
#GE: Revise to force the user to fill one of the followings
mechanosensation = models.ForeignKey("mechanosensationExpWideType_model", blank=True, null=True)
chemotaxis = models.ForeignKey("chemotaxisExperimentWideType_model", blank=True, null=True)
termotaxis = models.ForeignKey("termotaxisExperimentWideType_model", blank=True, null=True)
galvanotaxis = models.ForeignKey("galvanotaxisExperimentWideType_model", blank=True, null=True)
phototaxis = models.ForeignKey("phototaxisExperimentWideType_model", blank=True, null=True)
#class Meta:
#unique_together = ("mechanosensation","chemotaxis","termotaxis","galvanotaxis","phototaxis")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class mechanosensationExpWideType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class termotaxisExperimentWideType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
LINEARTHERMALGRADIENT = 'LT'
TERMOTAXIS = (
(LINEARTHERMALGRADIENT,"linearThermalGradient"),
)
termotaxisType = models.CharField(max_length=60, blank=False,choices=TERMOTAXIS, default=LINEARTHERMALGRADIENT)
linearThermalGradient = models.ForeignKey("linearThermalGradientType_model",blank=True, null=True)
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class linearThermalGradientType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
temperatureRightHorizonal = models.FloatField(blank=False)
temperatureLeftHorizontal = models.FloatField(blank=False)
#class Meta:
#unique_together = ("temperatureRightHorizonal","temperatureLeftHorizontal")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemotaxisExperimentWideType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
description = models.TextField(max_length=1000, blank=True)
STATICPOINTSOURCE = 'SPS'
CHEMICALQUADRANTS1 = 'CQ1'
CHEMICALQUADRANTS2 = 'CQ2'
CHEMICALQUADRANTS4 = 'CQ4'
OSMOTICRING = 'OR'
CHEMICALCATEGORY = (
(STATICPOINTSOURCE,"Static point source"),
(CHEMICALQUADRANTS1,"chemicalquadrants1"),
(CHEMICALQUADRANTS2,"chemicalquadrants2"),
(CHEMICALQUADRANTS4,"chemicalquadrants4"),
(OSMOTICRING,"osmoticring"),
)
chemicalCategory = models.CharField(max_length=60, blank=False,choices=CHEMICALCATEGORY, default=CHEMICALQUADRANTS1)
staticPointSourceConf = models.ForeignKey("staticPointSourceType_model", blank=True, null=True)
chemotaxisQuadrants1 = models.ForeignKey("chemotaxisQuadrantsType_1_model", blank=True, null=True)
chemotaxisQuadrants2 = models.ForeignKey("chemotaxisQuadrantsType_2_model", blank=True, null=True)
chemotaxisQuadrants4 = models.ForeignKey("chemotaxisQuadrantsType_4_model", blank=True, null=True)
osmoticRing = models.ForeignKey("osmoticRingType_model", blank=True, null=True)
#class Meta:
#unique_together = ("chemicalCategory","chemotaxisQuadrants","osmoticRing")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemotaxisQuadrantsType_1_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
quadrantChemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_1_1', blank=False)
quadrantChemicalConcentration = models.FloatField(blank=False) #Provide in 1 mol / l = Molar = 1M
#class Meta:
#unique_together = ("quadrantsPlacement","numberOfQuadrants","quadrantChemical","quadrantBarrierChemical","quadrantChemicalConcentration","quadrantBarrierChemicalConcentration" )
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemotaxisQuadrantsType_2_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
quadrant_1_Chemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_2_1', blank=False)
quadrant_2_Chemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_2_2', blank=False)
quadrant_1_ChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
quadrant_2_ChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
quadrantBarrierChemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_2_Barrier', blank=False)
quadrantBarrierChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
#class Meta:
#unique_together = ("quadrantsPlacement","numberOfQuadrants","quadrantChemical","quadrantBarrierChemical","quadrantChemicalConcentration","quadrantBarrierChemicalConcentration" )
def __unicode__(self):
return "id: %s" % (self.uuid, )
class chemotaxisQuadrantsType_4_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
quadrant_1_Chemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_4_1', blank=False)
quadrant_2_Chemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_4_2', blank=False)
quadrant_3_Chemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_4_3', blank=False)
quadrant_4_Chemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_4_4', blank=False)
quadrant_1_ChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
quadrant_2_ChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
quadrant_3_ChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
quadrant_4_ChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
quadrantBarrierChemical = models.ForeignKey("chemicalType_model",related_name='access_quadrant_4_Barrier', blank=False)
quadrantBarrierChemicalConcentration = models.FloatField(blank=False)#Provide in 1 mol / l = Molar = 1M
#class Meta:
#unique_together = ("quadrantsPlacement","numberOfQuadrants","quadrantChemical","quadrantBarrierChemical","quadrantChemicalConcentration","quadrantBarrierChemicalConcentration" )
def __unicode__(self):
return "id: %s" % (self.uuid, )
class osmoticRingType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
ringChemical = models.ForeignKey("chemicalType_model", blank=False)
chemicalConcentration = models.FloatField(blank=False) #Provide in 1 mol / l = Molar = 1M
internalRadius = models.FloatField(blank=False,validators=[MinValueValidator(0)])
externalRadius = models.FloatField(blank=False,validators=[MinValueValidator(0)])
#class Meta:
#unique_together = ("ringChemical","chemicalConcentration","externalRadius","internalRadius")
def __unicode__(self):
return "id: %s" % (self.uuid, )
class galvanotaxisExperimentWideType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class phototaxisExperimentWideType_model(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
#Add a type selector if an experiment type of this is added
#Add a foreign key to the defined experiment model
#class Meta:
#unique_together = ()
def __unicode__(self):
return "id: %s" % (self.uuid, )
class shareBehaviouralExperiment(models.Model):
uuid = models.CharField(('Unique Identifier'), max_length=36, primary_key=True, default=generate_new_uuid)
user = models.ForeignKey(User)
behaviouralExperiment = models.ForeignKey (behaviourExperimentType_model)
shared_date = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ("user","behaviouralExperiment")
def __unicode__(self):
return "id: %s_%s" % (self.user,self.behaviouralExperiment )
|
Si-elegans/Web-based_GUI_Tools
|
behaviouralExperimentDefinition/models.py
|
Python
|
apache-2.0
| 36,673
|
# Task 7. Variant 6
# The computer picks the name of one of the seven Russian cities that have an operating metro system, and the player has to guess it.
#Борщёва В.О
#28.03.2016
import random
subways=('Москва','Санкт-Петербург','Нижний Новгород','Новосибирск','Самара','Екатеринбург','Казань')
subway=random.randint(0,6)
rand=subways[subway]
ball=100
print('I have picked one of the cities that has an operating metro system')
#print(rand)
otvet = 0
while otvet != rand:
    otvet = input("Enter one of the cities: ")
    if otvet != rand:
        print("Wrong guess. Try again.")
        ball /= 2
    elif otvet == rand:
        print("Your score: " + str(ball))
        break
input("Press Enter to exit")
|
Mariaanisimova/pythonintask
|
BITs/2014/Borsheva_ V_O/task_7_6.py
|
Python
|
apache-2.0
| 1,044
|
"""
Author: Keith Bourgoin, Emmett Butler
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["Topic"]
import logging
from collections import defaultdict
from .balancedconsumer import BalancedConsumer
from .common import OffsetType
from .exceptions import LeaderNotAvailable
from .handlers import GEventHandler
from .partition import Partition
from .producer import Producer
from .protocol import PartitionOffsetRequest
from .simpleconsumer import SimpleConsumer
from .utils.compat import iteritems, itervalues
log = logging.getLogger(__name__)
try:
from . import rdkafka
log.info("Successfully loaded pykafka.rdkafka extension.")
except ImportError:
rdkafka = False
log.info("Could not load pykafka.rdkafka extension.", exc_info=True)
class Topic(object):
"""
A Topic is an abstraction over the kafka concept of a topic.
It contains a dictionary of partitions that comprise it.
"""
def __init__(self, cluster, topic_metadata):
"""Create the Topic from metadata.
:param cluster: The Cluster to use
:type cluster: :class:`pykafka.cluster.Cluster`
:param topic_metadata: Metadata for all topics.
:type topic_metadata: :class:`pykafka.protocol.TopicMetadata`
"""
self._name = topic_metadata.name
self._cluster = cluster
self._partitions = {}
self.update(topic_metadata)
def __repr__(self):
return "<{module}.{classname} at {id_} (name={name})>".format(
module=self.__class__.__module__,
classname=self.__class__.__name__,
id_=hex(id(self)),
name=self._name
)
@property
def name(self):
"""The name of this topic"""
return self._name
@property
def partitions(self):
"""A dictionary containing all known partitions for this topic"""
return self._partitions
def get_producer(self, use_rdkafka=False, **kwargs):
"""Create a :class:`pykafka.producer.Producer` for this topic.
For a description of all available `kwargs`, see the Producer docstring.
"""
if not rdkafka and use_rdkafka:
raise ImportError("use_rdkafka requires rdkafka to be installed")
if isinstance(self._cluster.handler, GEventHandler) and use_rdkafka:
raise ImportError("use_rdkafka cannot be used with gevent")
Cls = rdkafka.RdKafkaProducer if rdkafka and use_rdkafka else Producer
return Cls(self._cluster, self, **kwargs)
def get_sync_producer(self, **kwargs):
"""Create a :class:`pykafka.producer.Producer` for this topic.
For a description of all available `kwargs`, see the Producer docstring.
"""
return Producer(self._cluster, self, sync=True, **kwargs)
def fetch_offset_limits(self, offsets_before, max_offsets=1):
"""Get earliest or latest offset.
Use the Offset API to find a limit of valid offsets for each partition
in this topic.
:param offsets_before: Return an offset from before this timestamp (in
milliseconds)
:type offsets_before: int
:param max_offsets: The maximum number of offsets to return
:type max_offsets: int
"""
requests = defaultdict(list) # one request for each broker
for part in itervalues(self.partitions):
requests[part.leader].append(PartitionOffsetRequest(
self.name, part.id, offsets_before, max_offsets
))
output = {}
for broker, reqs in iteritems(requests):
res = broker.request_offset_limits(reqs)
output.update(res.topics[self.name])
return output
def earliest_available_offsets(self):
"""Get the earliest offset for each partition of this topic."""
return self.fetch_offset_limits(OffsetType.EARLIEST)
def latest_available_offsets(self):
"""Get the latest offset for each partition of this topic."""
return self.fetch_offset_limits(OffsetType.LATEST)
def update(self, metadata):
"""Update the Partitions with metadata about the cluster.
:param metadata: Metadata for all topics
:type metadata: :class:`pykafka.protocol.TopicMetadata`
"""
p_metas = metadata.partitions
# Remove old partitions
removed = set(self._partitions.keys()) - set(p_metas.keys())
if len(removed) > 0:
log.info('Removing %d partitions', len(removed))
for id_ in removed:
log.debug('Removing partition %s', self._partitions[id_])
self._partitions.pop(id_)
# Add/update current partitions
brokers = self._cluster.brokers
if len(p_metas) > 0:
log.info("Adding %d partitions", len(p_metas))
for id_, meta in iteritems(p_metas):
if meta.leader not in brokers:
raise LeaderNotAvailable()
if meta.id not in self._partitions:
log.debug('Adding partition %s/%s', self.name, meta.id)
self._partitions[meta.id] = Partition(
self, meta.id,
brokers[meta.leader],
[brokers[b] for b in meta.replicas],
[brokers[b] for b in meta.isr],
)
else:
self._partitions[id_].update(brokers, meta)
def get_simple_consumer(self,
consumer_group=None,
use_rdkafka=False,
**kwargs):
"""Return a SimpleConsumer of this topic
:param consumer_group: The name of the consumer group to join
:type consumer_group: str
:param use_rdkafka: Use librdkafka-backed consumer if available
:type use_rdkafka: bool
"""
if not rdkafka and use_rdkafka:
raise ImportError("use_rdkafka requires rdkafka to be installed")
if isinstance(self._cluster.handler, GEventHandler) and use_rdkafka:
raise ImportError("use_rdkafka cannot be used with gevent")
Cls = (rdkafka.RdKafkaSimpleConsumer
if rdkafka and use_rdkafka else SimpleConsumer)
return Cls(self,
self._cluster,
consumer_group=consumer_group,
**kwargs)
def get_balanced_consumer(self, consumer_group, **kwargs):
"""Return a BalancedConsumer of this topic
:param consumer_group: The name of the consumer group to join
:type consumer_group: str
"""
if "zookeeper_connect" not in kwargs and \
self._cluster._zookeeper_connect is not None:
kwargs['zookeeper_connect'] = self._cluster._zookeeper_connect
return BalancedConsumer(self, self._cluster, consumer_group, **kwargs)
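# Illustrative usage sketch (added block, not part of the original module).
# The broker address, topic name and consumer group below are placeholders,
# and a reachable Kafka broker is required for this to actually run.
def _example_usage(hosts="127.0.0.1:9092"):  # pragma: no cover
    from pykafka import KafkaClient
    client = KafkaClient(hosts=hosts)
    topic = client.topics["test.topic"]
    with topic.get_sync_producer() as producer:
        producer.produce(b"hello world")  # synchronous produce of one message
    consumer = topic.get_simple_consumer(consumer_group=b"example-group")
    message = consumer.consume()  # fetch one message (may be None)
    return message.value if message is not None else None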
|
thedrow/samsa
|
pykafka/topic.py
|
Python
|
apache-2.0
| 7,391
|
# This file is part of the pyqualtrics package.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/Baguage/pyqualtrics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyqualtrics import Qualtrics
import os
user = None # os.environ["QUALTRICS_USER"]
token = None # os.environ["QUALTRICS_TOKEN"]
if __name__ == "__main__":
print "This is an example of panel import"
print "Make sure you have set QUALTRICS_USER, QUALTRICS_TOKEN and QUALTRICS_LIBRARY_ID enviroment variable"
# Note is user and token are None, QUALTRICS_USER and QUALTRICS_TOKEN environment variables will be used instead
qualtrics = Qualtrics(user, token)
library_id = os.environ["QUALTRICS_LIBRARY_ID"]
panel_id = qualtrics.importJsonPanel(
library_id,
Name="New Panel Created by PyQualtrics library (DELETE ME)",
panel=[
{"Email": "pyqualtrics+1@gmail.com", "FirstName": "PyQualtrics", "LastName": "Library", "SubjectID": "123"},
{"Email": "pyqualtrics+2@gmail.com", "FirstName": "PyQualtrics2", "LastName": "Library2"}
],
headers=["Email", "FirstName", "LastName", "ExternalRef", "SubjectID"],
AllED=1)
if qualtrics.last_error_message:
print "Error creating panel: " + qualtrics.last_error_message
else:
print "Panel created successfully, PanelID: " + panel_id
|
Baguage/pyqualtrics
|
examples/import_panel_example.py
|
Python
|
apache-2.0
| 1,983
|
#!/usr/bin/env python3
"""
Test for the hint identifier
"""
import datetime
import unittest
from base_test import PschedTestBase
from pscheduler.limitprocessor.identifier.hint import *
DATA = {
"hint": "value",
"match": {
"style": "exact",
"match": "testing",
"case-insensitive": False
}
}
HINTS_HIT = {
"value": "testing"
}
HINTS_MISS = {
"value": "not-testing"
}
class TestLimitprocessorIdentifierHint(PschedTestBase):
"""
Test the Identifier
"""
def test_data_is_valid(self):
"""Limit Processor / Identifier Hint / Data Validation"""
self.assertEqual(data_is_valid(DATA), (True, "OK"))
self.assertEqual(data_is_valid({}), (False, "At /: 'hint' is a required property"))
self.assertRaises(ValueError, data_is_valid, 123)
def test_identifier(self):
"""Limit Processor / Identifier Hint / Identifier"""
ident = IdentifierHint(DATA)
self.assertEqual(ident.evaluate(HINTS_HIT), True)
self.assertEqual(ident.evaluate(HINTS_MISS), False)
if __name__ == '__main__':
unittest.main()
|
perfsonar/pscheduler
|
python-pscheduler/pscheduler/tests/limitprocessor_identifier_hint_test.py
|
Python
|
apache-2.0
| 1,127
|
# coding=utf-8
import datetime
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.core.signing import Signer
from djorm_pgfulltext.models import SearchManager
from djorm_pgfulltext.fields import VectorField
from urllib import quote_plus
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Meta:
ordering = ["name"]
verbose_name_plural = _("categories")
class Submission(models.Model):
def user_display_name(self):
return self.voter.user_display_name()
category = models.ForeignKey(Category)
idea = models.TextField(verbose_name=_('Question'))
headline = models.TextField(null=True, blank=True)
followup = models.TextField(null=True, blank=True)
citation = models.URLField(null=True, blank=True, db_index=True,
verbose_name=_("Optional link to full proposal or reference"))
citation_verified = models.BooleanField(default=False, db_index=True)
voter = models.ForeignKey("Voter")
created_at = models.DateTimeField(db_index=True)
ip_address = models.CharField(max_length=255, db_index=True)
editors_pick = models.BooleanField(default=False)
approved = models.BooleanField(default=False)
has_duplicates = models.BooleanField(default=False)
duplicate_of = models.ForeignKey('opendebates.Submission', null=True, blank=True,
related_name="duplicates")
votes = models.IntegerField(default=0, db_index=True)
score = models.FloatField(default=0, db_index=True)
rank = models.FloatField(default=0, db_index=True)
random_id = models.FloatField(default=0, db_index=True)
search_index = VectorField()
keywords = models.TextField(null=True, blank=True)
objects = SearchManager(fields=["idea", "keywords"],
auto_update_search_field=True)
source = models.CharField(max_length=255, null=True, blank=True)
def get_recent_votes(self):
timespan = datetime.datetime.now() - datetime.timedelta(1)
return Vote.objects.filter(submission=self, created_at__gte=timespan).count()
def get_duplicates(self):
if not self.has_duplicates:
return None
return Submission.objects.select_related(
"voter", "category", "voter__user").filter(
approved=True, duplicate_of=self)
def __unicode__(self):
return self.idea
@models.permalink
def get_absolute_url(self):
return "vote", [self.id]
def my_tweet_text(self):
return _(u"Vote for my progressive idea for @ThinkBigUS #BigIdeasProject. 30 leaders in Congress will see top ideas!")
def tweet_text(self):
text = _(u"Let's make sure 30 leaders in Congress see this #BigIdea about %(category_name)s - please vote and RT!" % {"category_name": quote_plus(self.category.name)})
if self.voter.twitter_handle:
text += u" h/t @%s" % self.voter.twitter_handle
return text
def facebook_text(self):
if len(self.idea) > 240:
return self.idea[:240] + u'…'
return self.idea
def facebook_url(self):
return u"https://www.facebook.com/sharer/sharer.php?&u=%(idea_url)s" % {
"idea_url": quote_plus(self.really_absolute_url()),
}
def really_absolute_url(self):
return settings.SITE_DOMAIN_WITH_PROTOCOL + self.get_absolute_url()
def email_subject_text(self):
return _("Vote+for+my+Big+Idea!")
def email_body_text(self):
return _("I+posted+an+idea+on+The+Big+Ideas+Project+--+30+members+of+Congress+will+see+the+top+20+ideas!+Please+click+here+to+see+it+and+vote+on+my+idea+--+and+share+it+with+your+friends!")
def email_url(self):
return u"mailto:?subject=%s&body=%s" % (self.email_subject_text(), self.email_body_text(), self.really_absolute_url())
def twitter_url(self):
return u"https://twitter.com/intent/tweet?url=%(SITE_DOMAIN)s%(idea_url)s&text=%(tweet_text)s" % {
"SITE_DOMAIN": quote_plus(settings.SITE_DOMAIN_WITH_PROTOCOL),
"idea_url": quote_plus(self.get_absolute_url()),
"tweet_text": quote_plus(self.tweet_text()),
}
class ZipCode(models.Model):
zip = models.CharField(max_length=10, unique=True)
city = models.CharField(max_length=255, null=True, blank=True)
state = models.CharField(max_length=255, null=True, blank=True)
class Voter(models.Model):
def user_display_name(self):
voter = self
if voter.display_name:
return voter.display_name
if not voter.user:
name = _(u"Somebody")
else:
user = voter.user
name = u"%s" % user.first_name
if user.last_name:
name = u"%s %s." % (name, user.last_name[0])
if not name or not name.strip():
name = _(u"Somebody")
if voter.state:
name = _(u"%(name)s from %(state)s" % {"name": name, "state": voter.state})
return name
email = models.EmailField(unique=True)
zip = models.CharField(max_length=10, db_index=True)
state = models.CharField(max_length=255, null=True, blank=True)
user = models.OneToOneField(User, null=True, blank=True, related_name="voter")
source = models.CharField(max_length=255, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
display_name = models.CharField(max_length=255, null=True, blank=True)
twitter_handle = models.CharField(max_length=255, null=True, blank=True)
unsubscribed = models.BooleanField(default=False)
def __unicode__(self):
return self.email
def account_token(self):
return Voter.make_account_token(self.email)
@classmethod
def make_account_token(cls, email):
signer = Signer()
value = signer.sign(email)
return value
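    # Note (added comment): sign() returns "email:signature"; the token can be
    # verified later with django.core.signing.Signer().unsign(token), which
    # returns the original email or raises BadSignature if it was tampered with.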
class Vote(models.Model):
submission = models.ForeignKey(Submission)
voter = models.ForeignKey(Voter)
ip_address = models.CharField(max_length=255, db_index=True)
request_headers = models.TextField(null=True, blank=True)
original_merged_submission = models.ForeignKey(Submission, null=True, blank=True,
related_name="votes_merged_elsewhere")
class Meta:
unique_together = [("submission", "voter")]
created_at = models.DateTimeField(db_index=True)
source = models.CharField(max_length=255, null=True, blank=True)
class Candidate(models.Model):
first_name = models.CharField(max_length=255, null=True, blank=True)
last_name = models.CharField(max_length=255, null=True, blank=True)
current_title = models.CharField(max_length=255, null=True, blank=True)
bio = models.TextField(default='', null=True, blank=True)
website = models.URLField(null=True, blank=True, db_index=True)
facebook = models.URLField(null=True, blank=True, db_index=True)
twitter_handle = models.CharField(max_length=16, null=True, blank=True)
display_name = models.CharField(max_length=255, null=True, blank=True,
help_text=_("Defaults to first_name last_name."))
created_at = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
if not self.display_name:
self.display_name = u'{0} {1}'.format(self.first_name, self.last_name)
super(Candidate, self).save(*args, **kwargs)
def __unicode__(self):
return self.display_name
from djangohelpers.lib import register_admin
register_admin(Category)
register_admin(Submission)
register_admin(Voter)
register_admin(Vote)
register_admin(Candidate)
|
boldprogressives/django-opendebates
|
opendebates/opendebates/models.py
|
Python
|
apache-2.0
| 7,910
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name.
    # Else, if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
        # First check whether any mapping exists for the attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
    # No mapping exists; try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
    # Attribute retrieval failed, fall back to the node property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
    # Property retrieval failed, fall back to the host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
    # Get all instance data using the cfy REST client.
    # We have to fetch the node through the REST client with node_instance.node_id
    # so that the relationships are available.
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name.
    # Else, if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['TARGET_NODE'] = ctx.target.node.id
env_map['TARGET_INSTANCE'] = ctx.target.instance.id
env_map['TARGET_INSTANCES'] = get_instance_list(ctx.target.node.id)
env_map['SOURCE_NODE'] = ctx.source.node.id
env_map['SOURCE_INSTANCE'] = ctx.source.instance.id
env_map['SOURCE_INSTANCES'] = get_instance_list(ctx.source.node.id)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx.source, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx.source)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx.source)
env_map['DB_IP'] = get_attribute(ctx.target, 'ip_address')
env_map['DB_PORT'] = r'3306'
env_map['DB_NAME'] = r'wordpress'
env_map['DB_USER'] = r'pass'
env_map['DB_PASSWORD'] = r'pass'
other_instances_map = _all_instances_get_attribute(ctx.target, 'ip_address')
if other_instances_map is not None:
for other_instances_key in other_instances_map:
env_map[other_instances_key + 'DB_IP'] = other_instances_map[other_instances_key]
node_artifacts = {
"configs": [
{
"relative_path": "mysqld_charset.cnf",
"absolute_path": "_a4c_artifact/Mysql/configs/configs/mysqld_charset.cnf"
}
]
}
relationship_artifacts = {
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
env_map.update(download_artifacts(artifacts, download_dir))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
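# Hedged example (illustrative only, never called): wrapper scripts are expected to echo
# their declared outputs as 'EXPECTED_OUTPUT_<NAME>=<value>' lines, which parse_output()
# separates from the regular output.
def _example_parse_output():
    sample = "starting install\nEXPECTED_OUTPUT_PORT=3306\ndone"
    parsed = parse_output(sample)
    # parsed == {'last_output': 'done', 'outputs': {'PORT': '3306'}}
    return parsed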
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Wordpress_Mysql/wordpressConnectToMysqlMysql/tosca.interfaces.relationship.Configure/pre_configure_source/config_wordpress_for_mysql.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.source.instance.runtime_properties['_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:{0}'.format(k)] = v
ctx.source.instance.runtime_properties['wordpress_url'] = r'http://' + get_attribute(ctx.source, 'public_ip_address') + r':' + r'80' + r'/'
ctx.source.instance.update()
ctx.target.instance.update()
|
alien4cloud/alien4cloud-cloudify3-provider
|
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/lamp/wrapper/Wordpress_Mysql/wordpressConnectToMysqlMysql/tosca.interfaces.relationship.Configure/pre_configure_source/_a4c_pre_configure_source.py
|
Python
|
apache-2.0
| 17,718
|
from .split import Split
from .tree import Tree
from .node import Node
from .column import NominalColumn, OrdinalColumn, ContinuousColumn
from .stats import Stats
from .invalid_split_reason import InvalidSplitReason
__version__ = "5.3.0"
|
Rambatino/CHAID
|
CHAID/__init__.py
|
Python
|
apache-2.0
| 239
|
"""
WSGI config for hpt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hpt.settings")
application = get_wsgi_application()
|
hollowpoint/hollowpoint
|
hpt/hpt/wsgi.py
|
Python
|
apache-2.0
| 382
|
# -------------------------------- Database models----------------------------------------------------------------------
import sys, os
import sqlalchemy
from sqlalchemy import create_engine
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import secrets
import settings
MYSQL_USERNAME = secrets.MYSQL_USERNAME
MYSQL_PASSWORD = secrets.MYSQL_PASSWORD
MYSQL_HOSTNAME = secrets.MYSQL_HOSTNAME
MYSQL_DATABASE_NAME = secrets.MYSQL_DATABASE_NAME
MYSQL_HOST_PORT = secrets.MYSQL_HOST_PORT
MAX_MESSAGE_SIZE = settings.MAX_MESSAGE_SIZE
database_url = 'mysql://{}:{}@{}:{}/{}'.format(MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_HOSTNAME, MYSQL_HOST_PORT,
MYSQL_DATABASE_NAME)
engine = create_engine(database_url)
from sqlalchemy import Column, Integer, String, Boolean, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import ForeignKey
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class ModelManager(object):
"""
Model manager
"""
@classmethod
def create_session(cls, engine):
"""
        create a session bound to the given engine
:param engine: engine object
:return: returns the created session object
"""
Session = sessionmaker(bind=engine)
session = Session()
return session
@classmethod
def add_to_session(cls, session, obj):
"""
add the object to the session
:param obj:
:param session: session object
:return:
"""
session.add(obj)
@classmethod
def commit_session(cls, session):
"""
commit to session
:param session:
:return:
"""
session.commit()
@classmethod
def delete_from_session(cls, session, obj):
"""
delete the object from the session
:param session:
:return:
"""
session.delete(obj)
@classmethod
def rollback_session(cls, session):
"""
rollback the current session
:param session:
:return:
"""
session.rollback()
@classmethod
def close_session(cls, session):
"""
close the current session
:param session:
:return:
"""
session.close()
class Queue(Base):
"""
Queues model class
"""
__tablename__ = "Queue"
id = Column(Integer, primary_key=True)
name = Column(String(20), unique=True)
created_timestamp = Column(DateTime)
message = relationship("Message", back_populates="queue")
def __repr__(self):
"""
representation of the Queue class
:return:
"""
return "<Queue (name: {}, created_timestamp: {})>".format(self.name, self.created_timestamp)
class Message(Base):
"""
Message model class
"""
__tablename__ = "Message"
id = Column(Integer, primary_key=True)
queue_id = Column(Integer, ForeignKey('Queue.id'))
is_fetched = Column(Boolean, default=False)
content = Column(Text)
publish_timestamp = Column(DateTime)
consumed_timestamp = Column(DateTime)
queue = relationship("Queue", back_populates="message")
    # The consumed_timestamp should ideally default to null, but that is not feasible here.
    # Consumers should therefore check whether is_fetched is True first; if so, the
    # consumed_timestamp is taken as the date and time when the message was dequeued.
def __repr__(self):
"""
representation of the Message class
:return:
"""
return "<Message (queue_id: {}, is_fetched: {}, content: {}...{}, publish_timestamp: {}, " \
"consumed_timestamp: {})>".format(self.queue_id, self.is_fetched, self.content[:10],self.content[10:],
self.publish_timestamp, self.consumed_timestamp)
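# Hedged usage sketch (not part of the original module, never called): shows the intended
# ModelManager flow and the is_fetched/consumed_timestamp convention described above.
def _example_enqueue_and_dequeue(queue_name, payload):
    import datetime
    session = ModelManager.create_session(engine)
    try:
        queue = session.query(Queue).filter_by(name=queue_name).first()
        message = Message(queue=queue, content=payload,
                          publish_timestamp=datetime.datetime.utcnow())
        ModelManager.add_to_session(session, message)
        ModelManager.commit_session(session)
        # On dequeue, is_fetched is flipped and consumed_timestamp is recorded; only then
        # should consumed_timestamp be read as the dequeue time.
        message.is_fetched = True
        message.consumed_timestamp = datetime.datetime.utcnow()
        ModelManager.commit_session(session)
        return message
    finally:
        ModelManager.close_session(session)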
|
MQFN/MQFN
|
bbmq/server/models.py
|
Python
|
apache-2.0
| 3,993
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ImageNet."""
import tensorflow as tf
import uncertainty_baselines as ub
# TODO(dusenberrymw): Use TFDS mocking.
class ImageNetDatasetTest(ub.datasets.DatasetTest):
# TODO(dusenberrymw): Rename to `test_dataset_size`.
def testDatasetSize(self):
super()._testDatasetSize(
ub.datasets.ImageNetDataset, (224, 224, 3), validation_percent=0.1)
def test_expected_features(self):
builder = ub.datasets.ImageNetDataset('train')
dataset = builder.load(batch_size=1)
self.assertEqual(list(dataset.element_spec.keys()), ['features', 'labels'])
builder_with_file_name = ub.datasets.ImageNetDataset(
'train', include_file_name=True)
dataset_with_file_name = builder_with_file_name.load(batch_size=1)
self.assertEqual(
list(dataset_with_file_name.element_spec.keys()),
['features', 'labels', 'file_name'])
if __name__ == '__main__':
tf.test.main()
|
google/uncertainty-baselines
|
uncertainty_baselines/datasets/imagenet_test.py
|
Python
|
apache-2.0
| 1,536
|
import datetime
import json
import logging
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import logout
from django.contrib.sites.shortcuts import get_current_site
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import connections
from django.db.models import DateField, F, Q
from django.db.models.functions import Trunc
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponse, HttpResponseServerError
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.cache import cache_page
from djangohelpers.lib import rendered_with, allow_http
from registration.backends.simple.views import RegistrationView
from .forms import OpenDebatesRegistrationForm, VoterForm, QuestionForm, MergeFlagForm
from .models import (Candidate, Category, Debate, Flag, Submission, Vote, Voter,
TopSubmissionCategory, ZipCode, RECENT_EVENTS_CACHE_ENTRY)
from .router import readonly_db
from .utils import (get_ip_address_from_request, get_headers_from_request, choose_sort, sort_list,
vote_needs_captcha, registration_needs_captcha, get_voter)
from opendebates_emails.models import send_email
def health_check(request):
"""
Health check for the load balancer.
"""
logger = logging.getLogger('opendebates.views.health_check')
db_errors = []
for conn_name in connections:
conn = connections[conn_name]
try:
cursor = conn.cursor()
cursor.execute('SELECT 1')
row = cursor.fetchone()
assert row[0] == 1
except Exception as e:
# note that there doesn't seem to be a way to pass a timeout to
# psycopg2 through Django, so this will likely not raise a timeout
# exception
logger.warning('Caught error checking database connection "{0}"'
''.format(conn_name), exc_info=True)
db_errors.append(e)
if not db_errors:
return HttpResponse('OK')
else:
return HttpResponseServerError('Configuration Error')
def state_from_zip(zip):
try:
return ZipCode.objects.get(zip=zip).state
except ZipCode.DoesNotExist:
return ''
def root_redirect(request):
site = get_current_site(request)
# Look for the *next* debate
debate = Debate.objects.annotate(
debate_day=Trunc('debate_time', 'day', output_field=DateField())
).filter(
site=site,
debate_day__gte=datetime.date.today(),
).order_by('debate_time').first()
if debate is None:
# No next debate? Look for the most recently ended debate.
debate = Debate.objects.filter(
site=site,
).order_by('-debate_time').first()
if debate:
return redirect('/%s/' % debate.prefix)
else:
# If no debates at all, redirect to opendebatecoalition.com
return redirect('https://opendebatecoalition.com')
@cache_page(5) # Cache for 5 seconds after rendering
@allow_http("GET")
@rendered_with("opendebates/snippets/recent_activity.html")
def recent_activity(request):
entries = cache.get(RECENT_EVENTS_CACHE_ENTRY.format(request.debate.id), default=[])
return {
"recent_activity": entries
}
@rendered_with("opendebates/list_ideas.html")
def list_ideas(request):
ideas = Submission.objects.filter(category__debate=request.debate)
citations_only = request.GET.get("citations_only")
sort = choose_sort(request, request.GET.get('sort'))
ideas = sort_list(citations_only, sort, ideas)
return {
'ideas': ideas,
'sort': sort,
'url_name': reverse('list_ideas'),
'stashed_submission': request.session.pop(
"opendebates.stashed_submission", None) if request.user.is_authenticated else None,
}
@rendered_with("opendebates/list_ideas.html")
def list_category(request, cat_id):
category = get_object_or_404(Category, id=cat_id, debate=request.debate)
ideas = Submission.objects.filter(category__debate=request.debate, category=cat_id)
citations_only = request.GET.get("citations_only")
sort = choose_sort(request, request.GET.get('sort'))
ideas = sort_list(citations_only, sort, ideas)
return {
'ideas': ideas,
'sort': sort,
'url_name': reverse("list_category", kwargs={'cat_id': cat_id}),
'category': category
}
@rendered_with("opendebates/list_ideas.html")
@allow_http("GET")
def search_ideas(request):
try:
search_term = [q for q in request.GET.getlist("q") if q][0]
except IndexError:
return redirect(reverse('list_ideas'))
ideas = Submission.objects.filter(category__debate=request.debate)
citations_only = request.GET.get("citations_only")
sort = choose_sort(request, request.GET.get('sort'))
ideas = sort_list(citations_only, sort, ideas)
ideas = ideas.search(search_term.replace("%", ""))
return {
'ideas': ideas,
'search_term': search_term,
'sort': sort,
'url_name': reverse('search_ideas'),
}
@rendered_with("opendebates/list_ideas.html")
def category_search(request, cat_id):
ideas = Submission.objects.filter(category__debate=request.debate, category=cat_id)
citations_only = request.GET.get("citations_only")
search_term = request.GET['q']
sort = choose_sort(request, request.GET.get('sort'))
ideas = sort_list(citations_only, sort, ideas)
ideas = ideas.search(search_term.replace("%", ""))
return {
'ideas': ideas,
'search_term': search_term,
'sort': sort,
'url_name': reverse("list_category", kwargs={'cat_id': cat_id})
}
@rendered_with("opendebates/vote.html")
@allow_http("GET", "POST")
def vote(request, id):
"""Despite the name, this is both the page for voting AND the detail page for submissions"""
try:
with readonly_db():
idea = Submission.objects.get(
id=id, category__debate=request.debate,
)
except Submission.DoesNotExist:
raise Http404
if request.method == "POST" and not idea.approved:
# Don't allow voting on removed submissions, but do allow viewing them
raise Http404
if idea.duplicate_of_id:
if not idea.approved:
# Submissions which have been "unmoderated as duplicates"
# should remain completely inaccessible, and should not redirect
raise Http404
url = reverse("show_idea", kwargs={'id': idea.duplicate_of_id})
url = url + "#i"+str(idea.id)
return redirect(url)
if request.method == "GET":
two_other_approved_ideas = list(Submission.objects.filter(
category=idea.category,
duplicate_of=None,
approved=True).exclude(id=idea.id)[:2]) + [None, None]
related1 = two_other_approved_ideas[0]
related2 = two_other_approved_ideas[1]
return {
'idea': idea,
'show_duplicates': True,
'related1': related1,
'related2': related2,
'duplicates': (Submission.objects.filter(
category__debate=request.debate,
approved=True, duplicate_of=idea)
if idea.has_duplicates else []),
}
if not request.debate.allow_voting_and_submitting_questions:
raise Http404
form = VoterForm(request.POST)
if not vote_needs_captcha(request):
form.ignore_captcha()
if not form.is_valid():
if request.is_ajax():
return HttpResponse(
json.dumps({"status": "400", "errors": form.errors}),
content_type="application/json")
messages.error(request, _('You have some errors in your form'))
return {
'form': form,
'idea': idea,
}
state = state_from_zip(form.cleaned_data['zipcode'])
is_fraudulent = False
session_key = request.session.session_key or ''
if session_key and Vote.objects.filter(submission=idea,
sessionid=session_key).exists():
        # Django creates a session for both signed-in and anonymous users, so
        # we should be able to rely on this. If it is duplicated on a given
        # question, it's because they are scripting votes. Behave the same
        # way as if it were a normal email duplicate, i.e. don't increment but
        # return without error.
is_fraudulent = True
session_voter = get_voter(request)
if session_voter and session_voter['email'] != form.cleaned_data['email']:
        # This can only happen with a manually-created POST request.
is_fraudulent = True
if is_fraudulent:
# Pretend like everything is fine, but don't increment the tally or
# create a Vote. Deny attackers any information about how they are failing.
if request.is_ajax():
result = {"status": "200",
"tally": idea.votes if request.debate.show_question_votes else '',
"id": idea.id}
return HttpResponse(
json.dumps(result),
content_type="application/json")
url = reverse("vote", kwargs={'id': id})
return redirect(url)
voter, created = Voter.objects.get_or_create(
email=form.cleaned_data['email'],
defaults=dict(
source=request.COOKIES.get('opendebates.source'),
state=state,
zip=form.cleaned_data['zipcode'],
user=request.user if request.user.is_authenticated else None,
)
)
if not created and voter.zip != form.cleaned_data['zipcode']:
voter.zip = form.cleaned_data['zipcode']
voter.state = state
voter.save()
vote, created = Vote.objects.get_or_create(
submission=idea,
voter=voter,
defaults=dict(
created_at=timezone.now(),
source=request.COOKIES.get('opendebates.source'),
ip_address=get_ip_address_from_request(request),
sessionid=session_key,
request_headers=get_headers_from_request(request),
is_suspicious=False,
is_invalid=False,
)
)
previous_debate_time = request.debate.previous_debate_time
if created:
# update the DB with the real tally
Submission.objects.filter(category__debate=request.debate, id=id).update(
votes=F('votes')+1,
current_votes=F('current_votes')+(
1 if previous_debate_time is None or vote.created_at > previous_debate_time
else 0
),
local_votes=F('local_votes')+(
1 if voter.state and voter.state == request.debate.debate_state
else 0)
)
# also calculate a simple increment tally for the client
idea.votes += 1
if 'voter' not in request.session:
request.session['voter'] = {"email": voter.email, "zip": voter.zip}
if request.is_ajax():
result = {"status": "200",
"tally": idea.votes if request.debate.show_question_votes else '',
"id": idea.id}
return HttpResponse(
json.dumps(result),
content_type="application/json")
url = reverse("vote", kwargs={'id': id})
return redirect(url)
@rendered_with("opendebates/list_ideas.html")
@allow_http("GET", "POST")
def questions(request):
# If the user is GETting the list of questions, then redirect to the list_ideas
# page for this Debate.
if request.method == 'GET':
return redirect(reverse("list_ideas"))
if not request.debate.allow_voting_and_submitting_questions:
raise Http404
form = QuestionForm(request.POST, request=request)
if not form.is_valid():
# form = QuestionForm(request=request)
messages.error(request, _('You have some errors in the form'))
return {
'form': form,
'categories': Category.objects.filter(debate=request.debate),
'ideas': [],
}
if not request.user.is_authenticated:
request.session['opendebates.stashed_submission'] = {
"category": request.POST['category'],
"headline": request.POST['headline'],
"question": request.POST['question'],
"citation": request.POST.get("citation"),
}
return redirect('registration_register')
category = request.POST.get('category')
form_data = form.cleaned_data
voter, created = Voter.objects.get_or_create(
email=request.user.email,
defaults=dict(
source=request.COOKIES.get('opendebates.source')
)
)
previous_debate_time = request.debate.previous_debate_time
created_at = timezone.now()
idea = Submission.objects.create(
voter=voter,
category_id=category,
headline=form_data['headline'],
followup=form_data['question'],
idea=(u'%s %s' % (form_data['headline'], form_data['question'])).strip(),
citation=form_data['citation'],
created_at=created_at,
ip_address=get_ip_address_from_request(request),
approved=True,
votes=1,
local_votes=1 if voter.state and voter.state == request.debate.debate_state else 0,
current_votes=(1 if previous_debate_time is None or created_at > previous_debate_time
else 0),
source=request.COOKIES.get('opendebates.source'),
)
Vote.objects.create(
submission=idea,
voter=voter,
source=idea.source,
ip_address=get_ip_address_from_request(request),
sessionid=request.session.session_key or '',
request_headers=get_headers_from_request(request),
created_at=created_at,
is_suspicious=False,
is_invalid=False,
)
send_email("submitted_new_idea", {"idea": idea})
send_email("notify_moderators_submitted_new_idea", {"idea": idea})
url = reverse("vote", kwargs={'id': idea.id})
return redirect(url + "#created=%s" % idea.id)
@rendered_with("opendebates/changelog.html")
def changelog(request):
moderated = Submission.objects.filter(
Q(approved=False) | Q(duplicate_of__isnull=False)
).select_related('duplicate_of').order_by('-moderated_at', '-id')
return {
'moderated': moderated
}
class OpenDebatesRegistrationView(RegistrationView):
form_class = OpenDebatesRegistrationForm
next = None
prefix = None
def get(self, request, *args, **kwargs):
self.next = request.GET.get('next', None)
return super(OpenDebatesRegistrationView, self).get(request)
def get_context_data(self, **kwargs):
data = super(OpenDebatesRegistrationView, self).get_context_data(**kwargs)
if self.next:
data['next'] = self.next
return data
def form_valid(self, form):
User = get_user_model()
if User.objects.filter(email__iexact=form.cleaned_data['email']).exists():
return redirect(reverse('registration_duplicate'))
return super(OpenDebatesRegistrationView, self).form_valid(form)
def register(self, form):
new_user = super(OpenDebatesRegistrationView, self).register(form)
voter, created = Voter.objects.update_or_create(
email=form.cleaned_data['email'],
defaults=dict(
source=self.request.COOKIES.get('opendebates.source'),
state=state_from_zip(form.cleaned_data['zip']),
zip=form.cleaned_data['zip'],
display_name=form.cleaned_data.get('display_name'),
twitter_handle=form.cleaned_data.get('twitter_handle'),
phone_number=form.cleaned_data.get('phone_number'),
user=new_user,
)
)
return new_user
def get_form_kwargs(self):
kwargs = super(OpenDebatesRegistrationView, self).get_form_kwargs()
kwargs.update({
'request': self.request,
})
return kwargs
def get_form(self, form_class=None):
form = super(OpenDebatesRegistrationView, self).get_form(form_class)
if not registration_needs_captcha(self.request):
form.ignore_captcha()
return form
def get_success_url(self, user=None):
if self.request.GET.get('next'):
return self.request.GET.get('next')
else:
return reverse('registration_complete')
def registration_complete(request):
request.session['events.account_created'] = True
return redirect(reverse('list_ideas'))
@rendered_with("registration/registration_duplicate.html")
def registration_duplicate(request):
return {}
@rendered_with("opendebates/list_candidates.html")
@allow_http("GET")
def list_candidates(request):
candidates = Candidate.objects.filter(
debate=request.debate,
).order_by('last_name', 'first_name')
return {
'candidates': candidates,
}
@rendered_with("opendebates/flag_report.html")
@allow_http("GET", "POST")
@login_required
def report(request, id):
if not request.debate.allow_voting_and_submitting_questions and not request.user.is_staff:
raise Http404
idea = get_object_or_404(Submission, pk=id, category__debate=request.debate)
voter = Voter.objects.get(user=request.user)
if request.method == 'POST':
flag, created = Flag.objects.get_or_create(
to_remove=idea,
voter=voter,
duplicate_of=None,
defaults=dict(note=request.POST.get("report_why"))
)
messages.info(request, _(u'This question has been flagged for removal.'))
return redirect(idea)
return {
'idea': idea,
}
@rendered_with("opendebates/flag_merge.html")
@allow_http("GET", "POST")
@login_required
def merge(request, id):
if not request.debate.allow_voting_and_submitting_questions and not request.user.is_staff:
raise Http404
idea = get_object_or_404(Submission, pk=id, category__debate=request.debate)
voter = Voter.objects.get(user=request.user)
if Flag.objects.filter(to_remove=idea, voter=voter).exists():
messages.info(request, _(u'You have already flagged this question.'))
return redirect(idea)
form = MergeFlagForm(idea=idea, voter=voter, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
messages.info(request, _(u'This question has been flagged for merging.'))
return redirect(idea)
return {
'idea': idea,
'form': form,
}
@rendered_with("opendebates/top_archive.html")
@allow_http("GET")
def top_archive(request, slug):
category = get_object_or_404(TopSubmissionCategory,
debate=request.debate, slug=slug)
submissions = category.submissions.select_related(
"submission", "submission__voter", "submission__voter__user",
"submission__category").order_by("rank", "created_at").all()
return {
'category': category,
'submissions': submissions,
}
def od_logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
current_app=None, extra_context=None):
if next_page is not None:
next_page = reverse(next_page)
return logout(request, next_page, template_name, redirect_field_name, extra_context)
|
caktus/django-opendebates
|
opendebates/views.py
|
Python
|
apache-2.0
| 19,833
|
from __future__ import absolute_import
from __future__ import division
from typing import Any, Dict, List, Tuple
from django.db import connection
from django.template import RequestContext, loader
from django.core import urlresolvers
from django.http import HttpResponseNotFound
from jinja2 import Markup as mark_safe
from zerver.decorator import has_request_variables, REQ, zulip_internal
from zerver.models import get_realm, UserActivity, UserActivityInterval, Realm
from zerver.lib.timestamp import timestamp_to_datetime
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import time
import re
import pytz
from six.moves import filter
from six.moves import map
from six.moves import range
from six.moves import zip
eastern_tz = pytz.timezone('US/Eastern')
from zproject.jinja2 import render_to_response
def make_table(title, cols, rows, has_row_class=False):
if not has_row_class:
def fix_row(row):
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
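# Hedged illustration (not part of the original module, never called): dictfetchall()
# returns one dict per row keyed by column name. Shape only; the values are made up.
def _example_dictfetchall_shape():
    return [{'domain': 'example.com', 'cnt': 42}]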
def get_realm_day_counts():
query = '''
select
r.domain,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.domain,
age
order by
r.domain,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['domain']][row['age']] = row['cnt']
result = {}
for domain in counts:
raw_cnts = [counts[domain].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[domain] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
query = '''
SELECT
realm.domain,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, domain ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['domain']]['cnts']
        except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0
for row in rows:
domain = row['domain']
minutes = realm_minutes.get(domain, 0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
        except Exception:
pass
# formatting
for row in rows:
row['domain'] = realm_activity_link(row['domain'])
# Count active sites
def meets_goal(row):
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
domain='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__domain'
).order_by(
'user_profile__realm__domain',
'user_profile__email'
)
by_domain = lambda row: row.user_profile.realm.domain
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for domain, realm_intervals in itertools.groupby(all_intervals, by_domain):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (domain,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[domain] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
def get_page(query, cols, title):
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Domain':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.domain,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by domain, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, up.id, client.name
''' % (mobile_type,)
cols = [
'Domain',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.domain,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by domain, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, client.name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by domain'
query = '''
select
realm.domain,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by domain, client_name
having max(last_visit) > now() - interval '2 week'
order by domain, client_name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.domain,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, domain
having max(last_visit) > now() - interval '2 week'
order by client_name, domain
'''
cols = [
'Client',
'Domain',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
duration_content, realm_minutes = user_activity_intervals()
counts_content = realm_summary_table(realm_minutes)
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__domain=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (Any) -> Any
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
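# Hedged sketch (illustrative only, never called): the summary produced by
# get_user_activity_summary() is keyed by client name plus the special keys
# 'name', 'use', 'send', 'pointer', 'website' and 'desktop'. Shape only; real
# 'last_visit' values are datetimes, None is used here as a placeholder.
def _example_user_activity_summary_shape():
    return {
        'name': 'Example User',
        'use': {'count': 42, 'last_visit': None},
        'send': {'count': 7, 'last_visit': None},
        'pointer': {'count': 3, 'last_visit': None},
        'website': {'count': 30, 'last_visit': None},
    }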
def format_date_for_activity_reports(date):
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm):
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm=realm))
realm_link = '<a href="%s">%s</a>' % (url, realm)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
user_records = {}
def by_email(record):
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
val = get_last_visit(user_summary, field)
if field == 'use':
if val and is_recent(val):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(val)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android'
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm):
# type: (Any, Any) -> Any
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = get_realm(realm).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm)
data += [(page_title, content)]
fix_name = lambda realm: realm.replace('.', '_')
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (fix_name(realm),)
title = realm
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
|
peiwei/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 26,077
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import (
Connection, DagModel, DagRun, DagTag, Pool, RenderedTaskInstanceFields, SlaMiss, TaskInstance, Variable,
errors,
)
from airflow.models.dagcode import DagCode
from airflow.utils.db import add_default_pool_if_not_exists, create_default_connections, \
create_session
def clear_db_runs():
with create_session() as session:
session.query(DagRun).delete()
session.query(TaskInstance).delete()
def clear_db_dags():
with create_session() as session:
session.query(DagTag).delete()
session.query(DagModel).delete()
def clear_db_sla_miss():
with create_session() as session:
session.query(SlaMiss).delete()
def clear_db_errors():
with create_session() as session:
session.query(errors.ImportError).delete()
def clear_db_pools():
with create_session() as session:
session.query(Pool).delete()
add_default_pool_if_not_exists(session)
def clear_db_connections():
with create_session() as session:
session.query(Connection).delete()
create_default_connections(session)
def clear_db_variables():
with create_session() as session:
session.query(Variable).delete()
def clear_db_dag_code():
with create_session() as session:
session.query(DagCode).delete()
def set_default_pool_slots(slots):
with create_session() as session:
default_pool = Pool.get_default_pool(session)
default_pool.slots = slots
def clear_rendered_ti_fields():
with create_session() as session:
session.query(RenderedTaskInstanceFields).delete()
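# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the helpers above are
# typically called from a test's setUp/tearDown so that every test starts from
# a clean metadata database. The TestCase below is hypothetical and only shows
# the intended call pattern.
import unittest
class _ExampleCleanDbTestCase(unittest.TestCase):
    def setUp(self):
        # Reset only the tables this (hypothetical) test touches.
        clear_db_runs()
        clear_db_dags()
        clear_db_pools()
    def tearDown(self):
        # Leave the database clean for whatever test runs next.
        clear_db_runs()
        clear_db_dags()
        clear_db_pools()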
|
owlabs/incubator-airflow
|
tests/test_utils/db.py
|
Python
|
apache-2.0
| 2,430
|
from nose_parameterized import parameterized
from unittest import TestCase
from tensorrec import TensorRec
from tensorrec.representation_graphs import (
LinearRepresentationGraph, NormalizedLinearRepresentationGraph, FeaturePassThroughRepresentationGraph,
WeightedFeaturePassThroughRepresentationGraph, ReLURepresentationGraph
)
from tensorrec.util import generate_dummy_data
class RepresentationGraphTestCase(TestCase):
@parameterized.expand([
["linear", LinearRepresentationGraph, LinearRepresentationGraph, 50, 60, 20],
["norm_lin", NormalizedLinearRepresentationGraph, NormalizedLinearRepresentationGraph, 50, 60, 20],
["fpt_user", FeaturePassThroughRepresentationGraph, NormalizedLinearRepresentationGraph, 50, 60, 50],
["fpt_item", NormalizedLinearRepresentationGraph, FeaturePassThroughRepresentationGraph, 50, 60, 60],
["fpt_both", FeaturePassThroughRepresentationGraph, FeaturePassThroughRepresentationGraph, 50, 50, 50],
["weighted_fpt", WeightedFeaturePassThroughRepresentationGraph, WeightedFeaturePassThroughRepresentationGraph,
50, 50, 50],
["relu", ReLURepresentationGraph, ReLURepresentationGraph, 50, 60, 20],
])
def test_fit(self, name, user_repr, item_repr, n_user_features, n_item_features, n_components):
interactions, user_features, item_features = generate_dummy_data(
num_users=15, num_items=30, interaction_density=.5, num_user_features=n_user_features,
num_item_features=n_item_features, n_features_per_user=20, n_features_per_item=20, pos_int_ratio=.5
)
model = TensorRec(n_components=n_components,
user_repr_graph=user_repr(),
item_repr_graph=item_repr())
model.fit(interactions, user_features, item_features, epochs=10)
# Ensure that the nodes have been built
self.assertIsNotNone(model.tf_prediction)
class IdentityRepresentationGraphTestCase(TestCase):
def test_fit_fail_on_bad_dims(self):
interactions, user_features, item_features = generate_dummy_data(
num_users=15, num_items=30, interaction_density=.5, num_user_features=30,
num_item_features=20, n_features_per_user=20, n_features_per_item=20, pos_int_ratio=.5
)
with self.assertRaises(ValueError):
model = TensorRec(n_components=25,
user_repr_graph=FeaturePassThroughRepresentationGraph(),
item_repr_graph=LinearRepresentationGraph())
model.fit(interactions, user_features, item_features, epochs=10)
with self.assertRaises(ValueError):
model = TensorRec(n_components=25,
user_repr_graph=LinearRepresentationGraph(),
item_repr_graph=FeaturePassThroughRepresentationGraph())
model.fit(interactions, user_features, item_features, epochs=10)
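# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): outside the test
# harness the same pieces combine roughly as below. The predict() call is an
# assumption about TensorRec's public API and is included only to round out
# the fit() calls exercised above.
def _example_fit_and_predict():
    interactions, user_features, item_features = generate_dummy_data(
        num_users=15, num_items=30, interaction_density=.5, num_user_features=50,
        num_item_features=60, n_features_per_user=20, n_features_per_item=20,
        pos_int_ratio=.5)
    model = TensorRec(n_components=20,
                      user_repr_graph=ReLURepresentationGraph(),
                      item_repr_graph=LinearRepresentationGraph())
    model.fit(interactions, user_features, item_features, epochs=10)
    # Score every user/item pair with the learned representations (assumed API).
    return model.predict(user_features=user_features, item_features=item_features)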
|
jfkirk/tensorrec
|
test/test_representation_graphs.py
|
Python
|
apache-2.0
| 2,973
|
import os
import sys
def readable_dir(prospective_dir):
""" Function that checks if a path is a directory, if it exists and if it is accessible and only
returns true if all these three are the case
:param prospective_dir: path to the directory"""
if prospective_dir is not None:
if not os.path.isdir(prospective_dir):
raise Exception("readable_dir:{0} is not a valid path".format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
raise Exception("readable_dir:{0} is not a readable dir".format(prospective_dir))
def find_plugins(plugin_dir):
"""Finds all python files in the specified path and imports them. This is needed, if we want to
detect automatically, which datastore and parser we can apply
:param plugin_dir: path to the plugin directory"""
plugin_files = [x[:-3] for x in os.listdir(plugin_dir) if x.endswith(".py")]
sys.path.insert(0, plugin_dir)
for plugin in plugin_files:
__import__(plugin)
def get_immediate_subdirectories(a_dir):
""" Helper method, which gets the **immediate** subdirectories of a path. Is helpful, if one want to create a
parser, which looks if certain folders are there.
:param a_dir: directory from which **immediate** subdirectories should be listed """
return [name for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]
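# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a hypothetical caller
# validates a user-supplied plugin path, imports every plugin found there and
# then inspects the path's immediate subdirectories.
def _example_load_plugins(path):
    plugin_dir = readable_dir(path)        # raises if missing or unreadable
    find_plugins(plugin_dir)               # imports every *.py in the directory
    return get_immediate_subdirectories(plugin_dir)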
|
smartshark/vcsSHARK
|
pyvcsshark/utils.py
|
Python
|
apache-2.0
| 1,460
|
#! /usr/bin/env python
"""
Variables that are shared between modules
"""
import sys
import codecs
version = ""
scriptPath = ""
scriptName = ""
mediaInfoExe = ""
mets_ns = ""
mods_ns = ""
premis_ns = ""
ebucore_ns = ""
xlink_ns = ""
xsi_ns = ""
isolyzer_ns = ""
cdInfo_ns = ""
dfxml_ns = ""
metsSchema = ""
modsSchema = ""
premisSchema = ""
ebucoreSchema = ""
NSMAP = {}
failedPPNs = []
errors = 0
warnings = 0
createSIPs = False
pruneBatch = False
skipChecksumFlag = False
batchErr = ""
dirOut = ""
dirsInMetaCarriers = []
carrierTypeAllowedValues = []
# Set encoding of the terminal to UTF-8
if sys.version.startswith("2"):
out = codecs.getwriter("UTF-8")(sys.stdout)
err = codecs.getwriter("UTF-8")(sys.stderr)
elif sys.version.startswith("3"):
out = codecs.getwriter("UTF-8")(sys.stdout.buffer)
err = codecs.getwriter("UTF-8")(sys.stderr.buffer)
|
bitsgalore/omSipCreator
|
omSipCreator/config.py
|
Python
|
apache-2.0
| 867
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for compute API."""
import contextlib
import copy
import datetime
import iso8601
import mock
from mox3 import mox
from oslo_utils import timeutils
from oslo_utils import uuidutils
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import conductor
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import quotas as quotas_obj
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import quota
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_migration
from nova.tests.unit.objects import test_service
from nova import utils
from nova.volume import cinder
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
SHELVED_IMAGE = 'fake-shelved-image'
SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
class _ComputeAPIUnitTestMixIn(object):
def setUp(self):
super(_ComputeAPIUnitTestMixIn, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
if not exclude_states:
exclude_states = set()
return vm_state - exclude_states
def _create_flavor(self, **updates):
flavor = {'id': 1,
'flavorid': 1,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'vcpu_weight': None,
'root_gb': 1,
'ephemeral_gb': 0,
'rxtx_factor': 1,
'swap': 0,
'deleted': 0,
'disabled': False,
'is_public': True,
'deleted_at': None,
'created_at': datetime.datetime(2012, 1, 19, 18,
49, 30, 877329),
'updated_at': None,
}
if updates:
flavor.update(updates)
return objects.Flavor._from_db_object(self.context, objects.Flavor(),
flavor)
def _create_instance_obj(self, params=None, flavor=None):
"""Create a test instance."""
if not params:
params = {}
if flavor is None:
flavor = self._create_flavor()
now = timeutils.utcnow()
instance = objects.Instance()
instance.metadata = {}
instance.metadata.update(params.pop('metadata', {}))
instance.system_metadata = params.pop('system_metadata', {})
instance._context = self.context
instance.id = 1
instance.uuid = uuidutils.generate_uuid()
instance.cell_name = 'api!child'
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.image_ref = FAKE_IMAGE_REF
instance.reservation_id = 'r-fakeres'
instance.user_id = self.user_id
instance.project_id = self.project_id
instance.host = 'fake_host'
instance.node = NODENAME
instance.instance_type_id = flavor.id
instance.ami_launch_index = 0
instance.memory_mb = 0
instance.vcpus = 0
instance.root_gb = 0
instance.ephemeral_gb = 0
instance.architecture = arch.X86_64
instance.os_type = 'Linux'
instance.locked = False
instance.created_at = now
instance.updated_at = now
instance.launched_at = now
instance.disable_terminate = False
instance.info_cache = objects.InstanceInfoCache()
instance.flavor = flavor
instance.old_flavor = instance.new_flavor = None
if params:
instance.update(params)
instance.obj_reset_changes()
return instance
def test_create_quota_exceeded_messages(self):
image_href = "image_href"
image_id = 0
instance_type = self._create_flavor()
self.mox.StubOutWithMock(self.compute_api, "_get_image")
self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
quotas = {'instances': 1, 'cores': 1, 'ram': 1}
usages = {r: {'in_use': 1, 'reserved': 1} for r in
['instances', 'cores', 'ram']}
quota_exception = exception.OverQuota(quotas=quotas,
usages=usages, overs=['instances'])
for _unused in range(2):
self.compute_api._get_image(self.context, image_href).AndReturn(
(image_id, {}))
quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int),
project_id=mox.IgnoreArg(),
user_id=mox.IgnoreArg())
quota.QUOTAS.reserve(self.context, instances=40,
cores=mox.IsA(int),
expire=mox.IgnoreArg(),
project_id=mox.IgnoreArg(),
user_id=mox.IgnoreArg(),
ram=mox.IsA(int)).AndRaise(quota_exception)
self.mox.ReplayAll()
for min_count, message in [(20, '20-40'), (40, '40')]:
try:
self.compute_api.create(self.context, instance_type,
"image_href", min_count=min_count,
max_count=40)
except exception.TooManyInstances as e:
self.assertEqual(message, e.kwargs['req'])
else:
self.fail("Exception not raised")
def test_specified_port_and_multiple_instances_neutronv2(self):
# Tests that if port is specified there is only one instance booting
        # (i.e. max_count == 1) as we can't share the same port across multiple
# instances.
self.flags(network_api_class='nova.network.neutronv2.api.API')
port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
min_count = 1
max_count = 2
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(address=address,
port_id=port)])
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create, self.context, 'fake_flavor', 'image_id',
min_count=min_count, max_count=max_count,
requested_networks=requested_networks)
def _test_specified_ip_and_multiple_instances_helper(self,
requested_networks):
# Tests that if ip is specified there is only one instance booting
        # (i.e. max_count == 1)
min_count = 1
max_count = 2
self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
self.compute_api.create, self.context, "fake_flavor", 'image_id',
min_count=min_count, max_count=max_count,
requested_networks=requested_networks)
def test_specified_ip_and_multiple_instances(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=network,
address=address)])
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
def test_specified_ip_and_multiple_instances_neutronv2(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id=network,
address=address)])
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
def test_suspend(self):
# Ensure instance can be suspended.
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.SUSPEND)
rpcapi.suspend_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.suspend(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertEqual(task_states.SUSPENDING,
instance.task_state)
def _test_suspend_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.suspend,
self.context, instance)
def test_suspend_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_suspend_fails(state)
def test_resume(self):
# Ensure instance can be resumed (if suspended).
instance = self._create_instance_obj(
params=dict(vm_state=vm_states.SUSPENDED))
self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'resume_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.RESUME)
rpcapi.resume_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.resume(self.context, instance)
self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
self.assertEqual(task_states.RESUMING,
instance.task_state)
def test_start(self):
params = dict(vm_state=vm_states.STOPPED)
instance = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.START)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'start_instance')
rpcapi.start_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.start(self.context, instance)
self.assertEqual(task_states.POWERING_ON,
instance.task_state)
def test_start_invalid_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.start,
self.context, instance)
def test_start_no_host(self):
params = dict(vm_state=vm_states.STOPPED, host='')
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.start,
self.context, instance)
def _test_stop(self, vm_state, force=False, clean_shutdown=True):
# Make sure 'progress' gets reset
params = dict(task_state=None, progress=99, vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.STOP)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'stop_instance')
rpcapi.stop_instance(self.context, instance, do_cast=True,
clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
if force:
self.compute_api.force_stop(self.context, instance,
clean_shutdown=clean_shutdown)
else:
self.compute_api.stop(self.context, instance,
clean_shutdown=clean_shutdown)
self.assertEqual(task_states.POWERING_OFF,
instance.task_state)
self.assertEqual(0, instance.progress)
def test_stop(self):
self._test_stop(vm_states.ACTIVE)
def test_stop_stopped_instance_with_bypass(self):
self._test_stop(vm_states.STOPPED, force=True)
def test_stop_forced_shutdown(self):
self._test_stop(vm_states.ACTIVE, force=True)
def test_stop_without_clean_shutdown(self):
self._test_stop(vm_states.ACTIVE,
clean_shutdown=False)
def test_stop_forced_without_clean_shutdown(self):
self._test_stop(vm_states.ACTIVE, force=True,
clean_shutdown=False)
def _test_stop_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.ERROR]))
for state in invalid_vm_states:
self._test_stop_invalid_state(state)
def test_stop_a_stopped_inst(self):
params = {'vm_state': vm_states.STOPPED}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_no_host(self):
params = {'host': ''}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.stop,
self.context, instance)
def _test_shelve(self, vm_state=vm_states.ACTIVE,
boot_from_volume=False, clean_shutdown=True):
params = dict(task_state=None, vm_state=vm_state,
display_name='fake-name')
instance = self._create_instance_obj(params=params)
with contextlib.nested(
mock.patch.object(self.compute_api, 'is_volume_backed_instance',
return_value=boot_from_volume),
mock.patch.object(self.compute_api, '_create_image',
return_value=dict(id='fake-image-id')),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_instance'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_offload_instance')
) as (
volume_backed_inst, create_image, instance_save,
record_action_start, rpcapi_shelve_instance,
rpcapi_shelve_offload_instance
):
self.compute_api.shelve(self.context, instance,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.SHELVING, instance.task_state)
# assert our mock calls
volume_backed_inst.assert_called_once_with(
self.context, instance)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.SHELVE)
if boot_from_volume:
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=instance,
clean_shutdown=clean_shutdown)
else:
rpcapi_shelve_instance.assert_called_once_with(
self.context, instance=instance, image_id='fake-image-id',
clean_shutdown=clean_shutdown)
def test_shelve(self):
self._test_shelve()
def test_shelve_stopped(self):
self._test_shelve(vm_state=vm_states.STOPPED)
def test_shelve_paused(self):
self._test_shelve(vm_state=vm_states.PAUSED)
def test_shelve_suspended(self):
self._test_shelve(vm_state=vm_states.SUSPENDED)
def test_shelve_boot_from_volume(self):
self._test_shelve(boot_from_volume=True)
def test_shelve_forced_shutdown(self):
self._test_shelve(clean_shutdown=False)
def test_shelve_boot_from_volume_forced_shutdown(self):
self._test_shelve(boot_from_volume=True,
clean_shutdown=False)
def _test_shelve_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve,
self.context, instance)
def test_shelve_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED]))
for state in invalid_vm_states:
self._test_shelve_invalid_state(state)
def _test_shelve_offload(self, clean_shutdown=True):
params = dict(task_state=None, vm_state=vm_states.SHELVED)
instance = self._create_instance_obj(params=params)
with contextlib.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_offload_instance')
) as (
instance_save, rpcapi_shelve_offload_instance
):
self.compute_api.shelve_offload(self.context, instance,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.SHELVING_OFFLOADING,
instance.task_state)
instance_save.assert_called_once_with(expected_task_state=[None])
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=instance,
clean_shutdown=clean_shutdown)
def test_shelve_offload(self):
self._test_shelve_offload()
def test_shelve_offload_forced_shutdown(self):
self._test_shelve_offload(clean_shutdown=False)
def _test_shelve_offload_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve_offload,
self.context, instance)
def test_shelve_offload_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED]))
for state in invalid_vm_states:
self._test_shelve_offload_invalid_state(state)
def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
        # Ensure the instance can be rebooted (soft or hard).
inst = self._create_instance_obj()
inst.vm_state = vm_state
inst.task_state = task_state
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(inst, 'save')
expected_task_state = [None]
if reboot_type == 'HARD':
expected_task_state.extend([task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.SUSPENDING])
inst.save(expected_task_state=expected_task_state)
self.compute_api._record_action_start(self.context, inst,
instance_actions.REBOOT)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
rpcapi.reboot_instance(self.context, instance=inst,
block_device_info=None,
reboot_type=reboot_type)
self.mox.ReplayAll()
self.compute_api.reboot(self.context, inst, reboot_type)
def _test_reboot_type_fails(self, reboot_type, **updates):
inst = self._create_instance_obj()
inst.update(updates)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
self.context, inst, reboot_type)
def test_reboot_hard_active(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD')
def test_reboot_hard_error(self):
self._test_reboot_type(vm_states.ERROR, 'HARD')
def test_reboot_hard_rebooting(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOTING)
def test_reboot_hard_reboot_started(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOT_STARTED)
def test_reboot_hard_reboot_pending(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOT_PENDING)
def test_reboot_hard_rescued(self):
self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
def test_reboot_hard_resuming(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.RESUMING)
def test_reboot_hard_pausing(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.PAUSING)
def test_reboot_hard_unpausing(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.UNPAUSING)
def test_reboot_hard_suspending(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.SUSPENDING)
def test_reboot_hard_error_not_launched(self):
self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
launched_at=None)
def test_reboot_soft(self):
self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
def test_reboot_soft_error(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)
def test_reboot_soft_paused(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)
def test_reboot_soft_stopped(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)
def test_reboot_soft_suspended(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)
def test_reboot_soft_rebooting(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
def test_reboot_soft_rebooting_hard(self):
self._test_reboot_type_fails('SOFT',
task_state=task_states.REBOOTING_HARD)
def test_reboot_soft_reboot_started(self):
self._test_reboot_type_fails('SOFT',
task_state=task_states.REBOOT_STARTED)
def test_reboot_soft_reboot_pending(self):
self._test_reboot_type_fails('SOFT',
task_state=task_states.REBOOT_PENDING)
def test_reboot_soft_rescued(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
def test_reboot_soft_error_not_launched(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
launched_at=None)
def test_reboot_soft_resuming(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.RESUMING)
def test_reboot_soft_pausing(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.PAUSING)
def test_reboot_soft_unpausing(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.UNPAUSING)
def test_reboot_soft_suspending(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.SUSPENDING)
def _test_delete_resizing_part(self, inst, deltas):
fake_db_migration = test_migration.fake_db_migration()
migration = objects.Migration._from_db_object(
self.context, objects.Migration(),
fake_db_migration)
inst.instance_type_id = migration.new_instance_type_id
old_flavor = self._create_flavor(vcpus=1, memory_mb=512)
deltas['cores'] = -old_flavor.vcpus
deltas['ram'] = -old_flavor.memory_mb
self.mox.StubOutWithMock(objects.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(flavors, 'get_flavor')
self.context.elevated().AndReturn(self.context)
objects.Migration.get_by_instance_and_status(
self.context, inst.uuid, 'post-migrating').AndReturn(migration)
flavors.get_flavor(migration.old_instance_type_id).AndReturn(
old_flavor)
def _test_delete_resized_part(self, inst):
migration = objects.Migration._from_db_object(
self.context, objects.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(objects.Migration,
'get_by_instance_and_status')
self.context.elevated().AndReturn(self.context)
objects.Migration.get_by_instance_and_status(
self.context, inst.uuid, 'finished').AndReturn(migration)
self.compute_api._downsize_quota_delta(self.context, inst
).AndReturn('deltas')
fake_quotas = objects.Quotas.from_reservations(self.context,
['rsvs'])
self.compute_api._reserve_quota_delta(self.context, 'deltas', inst,
).AndReturn(fake_quotas)
self.compute_api._record_action_start(
self.context, inst, instance_actions.CONFIRM_RESIZE)
self.compute_api.compute_rpcapi.confirm_resize(
self.context, inst, migration,
migration['source_compute'], fake_quotas.reservations, cast=False)
def _test_delete_shelved_part(self, inst):
image_api = self.compute_api.image_api
self.mox.StubOutWithMock(image_api, 'delete')
snapshot_id = inst.system_metadata.get('shelved_image_id')
if snapshot_id == SHELVED_IMAGE:
image_api.delete(self.context, snapshot_id).AndReturn(True)
elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
image_api.delete(self.context, snapshot_id).AndRaise(
exception.ImageNotFound(image_id=snapshot_id))
elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
image_api.delete(self.context, snapshot_id).AndRaise(
exception.ImageNotAuthorized(image_id=snapshot_id))
elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
image_api.delete(self.context, snapshot_id).AndRaise(
test.TestingException("Unexpected error"))
def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
inst.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context, inst,
'%s.start' % delete_type)
self.context.elevated().AndReturn(self.context)
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
state = ('soft' in delete_type and vm_states.SOFT_DELETED or
vm_states.DELETED)
updates.update({'vm_state': state,
'task_state': None,
'terminated_at': delete_time})
inst.save()
updates.update({'deleted_at': delete_time,
'deleted': True})
fake_inst = fake_instance.fake_db_instance(**updates)
db.instance_destroy(self.context, inst.uuid,
constraint=None).AndReturn(fake_inst)
compute_utils.notify_about_instance_usage(
self.compute_api.notifier,
self.context, inst, '%s.end' % delete_type,
system_metadata=inst.system_metadata)
def _test_delete(self, delete_type, **attrs):
reservations = ['fake-resv']
inst = self._create_instance_obj()
inst.update(attrs)
inst._context = self.context
deltas = {'instances': -1,
'cores': -inst.vcpus,
'ram': -inst.memory_mb}
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(delete_time)
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
'service_is_up')
self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(inst.info_cache, 'delete')
self.mox.StubOutWithMock(self.compute_api.network_api,
'deallocate_for_instance')
self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
self.mox.StubOutWithMock(db, 'instance_destroy')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
if (inst.vm_state in
(vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
self._test_delete_shelved_part(inst)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, inst.uuid).AndReturn([])
inst.save()
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
user_id=inst.user_id,
expire=mox.IgnoreArg(),
**deltas).AndReturn(reservations)
# NOTE(comstud): This is getting messy. But what we are wanting
# to test is:
# If cells is enabled and we're the API cell:
# * Cast to cells_rpcapi.<method> with reservations=None
# * Commit reservations
# Otherwise:
# * Check for downed host
# * If downed host:
# * Clean up instance, destroying it, sending notifications.
# (Tested in _test_downed_host_part())
# * Commit reservations
# * If not downed host:
# * Record the action start.
# * Cast to compute_rpcapi.<method> with the reservations
cast = True
commit_quotas = True
soft_delete = False
if self.cell_type != 'api':
if inst.vm_state == vm_states.RESIZED:
self._test_delete_resized_part(inst)
if inst.vm_state == vm_states.SOFT_DELETED:
soft_delete = True
if inst.vm_state != vm_states.SHELVED_OFFLOADED:
self.context.elevated().AndReturn(self.context)
db.service_get_by_compute_host(
self.context, inst.host).AndReturn(
test_service.fake_service)
self.compute_api.servicegroup_api.service_is_up(
mox.IsA(objects.Service)).AndReturn(
inst.host != 'down-host')
if (inst.host == 'down-host' or
inst.vm_state == vm_states.SHELVED_OFFLOADED):
self._test_downed_host_part(inst, updates, delete_time,
delete_type)
cast = False
else:
# Happens on the manager side
commit_quotas = False
if cast:
if self.cell_type != 'api':
self.compute_api._record_action_start(self.context, inst,
instance_actions.DELETE)
if commit_quotas or soft_delete:
cast_reservations = None
else:
cast_reservations = reservations
if delete_type == 'soft_delete':
rpcapi.soft_delete_instance(self.context, inst,
reservations=cast_reservations)
elif delete_type in ['delete', 'force_delete']:
rpcapi.terminate_instance(self.context, inst, [],
reservations=cast_reservations)
if commit_quotas:
# Local delete or when we're testing API cell.
quota.QUOTAS.commit(self.context, reservations,
project_id=inst.project_id,
user_id=inst.user_id)
self.mox.ReplayAll()
getattr(self.compute_api, delete_type)(self.context, inst)
for k, v in updates.items():
self.assertEqual(inst[k], v)
self.mox.UnsetStubs()
def test_delete(self):
self._test_delete('delete')
def test_delete_if_not_launched(self):
self._test_delete('delete', launched_at=None)
def test_delete_in_resizing(self):
self._test_delete('delete',
task_state=task_states.RESIZE_FINISH)
def test_delete_in_resized(self):
self._test_delete('delete', vm_state=vm_states.RESIZED)
def test_delete_shelved(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
self._test_delete('delete',
vm_state=vm_states.SHELVED,
system_metadata=fake_sys_meta)
def test_delete_shelved_offloaded(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
self._test_delete('delete',
vm_state=vm_states.SHELVED_OFFLOADED,
system_metadata=fake_sys_meta)
def test_delete_shelved_image_not_found(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
self._test_delete('delete',
vm_state=vm_states.SHELVED_OFFLOADED,
system_metadata=fake_sys_meta)
def test_delete_shelved_image_not_authorized(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
self._test_delete('delete',
vm_state=vm_states.SHELVED_OFFLOADED,
system_metadata=fake_sys_meta)
def test_delete_shelved_exception(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
self._test_delete('delete',
vm_state=vm_states.SHELVED,
system_metadata=fake_sys_meta)
def test_delete_with_down_host(self):
self._test_delete('delete', host='down-host')
def test_delete_soft_with_down_host(self):
self._test_delete('soft_delete', host='down-host')
def test_delete_soft(self):
self._test_delete('soft_delete')
def test_delete_forced(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
for vm_state in self._get_vm_states():
if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
self._test_delete('force_delete',
vm_state=vm_state,
system_metadata=fake_sys_meta)
self._test_delete('force_delete', vm_state=vm_state)
def test_delete_fast_if_host_not_set(self):
inst = self._create_instance_obj()
inst.host = ''
quotas = quotas_obj.Quotas(self.context)
updates = {'progress': 0, 'task_state': task_states.DELETING}
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(db, 'constraint')
self.mox.StubOutWithMock(db, 'instance_destroy')
self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
db.block_device_mapping_get_all_by_instance(self.context,
inst.uuid,
use_slave=False).AndReturn([])
inst.save()
self.compute_api._create_reservations(self.context,
inst, inst.task_state,
inst.project_id, inst.user_id
).AndReturn(quotas)
if self.cell_type == 'api':
rpcapi.terminate_instance(
self.context, inst,
mox.IsA(objects.BlockDeviceMappingList),
reservations=None)
else:
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.start')
db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.iso8601.Utc())
updates['deleted_at'] = delete_time
updates['deleted'] = True
fake_inst = fake_instance.fake_db_instance(**updates)
db.instance_destroy(self.context, inst.uuid,
constraint='constraint').AndReturn(fake_inst)
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.end',
system_metadata=inst.system_metadata)
self.mox.ReplayAll()
self.compute_api.delete(self.context, inst)
for k, v in updates.items():
self.assertEqual(inst[k], v)
def _fake_do_delete(context, instance, bdms,
                        reservations=None, local=False):
pass
def test_local_delete_with_deleted_volume(self):
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 42, 'volume_id': 'volume_id',
'source_type': 'volume', 'destination_type': 'volume',
'delete_on_termination': False}))]
inst = self._create_instance_obj()
inst._context = self.context
self.mox.StubOutWithMock(inst, 'destroy')
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(inst.info_cache, 'delete')
self.mox.StubOutWithMock(self.compute_api.network_api,
'deallocate_for_instance')
self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute_api.volume_api,
'terminate_connection')
self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')
inst.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.start')
self.context.elevated().MultipleTimes().AndReturn(self.context)
if self.cell_type != 'api':
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
self.compute_api.volume_api.terminate_connection(
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
                AndRaise(exception.VolumeNotFound('volume_id'))
bdms[0].destroy()
inst.destroy()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.end',
system_metadata=inst.system_metadata)
self.mox.ReplayAll()
self.compute_api._local_delete(self.context, inst, bdms,
'delete',
self._fake_do_delete)
def test_local_delete_without_info_cache(self):
inst = self._create_instance_obj()
with contextlib.nested(
mock.patch.object(inst, 'destroy'),
mock.patch.object(self.context, 'elevated'),
mock.patch.object(self.compute_api.network_api,
'deallocate_for_instance'),
mock.patch.object(db, 'instance_system_metadata_get'),
mock.patch.object(compute_utils,
'notify_about_instance_usage')
) as (
inst_destroy, context_elevated, net_api_deallocate_for_instance,
db_instance_system_metadata_get, notify_about_instance_usage
):
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.start')
self.context.elevated().MultipleTimes().AndReturn(self.context)
if self.cell_type != 'api':
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
inst.destroy()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.end',
system_metadata=inst.system_metadata)
inst.info_cache = None
self.compute_api._local_delete(self.context, inst, [],
'delete',
self._fake_do_delete)
def test_delete_disabled(self):
inst = self._create_instance_obj()
inst.disable_terminate = True
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.ReplayAll()
self.compute_api.delete(self.context, inst)
def test_delete_soft_rollback(self):
inst = self._create_instance_obj()
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(inst, 'save')
delete_time = datetime.datetime(1955, 11, 5)
timeutils.set_time_override(delete_time)
db.block_device_mapping_get_all_by_instance(
self.context, inst.uuid, use_slave=False).AndReturn([])
inst.save().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute_api.soft_delete, self.context, inst)
def _test_confirm_resize(self, mig_ref_passed=False):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = objects.Migration._from_db_object(
self.context, objects.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(objects.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_mig, 'save')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'confirm_resize')
self.context.elevated().AndReturn(self.context)
if not mig_ref_passed:
objects.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(
fake_mig)
self.compute_api._downsize_quota_delta(self.context,
fake_inst).AndReturn('deltas')
resvs = ['resvs']
fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
self.compute_api._reserve_quota_delta(self.context, 'deltas',
fake_inst).AndReturn(fake_quotas)
def _check_mig(expected_task_state=None):
self.assertEqual('confirming', fake_mig.status)
fake_mig.save().WithSideEffects(_check_mig)
if self.cell_type:
fake_quotas.commit()
self.compute_api._record_action_start(self.context, fake_inst,
'confirmResize')
self.compute_api.compute_rpcapi.confirm_resize(
self.context, fake_inst, fake_mig, 'compute-source',
[] if self.cell_type else fake_quotas.reservations)
self.mox.ReplayAll()
if mig_ref_passed:
self.compute_api.confirm_resize(self.context, fake_inst,
migration=fake_mig)
else:
self.compute_api.confirm_resize(self.context, fake_inst)
def test_confirm_resize(self):
self._test_confirm_resize()
def test_confirm_resize_with_migration_ref(self):
self._test_confirm_resize(mig_ref_passed=True)
def _test_revert_resize(self):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = objects.Migration._from_db_object(
self.context, objects.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(objects.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(self.compute_api,
'_reverse_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.mox.StubOutWithMock(fake_mig, 'save')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'revert_resize')
self.context.elevated().AndReturn(self.context)
objects.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(
fake_mig)
self.compute_api._reverse_upsize_quota_delta(
self.context, fake_mig).AndReturn('deltas')
resvs = ['resvs']
fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
self.compute_api._reserve_quota_delta(self.context, 'deltas',
fake_inst).AndReturn(fake_quotas)
def _check_state(expected_task_state=None):
self.assertEqual(task_states.RESIZE_REVERTING,
fake_inst.task_state)
fake_inst.save(expected_task_state=[None]).WithSideEffects(
_check_state)
def _check_mig(expected_task_state=None):
self.assertEqual('reverting', fake_mig.status)
fake_mig.save().WithSideEffects(_check_mig)
if self.cell_type:
fake_quotas.commit()
self.compute_api._record_action_start(self.context, fake_inst,
'revertResize')
self.compute_api.compute_rpcapi.revert_resize(
self.context, fake_inst, fake_mig, 'compute-dest',
[] if self.cell_type else fake_quotas.reservations)
self.mox.ReplayAll()
self.compute_api.revert_resize(self.context, fake_inst)
def test_revert_resize(self):
self._test_revert_resize()
    def test_revert_resize_concurrent_fail(self):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = objects.Migration._from_db_object(
self.context, objects.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(objects.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(self.compute_api,
'_reverse_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.context.elevated().AndReturn(self.context)
objects.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig)
delta = ['delta']
self.compute_api._reverse_upsize_quota_delta(
self.context, fake_mig).AndReturn(delta)
resvs = ['resvs']
fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
self.compute_api._reserve_quota_delta(
self.context, delta, fake_inst).AndReturn(fake_quotas)
exc = exception.UnexpectedTaskStateError(
instance_uuid=fake_inst['uuid'],
actual={'task_state': task_states.RESIZE_REVERTING},
expected={'task_state': [None]})
fake_inst.save(expected_task_state=[None]).AndRaise(exc)
fake_quotas.rollback()
self.mox.ReplayAll()
self.assertRaises(exception.UnexpectedTaskStateError,
self.compute_api.revert_resize,
self.context,
fake_inst)
def _test_resize(self, flavor_id_passed=True,
same_host=False, allow_same_host=False,
project_id=None,
extra_kwargs=None,
same_flavor=False,
clean_shutdown=True):
if extra_kwargs is None:
extra_kwargs = {}
self.flags(allow_resize_to_same_host=allow_same_host)
params = {}
if project_id is not None:
# To test instance w/ different project id than context (admin)
params['project_id'] = project_id
fake_inst = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
current_flavor = fake_inst.get_flavor()
if flavor_id_passed:
new_flavor = self._create_flavor(id=200, flavorid='new-flavor-id',
name='new_flavor', disabled=False)
if same_flavor:
new_flavor.id = current_flavor.id
flavors.get_flavor_by_flavor_id(
'new-flavor-id',
read_deleted='no').AndReturn(new_flavor)
else:
new_flavor = current_flavor
if (self.cell_type == 'compute' or
not (flavor_id_passed and same_flavor)):
resvs = ['resvs']
project_id, user_id = quotas_obj.ids_from_instance(self.context,
fake_inst)
fake_quotas = objects.Quotas.from_reservations(self.context,
resvs)
if flavor_id_passed:
self.compute_api._upsize_quota_delta(
self.context, mox.IsA(objects.Flavor),
mox.IsA(objects.Flavor)).AndReturn('deltas')
self.compute_api._reserve_quota_delta(self.context, 'deltas',
fake_inst).AndReturn(fake_quotas)
def _check_state(expected_task_state=None):
self.assertEqual(task_states.RESIZE_PREP,
fake_inst.task_state)
self.assertEqual(fake_inst.progress, 0)
for key, value in extra_kwargs.items():
self.assertEqual(value, getattr(fake_inst, key))
fake_inst.save(expected_task_state=[None]).WithSideEffects(
_check_state)
if allow_same_host:
filter_properties = {'ignore_hosts': []}
else:
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if flavor_id_passed:
expected_reservations = fake_quotas.reservations
else:
expected_reservations = []
if self.cell_type == 'api':
fake_quotas.commit()
expected_reservations = []
mig = objects.Migration()
def _get_migration(context=None):
return mig
def _check_mig():
self.assertEqual(fake_inst.uuid, mig.instance_uuid)
self.assertEqual(current_flavor.id,
mig.old_instance_type_id)
self.assertEqual(new_flavor.id,
mig.new_instance_type_id)
self.assertEqual('finished', mig.status)
if new_flavor.id != current_flavor.id:
self.assertEqual('resize', mig.migration_type)
else:
self.assertEqual('migration', mig.migration_type)
self.stubs.Set(objects, 'Migration', _get_migration)
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(mig, 'create')
self.context.elevated().AndReturn(self.context)
mig.create().WithSideEffects(_check_mig)
if flavor_id_passed:
self.compute_api._record_action_start(self.context, fake_inst,
'resize')
else:
self.compute_api._record_action_start(self.context, fake_inst,
'migrate')
scheduler_hint = {'filter_properties': filter_properties}
self.compute_api.compute_task_api.resize_instance(
self.context, fake_inst, extra_kwargs,
scheduler_hint=scheduler_hint,
flavor=mox.IsA(objects.Flavor),
reservations=expected_reservations,
clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
if flavor_id_passed:
self.compute_api.resize(self.context, fake_inst,
flavor_id='new-flavor-id',
clean_shutdown=clean_shutdown,
**extra_kwargs)
else:
self.compute_api.resize(self.context, fake_inst,
clean_shutdown=clean_shutdown,
**extra_kwargs)
def _test_migrate(self, *args, **kwargs):
self._test_resize(*args, flavor_id_passed=False, **kwargs)
def test_resize(self):
self._test_resize()
def test_resize_with_kwargs(self):
self._test_resize(extra_kwargs=dict(cow='moo'))
def test_resize_same_host_and_allowed(self):
self._test_resize(same_host=True, allow_same_host=True)
def test_resize_same_host_and_not_allowed(self):
self._test_resize(same_host=True, allow_same_host=False)
def test_resize_different_project_id(self):
self._test_resize(project_id='different')
def test_resize_forced_shutdown(self):
self._test_resize(clean_shutdown=False)
def test_migrate(self):
self._test_migrate()
def test_migrate_with_kwargs(self):
self._test_migrate(extra_kwargs=dict(cow='moo'))
def test_migrate_same_host_and_allowed(self):
self._test_migrate(same_host=True, allow_same_host=True)
def test_migrate_same_host_and_not_allowed(self):
self._test_migrate(same_host=True, allow_same_host=False)
def test_migrate_different_project_id(self):
self._test_migrate(project_id='different')
def test_resize_invalid_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = self._create_instance_obj()
exc = exception.FlavorNotFound(flavor_id='flavor-id')
flavors.get_flavor_by_flavor_id('flavor-id',
read_deleted='no').AndRaise(exc)
self.mox.ReplayAll()
with mock.patch.object(fake_inst, 'save') as mock_save:
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
def test_resize_disabled_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = self._create_instance_obj()
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', disabled=True)
flavors.get_flavor_by_flavor_id(
'flavor-id', read_deleted='no').AndReturn(fake_flavor)
self.mox.ReplayAll()
with mock.patch.object(fake_inst, 'save') as mock_save:
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
fake_inst = self._create_instance_obj()
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', root_gb=0)
get_flavor_by_flavor_id.return_value = fake_flavor
self.assertRaises(exception.CannotResizeDisk,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
def test_resize_quota_exceeds_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
# Should never reach these.
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = self._create_instance_obj()
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', disabled=False)
flavors.get_flavor_by_flavor_id(
'flavor-id', read_deleted='no').AndReturn(fake_flavor)
deltas = dict(resource=0)
self.compute_api._upsize_quota_delta(
self.context, mox.IsA(objects.Flavor),
mox.IsA(objects.Flavor)).AndReturn(deltas)
usage = dict(in_use=0, reserved=0)
quotas = {'resource': 0}
usages = {'resource': usage}
overs = ['resource']
over_quota_args = dict(quotas=quotas,
usages=usages,
overs=overs)
self.compute_api._reserve_quota_delta(self.context, deltas,
fake_inst).AndRaise(
exception.OverQuota(**over_quota_args))
self.mox.ReplayAll()
with mock.patch.object(fake_inst, 'save') as mock_save:
self.assertRaises(exception.TooManyInstances,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
def test_pause(self):
# Ensure instance can be paused.
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'pause_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.PAUSE)
rpcapi.pause_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.pause(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertEqual(task_states.PAUSING,
instance.task_state)
def _test_pause_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.pause,
self.context, instance)
def test_pause_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_pause_fails(state)
def test_unpause(self):
# Ensure instance can be unpaused.
params = dict(vm_state=vm_states.PAUSED)
instance = self._create_instance_obj(params=params)
self.assertEqual(instance.vm_state, vm_states.PAUSED)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.UNPAUSE)
rpcapi.unpause_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.unpause(self.context, instance)
self.assertEqual(vm_states.PAUSED, instance.vm_state)
self.assertEqual(task_states.UNPAUSING, instance.task_state)
def test_live_migrate_active_vm_state(self):
instance = self._create_instance_obj()
self._live_migrate_instance(instance)
def test_live_migrate_paused_vm_state(self):
paused_state = dict(vm_state=vm_states.PAUSED)
instance = self._create_instance_obj(params=paused_state)
self._live_migrate_instance(instance)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceAction, 'action_start')
def _live_migrate_instance(self, instance, _save, _action):
        # TODO(gilliard): This logic is upside-down (different behaviour
        # depending on which class this method is mixed into). Once we have
        # cellsv2 we can remove this kind of logic from this test.
if self.cell_type == 'api':
api = self.compute_api.cells_rpcapi
else:
api = conductor.api.ComputeTaskAPI
with mock.patch.object(api, 'live_migrate_instance') as task:
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
host_name='fake_dest_host')
self.assertEqual(task_states.MIGRATING, instance.task_state)
task.assert_called_once_with(self.context, instance,
'fake_dest_host',
block_migration=True,
disk_over_commit=True)
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
# and that volumes return to previous states in case of error.
def fake_vol_api_begin_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
volumes[volume_id]['status'] = 'detaching'
def fake_vol_api_roll_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'detaching':
volumes[volume_id]['status'] = 'in-use'
def fake_vol_api_reserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
self.assertEqual(volumes[volume_id]['status'], 'available')
volumes[volume_id]['status'] = 'attaching'
def fake_vol_api_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
def fake_swap_volume_exc(context, instance, old_volume_id,
new_volume_id):
raise AttributeError # Random exception
# Should fail if VM state is not valid
instance = fake_instance.fake_instance_obj(None, **{
'vm_state': vm_states.BUILDING,
'launched_at': timeutils.utcnow(),
'locked': False,
'availability_zone': 'fake_az',
'uuid': 'fake'})
volumes = {}
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'attach_status': 'attached',
'instance_uuid': 'fake',
'size': 5,
'status': 'in-use'}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'attach_status': 'detached',
'instance_uuid': None,
'size': 5,
'status': 'available'}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
# Should fail if old volume is not attached
volumes[old_volume_id]['attach_status'] = 'detached'
self.assertRaises(exception.VolumeUnattached,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['attach_status'] = 'attached'
# Should fail if old volume's instance_uuid is not that of the instance
volumes[old_volume_id]['instance_uuid'] = 'fake2'
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['instance_uuid'] = 'fake'
# Should fail if new volume is attached
volumes[new_volume_id]['attach_status'] = 'attached'
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[new_volume_id]['attach_status'] = 'detached'
# Should fail if new volume is smaller than the old volume
volumes[new_volume_id]['size'] = 4
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[new_volume_id]['size'] = 5
# Fail call to swap_volume
self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
fake_vol_api_begin_detaching)
self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
fake_vol_api_roll_detaching)
self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
fake_vol_api_reserve)
self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
fake_vol_api_unreserve)
self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
fake_swap_volume_exc)
self.assertRaises(AttributeError,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
# Should succeed
self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
lambda c, instance, old_volume_id, new_volume_id: True)
self.compute_api.swap_volume(self.context, instance,
volumes[old_volume_id],
volumes[new_volume_id])
def _test_snapshot_and_backup(self, is_snapshot=True,
with_base_ref=False, min_ram=None,
min_disk=None,
create_fails=False,
instance_vm_state=vm_states.ACTIVE):
params = dict(locked=True)
instance = self._create_instance_obj(params=params)
instance.vm_state = instance_vm_state
        # 'cache_in_nova' is for testing non-inheritable properties.
        # 'user_id' should also not be carried from sys_meta into the image
        # properties, since it is set explicitly by _create_image() in the
        # compute API.
fake_image_meta = {
'is_public': True,
'name': 'base-name',
'properties': {
'user_id': 'meow',
'foo': 'bar',
'blah': 'bug?',
'cache_in_nova': 'dropped',
},
}
image_type = is_snapshot and 'snapshot' or 'backup'
sent_meta = {
'is_public': False,
'name': 'fake-name',
'properties': {
'user_id': self.context.user_id,
'instance_uuid': instance.uuid,
'image_type': image_type,
'foo': 'bar',
'blah': 'bug?',
'cow': 'moo',
'cat': 'meow',
},
}
if is_snapshot:
if min_ram is not None:
fake_image_meta['min_ram'] = min_ram
sent_meta['min_ram'] = min_ram
if min_disk is not None:
fake_image_meta['min_disk'] = min_disk
sent_meta['min_disk'] = min_disk
else:
sent_meta['properties']['backup_type'] = 'fake-backup-type'
extra_props = dict(cow='moo', cat='meow')
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(self.compute_api.image_api,
'create')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'snapshot_instance')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'backup_instance')
if not is_snapshot:
self.mox.StubOutWithMock(self.compute_api,
'is_volume_backed_instance')
self.compute_api.is_volume_backed_instance(self.context,
instance).AndReturn(False)
utils.get_image_from_system_metadata(
instance.system_metadata).AndReturn(fake_image_meta)
fake_image = dict(id='fake-image-id')
mock_method = self.compute_api.image_api.create(
self.context, sent_meta)
if create_fails:
mock_method.AndRaise(test.TestingException())
else:
mock_method.AndReturn(fake_image)
def check_state(expected_task_state=None):
expected_state = (is_snapshot and
task_states.IMAGE_SNAPSHOT_PENDING or
task_states.IMAGE_BACKUP)
self.assertEqual(expected_state, instance.task_state)
if not create_fails:
instance.save(expected_task_state=[None]).WithSideEffects(
check_state)
if is_snapshot:
self.compute_api.compute_rpcapi.snapshot_instance(
self.context, instance, fake_image['id'])
else:
self.compute_api.compute_rpcapi.backup_instance(
self.context, instance, fake_image['id'],
'fake-backup-type', 'fake-rotation')
self.mox.ReplayAll()
got_exc = False
try:
if is_snapshot:
res = self.compute_api.snapshot(self.context, instance,
'fake-name',
extra_properties=extra_props)
else:
res = self.compute_api.backup(self.context, instance,
'fake-name',
'fake-backup-type',
'fake-rotation',
extra_properties=extra_props)
self.assertEqual(fake_image, res)
except test.TestingException:
got_exc = True
self.assertEqual(create_fails, got_exc)
self.mox.UnsetStubs()
def test_snapshot(self):
self._test_snapshot_and_backup()
def test_snapshot_fails(self):
self._test_snapshot_and_backup(create_fails=True)
def test_snapshot_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
def test_snapshot_with_base_image_ref(self):
self._test_snapshot_and_backup(with_base_ref=True)
def test_snapshot_min_ram(self):
self._test_snapshot_and_backup(min_ram=42)
def test_snapshot_min_disk(self):
self._test_snapshot_and_backup(min_disk=42)
def test_backup(self):
for state in [vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED]:
self._test_snapshot_and_backup(is_snapshot=False,
instance_vm_state=state)
def test_backup_fails(self):
self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
def test_backup_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
def test_backup_with_base_image_ref(self):
self._test_snapshot_and_backup(is_snapshot=False,
with_base_ref=True)
def test_backup_volume_backed_instance(self):
instance = self._create_instance_obj()
with mock.patch.object(self.compute_api,
'is_volume_backed_instance',
return_value=True) as mock_is_volume_backed:
self.assertRaises(exception.InvalidRequest,
self.compute_api.backup, self.context,
instance, 'fake-name', 'weekly',
3, extra_properties={})
mock_is_volume_backed.assert_called_once_with(self.context,
instance)
def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails,
vm_state=vm_states.ACTIVE):
params = dict(locked=True, vm_state=vm_state)
instance = self._create_instance_obj(params=params)
instance['root_device_name'] = 'vda'
instance_bdms = []
image_meta = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away',
'owner': 'fake-tenant',
}
expect_meta = {
'name': 'test-snapshot',
'properties': {'root_device_name': 'vda',
'mappings': 'DONTCARE'},
'size': 0,
'is_public': False
}
quiesced = [False, False]
quiesce_expected = not quiesce_fails and vm_state == vm_states.ACTIVE
if quiesce_required:
image_meta['properties']['os_require_quiesce'] = 'yes'
expect_meta['properties']['os_require_quiesce'] = 'yes'
def fake_get_all_by_instance(context, instance, use_slave=False):
return copy.deepcopy(instance_bdms)
def fake_image_create(context, image_meta, data=None):
self.assertThat(image_meta, matchers.DictMatches(expect_meta))
def fake_volume_get(context, volume_id):
return {'id': volume_id, 'display_description': ''}
def fake_volume_create_snapshot(context, volume_id, name, description):
return {'id': '%s-snapshot' % volume_id}
def fake_quiesce_instance(context, instance):
if quiesce_fails:
raise exception.InstanceQuiesceNotSupported(
instance_id=instance['uuid'], reason='test')
quiesced[0] = True
def fake_unquiesce_instance(context, instance, mapping=None):
quiesced[1] = True
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_get_all_by_instance)
self.stubs.Set(self.compute_api.image_api, 'create',
fake_image_create)
self.stubs.Set(self.compute_api.volume_api, 'get',
fake_volume_get)
self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
fake_volume_create_snapshot)
self.stubs.Set(self.compute_api.compute_rpcapi, 'quiesce_instance',
fake_quiesce_instance)
self.stubs.Set(self.compute_api.compute_rpcapi, 'unquiesce_instance',
fake_unquiesce_instance)
# No block devices defined
self.compute_api.snapshot_volume_backed(
self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
bdm = fake_block_device.FakeDbBlockDeviceDict(
{'no_device': False, 'volume_id': '1', 'boot_index': 0,
'connection_info': 'inf', 'device_name': '/dev/vda',
'source_type': 'volume', 'destination_type': 'volume'})
instance_bdms.append(bdm)
expect_meta['properties']['bdm_v2'] = True
expect_meta['properties']['block_device_mapping'] = []
expect_meta['properties']['block_device_mapping'].append(
{'guest_format': None, 'boot_index': 0, 'no_device': None,
'image_id': None, 'volume_id': None, 'disk_bus': None,
'volume_size': None, 'source_type': 'snapshot',
'device_type': None, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda',
'destination_type': 'volume', 'delete_on_termination': False})
# All the db_only fields and the volume ones are removed
self.compute_api.snapshot_volume_backed(
self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
image_mappings = [{'virtual': 'ami', 'device': 'vda'},
{'device': 'vda', 'virtual': 'ephemeral0'},
{'device': 'vdb', 'virtual': 'swap'},
{'device': 'vdc', 'virtual': 'ephemeral1'}]
image_meta['properties']['mappings'] = image_mappings
expect_meta['properties']['mappings'] = [
{'virtual': 'ami', 'device': 'vda'}]
quiesced = [False, False]
        # Check that the mappings from the image properties are included
self.compute_api.snapshot_volume_backed(
self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
def test_snapshot_volume_backed(self):
self._test_snapshot_volume_backed(False, False)
def test_snapshot_volume_backed_with_quiesce(self):
self._test_snapshot_volume_backed(True, False)
def test_snapshot_volume_backed_with_quiesce_skipped(self):
self._test_snapshot_volume_backed(False, True)
def test_snapshot_volume_backed_with_quiesce_exception(self):
self.assertRaises(exception.NovaException,
self._test_snapshot_volume_backed, True, True)
def test_snapshot_volume_backed_with_quiesce_stopped(self):
self._test_snapshot_volume_backed(True, True,
vm_state=vm_states.STOPPED)
def test_volume_snapshot_create(self):
volume_id = '1'
create_info = {'id': 'eyedee'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'device_name': '/dev/sda2',
'source_type': 'volume',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 1,
'boot_index': -1})
fake_bdm['instance'] = fake_instance.fake_db_instance()
fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
fake_bdm = objects.BlockDeviceMapping._from_db_object(
self.context, objects.BlockDeviceMapping(),
fake_bdm, expected_attrs=['instance'])
self.mox.StubOutWithMock(objects.BlockDeviceMapping,
'get_by_volume_id')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'volume_snapshot_create')
objects.BlockDeviceMapping.get_by_volume_id(
self.context, volume_id,
expected_attrs=['instance']).AndReturn(fake_bdm)
self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
fake_bdm['instance'], volume_id, create_info)
self.mox.ReplayAll()
snapshot = self.compute_api.volume_snapshot_create(self.context,
volume_id, create_info)
expected_snapshot = {
'snapshot': {
'id': create_info['id'],
'volumeId': volume_id,
},
}
self.assertEqual(snapshot, expected_snapshot)
def test_volume_snapshot_delete(self):
volume_id = '1'
snapshot_id = '2'
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'device_name': '/dev/sda2',
'source_type': 'volume',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 1,
'boot_index': -1})
fake_bdm['instance'] = fake_instance.fake_db_instance()
fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
fake_bdm = objects.BlockDeviceMapping._from_db_object(
self.context, objects.BlockDeviceMapping(),
fake_bdm, expected_attrs=['instance'])
self.mox.StubOutWithMock(objects.BlockDeviceMapping,
'get_by_volume_id')
self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
'volume_snapshot_delete')
objects.BlockDeviceMapping.get_by_volume_id(
self.context, volume_id,
expected_attrs=['instance']).AndReturn(fake_bdm)
self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
fake_bdm['instance'], volume_id, snapshot_id, {})
self.mox.ReplayAll()
self.compute_api.volume_snapshot_delete(self.context, volume_id,
snapshot_id, {})
def _test_boot_volume_bootable(self, is_bootable=False):
def get_vol_data(*args, **kwargs):
return {'bootable': is_bootable}
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {},
'size': 0, 'status': 'active'}
with mock.patch.object(self.compute_api.volume_api, 'get',
side_effect=get_vol_data):
if not is_bootable:
self.assertRaises(exception.InvalidBDMVolumeNotBootable,
self.compute_api._get_bdm_image_metadata,
self.context, block_device_mapping)
else:
meta = self.compute_api._get_bdm_image_metadata(self.context,
block_device_mapping)
self.assertEqual(expected_meta, meta)
def test_boot_volume_non_bootable(self):
self._test_boot_volume_bootable(False)
def test_boot_volume_bootable(self):
self._test_boot_volume_bootable(True)
def test_boot_volume_basic_property(self):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
fake_volume = {"volume_image_metadata":
{"min_ram": 256, "min_disk": 128, "foo": "bar"}}
with mock.patch.object(self.compute_api.volume_api, 'get',
return_value=fake_volume):
meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
self.assertEqual('bar', meta['properties']['foo'])
def test_boot_volume_snapshot_basic_property(self):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': '2',
'volume_id': None,
'delete_on_termination': False,
}]
fake_volume = {"volume_image_metadata":
{"min_ram": 256, "min_disk": 128, "foo": "bar"}}
fake_snapshot = {"volume_id": "1"}
with contextlib.nested(
mock.patch.object(self.compute_api.volume_api, 'get',
return_value=fake_volume),
mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
return_value=fake_snapshot)) as (
volume_get, volume_get_snapshot):
meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
self.assertEqual('bar', meta['properties']['foo'])
volume_get_snapshot.assert_called_once_with(self.context,
block_device_mapping[0]['snapshot_id'])
volume_get.assert_called_once_with(self.context,
fake_snapshot['volume_id'])
def _create_instance_with_disabled_disk_config(self, object=False):
sys_meta = {"image_auto_disk_config": "Disabled"}
params = {"system_metadata": sys_meta}
instance = self._create_instance_obj(params=params)
if object:
return instance
return obj_base.obj_to_primitive(instance)
def _setup_fake_image_with_disabled_disk_config(self):
self.fake_image = {
'id': 1,
'name': 'fake_name',
'status': 'active',
'properties': {"auto_disk_config": "Disabled"},
}
def fake_show(obj, context, image_id, **kwargs):
return self.fake_image
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
return self.fake_image['id']
def test_resize_with_disabled_auto_disk_config_fails(self):
fake_inst = self._create_instance_with_disabled_disk_config(
object=True)
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.resize,
self.context, fake_inst,
auto_disk_config=True)
def test_create_with_disabled_auto_disk_config_fails(self):
image_id = self._setup_fake_image_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.create, self.context,
"fake_flavor", image_id, auto_disk_config=True)
def test_rebuild_with_disabled_auto_disk_config_fails(self):
fake_inst = self._create_instance_with_disabled_disk_config(
object=True)
image_id = self._setup_fake_image_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.rebuild,
self.context,
fake_inst,
image_id,
"new password",
auto_disk_config=True)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
@mock.patch.object(compute_api.API, '_record_action_start')
def test_rebuild(self, _record_action_start,
_checks_for_create_and_rebuild, _check_auto_disk_config,
_get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'])
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
image_href = ''
image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': arch.X86_64}}
admin_pass = ''
files_to_inject = []
bdms = []
_get_image.return_value = (None, image)
bdm_get_by_instance_uuid.return_value = bdms
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
self.compute_api.rebuild(self.context, instance, image_href,
admin_pass, files_to_inject)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
injected_files=files_to_inject, image_ref=image_href,
orig_image_ref=image_href,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host, kwargs={})
_check_auto_disk_config.assert_called_once_with(image=image)
_checks_for_create_and_rebuild.assert_called_once_with(self.context,
None, image, flavor, {}, [])
self.assertNotEqual(orig_system_metadata, instance.system_metadata)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
@mock.patch.object(compute_api.API, '_record_action_start')
def test_rebuild_change_image(self, _record_action_start,
_checks_for_create_and_rebuild, _check_auto_disk_config,
_get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
orig_system_metadata = {}
get_flavor.return_value = test_flavor.fake_flavor
orig_image_href = 'orig_image'
orig_image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': arch.X86_64,
'vm_mode': 'hvm'}}
new_image_href = 'new_image'
new_image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': arch.X86_64,
'vm_mode': 'xen'}}
admin_pass = ''
files_to_inject = []
bdms = []
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'],
image_ref=orig_image_href,
vm_mode=vm_mode.HVM)
flavor = instance.get_flavor()
def get_image(context, image_href):
if image_href == new_image_href:
return (None, new_image)
if image_href == orig_image_href:
return (None, orig_image)
_get_image.side_effect = get_image
bdm_get_by_instance_uuid.return_value = bdms
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
self.compute_api.rebuild(self.context, instance, new_image_href,
admin_pass, files_to_inject)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
injected_files=files_to_inject, image_ref=new_image_href,
orig_image_ref=orig_image_href,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host, kwargs={})
_check_auto_disk_config.assert_called_once_with(image=new_image)
_checks_for_create_and_rebuild.assert_called_once_with(self.context,
None, new_image, flavor, {}, [])
self.assertEqual(vm_mode.XEN, instance.vm_mode)
def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
side_effect):
injected_files = [
{
"path": "/etc/banner.txt",
"contents": "foo"
}
]
with mock.patch.object(quota.QUOTAS, 'limit_check',
side_effect=side_effect):
self.compute_api._check_injected_file_quota(
self.context, injected_files)
def test_check_injected_file_quota_onset_file_limit_exceeded(self):
# This is the first call to limit_check.
side_effect = exception.OverQuota(overs='injected_files')
self.assertRaises(exception.OnsetFileLimitExceeded,
self._test_check_injected_file_quota_onset_file_limit_exceeded,
side_effect)
def test_check_injected_file_quota_onset_file_path_limit(self):
# This is the second call to limit_check.
side_effect = (mock.DEFAULT,
exception.OverQuota(overs='injected_file_path_bytes'))
self.assertRaises(exception.OnsetFilePathLimitExceeded,
self._test_check_injected_file_quota_onset_file_limit_exceeded,
side_effect)
def test_check_injected_file_quota_onset_file_content_limit(self):
# This is the second call to limit_check but with different overs.
side_effect = (mock.DEFAULT,
exception.OverQuota(overs='injected_file_content_bytes'))
self.assertRaises(exception.OnsetFileContentLimitExceeded,
self._test_check_injected_file_quota_onset_file_limit_exceeded,
side_effect)
@mock.patch('nova.objects.Quotas.commit')
@mock.patch('nova.objects.Quotas.reserve')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceAction.action_start')
def test_restore(self, action_start, instance_save, quota_reserve,
quota_commit):
instance = self._create_instance_obj()
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save()
with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
self.compute_api.restore(self.context, instance)
rpc.restore_instance.assert_called_once_with(self.context,
instance)
self.assertEqual(instance.task_state, task_states.RESTORING)
self.assertEqual(1, quota_commit.call_count)
def test_external_instance_event(self):
instances = [
objects.Instance(uuid='uuid1', host='host1'),
objects.Instance(uuid='uuid2', host='host1'),
objects.Instance(uuid='uuid3', host='host2'),
]
events = [
objects.InstanceExternalEvent(instance_uuid='uuid1'),
objects.InstanceExternalEvent(instance_uuid='uuid2'),
objects.InstanceExternalEvent(instance_uuid='uuid3'),
]
self.compute_api.compute_rpcapi = mock.MagicMock()
self.compute_api.external_instance_event(self.context,
instances, events)
method = self.compute_api.compute_rpcapi.external_instance_event
method.assert_any_call(self.context, instances[0:2], events[0:2])
method.assert_any_call(self.context, instances[2:], events[2:])
self.assertEqual(2, method.call_count)
def test_volume_ops_invalid_task_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
instance.task_state = 'Any'
volume_id = uuidutils.generate_uuid()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_volume,
self.context, instance, volume_id)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume,
self.context, instance, volume_id)
new_volume_id = uuidutils.generate_uuid()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume,
self.context, instance,
volume_id, new_volume_id)
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
}))]
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._get_bdm_image_metadata,
self.context,
bdms, legacy_bdm=True)
@mock.patch.object(cinder.API, 'get')
@mock.patch.object(cinder.API, 'check_attach',
side_effect=exception.InvalidVolume(reason='error'))
def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get):
# Tests that an InvalidVolume exception raised from
# volume_api.check_attach due to the volume status not being
# 'available' results in _validate_bdm re-raising InvalidVolume.
instance = self._create_instance_obj()
instance_type = self._create_flavor()
volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
volume_info = {'status': 'error',
'attach_status': 'detached',
'id': volume_id}
mock_get.return_value = volume_info
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'boot_index': 0,
'volume_id': volume_id,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
}))]
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdms)
mock_get.assert_called_once_with(self.context, volume_id)
mock_check_attach.assert_called_once_with(
self.context, volume_info, instance=instance)
@mock.patch.object(cinder.API, 'get_snapshot',
side_effect=exception.CinderConnectionFailed(reason='error'))
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
instance = self._create_instance_obj()
instance_type = self._create_flavor()
bdm = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
bdms = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'snapshot_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdm)
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdms)
def _test_create_db_entry_for_new_instance_with_cinder_error(self,
expected_exception):
@mock.patch.object(objects.Instance, 'create')
@mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
@mock.patch.object(compute_api.API, '_populate_instance_names')
@mock.patch.object(compute_api.API, '_populate_instance_for_create')
def do_test(self, mock_create, mock_names, mock_ensure,
mock_inst_create):
instance = self._create_instance_obj()
instance['display_name'] = 'FAKE_DISPLAY_NAME'
instance['shutdown_terminate'] = False
instance_type = self._create_flavor()
fake_image = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away'}
fake_security_group = None
fake_num_instances = 1
fake_index = 1
bdm = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
with mock.patch.object(instance, "destroy") as destroy:
self.assertRaises(expected_exception,
self.compute_api.
create_db_entry_for_new_instance,
self.context,
instance_type,
fake_image,
instance,
fake_security_group,
bdm,
fake_num_instances,
fake_index)
destroy.assert_called_once_with()
# We use a nested method so we can decorate with the mocks.
do_test(self)
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.CinderConnectionFailed)
@mock.patch.object(cinder.API, 'get',
return_value={'id': 1, 'status': 'error',
'attach_status': 'detached'})
def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.InvalidVolume)
def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None,
rescue_image=None, clean_shutdown=True):
instance = self._create_instance_obj(params={'vm_state': vm_state})
bdms = []
with contextlib.nested(
mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=bdms),
mock.patch.object(self.compute_api, 'is_volume_backed_instance',
return_value=False),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'rescue_instance')
) as (
bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
record_action_start, rpcapi_rescue_instance
):
self.compute_api.rescue(self.context, instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.RESCUING, instance.task_state)
# assert our mock calls
bdm_get_by_instance_uuid.assert_called_once_with(
self.context, instance.uuid)
volume_backed_inst.assert_called_once_with(
self.context, instance, bdms)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.RESCUE)
rpcapi_rescue_instance.assert_called_once_with(
self.context, instance=instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image,
clean_shutdown=clean_shutdown)
def test_rescue_active(self):
self._test_rescue()
def test_rescue_stopped(self):
self._test_rescue(vm_state=vm_states.STOPPED)
def test_rescue_error(self):
self._test_rescue(vm_state=vm_states.ERROR)
def test_rescue_with_password(self):
self._test_rescue(rescue_password='fake-password')
def test_rescue_with_image(self):
self._test_rescue(rescue_image='fake-image')
def test_rescue_forced_shutdown(self):
self._test_rescue(clean_shutdown=False)
def test_unrescue(self):
instance = self._create_instance_obj(
params={'vm_state': vm_states.RESCUED})
with contextlib.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'unrescue_instance')
) as (
instance_save, record_action_start, rpcapi_unrescue_instance
):
self.compute_api.unrescue(self.context, instance)
# assert field values set on the instance object
self.assertEqual(task_states.UNRESCUING, instance.task_state)
# assert our mock calls
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.UNRESCUE)
rpcapi_unrescue_instance.assert_called_once_with(
self.context, instance=instance)
def test_set_admin_password_invalid_state(self):
# Tests that InstanceInvalidState is raised when not ACTIVE.
instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.set_admin_password,
self.context, instance)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
instance = self._create_instance_obj()
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(self.compute_api, '_record_action_start')
@mock.patch.object(self.compute_api.compute_rpcapi,
'set_admin_password')
def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
# call the API
self.compute_api.set_admin_password(self.context, instance)
# make our assertions
instance_save_mock.assert_called_once_with(
expected_task_state=[None])
record_mock.assert_called_once_with(
self.context, instance, instance_actions.CHANGE_PASSWORD)
compute_rpcapi_mock.assert_called_once_with(
self.context, instance=instance, new_pass=None)
do_test()
def _test_attach_interface_invalid_state(self, state):
instance = self._create_instance_obj(
params={'vm_state': state})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_interface,
self.context, instance, '', '', '', [])
def test_attach_interface_invalid_state(self):
for state in [vm_states.BUILDING, vm_states.DELETED,
vm_states.ERROR, vm_states.RESCUED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.SUSPENDED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED]:
self._test_attach_interface_invalid_state(state)
def _test_detach_interface_invalid_state(self, state):
instance = self._create_instance_obj(
params={'vm_state': state})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_interface,
self.context, instance, '', '', '', [])
def test_detach_interface_invalid_state(self):
for state in [vm_states.BUILDING, vm_states.DELETED,
vm_states.ERROR, vm_states.RESCUED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.SUSPENDED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED]:
self._test_detach_interface_invalid_state(state)
def test_check_and_transform_bdm(self):
instance_type = self._create_flavor()
base_options = {'uuid': 'fake_uuid',
'image_ref': 'fake_image_ref',
'metadata': {}}
image_meta = {'status': 'active',
'name': 'image_name',
'deleted': False,
'container_format': 'bare',
'id': 'image_id'}
legacy_bdm = False
block_device_mapping = [{'boot_index': 0,
'device_name': None,
'image_id': 'image_id',
'source_type': 'image'},
{'device_name': '/dev/vda',
'source_type': 'volume',
'device_type': None,
'volume_id': 'volume_id'}]
self.assertRaises(exception.InvalidRequest,
self.compute_api._check_and_transform_bdm,
self.context, base_options, instance_type,
image_meta, 1, 1, block_device_mapping, legacy_bdm)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceAction, 'action_start')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'pause_instance')
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(compute_api.API, '_get_instances_by_filters',
return_value=[])
@mock.patch.object(compute_api.API, '_create_instance')
def test_skip_policy_check(self, mock_create, mock_get_ins_by_filters,
mock_get, mock_pause, mock_action, mock_save):
policy.reset()
rules = {'compute:pause': common_policy.parse_rule('!'),
'compute:get': common_policy.parse_rule('!'),
'compute:get_all': common_policy.parse_rule('!'),
'compute:create': common_policy.parse_rule('!')}
policy.set_rules(common_policy.Rules(rules))
instance = self._create_instance_obj()
mock_get.return_value = instance
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.pause, self.context, instance)
api = compute_api.API(skip_policy_check=True)
api.pause(self.context, instance)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance.uuid)
api = compute_api.API(skip_policy_check=True)
api.get(self.context, instance.uuid)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
api = compute_api.API(skip_policy_check=True)
api.get_all(self.context)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, None, None)
api = compute_api.API(skip_policy_check=True)
api.create(self.context, None, None)
@mock.patch.object(compute_api.API, '_get_instances_by_filters')
def test_tenant_to_project_conversion(self, mock_get):
mock_get.return_value = []
api = compute_api.API()
api.get_all(self.context, search_opts={'tenant_id': 'foo'})
filters = mock_get.call_args_list[0][0][1]
self.assertEqual({'project_id': 'foo'}, filters)
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
super(ComputeAPIUnitTestCase, self).setUp()
self.compute_api = compute_api.API()
self.cell_type = None
def test_resize_same_flavor_fails(self):
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
def setUp(self):
super(ComputeAPIAPICellUnitTestCase, self).setUp()
self.flags(cell_type='api', enable=True, group='cells')
self.compute_api = compute_cells_api.ComputeCellsAPI()
self.cell_type = 'api'
def test_resize_same_flavor_fails(self):
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
def setUp(self):
super(ComputeAPIComputeCellUnitTestCase, self).setUp()
self.flags(cell_type='compute', enable=True, group='cells')
self.compute_api = compute_api.API()
self.cell_type = 'compute'
def test_resize_same_flavor_passes(self):
self._test_resize(same_flavor=True)
class DiffDictTestCase(test.NoDBTestCase):
"""Unit tests for _diff_dict()."""
def test_no_change(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=2, c=3)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, {})
def test_new_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=2, c=3, d=4)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, dict(d=['+', 4]))
def test_changed_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=4, c=3)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, dict(b=['+', 4]))
def test_removed_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, c=3)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, dict(b=['-']))
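# Editor's sketch (not nova's implementation): the DiffDictTestCase tests above
# pin down the _diff_dict() contract -- an added or changed key maps to
# ['+', new_value] and a removed key maps to ['-']. A minimal function honoring
# that contract would look like:
def _diff_dict_sketch(old, new):
    diff = dict((k, ['+', v]) for k, v in new.items()
                if k not in old or old[k] != v)
    diff.update((k, ['-']) for k in old if k not in new)
    return diff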
class SecurityGroupAPITest(test.NoDBTestCase):
def setUp(self):
super(SecurityGroupAPITest, self).setUp()
self.secgroup_api = compute_api.SecurityGroupAPI()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
@mock.patch('nova.objects.security_group.SecurityGroupList.'
'get_by_instance')
def test_get_instance_security_groups(self, mock_get):
groups = objects.SecurityGroupList()
groups.objects = [objects.SecurityGroup(name='foo'),
objects.SecurityGroup(name='bar')]
mock_get.return_value = groups
names = self.secgroup_api.get_instance_security_groups(self.context,
'fake-uuid')
self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
self.assertEqual(1, mock_get.call_count)
self.assertEqual('fake-uuid', mock_get.call_args_list[0][0][1].uuid)
|
LoHChina/nova
|
nova/tests/unit/compute/test_compute_api.py
|
Python
|
apache-2.0
| 128,405
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Meta-strategy solvers for PSRO."""
import numpy as np
from open_spiel.python.algorithms import lp_solver
from open_spiel.python.algorithms import projected_replicator_dynamics
import pyspiel
EPSILON_MIN_POSITIVE_PROBA = 1e-8
def uniform_strategy(solver, return_joint=False):
"""Returns a Random Uniform distribution on policies.
Args:
solver: GenPSROSolver instance.
    return_joint: If true, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
uniform distribution on strategies.
"""
policies = solver.get_policies()
policy_lengths = [len(pol) for pol in policies]
result = [np.ones(pol_len) / pol_len for pol_len in policy_lengths]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def softmax_on_range(number_policies):
  """Returns softmax weights over policy indices, favoring the most recent."""
  x = np.array(list(range(number_policies)))
  x = np.exp(x-x.max())
  x /= np.sum(x)
  return x
def uniform_biased_strategy(solver, return_joint=False):
"""Returns a Biased Random Uniform distribution on policies.
The uniform distribution is biased to prioritize playing against more recent
policies (Policies that were appended to the policy list later in training)
instead of older ones.
Args:
solver: GenPSROSolver instance.
    return_joint: If true, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
uniform distribution on strategies.
"""
policies = solver.get_policies()
if not isinstance(policies[0], list):
policies = [policies]
policy_lengths = [len(pol) for pol in policies]
result = [softmax_on_range(pol_len) for pol_len in policy_lengths]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def renormalize(probabilities):
"""Replaces all negative entries with zeroes and normalizes the result.
Args:
probabilities: probability vector to renormalize. Has to be one-dimensional.
Returns:
Renormalized probabilities.
"""
probabilities[probabilities < 0] = 0
probabilities = probabilities / np.sum(probabilities)
return probabilities
def get_joint_strategy_from_marginals(probabilities):
"""Returns a joint strategy matrix from a list of marginals.
Args:
    probabilities: list of per-player marginal probability vectors.
Returns:
A joint strategy from a list of marginals.
"""
probas = []
for i in range(len(probabilities)):
probas_shapes = [1] * len(probabilities)
probas_shapes[i] = -1
probas.append(probabilities[i].reshape(*probas_shapes))
result = np.product(probas)
return result.reshape(-1)
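# Editor's sketch (not part of the original module): a tiny worked example of
# combining per-player marginals into a flattened joint distribution.
def _example_joint_from_marginals():
  """Joint of [0.25, 0.75] and [0.5, 0.5] is their flattened outer product."""
  marginals = [np.array([0.25, 0.75]), np.array([0.5, 0.5])]
  # With a NumPy version that accepts the ragged list built above (as this
  # module assumes), the result is [0.125, 0.125, 0.375, 0.375].
  return get_joint_strategy_from_marginals(marginals)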
def nash_strategy(solver, return_joint=False):
"""Returns nash distribution on meta game matrix.
This method only works for two player zero-sum games.
Args:
solver: GenPSROSolver instance.
    return_joint: If true, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
Nash distribution on strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, list):
meta_games = [meta_games, -meta_games]
meta_games = [x.tolist() for x in meta_games]
if len(meta_games) != 2:
raise NotImplementedError(
"nash_strategy solver works only for 2p zero-sum"
"games, but was invoked for a {} player game".format(len(meta_games)))
nash_prob_1, nash_prob_2, _, _ = (
lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(*meta_games)))
result = [
renormalize(np.array(nash_prob_1).reshape(-1)),
renormalize(np.array(nash_prob_2).reshape(-1))
]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def prd_strategy(solver, return_joint=False):
"""Computes Projected Replicator Dynamics strategies.
Args:
solver: GenPSROSolver instance.
    return_joint: If true, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
PRD-computed strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, list):
meta_games = [meta_games, -meta_games]
kwargs = solver.get_kwargs()
result = projected_replicator_dynamics.projected_replicator_dynamics(
meta_games, **kwargs)
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
META_STRATEGY_METHODS = {
"uniform_biased": uniform_biased_strategy,
"uniform": uniform_strategy,
"nash": nash_strategy,
"prd": prd_strategy,
}
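# Editor's sketch (not part of the original module): how a caller might look up
# and invoke one of the meta-strategy solvers above. _FakeSolver is a made-up
# stand-in; a real GenPSROSolver exposes the same accessors.
class _FakeSolver(object):
  """Minimal stand-in providing the accessors the solvers above rely on."""
  def __init__(self, meta_game, policies, kwargs=None):
    self._meta_game = meta_game
    self._policies = policies
    self._kwargs = kwargs or {}
  def get_meta_game(self):
    return self._meta_game
  def get_policies(self):
    return self._policies
  def get_kwargs(self):
    return self._kwargs
def _example_meta_strategy(name="uniform"):
  """Returns the marginals computed by the named meta-strategy solver."""
  solver = _FakeSolver(meta_game=np.eye(2),
                       policies=[["p0", "p1"], ["q0", "q1"]])
  return META_STRATEGY_METHODS[name](solver)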
|
deepmind/open_spiel
|
open_spiel/python/algorithms/psro_v2/meta_strategies.py
|
Python
|
apache-2.0
| 5,344
|
from __future__ import division
# # TODO: document those functions
# def array_hash(X):
# writeable = X.flags.writeable
# X.flags.writeable = False
# h = hash(X.tobytes())
# X.flags.writeable = writeable
# return h
def filehash(filepath):
r"""Compute sha256 from a given file."""
import hashlib
BUF_SIZE = 65536
sha256 = hashlib.sha256()
with open(filepath, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
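# Editor's sketch (not part of the original module): hashing a small temporary
# file; the payload and path below are made up for illustration.
def _example_filehash():
    """Writes a known payload to a temp file and returns its sha256 digest."""
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
        tmp.write(b"hello world\n")
        path = tmp.name
    return filehash(path)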
|
Horta/limix
|
limix/sh/_hash.py
|
Python
|
apache-2.0
| 582
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
def sh_call(globals = {}, locals = {}):
import sys
if not len(sys.argv) > 1:
raise RuntimeError("invalid number of arguments (no method)")
name = sys.argv[1]
method = globals[name]
method(*sys.argv[2:])
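# Editor's sketch (not part of the original module): a hypothetical command
# module would define plain functions and hand its namespace to sh_call, which
# then dispatches to the function named by the first command line argument.
# The function and argument names below are made up for illustration.
def _example_greet(name):
    print("hello %s" % name)
def _example_dispatch():
    import sys
    # equivalent of running: python some_module.py _example_greet John
    sys.argv = ["some_module.py", "_example_greet", "John"]
    sh_call(globals(), locals())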
|
hivesolutions/netius
|
src/netius/sh/base.py
|
Python
|
apache-2.0
| 1,533
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Private methods and private fields.
Private field: self.__Thailand
A private field cannot be accessed directly through the object or the class;
it has to be accessed through an instance method.
A private field can also be exposed through a property.
Private methods:
A private method cannot be called directly through the object or the class;
it is called through a regular instance method.
japan._Provice__sha()  # calls the private method via name mangling, which is not recommended
Exposing a private field through a property lets others read it without being
able to modify it.
'''
class Provice:
    memo = '省份之一'  # class-level (static) field; the string means "one of the provinces"
    def __init__(self,name,capital,leader,flag):  # self is the instance being created
        self.Name = name  # instance-level (dynamic) field
        self.Capital = capital
        self.Leader = leader
        self.__Thailand = flag  # private field
    def show(self):  # access the private field
        print self.__Thailand
    def __sha(self):  # define a private method
        print '我是Alex'  # "I am Alex"
    def Foo2(self):  # call the private method through an instance method
        self.__sha()
    @property  # expose the private field through a property
    def Thailand(self):
        return self.__Thailand
#hn = Provice('河南','郑州','李克强')
#sd = Provice('山东','济南','习近平')
japan = Provice('日本','东京','安倍',True)
#print japan.__Thailand  # raises AttributeError: Provice instance has no attribute '__Thailand'
# objects cannot access a private field directly
japan.show()  # the private field is printed through an instance method
japan.Foo2()  # the private method is invoked through an instance method
japan._Provice__sha()  # explicitly call the private method via name mangling (not recommended)
print japan.Thailand  # read the private field through the property
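# Editor's note (illustrative, not part of the original lesson): "private"
# names are implemented through name mangling, so __Thailand is stored on the
# instance as _Provice__Thailand; like _Provice__sha(), accessing it this way
# works but is discouraged.
print japan._Provice__Thailand  # prints True via the mangled field name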
|
zhangyage/Python-oldboy
|
day04/oop/class_study5.py
|
Python
|
apache-2.0
| 2,002
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a dataset of SequenceExamples from NoteSequence protos.
This script will extract drum tracks from NoteSequence protos and save them to
TensorFlow's SequenceExample protos for input to the drums RNN models.
"""
import os
from magenta.models.drums_rnn import drums_rnn_config_flags
from magenta.models.drums_rnn import drums_rnn_pipeline
from magenta.pipelines import pipeline
import tensorflow.compat.v1 as tf
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string('input', None, 'TFRecord to read NoteSequence protos from.')
flags.DEFINE_string(
'output_dir', None,
'Directory to write training and eval TFRecord files. The TFRecord files '
'are populated with SequenceExample protos.')
flags.DEFINE_float(
'eval_ratio', 0.1,
'Fraction of input to set aside for eval set. Partition is randomly '
'selected.')
flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
def main(unused_argv):
tf.logging.set_verbosity(FLAGS.log)
config = drums_rnn_config_flags.config_from_flags()
pipeline_instance = drums_rnn_pipeline.get_pipeline(
config, FLAGS.eval_ratio)
FLAGS.input = os.path.expanduser(FLAGS.input)
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
pipeline.run_pipeline_serial(
pipeline_instance,
pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),
FLAGS.output_dir)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
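# Editor's note (illustrative, not from the original file): a typical run looks
# like the command below. The --input, --output_dir and --eval_ratio flags are
# defined above; the --config flag and the 'drum_kit' value come from
# drums_rnn_config_flags and are assumptions here.
#
#   python drums_rnn_create_dataset.py \
#     --config=drum_kit \
#     --input=/tmp/notesequences.tfrecord \
#     --output_dir=/tmp/drums_rnn/sequence_examples \
#     --eval_ratio=0.10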
|
magenta/magenta
|
magenta/models/drums_rnn/drums_rnn_create_dataset.py
|
Python
|
apache-2.0
| 2,181
|
# Copyright 2016-2017 University of Pittsburgh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http:www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, uuid, datetime
from sets import Set
from model.micropublication import Annotation, DataMaterialRow, DMItem, DataRatioItem, MaterialDoseItem, MaterialParticipants, MaterialPhenotypeItem, DataReviewer, DataDips
######################### QUERY MP Annotation ##########################
# query all mp annotations
# return annotations with claim, data and material
def queryAllMpAnnotation(conn):
mpAnnotations = []
claimAnns = queryAllMpClaim(conn)
for claimId,claimAnn in claimAnns.items():
claimDataAnno = queryMpData(conn, claimAnn, claimId)
claimDataMatAnno = queryMpMaterial(conn, claimDataAnno, claimId)
mpAnnotations.append(claimDataMatAnno)
return mpAnnotations
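# Hedged usage sketch (illustrative only; the connection parameters are
# hypothetical and a PostgreSQL database is assumed):
#
#   import psycopg2
#   conn = psycopg2.connect(host="localhost", dbname="mpevidence",
#                           user="postgres", password="...")
#   for ann in queryAllMpAnnotation(conn):
#       print ann.csubject, ann.cpredicate, ann.cobject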
# query one mp annotation by its URN
# return annotation with claim, data and material
def queryMpAnnotationByUrn(conn, annotationUrn):
claimAnn = queryMpClaimByUrn(conn, annotationUrn)
claimDataAnn = queryMpData(conn, claimAnn, claimAnn.claimid)
claimDataMatAnn = queryMpMaterial(conn, claimDataAnn, claimAnn.claimid)
return claimDataMatAnn
######################### QUERY MP Claim ##########################
## query all claim annotations
## return dict of {claim id: Annotation}, e.g. {id-1: Ann-1, id-2: Ann-2, ...}
def queryAllMpClaim(conn):
annotations = {} # key: id, value obj Annotation
cur = conn.cursor()
qry = """
select cann.id, t.has_source, cann.creator, cann.date_created, s.exact, s.prefix, s.suffix, cbody.label, qualifierrole(q.subject, q.predicate, q.object) as qtype, qvalue, cann.rejected_statement, cann.rejected_statement_reason, cann.rejected_statement_comment, met.entered_value, cann.negation, q.enantiomer, q.metabolite
from mp_claim_annotation cann join oa_claim_body cbody on cann.has_body = cbody.id
join qualifier q on cbody.id = q.claim_body_id
join method met on cann.id = met.mp_claim_id
join oa_target t on cann.has_target = t.id
join oa_selector s on t.has_selector = s.id;
"""
cur.execute(qry)
for row in cur.fetchall():
id = row[0]
if id not in annotations: ## Using existing annotation if it's available
annotation = Annotation()
annotations[id] = annotation
else:
annotation = annotations[id]
drugPC = "" ## define parent compound string
if row[15] and not row[16]:
drugPC = "enantiomer|"
elif row[16] and not row[15]:
drugPC = "|metabolite"
elif row[15] and row[16]:
drugPC = "enantiomer|metabolite"
## claim qualifiers
if row[8] == "subject":
annotation.csubject = row[9]
annotation.setSubjectPC(drugPC) # parent compound for subject
elif row[8] == "predicate":
annotation.cpredicate = row[9]
elif row[8] == "object":
annotation.cobject = row[9]
annotation.setObjectPC(drugPC) # parent compound for object
elif row[8] == "qualifer":
annotation.qualifier = row[9]
annotation.setQualifierPC(drugPC) # parent compound for qualifier
else:
print "[ERROR] qualifier role unidentified qvalue: %s (claimid %s)" % (row[8], id)
## claim source and label
if annotation.source == None:
annotation.source = row[1]
if annotation.label == None:
annotation.label = row[7]
## claim text selector
if annotation.exact == None:
annotation.setOaSelector(row[5], row[4], row[6])
## user entered method
if annotation.method == None:
annotation.method = row[13]
## rejected reason
if annotation.rejected == None and row[10] == True:
annotation.rejected = row[11] + "|" + row[12]
## assertion negation
if annotation.negation == None and row[14] != None:
annotation.negation = row[14]
return annotations
def queryMpClaimByUrn(conn, urn):
"""
query claim annotation by annotationId
return Annotation
"""
cur = conn.cursor()
qry = """
select cann.id, t.has_source, cann.creator, cann.date_created, s.exact, s.prefix, s.suffix, cbody.label, qualifierrole(q.subject, q.predicate, q.object) as qtype, qvalue, cann.rejected_statement, cann.rejected_statement_reason, cann.rejected_statement_comment, met.entered_value, cann.negation, q.enantiomer, q.metabolite
from mp_claim_annotation cann join oa_claim_body cbody on cann.has_body = cbody.id
join qualifier q on cbody.id = q.claim_body_id
join method met on cann.id = met.mp_claim_id
join oa_target t on cann.has_target = t.id
join oa_selector s on t.has_selector = s.id
where cann.urn = '%s'; """ % (urn)
cur.execute(qry)
annotation = Annotation()
for row in cur.fetchall():
annotation.claimid = row[0]
annotation.urn = urn
drugPC = "" ## define parent compound string
if row[15] and not row[16]:
drugPC = "enantiomer|"
elif row[16] and not row[15]:
drugPC = "|metabolite"
elif row[15] and row[16]:
drugPC = "enantiomer|metabolite"
## claim qualifiers
if row[8] == "subject":
annotation.csubject = row[9]
annotation.setSubjectPC(drugPC) # parent compound for subject
elif row[8] == "predicate":
annotation.cpredicate = row[9]
elif row[8] == "object":
annotation.cobject = row[9]
annotation.setObjectPC(drugPC) # parent compound for object
elif row[8] == "qualifer":
annotation.qualifier = row[9]
annotation.setQualifierPC(drugPC) # parent compound for qualifier
else:
print "[ERROR] qualifier role unidentified qvalue: %s (claimid %s)" % (row[8], annotation.claimid)
## claim source and label
if annotation.source == None:
annotation.source = row[1]
if annotation.label == None:
annotation.label = row[7]
## claim text selector
if annotation.exact == None:
annotation.setOaSelector(row[5], row[4], row[6])
## rejected reason
if annotation.rejected == None and row[10] == True:
annotation.rejected = row[11] + "|" + row[12]
## user entered method
if annotation.method == None:
annotation.method = row[13]
## assertion negation
if annotation.negation == None and row[14] != None:
annotation.negation = row[14]
return annotation
######################### QUERY MP Data ##########################
# query data items for a claim annotation
# return the annotation with data items attached
def queryMpData(conn, annotation, claimid):
qry = """
select dann.type, df.data_field_type, df.value_as_string, df.value_as_number, s.exact, s.prefix, s.suffix, dann.mp_data_index, dann.ev_supports, dann.rejected, dann.rejected_reason, dann.rejected_comment, met.entered_value, met.inferred_value, eq.question, eq.value_as_string
from mp_data_annotation dann
join oa_data_body dbody on dann.has_body = dbody.id
join data_field df on df.data_body_id = dbody.id
left join oa_target t on dann.has_target = t.id
left join oa_selector s on t.has_selector = s.id
join method met on dann.mp_claim_id = met.mp_claim_id and met.mp_data_index = dann.mp_data_index
left join evidence_question eq on met.id = eq.method_id
where dann.mp_claim_id = %s
""" % (claimid)
cur = conn.cursor()
cur.execute(qry)
for row in cur.fetchall():
dType = row[0] # data type
dfType = row[1] # data field
exact = row[4]; value = str(row[2] or row[3]) # value as string or number
index = row[7] # data index
evRelationship = row[8] # EV supports or refutes
dmRow = None
if annotation.getSpecificDataMaterial(index) == None:
dmRow = DataMaterialRow() # create new row of data & material
annotation.setSpecificDataMaterial(dmRow, index)
else: # current row of data & material exists
dmRow = annotation.getSpecificDataMaterial(index)
if dType in ["auc", "cmax" , "clearance", "halflife"]:
if dmRow.getDataRatioItemInRow(dType): # DataRatioItem exists
dataRatioItem = dmRow.getDataRatioItemInRow(dType)
else: # create new dataRatioItem
dataRatioItem = DataRatioItem(dType)
dataRatioItem.setSelector("", exact, "")
dataRatioItem.setAttribute(dfType, value) # add value
dmRow.setDataRatioItem(dataRatioItem)
if dType == "reviewer":
if dmRow.getDataReviewer(): # DataReviewer exists
dataReviewer = dmRow.getDataReviewer()
else:
dataReviewer = DataReviewer()
dataReviewer.setAttribute(dfType, value)
dmRow.setDataReviewer(dataReviewer)
if dType == "dipsquestion": # DataDips exists
if dmRow.getDataDips():
dips = dmRow.getDataDips()
else:
dips = DataDips()
dips.setQuestion(dfType, value)
dmRow.setDataDips(dips)
if not dmRow.getEvRelationship(): # add evidence relationship to dmRow
if evRelationship is True:
dmRow.setEvRelationship("supports")
elif evRelationship is False:
dmRow.setEvRelationship("refutes")
evqs = row[14]; evqsVal = row[15] # add evidence type questions
if evqs and evqsVal:
if evqs == "grouprandom" and not dmRow.getGroupRandom():
dmRow.setGroupRandom(evqsVal)
elif evqs == "parallelgroup" and not dmRow.getParallelGroup():
dmRow.setParallelGroup(evqsVal)
return annotation
######################### QUERY MP Material ##########################
# query material items for a claim annotation
# return the annotation with material items attached
def queryMpMaterial(conn, annotation, claimid):
qry = """
select mann.type, mf.material_field_type, mf.value_as_string, mf.value_as_number, s.exact, s.prefix, s.suffix, mann.mp_data_index, mann.ev_supports
from mp_material_annotation mann join oa_material_body mbody on mann.has_body = mbody.id
join material_field mf on mf.material_body_id = mbody.id
left join oa_target t on mann.has_target = t.id
left join oa_selector s on t.has_selector = s.id
where mann.mp_claim_id = %s
""" % (claimid)
results = []
cur = conn.cursor()
cur.execute(qry)
for row in cur.fetchall():
mType = row[0] # material type
mfType = row[1] # material field
exact = row[4]; value = str(row[2] or row[3]) # value as string or number
index = row[7] # data & material index
evRelationship = row[8] # EV supports or refutes
if annotation.getSpecificDataMaterial(index) == None:
dmRow = DataMaterialRow() # create new row of data & material
if evRelationship:
dmRow.setEvRelationship("supports")
else:
dmRow.setEvRelationship("refutes")
if mType in ["object_dose","subject_dose"]: # dose
doseItem = MaterialDoseItem(mType)
doseItem.setAttribute(mfType, value)
doseItem.setSelector("", exact, "")
dmRow.setMaterialDoseItem(doseItem)
elif mType == "participants":
partItem = MaterialParticipants(value)
partItem.setSelector("", exact, "")
dmRow.setParticipants(partItem)
elif mType == "phenotype":
phenoItem = MaterialPhenotypeItem()
phenoItem.setAttribute(mfType, value)
dmRow.setPhenotype(phenoItem)
annotation.setSpecificDataMaterial(dmRow, index)
else: # current row of material & material exists
dmRow = annotation.getSpecificDataMaterial(index)
if dmRow.getEvRelationship() == None and evRelationship is True:
dmRow.setEvRelationship("supports")
elif dmRow.getEvRelationship() == None and evRelationship is False:
dmRow.setEvRelationship("refutes")
if mType in ["object_dose","subject_dose"]:
if dmRow.getMaterialDoseInRow(mType): # current MaterialItem exists
doseItem = dmRow.getMaterialDoseInRow(mType)
else:
doseItem = MaterialDoseItem(mType)
doseItem.setAttribute(mfType, value)
doseItem.setSelector("", exact, "")
dmRow.setMaterialDoseItem(doseItem)
elif mType == "participants":
if dmRow.getParticipantsInRow(): # participants exists
partItem = dmRow.getParticipantsInRow()
partItem.setValue(value)
else:
partItem = MaterialParticipants(value)
dmRow.setParticipants(partItem)
partItem.setSelector("", exact, "")
elif mType == "phenotype":
if dmRow.getPhenotype():
phenoItem = dmRow.getPhenotype()
else:
phenoItem = MaterialPhenotypeItem()
phenoItem.setAttribute(mfType, value)
dmRow.setPhenotype(phenoItem)
return annotation
######################### QUERY Highlight Annotation ##########################
# query all highlight annotations
# return a dict mapping document URL to its set of highlighted drug names
def queryHighlightAnns(conn):
highlightD = {}
qry = """SELECT h.id, t.has_source, s.exact
FROM highlight_annotation h, oa_target t, oa_selector s
WHERE h.has_target = t.id
AND t.has_selector = s.id;"""
cur = conn.cursor()
cur.execute(qry)
for row in cur.fetchall():
source = row[1]; drugname = row[2]
if source in highlightD:
highlightD[source].add(drugname)
else:
highlightD[source] = Set([drugname])
return highlightD
|
dbmi-pitt/dbmi-annotator
|
translation/mp-evidence-base-ETL/deprecated/mpEvidenceQry.py
|
Python
|
apache-2.0
| 12,990
|
from __future__ import print_function, division
from brian import (Network, NeuronGroup, SpikeMonitor,
PoissonGroup, Connection,
mV, ms, Hz)
import sys
import matplotlib.pyplot as plt
import numpy as np
import itertools as itt
fin = [f*Hz for f in range(10, 41, 5)]
win = [w*mV for w in np.arange(0.5, 2.1, 0.5)]
Nin = [n for n in range(100, 181, 20)]
tau = 10*ms
Vth = 15*mV
reset = 0*mV
configs = [c for c in itt.product(Nin, fin, win)]
Nsims = len(configs)
print("Number of configurations: {}".format(Nsims))
lifeq = "dV/dt = -V/tau : volt"
sim = Network()
nrn = NeuronGroup(Nsims, lifeq, threshold="V>=Vth", reset="V=reset")
inputgroups = []
connections = []
print("Setting up ...")
for idx, c in enumerate(configs):
n, f, w = c
inp = PoissonGroup(n, f)
conn = Connection(inp, nrn[idx], state="V", weight=w)
inputgroups.append(inp)
connections.append(conn)
print("\r{}/{}".format(idx+1, Nsims), end="")
sys.stdout.flush()
print()
spikemon = SpikeMonitor(nrn)
sim.add(*inputgroups)
sim.add(*connections)
sim.add(nrn)
sim.add(spikemon)
duration = 1000*ms
print("Running for {} s".format(duration))
sim.run(duration, report="text")
plt.figure()
inputvolts = np.array([c[0]*c[1]*c[2]*tau for c in configs])
spikerates = np.array([len(sp) for sp in spikemon.spiketimes.itervalues()])
for idx in range(Nsims):
iv = inputvolts[idx]
sr = spikerates[idx]
plt.plot(iv, sr, "b.")
print("{} mV -> {} Hz".format(iv*1000, sr/duration))
ivsorted = np.sort(inputvolts)
theofout = 1.0/(tau*np.log(ivsorted/(ivsorted-Vth)))
theovin = Vth/(1-np.exp(-1.0/(tau*spikerates)))
plt.plot(ivsorted, theofout, "r-")
sidx = np.argsort(theovin)
plt.plot(theovin[sidx], spikerates[sidx], "g-")
Narr = np.array([c[0] for c in configs])
farr = np.array([c[1] for c in configs])  # configs are (N, f_in, w) tuples
Warr = np.array([c[2] for c in configs])
theofin = Vth/((1-np.exp(-1.0/(tau*spikerates)))*Narr*Warr*tau)
plt.figure()
plt.plot(theofin, farr, "b.")
plt.plot([min(theofin), max(theofin)], [min(theofin), max(theofin)], 'k--')
plt.show()
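# Note (added for clarity): the theoretical curves above use the noise-free
# LIF input-output relation
#     f_out = 1 / (tau * ln(V_in / (V_in - V_th)))
# with the mean drive approximated as V_in = N * w * f_in * tau; `theofout`,
# `theovin` and `theofin` are this relation solved for f_out, V_in and f_in
# respectively.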
|
achilleas-k/brian-scripts
|
thesis_stuff/test_in_out.py
|
Python
|
apache-2.0
| 2,076
|
from rest_framework import status
from test_utils import serialized_time
def test_get_profile_topics(
api_client, enable_premium_requirement, profile_topic_factory, user_factory
):
"""
Premium users should be able to list their own profile topics.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
topic = profile_topic_factory(profile__km_user__user=user)
url = f"/know-me/profile/profiles/{topic.profile.pk}/topics/"
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.json() == [
{
"id": topic.pk,
"url": api_client.build_full_url(
f"/know-me/profile/profile-topics/{topic.pk}/"
),
"created_at": serialized_time(topic.created_at),
"updated_at": serialized_time(topic.updated_at),
"is_detailed": topic.is_detailed,
"items_url": api_client.build_full_url(
f"/know-me/profile/profile-topics/{topic.pk}/items/"
),
"name": topic.name,
"permissions": {"read": True, "write": True},
"profile_id": topic.profile.pk,
}
]
def test_post_create_topic(
api_client, enable_premium_requirement, profile_factory, user_factory
):
"""
Premium users should be able to add new topics to their own
profiles.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
profile = profile_factory(km_user__user=user)
url = f"/know-me/profile/profiles/{profile.pk}/topics/"
data = {"name": "Test Topic"}
response = api_client.post(url, data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["name"] == data["name"]
def test_put_topic_order(
api_client, enable_premium_requirement, profile_topic_factory, user_factory
):
"""
Premium users should be able to sort their own profile topics with
respect to the parent profile.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
t1 = profile_topic_factory(profile__km_user__user=user)
t2 = profile_topic_factory(profile=t1.profile)
url = f"/know-me/profile/profiles/{t1.profile.pk}/topics/"
data = {"order": [t2.pk, t1.pk]}
response = api_client.put(url, data)
assert response.status_code == status.HTTP_200_OK
# The collection should now be sorted
topics = api_client.get(url).json()
assert list(map(lambda topic: topic["id"], topics)) == data["order"]
|
knowmetools/km-api
|
km_api/functional_tests/know_me/profile/profile_topics/test_profile_topic_list.py
|
Python
|
apache-2.0
| 2,774
|
#!/usr/bin/env python
# -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2015 - 2021
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================
GenSASchema - Static SQLAlchemy Schema Generator
==================================================
GenSASchema - Static SQLAlchemy Schema Generator.
"""
__author__ = u"Andr\xe9 Malo"
import os as _os
import posixpath as _posixpath
# pylint: disable = no-name-in-module, import-error, raise-missing-from
import setuptools as _setuptools
# pylint: disable = invalid-name
def _doc(filename):
""" Read docs file """
# pylint: disable = unspecified-encoding
args = {} if str is bytes else dict(encoding='utf-8')
try:
with open(_os.path.join('docs', filename), **args) as fp:
return fp.read()
except IOError:
return None
def _lines(multiline):
""" Split multiline string into single line % empty and comments """
return [line for line in (
line.strip() for line in multiline.splitlines(False)
) if line and not line.startswith('#')]
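# Illustrative example (hypothetical input, added for clarity):
#   _lines("keep\n# a comment\n\nalso keep") == ['keep', 'also keep']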
package = dict(
name='gensaschema',
top='gensaschema',
pathname='gensaschema',
provides=_doc('PROVIDES'),
desc=_doc('SUMMARY').strip(),
longdesc=_doc('DESCRIPTION'),
author=__author__,
email='nd@perlig.de',
license="Apache License, Version 2.0",
# keywords=_lines(_doc('KEYWORDS')),
url='http://opensource.perlig.de/gensaschema/',
classifiers=_lines(_doc('CLASSIFIERS') or ''),
packages=True,
# py_modules=[],
# version_file='__init__.py',
install_requires=[],
)
def setup():
""" Main """
# pylint: disable = too-many-branches
# pylint: disable = unspecified-encoding
args = {} if str is bytes else dict(encoding='utf-8')
version_file = '%s/%s' % (package['pathname'],
package.get('version_file', '__init__.py'))
with open(version_file, **args) as fp:
for line in fp: # pylint: disable = redefined-outer-name
if line.startswith('__version__'):
version = line.split('=', 1)[1].strip()
if version.startswith(("'", '"')):
version = version[1:-1].strip()
break
else:
raise RuntimeError("Version not found")
kwargs = {}
if package.get('packages', True):
kwargs['packages'] = [package['top']] + [
'%s.%s' % (package['top'], item)
for item in
_setuptools.find_packages(package['pathname'])
]
if package.get('py_modules'):
kwargs['py_modules'] = package['py_modules']
_setuptools.setup(
name=package['name'],
author=package['author'],
author_email=package['email'],
license=package['license'],
classifiers=package['classifiers'],
description=package['desc'],
long_description=package['longdesc'],
url=package['url'],
install_requires=package['install_requires'],
version=version,
zip_safe=False,
**kwargs
)
if __name__ == '__main__':
setup()
|
ndparker/gensaschema
|
setup.py
|
Python
|
apache-2.0
| 3,688
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
],
),
]
|
PeterHo/mysite
|
lists/migrations/0001_initial.py
|
Python
|
apache-2.0
| 420
|
"""A zigzag path, a sequence of points."""
import collections
from .defuzz import Defuzzer
from .euclid import collinear, Point, Line, Segment, Bounds, EmptyBounds
from .postulates import adjacent_pairs, triples
class Path:
def __init__(self, points):
self.points = tuple(points)
def __repr__(self):
return f"<Path {list(self.points)}>"
def __eq__(self, other):
return self.points == other.points
def __hash__(self):
return hash(self.points)
def __lt__(self, other):
return self.points < other.points
def __len__(self):
return len(self.points)
def __iter__(self):
return iter(self.points)
def __getitem__(self, idx):
# Lots of code tries to get the endpoints by index. Allow that but
# nothing else.
assert idx in [0, -1]
return self.points[idx]
@property
def closed(self):
"""Does the path loop? Start and end are the same points."""
return self.points[0] == self.points[-1]
def length(self):
"""The euclidean distance along the path."""
return sum(p1.distance(p2) for p1, p2 in adjacent_pairs(self.points))
def ends(self):
yield self.points[0]
yield self.points[-1]
def bounds(self):
"""What is the `Bounds` for this path?"""
return Bounds.points(self.points)
def segments(self):
for p1, p2 in adjacent_pairs(self.points):
yield Segment(tuple(p1), tuple(p2))
def transform(self, xform):
"""Transform the Path through the affine `xform`."""
return Path(pt.transform(xform) for pt in self)
def any_collinear(self):
"""Are any of the parts of this path collinear?"""
return any(collinear(*them) for them in triples(self.points))
def clean(self):
"""Remove unneeded points from a path."""
if len(self.points) <= 2:
return self
# Points are unneeded if they are collinear with their neighbors.
new_points = []
if not self.closed:
new_points.append(self.points[0])
for a, b, c in triples(self.points):
if not collinear(a, b, c):
new_points.append(b)
if self.closed:
new_points.append(new_points[0])
else:
new_points.append(self.points[-1])
return Path(new_points)
def reversed(self):
return Path(self.points[::-1])
def draw(self, ctx, append=False, reverse=False):
points = self.points
if reverse:
points = points[::-1]
(ctx.line_to if append else ctx.move_to)(*points[0])
for pt in points[1:-1]:
ctx.line_to(*pt)
if self.closed:
ctx.close_path()
else:
ctx.line_to(*points[-1])
def offset_path(self, offset):
lines = []
for p1, p2 in adjacent_pairs(self.points):
lines.append(Line(p1, p2).offset(offset))
points = []
if self.closed:
p0 = lines[-1].intersect(lines[0])
points.append(p0)
else:
points.append(lines[0].p1)
for l1, l2 in adjacent_pairs(lines):
points.append(l1.intersect(l2))
if self.closed:
points.append(p0)
else:
points.append(lines[-1].p2)
return Path(points)
def defuzz(self, defuzz):
return Path([Point(*defuzz(pt)) for pt in self.points])
def perturb(self, jitter):
"""Jostle around all the points in the path."""
pts = self.points
if self.closed:
pts = pts[:-1]
pts = [pt.perturb(jitter) for pt in pts]
if self.closed:
pts.append(pts[0])
return Path(pts)
def penultimate(self, point):
"""The second-to-last point from whichever end ends with `point`."""
if self.points[0] == point:
return self.points[1]
else:
assert self.points[-1] == point
return self.points[-2]
def join(self, p2):
"""Join `self` and `p2` together by their common endpoint."""
p1 = self.points
p2 = p2.points
# Find the ends that are the same point. Rearrange p1 and p2 so that p1+p2
# is the join we need, and remove the duplicate point at p2[0].
if p1[-1] == p2[0]:
p2 = p2[1:]
elif p1[-1] == p2[-1]:
p2 = p2[-2::-1]
elif p1[0] == p2[-1]:
p1, p2 = p2, p1[1:]
elif p1[0] == p2[0]:
p1, p2 = p1[::-1], p2[1:]
else:
return None
# If the join would have a redundant point because of three collinear
# points in a row, then remove the middle point.
if collinear(p1[-2], p1[-1], p2[0]):
p1 = p1[:-1]
return Path(p1 + p2)
def trim(self, end, trimmers):
"""Trim one end of path where trimmers (paths) cross it."""
points = list(self.points)
        # end == 0 -> segment over the first two points; end == -1 -> the last two points
        seg = Segment(*points[[None, -2][end]:[2, None][end]])
cuts = [pt for t in trimmers for pt in seg_path_intersections(seg, t)]
if cuts:
cuts = seg.sort_along(cuts)
if end == 0:
points = [cuts[-1]] + points[1:]
else:
points = points[:-1] + [cuts[0]]
return Path(points)
else:
return self
def canonicalize(self):
"""Produce an equivalent canonical path."""
if self.closed:
points = list(self.points[:-1])
points = min((points[i:]+points[:i])[::s] for i in range(len(points)) for s in [1, -1])
points.append(points[0])
return Path(points)
else:
return Path(min(self.points, self.points[::-1]))
def defuzz_paths(paths):
defuzz = Defuzzer().defuzz
return [path.defuzz(defuzz) for path in paths]
def combine_paths(paths):
paths = defuzz_paths(paths)
pm = collections.defaultdict(list)
for path in paths:
for end in path.ends():
pm[end].append(path)
combined = []
used = set()
for path in paths:
if id(path) in used:
continue
for end in [0, -1]:
while True:
target = path[end]
possibilities = pm[target]
possibilities = [p for p in possibilities if id(p) not in used]
if not possibilities:
break
other = best_join(path, target, possibilities)
if other is not None:
used.add(id(path))
used.add(id(other))
path = path.join(other)
pm[path[0]].append(path)
pm[path[-1]].append(path)
else:
break
used.add(id(path))
combined.append(path.clean())
return combined
def draw_paths(paths, ctx):
for path in paths:
path.draw(ctx)
def best_join(path, join_point, possibilities):
others = [p for p in possibilities if p != path]
# If there's only one other path, then join to that one.
if len(others) == 1:
return others[0]
# If there's more than one, find one we are collinear with.
path_pen = path.penultimate(join_point)
for other in others:
other_pen = other.penultimate(join_point)
if collinear(path_pen, join_point, other_pen):
return other
return None
def show_path(path):
if path is None:
return "None"
return f"Path[{path[0]}..{len(path)}..{path[-1]}]@{id(path)}"
def show_paths(paths):
ret = "[\n"
for path in paths:
ret += f" {show_path(path)}\n"
ret += "]"
return ret
def paths_bounds(paths):
"""Return the `Bounds` of the paths."""
bounds = EmptyBounds()
for path in paths:
bounds |= path.bounds()
return bounds
def clip_paths(paths, bounds):
"""Return the paths that overlap the bounds."""
return [path for path in paths if path.bounds().overlap(bounds)]
def equal_path(path1, path2):
return path1.canonicalize() == path2.canonicalize()
def canonicalize_paths(paths):
"""Canonicalize a list of paths."""
paths = [p.canonicalize() for p in paths]
paths.sort()
return paths
def equal_paths(paths1, paths2):
"""Are the paths in paths1 and paths2 equivalent?"""
return canonicalize_paths(paths1) == canonicalize_paths(paths2)
def paths_length(paths):
return sum(path.length() for path in paths)
def seg_path_intersections(segment, path):
"""Return a list of all the points where segment and path intersect."""
for pseg in path.segments():
pt = segment.intersect(pseg)
if pt is not None:
yield pt
def perturb_paths(paths, jitter):
"""Perturb all of the points in all of the path."""
return [path.perturb(jitter) for path in paths]
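# Hedged usage sketch (illustrative only; assumes `Point` is constructed from
# x/y coordinates as in zellij.euclid):
#
#   p1 = Path([Point(0, 0), Point(1, 0)])
#   p2 = Path([Point(1, 0), Point(1, 1)])
#   p1.join(p2)                  # -> Path through (0,0), (1,0), (1,1)
#   combine_paths([p1, p2])      # -> [the same joined, cleaned path]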
|
nedbat/zellij
|
zellij/path.py
|
Python
|
apache-2.0
| 8,958
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt("senlin",
default=True,
help="Whether or not senlin is expected to be available"),
]
clustering_group = cfg.OptGroup(name="clustering",
title="Clustering Service Options")
ClusteringGroup = [
cfg.StrOpt("catalog_type",
default="clustering",
help="Catalog type of the clustering service."),
cfg.IntOpt("wait_timeout",
default=60,
help="Waiting time for a specific status, in seconds.")
]
|
tengqm/senlin-container
|
senlin/tests/tempest_tests/config.py
|
Python
|
apache-2.0
| 1,284
|
import sys
import errno
import json
import os
from argparse import ArgumentParser
sys.path.insert(1, 'py-bindings')
from squad import SQUADConverter
def get_samples(test_file, vocab_file, output_dir):
print("Test file:", test_file)
print("Vocab file:", vocab_file)
print("Output dir:", output_dir)
max_seq_length = 384
max_query_length = 64
doc_stride = 128
lower_case = False
sqd = SQUADConverter(test_file, vocab_file, max_seq_length, max_query_length, doc_stride, lower_case)
samples = sqd.convert()
# Dump samples to json
print("--Dumping examples to json--")
os.makedirs(output_dir, exist_ok=True)
output_file = output_dir + "/squad_examples.json"
c = 0
with open(output_file, 'w', encoding='utf-8') as fid:
json.dump({'samples':samples}, fid, ensure_ascii=False, indent=4)
return c
def get_arguments():
parser = ArgumentParser()
parser.add_argument("--test_file", type=str, help="Path to squad test json file", required=True)
parser.add_argument("--vocab_file", type=str, help="Path to vocab.txt file", required=True)
parser.add_argument("--max_seq_length", type=int, help="Max sequence length", default=384)
parser.add_argument("--max_query_length", type=int, help="Max query length", default=64)
parser.add_argument("--doc_stride", type=int, help="Document stride", default=128)
parser.add_argument("--lower_case", type=bool, help="Lower case", default=1)
parser.add_argument("--output_dir", type=str, help="Output directory for saved json", default="samples_cache")
return parser.parse_args()
def main():
args = get_arguments()
if not os.path.isfile(args.test_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.test_file)
if not os.path.isfile(args.vocab_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.vocab_file)
sqd = SQUADConverter(args.test_file, args.vocab_file, args.max_seq_length, args.max_query_length, args.doc_stride, args.lower_case)
# Convert examples
print("--Reading samples--")
samples = sqd.convert()
    # Dump samples to json
print("--Dumping examples to json--")
os.makedirs(args.output_dir, exist_ok=True)
output_file = args.output_dir + "/squad_examples.json"
with open(output_file, 'w', encoding='utf-8') as fid:
json.dump({'samples':samples}, fid, ensure_ascii=False, indent=4)
if __name__=="__main__":
main()
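# Hedged CLI sketch (added for illustration; file paths are placeholders):
#
#   python convert.py --test_file dev-v1.1.json --vocab_file vocab.txt \
#       --output_dir samples_cache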
|
mlperf/inference_results_v0.7
|
closed/Intel/code/resnet/resnet-ov/py-bindings/convert.py
|
Python
|
apache-2.0
| 2,497
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
_READER = "role:reader"
_MEMBER = "role:member"
_ADMIN = "role:admin"
_PROJECT_MEMBER = f"{_MEMBER} and project_id:%(target.secret.project_id)s"
_PROJECT_ADMIN = f"{_ADMIN} and project_id:%(target.secret.project_id)s"
_SECRET_CREATOR = "user_id:%(target.secret.creator_id)s"
_SECRET_IS_NOT_PRIVATE = "True:%(target.secret.read_project_access)s"
rules = [
policy.DocumentedRuleDefault(
name='secret:decrypt',
check_str='rule:secret_decrypt_non_private_read or ' +
'rule:secret_project_creator or ' +
'rule:secret_project_admin or rule:secret_acl_read or ' +
f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
scope_types=['project'],
description='Retrieve a secrets payload.',
operations=[
{
'path': '/v1/secrets/{uuid}/payload',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name='secret:get',
check_str='rule:secret_non_private_read or ' +
'rule:secret_project_creator or ' +
'rule:secret_project_admin or rule:secret_acl_read or ' +
f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
scope_types=['project'],
description='Retrieves a secrets metadata.',
operations=[
{
'path': '/v1/secrets/{secret-id}',
                'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name='secret:put',
check_str='rule:admin_or_creator and rule:secret_project_match or ' +
f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
scope_types=['project'],
description='Add the payload to an existing metadata-only secret.',
operations=[
{
'path': '/v1/secrets/{secret-id}',
'method': 'PUT'
}
]
),
policy.DocumentedRuleDefault(
name='secret:delete',
check_str='rule:secret_project_admin or ' +
'rule:secret_project_creator or ' +
'(rule:secret_project_creator_role and ' +
'not rule:secret_private_read) or ' +
f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
scope_types=['project'],
description='Delete a secret by uuid.',
operations=[
{
'path': '/v1/secrets/{secret-id}',
'method': 'DELETE'
}
]
),
policy.DocumentedRuleDefault(
name='secrets:post',
check_str=f'rule:admin_or_creator or {_MEMBER}',
scope_types=['project'],
description='Creates a Secret entity.',
operations=[
{
'path': '/v1/secrets',
'method': 'POST'
}
]
),
policy.DocumentedRuleDefault(
name='secrets:get',
check_str=f'rule:all_but_audit or {_MEMBER}',
scope_types=['project'],
description='Lists a projects secrets.',
operations=[
{
'path': '/v1/secrets',
'method': 'GET'
}
]
)
]
def list_rules():
return rules
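# Hedged usage sketch (illustrative only; not part of the original module).
# These defaults are typically registered with an oslo.policy enforcer, e.g.:
#
#   from oslo_config import cfg
#   from oslo_policy import policy as oslo_policy
#
#   enforcer = oslo_policy.Enforcer(cfg.CONF)
#   enforcer.register_defaults(list_rules())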
|
openstack/barbican
|
barbican/common/policies/secrets.py
|
Python
|
apache-2.0
| 4,101
|
#(C) Copyright Syd Logan 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Unit tests for medications application. Assumes django server is up
and running on the specified host and port
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
class CreateMedications(ServiceAPI):
def __init__(self, host, port, token, payload):
super(CreateMedications, self).__init__()
self.setHttpMethod("POST")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setPayload(payload)
self.setURL("tscharts/v1/medications/")
class GetMedications(ServiceAPI):
def makeURL(self):
hasQArgs = False
if not self._id == None:
base = "tscharts/v1/medications/{}/".format(self._id)
else:
base = "tscharts/v1/medications/"
if not self._name == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "name={}".format(self._name)
hasQArgs = True
self.setURL(base)
def __init__(self, host, port, token):
super(GetMedications, self).__init__()
self.setHttpMethod("GET")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._name = None
self._id = None
        self.makeURL()
    def setId(self, id):
        self._id = id
self.makeURL()
def setName(self,val):
self._name = val
self.makeURL()
class DeleteMedications(ServiceAPI):
def __init__(self, host, port, token, id):
super(DeleteMedications, self).__init__()
self.setHttpMethod("DELETE")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setURL("tscharts/v1/medications/{}/".format(id))
class TestTSMedications(unittest.TestCase):
def setUp(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("token" in ret[1])
global token
token = ret[1]["token"]
def testCreateMedications(self):
data = {}
data["name"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
self.assertEqual(ret['name'], "AAAAA")
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request test uniqueness
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404) # not found
data = {}
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request
data["names"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400) #bad request
data = {}
data["name"] = ""
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400)
data = {}
data["name"] = 123
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 400)
def testDeleteMedications(self):
data = {}
data["name"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
id = int(ret[1]["id"])
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
self.assertEqual(ret["name"], "AAAAA")
self.assertEqual(ret["id"], id)
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
def testGetMedications(self):
data = {}
data["name"] = "AAAAA"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
x = GetMedications(host, port, token); #test get a medication by its id
x.setId(int(ret[1]["id"]))
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
ret = ret[1]
id = int(ret["id"])
self.assertTrue(ret["name"] == "AAAAA")
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404)
data = {}
data["name"] = "CCCCCC"
x = CreateMedications(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
id = ret[1]["id"]
x = GetMedications(host, port, token) #test get a medication by its name
x.setName("CCCCCC")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue(ret[1]["name"] == "CCCCCC")
x = GetMedications(host, port, token)
x.setName("aaaa")
ret = x.send(timeout = 30)
self.assertEqual(ret[0], 404) #not found
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
namelist = ['CCCCC','AAAAA','BBBBB']
copynamelist = ['CCCCC','AAAAA','BBBBB']
idlist = []
for x in namelist:
data = {}
data["name"] = x
x = CreateMedications(host, port, token, data)
ret = x.send(timeout = 30)
idlist.append(ret[1]["id"])
self.assertEqual(ret[0], 200)
x = GetMedications(host, port, token) #test get a list of medications
ret = x.send(timeout = 30)
for name in namelist:
self.assertTrue(name in ret[1])
copynamelist.remove(name)
self.assertEqual(copynamelist, [])
for id in idlist:
x = DeleteMedications(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
for id in idlist:
x = GetMedications(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) #not found
def usage():
print("medications [-h host] [-p port] [-u username] [-w password]")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
global host
host = "127.0.0.1"
global port
port = 8000
global username
username = None
global password
password = None
for o, a in opts:
if o == "-h":
host = a
elif o == "-p":
port = int(a)
elif o == "-u":
username = a
elif o == "-w":
password = a
else:
assert False, "unhandled option"
unittest.main(argv=[sys.argv[0]])
if __name__ == "__main__":
main()
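# Hedged invocation sketch (host and credentials are placeholders):
#
#   python medications.py -h 127.0.0.1 -p 8000 -u admin -w password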
|
slogan621/tscharts
|
tschartslib/medications/medications.py
|
Python
|
apache-2.0
| 8,744
|
"""Time offset classes for use with cftime.datetime objects"""
# The offset classes and mechanisms for generating time ranges defined in
# this module were copied/adapted from those defined in pandas. See in
# particular the objects and methods defined in pandas.tseries.offsets
# and pandas.core.indexes.datetimes.
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from datetime import timedelta
from distutils.version import LooseVersion
from functools import partial
from typing import ClassVar, Optional
import numpy as np
from ..core.pdcompat import count_not_none
from .cftimeindex import CFTimeIndex, _parse_iso8601_with_reso
from .times import format_cftime_datetime
def get_date_type(calendar):
"""Return the cftime date type for a given calendar name."""
try:
import cftime
except ImportError:
raise ImportError("cftime is required for dates with non-standard calendars")
else:
calendars = {
"noleap": cftime.DatetimeNoLeap,
"360_day": cftime.Datetime360Day,
"365_day": cftime.DatetimeNoLeap,
"366_day": cftime.DatetimeAllLeap,
"gregorian": cftime.DatetimeGregorian,
"proleptic_gregorian": cftime.DatetimeProlepticGregorian,
"julian": cftime.DatetimeJulian,
"all_leap": cftime.DatetimeAllLeap,
"standard": cftime.DatetimeGregorian,
}
return calendars[calendar]
class BaseCFTimeOffset:
_freq: ClassVar[Optional[str]] = None
_day_option: ClassVar[Optional[str]] = None
def __init__(self, n=1):
if not isinstance(n, int):
raise TypeError(
"The provided multiple 'n' must be an integer. "
"Instead a value of type {!r} was provided.".format(type(n))
)
self.n = n
def rule_code(self):
return self._freq
def __eq__(self, other):
return self.n == other.n and self.rule_code() == other.rule_code()
def __ne__(self, other):
return not self == other
def __add__(self, other):
return self.__apply__(other)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract a cftime.datetime " "from a time offset.")
elif type(other) == type(self):
return type(self)(self.n - other.n)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n)
def __neg__(self):
return self * -1
def __rmul__(self, other):
return self.__mul__(other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):
raise TypeError("Cannot subtract cftime offsets of differing " "types")
return -self + other
def __apply__(self):
return NotImplemented
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
test_date = (self + date) - self
return date == test_date
def rollforward(self, date):
if self.onOffset(date):
return date
else:
return date + type(self)()
def rollback(self, date):
if self.onOffset(date):
return date
else:
return date - type(self)()
def __str__(self):
return "<{}: n={}>".format(type(self).__name__, self.n)
def __repr__(self):
return str(self)
def _get_offset_day(self, other):
# subclass must implement `_day_option`; calling from the base class
# will raise NotImplementedError.
return _get_day_of_month(other, self._day_option)
def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
"""
if day_option == "start":
return 1
elif day_option == "end":
days_in_month = _days_in_month(other)
return days_in_month
elif day_option is None:
# Note: unlike `_shift_month`, _get_day_of_month does not
# allow day_option = None
raise NotImplementedError()
else:
raise ValueError(day_option)
def _days_in_month(date):
"""The number of days in the month of the given date"""
if date.month == 12:
reference = type(date)(date.year + 1, 1, 1)
else:
reference = type(date)(date.year, date.month + 1, 1)
return (reference - timedelta(days=1)).day
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n
def _adjust_n_years(other, n, month, reference_day):
"""Adjust the number of times an annual offset is applied based on
another date, and the reference day provided"""
if n > 0:
if other.month < month or (other.month == month and other.day < reference_day):
n -= 1
else:
if other.month > month or (other.month == month and other.day > reference_day):
n += 1
return n
def _shift_month(date, months, day_option="start"):
"""Shift the date to a month start or end a given number of months away.
"""
import cftime
delta_year = (date.month + months) // 12
month = (date.month + months) % 12
if month == 0:
month = 12
delta_year = delta_year - 1
year = date.year + delta_year
if day_option == "start":
day = 1
elif day_option == "end":
reference = type(date)(year, month, 1)
day = _days_in_month(reference)
else:
raise ValueError(day_option)
if LooseVersion(cftime.__version__) < LooseVersion("1.0.4"):
# dayofwk=-1 is required to update the dayofwk and dayofyr attributes of
# the returned date object in versions of cftime between 1.0.2 and
# 1.0.3.4. It can be removed for versions of cftime greater than
# 1.0.3.4.
return date.replace(year=year, month=month, day=day, dayofwk=-1)
else:
return date.replace(year=year, month=month, day=day)
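# Illustrative examples (added for clarity; assume cftime.DatetimeGregorian dates):
#   _shift_month(DatetimeGregorian(2000, 1, 31), 1, "start") -> 2000-02-01
#   _shift_month(DatetimeGregorian(2000, 1, 31), 1, "end")   -> 2000-02-29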
def roll_qtrday(other, n, month, day_option, modby=3):
"""Possibly increment or decrement the number of periods to shift
based on rollforward/rollbackward conventions.
Parameters
----------
other : cftime.datetime
n : number of periods to increment, before adjusting for rolling
month : int reference month giving the first month of the year
day_option : 'start', 'end'
The convention to use in finding the day in a given month against
which to compare for rollforward/rollbackward decisions.
modby : int 3 for quarters, 12 for years
Returns
-------
n : int number of periods to increment
See Also
--------
_get_day_of_month : Find the day in a month provided an offset.
"""
months_since = other.month % modby - month % modby
if n > 0:
if months_since < 0 or (
months_since == 0 and other.day < _get_day_of_month(other, day_option)
):
# pretend to roll back if on same month but
# before compare_day
n -= 1
else:
if months_since > 0 or (
months_since == 0 and other.day > _get_day_of_month(other, day_option)
):
# make sure to roll forward, so negate
n += 1
return n
def _validate_month(month, default_month):
if month is None:
result_month = default_month
else:
result_month = month
if not isinstance(result_month, int):
raise TypeError(
"'self.month' must be an integer value between 1 "
"and 12. Instead, it was set to a value of "
"{!r}".format(result_month)
)
elif not (1 <= result_month <= 12):
raise ValueError(
"'self.month' must be an integer value between 1 "
"and 12. Instead, it was set to a value of "
"{!r}".format(result_month)
)
return result_month
class MonthBegin(BaseCFTimeOffset):
_freq = "MS"
def __apply__(self, other):
n = _adjust_n_months(other.day, self.n, 1)
return _shift_month(other, n, "start")
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == 1
class MonthEnd(BaseCFTimeOffset):
_freq = "M"
def __apply__(self, other):
n = _adjust_n_months(other.day, self.n, _days_in_month(other))
return _shift_month(other, n, "end")
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == _days_in_month(date)
_MONTH_ABBREVIATIONS = {
1: "JAN",
2: "FEB",
3: "MAR",
4: "APR",
5: "MAY",
6: "JUN",
7: "JUL",
8: "AUG",
9: "SEP",
10: "OCT",
11: "NOV",
12: "DEC",
}
class QuarterOffset(BaseCFTimeOffset):
"""Quarter representation copied off of pandas/tseries/offsets.py
"""
_freq: ClassVar[str]
_default_month: ClassVar[int]
def __init__(self, n=1, month=None):
BaseCFTimeOffset.__init__(self, n)
self.month = _validate_month(month, self._default_month)
def __apply__(self, other):
# months_since: find the calendar quarter containing other.month,
# e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
# Then find the month in that quarter containing an onOffset date for
# self. `months_since` is the number of months to shift other.month
# to get to this on-offset month.
months_since = other.month % 3 - self.month % 3
qtrs = roll_qtrday(
other, self.n, self.month, day_option=self._day_option, modby=3
)
months = qtrs * 3 - months_since
return _shift_month(other, months, self._day_option)
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
mod_month = (date.month - self.month) % 3
return mod_month == 0 and date.day == self._get_offset_day(date)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract cftime.datetime from offset.")
elif type(other) == type(self) and other.month == self.month:
return type(self)(self.n - other.n, month=self.month)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
return "{}-{}".format(self._freq, _MONTH_ABBREVIATIONS[self.month])
def __str__(self):
return "<{}: n={}, month={}>".format(type(self).__name__, self.n, self.month)
class QuarterBegin(QuarterOffset):
# When converting a string to an offset, pandas converts
# 'QS' to a QuarterBegin offset starting in the month of
# January. When creating a QuarterBegin offset directly
# from the constructor, however, the default month is March.
# We follow that behavior here.
_default_month = 3
_freq = "QS"
_day_option = "start"
def rollforward(self, date):
"""Roll date forward to nearest start of quarter"""
if self.onOffset(date):
return date
else:
return date + QuarterBegin(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest start of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterBegin(month=self.month)
class QuarterEnd(QuarterOffset):
# When converting a string to an offset, pandas converts
# 'Q' to a QuarterEnd offset starting in the month of
# December. When creating a QuarterEnd offset directly
# from the constructor, however, the default month is March.
# We follow that behavior here.
_default_month = 3
_freq = "Q"
_day_option = "end"
def rollforward(self, date):
"""Roll date forward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date + QuarterEnd(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterEnd(month=self.month)
class YearOffset(BaseCFTimeOffset):
_freq: ClassVar[str]
_day_option: ClassVar[str]
_default_month: ClassVar[int]
def __init__(self, n=1, month=None):
BaseCFTimeOffset.__init__(self, n)
self.month = _validate_month(month, self._default_month)
def __apply__(self, other):
reference_day = _get_day_of_month(other, self._day_option)
years = _adjust_n_years(other, self.n, self.month, reference_day)
months = years * 12 + (self.month - other.month)
return _shift_month(other, months, self._day_option)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract cftime.datetime from offset.")
elif type(other) == type(self) and other.month == self.month:
return type(self)(self.n - other.n, month=self.month)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
return "{}-{}".format(self._freq, _MONTH_ABBREVIATIONS[self.month])
def __str__(self):
return "<{}: n={}, month={}>".format(type(self).__name__, self.n, self.month)
class YearBegin(YearOffset):
_freq = "AS"
_day_option = "start"
_default_month = 1
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == 1 and date.month == self.month
def rollforward(self, date):
"""Roll date forward to nearest start of year"""
if self.onOffset(date):
return date
else:
return date + YearBegin(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest start of year"""
if self.onOffset(date):
return date
else:
return date - YearBegin(month=self.month)
class YearEnd(YearOffset):
_freq = "A"
_day_option = "end"
_default_month = 12
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == _days_in_month(date) and date.month == self.month
def rollforward(self, date):
"""Roll date forward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date + YearEnd(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date - YearEnd(month=self.month)
class Day(BaseCFTimeOffset):
_freq = "D"
def as_timedelta(self):
return timedelta(days=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Hour(BaseCFTimeOffset):
_freq = "H"
def as_timedelta(self):
return timedelta(hours=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Minute(BaseCFTimeOffset):
_freq = "T"
def as_timedelta(self):
return timedelta(minutes=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Second(BaseCFTimeOffset):
_freq = "S"
def as_timedelta(self):
return timedelta(seconds=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
_FREQUENCIES = {
"A": YearEnd,
"AS": YearBegin,
"Y": YearEnd,
"YS": YearBegin,
"Q": partial(QuarterEnd, month=12),
"QS": partial(QuarterBegin, month=1),
"M": MonthEnd,
"MS": MonthBegin,
"D": Day,
"H": Hour,
"T": Minute,
"min": Minute,
"S": Second,
"AS-JAN": partial(YearBegin, month=1),
"AS-FEB": partial(YearBegin, month=2),
"AS-MAR": partial(YearBegin, month=3),
"AS-APR": partial(YearBegin, month=4),
"AS-MAY": partial(YearBegin, month=5),
"AS-JUN": partial(YearBegin, month=6),
"AS-JUL": partial(YearBegin, month=7),
"AS-AUG": partial(YearBegin, month=8),
"AS-SEP": partial(YearBegin, month=9),
"AS-OCT": partial(YearBegin, month=10),
"AS-NOV": partial(YearBegin, month=11),
"AS-DEC": partial(YearBegin, month=12),
"A-JAN": partial(YearEnd, month=1),
"A-FEB": partial(YearEnd, month=2),
"A-MAR": partial(YearEnd, month=3),
"A-APR": partial(YearEnd, month=4),
"A-MAY": partial(YearEnd, month=5),
"A-JUN": partial(YearEnd, month=6),
"A-JUL": partial(YearEnd, month=7),
"A-AUG": partial(YearEnd, month=8),
"A-SEP": partial(YearEnd, month=9),
"A-OCT": partial(YearEnd, month=10),
"A-NOV": partial(YearEnd, month=11),
"A-DEC": partial(YearEnd, month=12),
"QS-JAN": partial(QuarterBegin, month=1),
"QS-FEB": partial(QuarterBegin, month=2),
"QS-MAR": partial(QuarterBegin, month=3),
"QS-APR": partial(QuarterBegin, month=4),
"QS-MAY": partial(QuarterBegin, month=5),
"QS-JUN": partial(QuarterBegin, month=6),
"QS-JUL": partial(QuarterBegin, month=7),
"QS-AUG": partial(QuarterBegin, month=8),
"QS-SEP": partial(QuarterBegin, month=9),
"QS-OCT": partial(QuarterBegin, month=10),
"QS-NOV": partial(QuarterBegin, month=11),
"QS-DEC": partial(QuarterBegin, month=12),
"Q-JAN": partial(QuarterEnd, month=1),
"Q-FEB": partial(QuarterEnd, month=2),
"Q-MAR": partial(QuarterEnd, month=3),
"Q-APR": partial(QuarterEnd, month=4),
"Q-MAY": partial(QuarterEnd, month=5),
"Q-JUN": partial(QuarterEnd, month=6),
"Q-JUL": partial(QuarterEnd, month=7),
"Q-AUG": partial(QuarterEnd, month=8),
"Q-SEP": partial(QuarterEnd, month=9),
"Q-OCT": partial(QuarterEnd, month=10),
"Q-NOV": partial(QuarterEnd, month=11),
"Q-DEC": partial(QuarterEnd, month=12),
}
_FREQUENCY_CONDITION = "|".join(_FREQUENCIES.keys())
_PATTERN = fr"^((?P<multiple>\d+)|())(?P<freq>({_FREQUENCY_CONDITION}))$"
# pandas defines these offsets as "Tick" objects, which for instance have
# distinct behavior from monthly or longer frequencies in resample.
CFTIME_TICKS = (Day, Hour, Minute, Second)
def to_offset(freq):
"""Convert a frequency string to the appropriate subclass of
BaseCFTimeOffset."""
if isinstance(freq, BaseCFTimeOffset):
return freq
else:
try:
freq_data = re.match(_PATTERN, freq).groupdict()
except AttributeError:
raise ValueError("Invalid frequency string provided")
freq = freq_data["freq"]
multiples = freq_data["multiple"]
if multiples is None:
multiples = 1
else:
multiples = int(multiples)
return _FREQUENCIES[freq](n=multiples)
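# Illustrative usage sketch for ``to_offset`` (not called anywhere): a leading
# integer becomes ``n`` and an anchored suffix picks the month via _FREQUENCIES.
def _example_to_offset_usage():
    day = to_offset("D")              # Day with n=1
    hours = to_offset("5H")           # Hour with n=5
    quarters = to_offset("2QS-APR")   # QuarterBegin with n=2, month=4
    years = to_offset("A-JUN")        # YearEnd with n=1, month=6
    return day, hours, quarters, years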
def to_cftime_datetime(date_str_or_date, calendar=None):
import cftime
if isinstance(date_str_or_date, str):
if calendar is None:
raise ValueError(
"If converting a string to a cftime.datetime object, "
"a calendar type must be provided"
)
date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)
return date
elif isinstance(date_str_or_date, cftime.datetime):
return date_str_or_date
else:
raise TypeError(
"date_str_or_date must be a string or a "
"subclass of cftime.datetime. Instead got "
"{!r}.".format(date_str_or_date)
)
def normalize_date(date):
"""Round datetime down to midnight."""
return date.replace(hour=0, minute=0, second=0, microsecond=0)
def _maybe_normalize_date(date, normalize):
"""Round datetime down to midnight if normalize is True."""
if normalize:
return normalize_date(date)
else:
return date
def _generate_linear_range(start, end, periods):
"""Generate an equally-spaced sequence of cftime.datetime objects between
and including two dates (whose length equals the number of periods)."""
import cftime
total_seconds = (end - start).total_seconds()
values = np.linspace(0.0, total_seconds, periods, endpoint=True)
units = "seconds since {}".format(format_cftime_datetime(start))
calendar = start.calendar
return cftime.num2date(
values, units=units, calendar=calendar, only_use_cftime_datetimes=True
)
def _generate_range(start, end, periods, offset):
"""Generate a regular range of cftime.datetime objects with a
given time offset.
Adapted from pandas.tseries.offsets.generate_range.
Parameters
----------
start : cftime.datetime, or None
Start of range
end : cftime.datetime, or None
End of range
periods : int, or None
Number of elements in the sequence
offset : BaseCFTimeOffset
An offset class designed for working with cftime.datetime objects
Returns
-------
A generator object
"""
if start:
start = offset.rollforward(start)
if end:
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
current = start
if offset.n >= 0:
while current <= end:
yield current
next_date = current + offset
if next_date <= current:
raise ValueError(f"Offset {offset} did not increment date")
current = next_date
else:
while current >= end:
yield current
next_date = current + offset
if next_date >= current:
raise ValueError(f"Offset {offset} did not decrement date")
current = next_date
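# Illustrative usage sketch for ``_generate_range`` (assumes cftime is installed;
# the dates are hypothetical): the bounds are snapped onto the offset grid, so a
# mid-month start rolls forward and a mid-month end rolls back.
def _example_generate_range_usage():
    import cftime
    start = cftime.DatetimeNoLeap(2000, 1, 15)
    end = cftime.DatetimeNoLeap(2000, 4, 15)
    # With MonthEnd the bounds become 2000-01-31 and 2000-03-31, yielding
    # three month-end dates in the "noleap" calendar.
    return list(_generate_range(start, end, None, MonthEnd()))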
def cftime_range(
start=None,
end=None,
periods=None,
freq="D",
normalize=False,
name=None,
closed=None,
calendar="standard",
):
"""Return a fixed frequency CFTimeIndex.
Parameters
----------
start : str or cftime.datetime, optional
Left bound for generating dates.
end : str or cftime.datetime, optional
Right bound for generating dates.
periods : integer, optional
Number of periods to generate.
freq : str, default 'D', BaseCFTimeOffset, or None
Frequency strings can have multiples, e.g. '5H'.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting index
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to the
'left', 'right', or both sides (None, the default).
calendar : str
Calendar type for the datetimes (default 'standard').
Returns
-------
CFTimeIndex
Notes
-----
This function is an analog of ``pandas.date_range`` for use in generating
sequences of ``cftime.datetime`` objects. It supports most of the
features of ``pandas.date_range`` (e.g. specifying how the index is
``closed`` on either side, or whether or not to ``normalize`` the start and
end bounds); however, there are some notable exceptions:
- You cannot specify a ``tz`` (time zone) argument.
- Start or end dates specified as partial-datetime strings must use the
`ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
- It supports many, but not all, frequencies supported by
``pandas.date_range``. For example it does not currently support any of
the business-related, semi-monthly, or sub-second frequencies.
- Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
these can easily be written in terms of the finest common resolution,
e.g. '61min'.
Valid simple frequency strings for use with ``cftime``-calendars include
any multiples of the following.
+--------+--------------------------+
| Alias | Description |
+========+==========================+
| A, Y | Year-end frequency |
+--------+--------------------------+
| AS, YS | Year-start frequency |
+--------+--------------------------+
| Q | Quarter-end frequency |
+--------+--------------------------+
| QS | Quarter-start frequency |
+--------+--------------------------+
| M | Month-end frequency |
+--------+--------------------------+
| MS | Month-start frequency |
+--------+--------------------------+
| D | Day frequency |
+--------+--------------------------+
| H | Hour frequency |
+--------+--------------------------+
| T, min | Minute frequency |
+--------+--------------------------+
| S | Second frequency |
+--------+--------------------------+
Any multiples of the following anchored offsets are also supported.
+----------+--------------------------------------------------------------------+
| Alias | Description |
+==========+====================================================================+
| A(S)-JAN | Annual frequency, anchored at the end (or beginning) of January |
+----------+--------------------------------------------------------------------+
| A(S)-FEB | Annual frequency, anchored at the end (or beginning) of February |
+----------+--------------------------------------------------------------------+
| A(S)-MAR | Annual frequency, anchored at the end (or beginning) of March |
+----------+--------------------------------------------------------------------+
| A(S)-APR | Annual frequency, anchored at the end (or beginning) of April |
+----------+--------------------------------------------------------------------+
| A(S)-MAY | Annual frequency, anchored at the end (or beginning) of May |
+----------+--------------------------------------------------------------------+
| A(S)-JUN | Annual frequency, anchored at the end (or beginning) of June |
+----------+--------------------------------------------------------------------+
| A(S)-JUL | Annual frequency, anchored at the end (or beginning) of July |
+----------+--------------------------------------------------------------------+
| A(S)-AUG | Annual frequency, anchored at the end (or beginning) of August |
+----------+--------------------------------------------------------------------+
| A(S)-SEP | Annual frequency, anchored at the end (or beginning) of September |
+----------+--------------------------------------------------------------------+
| A(S)-OCT | Annual frequency, anchored at the end (or beginning) of October |
+----------+--------------------------------------------------------------------+
| A(S)-NOV | Annual frequency, anchored at the end (or beginning) of November |
+----------+--------------------------------------------------------------------+
| A(S)-DEC | Annual frequency, anchored at the end (or beginning) of December |
+----------+--------------------------------------------------------------------+
| Q(S)-JAN | Quarter frequency, anchored at the end (or beginning) of January |
+----------+--------------------------------------------------------------------+
| Q(S)-FEB | Quarter frequency, anchored at the end (or beginning) of February |
+----------+--------------------------------------------------------------------+
| Q(S)-MAR | Quarter frequency, anchored at the end (or beginning) of March |
+----------+--------------------------------------------------------------------+
| Q(S)-APR | Quarter frequency, anchored at the end (or beginning) of April |
+----------+--------------------------------------------------------------------+
| Q(S)-MAY | Quarter frequency, anchored at the end (or beginning) of May |
+----------+--------------------------------------------------------------------+
| Q(S)-JUN | Quarter frequency, anchored at the end (or beginning) of June |
+----------+--------------------------------------------------------------------+
| Q(S)-JUL | Quarter frequency, anchored at the end (or beginning) of July |
+----------+--------------------------------------------------------------------+
| Q(S)-AUG | Quarter frequency, anchored at the end (or beginning) of August |
+----------+--------------------------------------------------------------------+
| Q(S)-SEP | Quarter frequency, anchored at the end (or beginning) of September |
+----------+--------------------------------------------------------------------+
| Q(S)-OCT | Quarter frequency, anchored at the end (or beginning) of October |
+----------+--------------------------------------------------------------------+
| Q(S)-NOV | Quarter frequency, anchored at the end (or beginning) of November |
+----------+--------------------------------------------------------------------+
| Q(S)-DEC | Quarter frequency, anchored at the end (or beginning) of December |
+----------+--------------------------------------------------------------------+
Finally, the following calendar aliases are supported.
+--------------------------------+---------------------------------------+
| Alias | Date type |
+================================+=======================================+
| standard, gregorian | ``cftime.DatetimeGregorian`` |
+--------------------------------+---------------------------------------+
| proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |
+--------------------------------+---------------------------------------+
| noleap, 365_day | ``cftime.DatetimeNoLeap`` |
+--------------------------------+---------------------------------------+
| all_leap, 366_day | ``cftime.DatetimeAllLeap`` |
+--------------------------------+---------------------------------------+
| 360_day | ``cftime.Datetime360Day`` |
+--------------------------------+---------------------------------------+
| julian | ``cftime.DatetimeJulian`` |
+--------------------------------+---------------------------------------+
Examples
--------
This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
objects associated with the specified calendar type, e.g.
>>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
dtype='object')
As in the standard pandas function, three of the ``start``, ``end``,
``periods``, or ``freq`` arguments must be specified at a given time, with
the other set to ``None``. See the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html#pandas.date_range>`_
for more examples of the behavior of ``date_range`` with each of the
parameters.
See Also
--------
pandas.date_range
"""
# Adapted from pandas.core.indexes.datetimes._generate_range.
if count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the arguments 'start', 'end', 'periods', and 'freq', three "
"must be specified at a time."
)
if start is not None:
start = to_cftime_datetime(start, calendar)
start = _maybe_normalize_date(start, normalize)
if end is not None:
end = to_cftime_datetime(end, calendar)
end = _maybe_normalize_date(end, normalize)
if freq is None:
dates = _generate_linear_range(start, end, periods)
else:
offset = to_offset(freq)
dates = np.array(list(_generate_range(start, end, periods, offset)))
left_closed = False
right_closed = False
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed must be either 'left', 'right' or None")
if not left_closed and len(dates) and start is not None and dates[0] == start:
dates = dates[1:]
if not right_closed and len(dates) and end is not None and dates[-1] == end:
dates = dates[:-1]
return CFTimeIndex(dates, name=name)
|
shoyer/xarray
|
xarray/coding/cftime_offsets.py
|
Python
|
apache-2.0
| 35,760
|
""" test """
import logging
import os
import tempfile
import sys
import random
from bzt.engine import Engine, Configuration, FileLister
from bzt.utils import load_class
from bzt.engine import Provisioning, ScenarioExecutor, Reporter, AggregatorListener
from bzt.modules.aggregator import ResultsReader
from tests import random_sample
try:
from exceptions import KeyboardInterrupt
except ImportError:
from builtins import KeyboardInterrupt
class EngineEmul(Engine):
"""
emulating engine
"""
def __init__(self):
Engine.__init__(self, logging.getLogger(''))
self.artifacts_base_dir = os.path.dirname(__file__) + "/../build/test"
self._create_artifacts_dir()
self.finalize_exc = None
self.was_finalize = False
def _shutdown(self):
return super(EngineEmul, self)._shutdown()
def dump_config(self):
""" test """
fname = tempfile.mkstemp()[1]
self.config.dump(fname, Configuration.JSON)
with open(fname) as fh:
logging.debug("JSON:\n%s", fh.read())
class ModuleMock(ScenarioExecutor, Provisioning, Reporter, FileLister):
""" mock """
def __init__(self):
super(ModuleMock, self).__init__()
self.postproc_exc = None
self.check_exc = None
self.prepare_exc = None
self.startup_exc = None
self.shutdown_exc = None
self.check_iterations = sys.maxsize
self.was_shutdown = False
self.was_startup = False
self.was_prepare = False
self.was_check = False
self.was_postproc = False
def prepare(self):
"""
:raise self.prepare_exc:
"""
self.log.info("Preparing mock")
self.was_prepare = True
self.check_iterations = int(self.settings.get('check_iterations', "2"))
self.postproc_exc = self.get_exc("postproc")
self.check_exc = self.get_exc("check")
self.prepare_exc = self.get_exc("prepare")
self.startup_exc = self.get_exc("startup")
self.shutdown_exc = self.get_exc("shutdown")
if isinstance(self.engine.aggregator, ResultsReader):
reader = MockReader()
for num in range(0, self.check_iterations):
for quan in range(0, int(random.random() * 10)):
reader.data.append(random_sample(num))
self.engine.aggregator.add_reader(reader)
if self.prepare_exc:
raise self.prepare_exc
def startup(self):
"""
:raise self.startup_exc:
"""
self.log.info("Startup mock")
self.was_startup = True
if self.startup_exc:
raise self.startup_exc
def check(self):
"""
:return: :raise self.check_exc:
"""
self.was_check = True
self.log.info("Checks remaining: %s", self.check_iterations)
self.check_iterations -= 1
if not self.check_iterations:
if self.check_exc:
raise self.check_exc
else:
return True
return False
def shutdown(self):
"""
:raise self.shutdown_exc:
"""
self.log.info("Shutdown mock")
self.was_shutdown = True
if self.shutdown_exc:
raise self.shutdown_exc
def post_process(self):
"""
:raise self.postproc_exc:
"""
self.log.info("Postproc mock")
self.was_postproc = True
if self.postproc_exc:
raise self.postproc_exc
def get_exc(self, param):
"""
:type param: str
:return:
"""
name = self.settings.get(param, "")
if name:
cls = load_class(name)
return cls()
return None
def resource_files(self):
"""
:return:
"""
return [__file__]
class MockReader(ResultsReader, AggregatorListener):
"""
test
"""
def __init__(self):
super(MockReader, self).__init__()
self.results = []
self.data = []
self.add_listener(self)
self.track_percentiles = [0, 50, 90, 99, 99.5, 100]
def _read(self, final_pass=False):
"""
Emulating read samples
:type final_pass: bool
:return:
"""
while self.data:
# logging.debug("Emul read: %s", self.data[0])
yield self.data.pop(0)
def aggregated_second(self, data):
"""
Store and assert aggregate sequence
:type data: dict
:raise AssertionError:
"""
if self.results:
if self.results[-1]["ts"] >= data["ts"]:
raise AssertionError("TS sequence wrong: %s>=%s" % (self.results[-1]["ts"], data["ts"]))
logging.info("Data: %s", data)
self.results.append(data)
def download_progress_mock(blocknum, blocksize, totalsize):
pass
class ResultChecker(AggregatorListener):
def __init__(self, callback):
super(ResultChecker, self).__init__()
self.callback = callback
def aggregated_second(self, data):
self.callback(data)
|
Nefry/taurus
|
tests/mocks.py
|
Python
|
apache-2.0
| 5,148
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import string
from urlparse import urlparse, parse_qs
from mopidy import backend
from mopidy.models import SearchResult, Track, Album, Artist
import pykka
import pafy
import requests
import unicodedata
from mopidy_youtube import logger
yt_api_endpoint = 'https://www.googleapis.com/youtube/v3/'
yt_key = 'AIzaSyAl1Xq9DwdE_KD4AtPaE4EJl3WZe2zCqg4'
def resolve_track(track, stream=False):
logger.debug("Resolving Youtube for track '%s'", track)
if hasattr(track, 'uri'):
return resolve_url(track.comment, stream)
else:
return resolve_url(track.split('.')[-1], stream)
def safe_url(uri):
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
safe_uri = unicodedata.normalize(
'NFKD',
unicode(uri)
).encode('ASCII', 'ignore')
return re.sub(
        r'\s+',
' ',
''.join(c for c in safe_uri if c in valid_chars)
).strip()
def resolve_url(url, stream=False):
video = pafy.new(url)
if not stream:
uri = 'youtube:video/%s.%s' % (
safe_url(video.title), video.videoid
)
else:
uri = video.getbestaudio()
if not uri: # get video url
uri = video.getbest()
logger.debug('%s - %s %s %s' % (
video.title, uri.bitrate, uri.mediatype, uri.extension))
uri = uri.url
if not uri:
return
if '-' in video.title:
title = video.title.split('-')
track = Track(
name=title[1].strip(),
comment=video.videoid,
length=video.length*1000,
artists=[Artist(name=title[0].strip())],
album=Album(
name='Youtube',
images=[video.bigthumb, video.bigthumbhd]
),
uri=uri
)
else:
track = Track(
name=video.title,
comment=video.videoid,
length=video.length*1000,
album=Album(
name='Youtube',
images=[video.bigthumb, video.bigthumbhd]
),
uri=uri
)
return track
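# Illustrative usage sketch for ``resolve_url`` (the URL is a placeholder and
# resolving it requires network access plus a valid video id): a title of the
# form "Artist - Song" is split into artist and track name, otherwise the full
# title is used as the track name.
def _example_resolve_url_usage():
    track = resolve_url('https://www.youtube.com/watch?v=XXXXXXXXXXX')
    return track.name, [artist.name for artist in track.artists]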
def search_youtube(q):
query = {
'part': 'id',
'maxResults': 15,
'type': 'video',
'q': q,
'key': yt_key
}
pl = requests.get(yt_api_endpoint+'search', params=query)
playlist = []
for yt_id in pl.json().get('items'):
try:
track = resolve_url(yt_id.get('id').get('videoId'))
playlist.append(track)
except Exception as e:
logger.info(e.message)
return playlist
def resolve_playlist(url):
logger.info("Resolving Youtube for playlist '%s'", url)
query = {
'part': 'snippet',
'maxResults': 50,
'playlistId': url,
'fields': 'items/snippet/resourceId',
'key': yt_key
}
    pl = requests.get(yt_api_endpoint+'playlistItems', params=query)
playlist = []
for yt_id in pl.json().get('items'):
try:
yt_id = yt_id.get('snippet').get('resourceId').get('videoId')
playlist.append(resolve_url(yt_id))
except Exception as e:
logger.info(e.message)
return playlist
class YoutubeBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(YoutubeBackend, self).__init__()
self.config = config
self.library = YoutubeLibraryProvider(backend=self)
self.playback = YoutubePlaybackProvider(audio=audio, backend=self)
self.uri_schemes = ['youtube', 'yt']
class YoutubeLibraryProvider(backend.LibraryProvider):
def lookup(self, track):
if 'yt:' in track:
track = track.replace('yt:', '')
if 'youtube.com' in track:
url = urlparse(track)
req = parse_qs(url.query)
if 'list' in req:
return resolve_playlist(req.get('list')[0])
else:
return [resolve_url(track)]
else:
return [resolve_url(track)]
def search(self, query=None, uris=None):
if not query:
return
if 'uri' in query:
search_query = ''.join(query['uri'])
url = urlparse(search_query)
if 'youtube.com' in url.netloc:
req = parse_qs(url.query)
if 'list' in req:
return SearchResult(
uri='youtube:search',
tracks=resolve_playlist(req.get('list')[0])
)
else:
logger.info(
"Resolving Youtube for track '%s'", search_query)
return SearchResult(
uri='youtube:search',
tracks=[resolve_url(search_query)]
)
else:
search_query = '|'.join(query.values()[0])
logger.info("Searching Youtube for query '%s'", search_query)
return SearchResult(
uri='youtube:search',
tracks=search_youtube(search_query)
)
class YoutubePlaybackProvider(backend.PlaybackProvider):
def play(self, track):
track = resolve_track(track, True)
return super(YoutubePlaybackProvider, self).play(track)
|
hkariti/mopidy-youtube
|
mopidy_youtube/backend.py
|
Python
|
apache-2.0
| 5,359
|
# coding:utf-8
# Operations on a singly-linked circular list:
# is_empty()        check whether the list is empty
# length()          return the length of the list
# travel()          traverse the list
# add(item)         add a node at the head
# append(item)      append a node at the tail
# insert(pos, item) insert a node at position pos
# remove(item)      remove a node
# search(item)      check whether a node exists
class Node(object):
"""节点"""
def __init__(self, item):
self.elem = item
self.next = None
class SingleCycleLinkedList(object):
"""单向循环链表"""
def __init__(self, node=None):
self.__head = node
        # if node is not None, point it at itself to form the cycle
if node:
node.next = node
def is_empty(self):
"""判断链表是否为空"""
return self.__head is None
def length(self):
"""返回链表的长度"""
if self.is_empty():
return 0
else:
cur = self.__head
count = 1
while cur.next is not self.__head:
count += 1
cur = cur.next
return count
def travel(self):
"""遍历"""
if self.is_empty():
return
else:
cur = self.__head
while cur.next is not self.__head:
print(cur.elem, end=" ")
cur = cur.next
            # the loop stops at the tail node, whose element has not been printed yet
print(cur.elem)
def add(self, item):
"""在头部添加一个节点,头插法"""
node = Node(item)
if self.is_empty():
self.__head = node
node.next = node
else:
            # need to find the tail node first
cur = self.__head
while cur.next is not self.__head:
cur = cur.next
node.next = self.__head
self.__head = node
cur.next = node
def append(self, item):
"""在尾部添加一个节点,尾插法"""
node = Node(item)
if self.is_empty():
self.__head = node
node.next = node
else:
            # again, need to find the tail node first
cur = self.__head
while cur.next is not self.__head:
cur = cur.next
cur.next = node
node.next = self.__head
def insert(self, pos, item):
"""在指定位置pos添加节点"""
if pos <= 0:
self.add(item)
elif pos > (self.length() - 1):
self.append(item)
else:
node = Node(item)
prev = self.__head
count = 0
while count < pos - 1:
count += 1
prev = prev.next
            # the loop stops with prev at the element just before the insert position
node.next = prev.next
prev.next = node
def remove(self, item):
"""删除一个节点,需要考虑链表是否为空,删除的节点是头节点,尾节点,还是中间节点"""
if self.is_empty():
return
else:
cur = self.__head
pre = None
while cur.next is not self.__head:
if cur.elem == item:
                    # is it the head node or a middle node?
if cur is self.__head:
                        # head node: need to find the tail first
rear = self.__head
while rear.next is not self.__head:
rear = rear.next
self.__head = cur.next
rear.next = self.__head
else:
                        # middle node
pre.next = cur.next
return
else:
pre = cur
cur = cur.next
            # loop exited with cur at the tail node
if cur.elem == item:
                # careful: the list may contain only one node
if cur is self.__head:
self.__head = None
else:
pre.next = self.__head
def search(self, item):
"""查找节点是否存在"""
if self.is_empty():
return False
else:
cur = self.__head
while cur.next is not self.__head:
if cur.elem == item:
return True
else:
cur = cur.next
            # the loop stops at the tail node, which has not been compared yet
if cur.elem == item:
return True
else:
return False
if __name__ == "__main__":
scll = SingleCycleLinkedList()
print("befor initialized:", scll.is_empty())
print("befor initialized:", scll.length())
scll.add(1)
scll.add(2)
scll.add(3)
scll.add(4)
scll.add(5)
scll.add(6)
scll.travel()
scll.append(7)
scll.travel()
scll.insert(3, 99)
scll.travel()
print("scll.search(99):", scll.search(99))
scll.remove(99)
scll.travel()
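    # Illustrative extension of the demo above: exercise remove() on the head,
    # the tail and a single-node list, the edge cases handled in remove().
    scll.remove(6)          # remove the head node
    scll.travel()
    scll.remove(7)          # remove the tail node
    scll.travel()
    single = SingleCycleLinkedList(Node(42))
    single.remove(42)       # removing the only node empties the list
    print("single.is_empty():", single.is_empty())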
|
coderwjq/adt_python
|
02-linked_list/04-single_cycle_linked_list.py
|
Python
|
apache-2.0
| 5,206
|
# -*- coding: utf-8 -*-
import json
import requests
import sys, os
import time
import re
class Helper:
initheaders = {
"Host": "segmentfault.com",
"Connection": "keep-alive",
"Content-Length": "55",
"Accept": "*/*",
"Origin": "https://segmentfault.com",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"DNT": "1",
"Referer": "https://segmentfault.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,zh-TW;q=0.2",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Cookie": "PHPSESSID=web3~fdf535b2518f7f061780d987bb65934a; _gat=1; io=onpREhr-L-d7pRxJHvSF; Hm_lvt_e23800c454aa573c0ccb16b52665ac26=1508383051,1508500169,1508563643,1508565378; Hm_lpvt_e23800c454aa573c0ccb16b52665ac26=1508569683; _ga=GA1.2.613128477.1495522770; _gid=GA1.2.1217955936.1508498183"
}
def __init__(self):
self.loadConfig()
self._session = requests.session()
self._session.headers = Helper.initheaders
self._session.max_redirects = 60
if(self.initHeader() != None):
print 'use cached headers'
self._session.headers = self.initHeader()
print self._session.headers
self.filenameList = {}
def loadConfig(self):
        # load the configuration file
currentProject = os.path.dirname(sys.path[0])
configStr = os.path.abspath( os.path.join(currentProject,'config.json'))
data_file = open(configStr)
data = json.load(data_file)
self.loginUrl = data["segment"]["login-url"]
self.loginPage = data["segment"]["login-page"]
self.postUrl = data["segment"]["post-url"]
self.username = data["segment"]["username"]
self.password = data["segment"]["password"]
self.draftUrl = data["segment"]["draft-url"]
def initHeader(self):
try:
cookiepath = os.path.abspath(os.path.join(os.path.dirname('.'), 'cookie/segment_cookies'))
data_file = open(cookiepath,'r')
data = json.load(data_file)
return data
except ValueError, e:
print 'cache-cookie is None'
return None
except IOError , e:
print 'file is not found'
return None
def login(self):
        # login using the cached session  //TODO token
# try:
# print self._session.headers
# res = self._session.post(self.loginUrl + '?_=b56c39ea0c0d50b3dd9e5fa11d9e2f00', timeout=10)
# except requests.exceptions.ReadTimeout,e:
        #     print 'login with the cached session failed'
res = '';
while(res == ''):
try:
data = self._prepare_login_form_data()
res = self._session.post(self.loginUrl,data=data,timeout=10)
print res
if(res.status_code == 200):
self.saveHeader()
print 'login succ'
return 0
else:
print 'login fail'
except ValueError,e:
print e
print 'use cached login is succ'
return 'succ'
except requests.exceptions.ConnectionError:
print 'requests.exceptions.ConnectionError try again'
time.sleep(5)
print 'sleep over'
continue
def _prepare_login_form_data(self):
        # build and return the login form data
form = {
'username': str(self.username),
'password': str(self.password),
'remember': "1"
}
print form
return form
def postArticle(self,filename):
print '--submit post--'
self._session.headers['Referer'] = 'https://segmentfault.com/write?freshman=1'
formdata = self._prepare_post_form_data(filename)
res = ''
while (res == ''):
try:
res = self._session.post(self.postUrl,data=formdata,timeout=10)
print res
if(res.json()['status'] == 0):
                    print 'Article published successfully: '+formdata['title']
else:
                    print 'Failed to publish article: '+formdata['title']
except:
                print 'Exception while publishing'
continue
print '-- post end --'
def _prepare_post_form_data(self,filename):
draftData = self.extractFile(filename)
print draftData
print '-- save draft --'
artId = ''
res = ''
while (res == ''):
try:
res = self._session.post(self.draftUrl,data=draftData,timeout=10)
status = res.json()['status']
if(status == 0):
artId = res.json()['data']
                    print 'Draft saved successfully'
else:
                    print 'Failed to save draft'
return None
except:
                print 'Exception while saving draft'
time.sleep(5)
continue
del draftData['do']
del draftData['aticleId']
draftData['license'] = '1'
draftData['draftId'] = artId
draftData['createId'] = ''
draftData['newsType'] = '1490000006201495'
return draftData
def saveHeader(self):
cookiepath = os.path.abspath(os.path.join(os.path.dirname('.'), 'cookie/segment_cookies'))
file = open(cookiepath, 'w')
cookies = self._session.headers
json.dump(cookies, file)
file.close()
def dirCb(self,dirname):
for line in os.listdir(dirname):
filename = os.path.abspath(os.path.join(dirname, line))
if(os.path.isdir(filename)):
self.dirCb(filename)
else:
pattern = re.compile(r"(\d+)-(\d+)-(\d+)-(\S+.md)")
result = pattern.findall(filename)
if (len(result) != 0):
tags = filename.split('_posts')[1]
# print tags
tagname = ''
for tag in tags.split(os.sep):
if (tag != '' and len(pattern.findall(tag)) == 0):
tagname = tagname + '|' + tag
tagname = tagname[1:]
self.filenameList[filename] = tagname
# for fn in self.filenameList:
# print fn +' -t- '+self.filenameList[fn]
def destroy(self):
self._session.close()
def extractFile(self,filename):
data = {}
        ## enforce the length limit
file = open(filename)
filecontent = file.read()
print len(filecontent)
if (len(filecontent) >= 75000):
filecontent = filecontent[0:75000]
        ## build the article link
pattern = re.compile(r"(\d+)-(\d+)-(\d+)-(\S+).md")
print filename
result = pattern.findall(filename)
print result
href = 'www.paraller.com/' + result[0][0] + '/' + result[0][1] + '/' + result[0][2] + '/' + result[0][
3] + '/'
lience = '转载请注明出处 [http://www.paraller.com](http://www.paraller.com) \r\n 原文排版地址 [' + href + '](' + href + ')\r\n'
print lience
        ## replace the front-matter header with the notice
pattern = re.compile(r"---(\n(.{0,}))*---")
filecontent = re.sub(pattern, lience, filecontent)
        ## assemble the post data
data = {
"do": "saveArticle",
"type": "1",
"title": result[0][3],
"text": filecontent,
"weibo": "0",
"blogId": "0",
"aticleId": "",
"id": "",
"url": ""
}
print self.filenameList[filename]
        # collect the tags
tags = self.filenameList[filename].split('|')
tagsDict = []
for tag in tags:
print 'tag is :'
print tag
data['tags[]'] = self.getTags(tag)
return data
def getTags(self,tagname):
        ## resolve the tag name to a tag id
self._session.headers['Referer'] = 'https://segmentfault.com/write?freshman=1'
if(self._session.headers.has_key('Origin')):
del self._session.headers['Origin']
del self._session.headers['Content-Length']
del self._session.headers['Content-Type']
res = ''
while res == '':
try:
print 'https://segmentfault.com/api/tags/search?q='+tagname+'&_=7ee3470a9132cf004a134938342f4b35'
res = self._session.get('https://segmentfault.com/api/tags/search?q='+tagname+'&_=7ee3470a9132cf004a134938342f4b35',timeout=5)
except:
time.sleep(5)
print 'ag'
continue
print res.text
if( len(res.json()['data']) == 0):
            print 'could not find tag, retrying with a shorter name'
print tagname[0:len(tagname)-1]
self.getTags(tagname[0:len(tagname)-1])
else:
print res.json()['data'][0]['name']
return res.json()['data'][0]['id']
if __name__ == '__main__':
_helper = Helper()
code = _helper.login()
if (code == 0):
path = os.path.abspath(os.path.join(sys.path[0],'../../'))
_helper.dirCb(path)
for filename in _helper.filenameList:
_helper.postArticle(filename)
else:
        print 'Login failed'
_helper.destroy()
# _helper.extractFile('/Users/zhidaliao/Desktop/zhida_blog/_posts/运维 & 主机 & 系统搭建/2016-05-22-gitlab-runner-maven卡死的情况.md')
# _helper.postArticle('/Users/zhidaliao/Desktop/zhida_blog/_posts/运维 & 主机 & 系统搭建/2016-05-22-gitlab-runner-maven卡死的情况.md')
# _helper._prepare_post_form_data('/Users/zhidaliao/Desktop/zhida_blog/_posts/运维 & 主机 & 系统搭建/2016-05-22-gitlab-runner-maven卡死的情况.md')
    # iterate over the articles
# _helper.loopDir()
# _helper.dirCb('docker')
# if(code == 0):
# _helper.postArticle()
# _helper.destroy()
else:
print 'being imported as module'
|
liaozhida/liaozhida.github.io
|
_posts/pythonbak/porter/porter_segment.py
|
Python
|
apache-2.0
| 8,573
|
#!/usr/bin/env python
from absl.testing import absltest
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_server import blob_store
from grr_response_server.databases import db as abstract_db
from grr.test_lib import db_test_lib
class WithDatabaseTest(absltest.TestCase):
def testDatabaseIsProvided(self):
@db_test_lib.WithDatabase
def TestMethod(db: abstract_db.Database):
self.assertIsInstance(db, abstract_db.Database)
TestMethod() # pylint: disable=no-value-for-parameter
def testDatabaseWorks(self):
now = rdfvalue.RDFDatetime.Now()
@db_test_lib.WithDatabase
def TestMethod(self, db: abstract_db.Database):
client_id = "C.0123456789abcdef"
db.WriteClientMetadata(client_id, first_seen=now)
client = db.ReadClientFullInfo(client_id)
self.assertEqual(client.metadata.first_seen, now)
TestMethod(self) # pylint: disable=no-value-for-parameter
def testDatabaseIsFresh(self):
@db_test_lib.WithDatabase
def TestMethod(db: abstract_db.Database):
self.assertEqual(db.CountGRRUsers(), 0)
db.WriteGRRUser("foo")
self.assertEqual(db.CountGRRUsers(), 1)
# We execute test method twice to ensure that each time the database is
# really empty.
TestMethod() # pylint: disable=no-value-for-parameter
TestMethod() # pylint: disable=no-value-for-parameter
def testPassesArguments(self):
@db_test_lib.WithDatabase
def TestMethod(self, username: Text, db: abstract_db.Database):
db.WriteGRRUser(username)
user = db.ReadGRRUser(username)
self.assertEqual(user.username, username)
TestMethod(self, "foo") # pylint: disable=no-value-for-parameter
TestMethod(self, "bar") # pylint: disable=no-value-for-parameter
class WithDatabaseBlobstore(absltest.TestCase):
@db_test_lib.WithDatabase
def testBlobstoreIsProvided(self, db: abstract_db.Database):
del db # Unused.
@db_test_lib.WithDatabaseBlobstore
def TestMethod(bs: blob_store.BlobStore):
self.assertIsInstance(bs, blob_store.BlobStore)
TestMethod() # pylint: disable=no-value-for-parameter
@db_test_lib.WithDatabase
def testBlobstoreWorks(self, db: abstract_db.Database):
del db # Unused.
@db_test_lib.WithDatabaseBlobstore
def TestMethod(bs: blob_store.BlobStore):
blob_id = bs.WriteBlobWithUnknownHash(b"foobarbaz")
self.assertEqual(bs.ReadBlob(blob_id), b"foobarbaz")
TestMethod() # pylint: disable=no-value-for-parameter
@db_test_lib.WithDatabase
def testPassesArguments(self, db: abstract_db.Database):
del db # Unused.
@db_test_lib.WithDatabaseBlobstore
def TestMethod(self, data: bytes, bs: blob_store.BlobStore):
blob_id = bs.WriteBlobWithUnknownHash(data)
self.assertEqual(bs.ReadBlob(blob_id), data)
TestMethod(self, b"quux") # pylint: disable=no-value-for-parameter
TestMethod(self, b"norf") # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
absltest.main()
|
google/grr
|
grr/test_lib/db_test_lib_test.py
|
Python
|
apache-2.0
| 3,039
|
# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def delete_auto_allocated_topology(self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topology(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and returns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error("Unknown error while provisioning topology for "
"tenant %(tenant_id)s. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
self._cleanup(
context, network_id=e.network_id,
router_id=e.router_id, subnets=e.subnets)
raise e.error
def _check_requirements(self, context, tenant_id):
"""Raise if requirements are not met."""
self._get_default_external_network(context)
try:
self._get_supported_subnetpools(context)
except n_exc.NotFound:
raise exceptions.AutoAllocationFailure(
reason=_("No default subnetpools defined"))
return {'id': 'dry-run=pass', 'tenant_id': tenant_id}
def _validate(self, context, tenant_id):
"""Validate and return the tenant to be associated to the topology."""
if tenant_id == 'None':
# NOTE(HenryG): the client might be sending us astray by
# passing no tenant; this is really meant to be the tenant
# issuing the request, therefore let's get it from the context
tenant_id = context.tenant_id
if not context.is_admin and tenant_id != context.tenant_id:
raise n_exc.NotAuthorized()
return tenant_id
def _get_auto_allocated_topology(self, context, tenant_id):
"""Return the auto allocated topology record if present or None."""
return auto_allocate_obj.AutoAllocatedTopology.get_object(
context, project_id=tenant_id)
def _get_auto_allocated_network(self, context, tenant_id):
"""Get the auto allocated network for the tenant."""
network = self._get_auto_allocated_topology(context, tenant_id)
if network:
return network['network_id']
@staticmethod
def _response(network_id, tenant_id, fields=None):
"""Build response for auto-allocated network."""
res = {
'id': network_id,
'tenant_id': tenant_id
}
return db_utils.resource_fields(res, fields)
def _get_default_external_network(self, context):
"""Get the default external network for the deployment."""
default_external_networks = net_obj.ExternalNetwork.get_objects(
context, is_default=True)
if not default_external_networks:
LOG.error("Unable to find default external network "
"for deployment, please create/assign one to "
"allow auto-allocation to work correctly.")
raise exceptions.AutoAllocationFailure(
reason=_("No default router:external network"))
if len(default_external_networks) > 1:
LOG.error("Multiple external default networks detected. "
"Network %s is true 'default'.",
default_external_networks[0]['network_id'])
return default_external_networks[0].network_id
def _get_supported_subnetpools(self, context):
"""Return the default subnet pools available."""
default_subnet_pools = [
self.core_plugin.get_default_subnetpool(
context, ver) for ver in (4, 6)
]
available_pools = [
s for s in default_subnet_pools if s
]
if not available_pools:
LOG.error("No default pools available")
raise n_exc.NotFound()
return available_pools
def _provision_tenant_private_network(self, context, tenant_id):
"""Create a tenant private network/subnets."""
network = None
try:
network_args = {
'name': 'auto_allocated_network',
'admin_state_up': False,
'tenant_id': tenant_id,
'shared': False
}
network = p_utils.create_network(
self.core_plugin, context, {'network': network_args})
subnets = []
for pool in self._get_supported_subnetpools(context):
subnet_args = {
'name': 'auto_allocated_subnet_v%s' % pool['ip_version'],
'network_id': network['id'],
'tenant_id': tenant_id,
'ip_version': pool['ip_version'],
'subnetpool_id': pool['id'],
}
subnets.append(p_utils.create_subnet(
self.core_plugin, context, {'subnet': subnet_args}))
return subnets
except (c_exc.SubnetAllocationError, ValueError,
n_exc.BadRequest, n_exc.NotFound) as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s due to missing or unmet "
"requirements. Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
if network:
self._cleanup(context, network['id'])
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide tenant private network"))
except Exception as e:
network_id = network['id'] if network else None
raise exceptions.UnknownProvisioningError(e, network_id=network_id)
def _provision_external_connectivity(
self, context, default_external_network, subnets, tenant_id):
"""Uplink tenant subnet(s) to external network."""
router_args = {
'name': 'auto_allocated_router',
l3_apidef.EXTERNAL_GW_INFO: {
'network_id': default_external_network},
'tenant_id': tenant_id,
'admin_state_up': True
}
router = None
attached_subnets = []
try:
router = self.l3_plugin.create_router(
context, {'router': router_args})
for subnet in subnets:
self.l3_plugin.add_router_interface(
context, router['id'], {'subnet_id': subnet['id']})
attached_subnets.append(subnet)
return router
except n_exc.BadRequest as e:
LOG.error("Unable to auto allocate topology for tenant "
"%(tenant_id)s because of router errors. "
"Reason: %(reason)s",
{'tenant_id': tenant_id, 'reason': e})
router_id = router['id'] if router else None
self._cleanup(context,
network_id=subnets[0]['network_id'],
router_id=router_id, subnets=attached_subnets)
raise exceptions.AutoAllocationFailure(
reason=_("Unable to provide external connectivity"))
except Exception as e:
router_id = router['id'] if router else None
raise exceptions.UnknownProvisioningError(
e, network_id=subnets[0]['network_id'],
router_id=router_id, subnets=subnets)
def _save(self, context, tenant_id, network_id, router_id, subnets):
"""Save auto-allocated topology, or revert in case of DB errors."""
try:
auto_allocate_obj.AutoAllocatedTopology(
context, project_id=tenant_id, network_id=network_id,
router_id=router_id).create()
self.core_plugin.update_network(
context, network_id,
{'network': {'admin_state_up': True}})
except obj_exc.NeutronDbObjectDuplicateEntry:
LOG.debug("Multiple auto-allocated networks detected for "
"tenant %s. Attempting clean up for network %s "
"and router %s.",
tenant_id, network_id, router_id)
self._cleanup(
context, network_id=network_id,
router_id=router_id, subnets=subnets)
network_id = self._get_auto_allocated_network(context, tenant_id)
except Exception as e:
raise exceptions.UnknownProvisioningError(
e, network_id=network_id,
router_id=router_id, subnets=subnets)
return network_id
def _cleanup(self, context, network_id=None, router_id=None, subnets=None):
"""Clean up auto allocated resources."""
# Concurrent attempts to delete the topology may interleave and
# cause some operations to fail with NotFound exceptions. Rather
# than fail partially, the exceptions should be ignored and the
# cleanup should proceed uninterrupted.
if router_id:
for subnet in subnets or []:
ignore_notfound(
self.l3_plugin.remove_router_interface,
context, router_id, {'subnet_id': subnet['id']})
ignore_notfound(self.l3_plugin.delete_router, context, router_id)
if network_id:
ignore_notfound(
self.core_plugin.delete_network, context, network_id)
def ignore_notfound(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
try:
return func(*args, **kwargs)
except n_exc.NotFound:
pass
|
noironetworks/neutron
|
neutron/services/auto_allocate/db.py
|
Python
|
apache-2.0
| 17,334
|
from kvmagent import kvmagent
from zstacklib.utils import jsonobject
from zstacklib.utils import http
from zstacklib.utils import log
from zstacklib.utils.bash import *
from zstacklib.utils import linux
from zstacklib.utils import thread
from jinja2 import Template
import os.path
import re
import time
import traceback
from prometheus_client.core import GaugeMetricFamily,REGISTRY
from prometheus_client import start_http_server
logger = log.get_logger(__name__)
class PrometheusPlugin(kvmagent.KvmAgent):
COLLECTD_PATH = "/prometheus/collectdexporter/start"
@kvmagent.replyerror
@in_bash
def start_collectd_exporter(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
eths = bash_o("ls /sys/class/net").split()
interfaces = []
for eth in eths:
eth = eth.strip(' \t\n\r')
if eth == 'lo': continue
elif eth.startswith('vnic'): continue
elif eth.startswith('outer'): continue
elif eth.startswith('br_'): continue
elif not eth: continue
else:
interfaces.append(eth)
conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')
conf = '''Interval {{INTERVAL}}
FQDNLookup false
LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt
<Plugin aggregation>
<Aggregation>
#Host "unspecified"
Plugin "cpu"
#PluginInstance "unspecified"
Type "cpu"
#TypeInstance "unspecified"
GroupBy "Host"
GroupBy "TypeInstance"
CalculateNum false
CalculateSum false
CalculateAverage true
CalculateMinimum false
CalculateMaximum false
CalculateStddev false
</Aggregation>
</Plugin>
<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>
<Plugin disk>
Disk "/^sd/"
Disk "/^hd/"
Disk "/^vd/"
IgnoreSelected false
</Plugin>
<Plugin "interface">
{% for i in INTERFACES -%}
Interface "{{i}}"
{% endfor -%}
IgnoreSelected false
</Plugin>
<Plugin memory>
ValuesAbsolute true
ValuesPercentage false
</Plugin>
<Plugin virt>
Connection "qemu:///system"
RefreshInterval {{INTERVAL}}
HostnameFormat name
PluginInstanceFormat name
</Plugin>
<Plugin network>
Server "localhost" "25826"
</Plugin>
'''
tmpt = Template(conf)
conf = tmpt.render({
'INTERVAL': cmd.interval,
'INTERFACES': interfaces,
})
need_restart_collectd = False
if os.path.exists(conf_path):
with open(conf_path, 'r') as fd:
old_conf = fd.read()
if old_conf != conf:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
else:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
pid = linux.find_process_by_cmdline(['collectd', conf_path])
if not pid:
bash_errorout('collectd -C %s' % conf_path)
else:
if need_restart_collectd:
bash_errorout('kill -9 %s' % pid)
bash_errorout('collectd -C %s' % conf_path)
pid = linux.find_process_by_cmdline([cmd.binaryPath])
if not pid:
EXPORTER_PATH = cmd.binaryPath
LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log')
ARGUMENTS = cmd.startupArguments
if not ARGUMENTS:
ARGUMENTS = ""
bash_errorout('chmod +x {{EXPORTER_PATH}}')
bash_errorout("nohup {{EXPORTER_PATH}} {{ARGUMENTS}} >{{LOG_FILE}} 2>&1 < /dev/null &\ndisown")
return jsonobject.dumps(rsp)
    def install_collector(self):
class Collector(object):
def collect(self):
try:
ret = []
for c in kvmagent.metric_collectors:
ret.extend(c())
return ret
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\n' % (str(e), content)
logger.warn(err)
return []
REGISTRY.register(Collector())
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.COLLECTD_PATH, self.start_collectd_exporter)
        self.install_collector()
start_http_server(7069)
def stop(self):
pass
|
live4thee/zstack-utility
|
kvmagent/kvmagent/plugins/prometheus.py
|
Python
|
apache-2.0
| 4,603
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import support_backends
import json
class SupportResponse(BaseResponse):
SERVICE_NAME = "support"
@property
def support_backend(self):
return support_backends[self.region]
def describe_trusted_advisor_checks(self):
language = self._get_param("language")
checks = self.support_backend.describe_trusted_advisor_checks(
language=language,
)
return json.dumps({"checks": checks})
def refresh_trusted_advisor_check(self):
check_id = self._get_param("checkId")
status = self.support_backend.refresh_trusted_advisor_check(check_id=check_id,)
return json.dumps(status)
def resolve_case(self):
case_id = self._get_param("caseId")
resolve_case_response = self.support_backend.resolve_case(case_id=case_id,)
return json.dumps(resolve_case_response)
def create_case(self):
subject = self._get_param("subject")
service_code = self._get_param("serviceCode")
severity_code = self._get_param("severityCode")
category_code = self._get_param("categoryCode")
communication_body = self._get_param("communicationBody")
cc_email_addresses = self._get_param("ccEmailAddresses")
language = self._get_param("language")
issue_type = self._get_param("issueType")
attachment_set_id = self._get_param("attachmentSetId")
create_case_response = self.support_backend.create_case(
subject=subject,
service_code=service_code,
severity_code=severity_code,
category_code=category_code,
communication_body=communication_body,
cc_email_addresses=cc_email_addresses,
language=language,
issue_type=issue_type,
attachment_set_id=attachment_set_id,
)
return json.dumps(create_case_response)
def describe_cases(self):
case_id_list = self._get_param("caseIdList")
display_id = self._get_param("displayId")
after_time = self._get_param("afterTime")
before_time = self._get_param("beforeTime")
include_resolved_cases = self._get_param("includeResolvedCases", False)
next_token = self._get_param("nextToken")
max_results = self._get_int_param("maxResults")
language = self._get_param("language")
include_communications = self._get_param("includeCommunications", True)
describe_cases_response = self.support_backend.describe_cases(
case_id_list=case_id_list,
display_id=display_id,
after_time=after_time,
before_time=before_time,
include_resolved_cases=include_resolved_cases,
next_token=next_token,
max_results=max_results,
language=language,
include_communications=include_communications,
)
return json.dumps(describe_cases_response)
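# Illustrative usage sketch (shown as comments only): these responses are
# normally exercised through boto3 under moto's service decorator.  The
# ``mock_support`` name and the boto3 calls below are assumptions based on
# moto's usual conventions, not taken from this file.
#
#     import boto3
#     from moto import mock_support
#
#     @mock_support
#     def list_checks():
#         client = boto3.client("support", region_name="us-east-1")
#         return client.describe_trusted_advisor_checks(language="en")["checks"]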
|
william-richard/moto
|
moto/support/responses.py
|
Python
|
apache-2.0
| 3,039
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .cloud_tpu import (
AcceleratorType,
AccessConfig,
AttachedDisk,
CreateNodeRequest,
DeleteNodeRequest,
GenerateServiceIdentityRequest,
GenerateServiceIdentityResponse,
GetAcceleratorTypeRequest,
GetGuestAttributesRequest,
GetGuestAttributesResponse,
GetNodeRequest,
GetRuntimeVersionRequest,
GuestAttributes,
GuestAttributesEntry,
GuestAttributesValue,
ListAcceleratorTypesRequest,
ListAcceleratorTypesResponse,
ListNodesRequest,
ListNodesResponse,
ListRuntimeVersionsRequest,
ListRuntimeVersionsResponse,
NetworkConfig,
NetworkEndpoint,
Node,
OperationMetadata,
RuntimeVersion,
SchedulingConfig,
ServiceAccount,
ServiceIdentity,
StartNodeRequest,
StopNodeRequest,
Symptom,
UpdateNodeRequest,
)
__all__ = (
"AcceleratorType",
"AccessConfig",
"AttachedDisk",
"CreateNodeRequest",
"DeleteNodeRequest",
"GenerateServiceIdentityRequest",
"GenerateServiceIdentityResponse",
"GetAcceleratorTypeRequest",
"GetGuestAttributesRequest",
"GetGuestAttributesResponse",
"GetNodeRequest",
"GetRuntimeVersionRequest",
"GuestAttributes",
"GuestAttributesEntry",
"GuestAttributesValue",
"ListAcceleratorTypesRequest",
"ListAcceleratorTypesResponse",
"ListNodesRequest",
"ListNodesResponse",
"ListRuntimeVersionsRequest",
"ListRuntimeVersionsResponse",
"NetworkConfig",
"NetworkEndpoint",
"Node",
"OperationMetadata",
"RuntimeVersion",
"SchedulingConfig",
"ServiceAccount",
"ServiceIdentity",
"StartNodeRequest",
"StopNodeRequest",
"Symptom",
"UpdateNodeRequest",
)
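
# Illustrative sketch: this module only re-exports the generated message
# classes, so callers import them from the types package directly. Field values
# below are placeholders and the exact field set is assumed from the v2alpha1
# protos listed above.
def _example_build_node_request():
    from google.cloud.tpu_v2alpha1.types import CreateNodeRequest, Node

    node = Node(accelerator_type="v2-8", runtime_version="tpu-vm-base")
    return CreateNodeRequest(
        parent="projects/my-project/locations/us-central1-b",
        node_id="my-tpu",
        node=node,
    )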
|
googleapis/python-tpu
|
google/cloud/tpu_v2alpha1/types/__init__.py
|
Python
|
apache-2.0
| 2,318
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0010_merge'),
]
operations = [
migrations.AlterField(
model_name='project',
name='process_slug',
field=models.CharField(max_length=200, choices=[('website_enhancement_experiment', 'Website Enhancement Experiment'), ('website_enhancement', 'Website Enhancement'), ('doctors', 'Doctors Process')]),
),
migrations.AlterField(
model_name='task',
name='step_slug',
field=models.CharField(max_length=200, choices=[('website_enhancement_experiment', 'Website Enhancement'), ('website_enhancement', 'Website Enhancement'), ('export', 'Export'), ('design', 'Design'), ('content_extraction', ' Content Extraction')]),
),
migrations.AlterField(
model_name='taskassignment',
name='status',
field=models.IntegerField(choices=[(0, 'Processing'), (1, 'Submitted')]),
),
migrations.AlterField(
model_name='taskassignment',
name='worker',
field=models.ForeignKey(blank=True, null=True, to='orchestra.Worker'),
),
]
|
Sonblind/orchestra
|
orchestra/migrations/0011_auto_20150618_0003.py
|
Python
|
apache-2.0
| 1,311
|
"""
error types
Copyright (c) 2010-2012 Mika Eloranta
See LICENSE for details.
"""
class Error(Exception):
"""error"""
class InvalidProperty(Error):
"""invalid property"""
class MissingProperty(Error):
"""missing property"""
class UserError(Error):
"""user error"""
class InvalidRange(Error):
"""invalid range"""
class SettingsError(Error):
"""settings error"""
class VerifyError(Error):
"""verify error"""
class TemplateError(Error):
"""template rendering error"""
class CloudError(Error):
"""cloud error"""
class RemoteError(Error):
"""remote error"""
class RemoteFileDoesNotExist(RemoteError):
"""remote file does not exist"""
class RepoError(Error):
"""repository error"""
class ImporterError(Error):
"""importer error"""
class MissingLibraryError(Error):
"""missing library error"""
class RequirementError(Error):
"""requirement error"""
class ControlError(Error):
"""control error"""
class OperationError(Error):
"""operation error"""
|
ohmu/poni
|
poni/errors.py
|
Python
|
apache-2.0
| 1,028
|
from plenum.test.plugin.demo_plugin import DemoTransactions
AUCTION_LEDGER_ID = 909
AUCTION_START = DemoTransactions.AUCTION_START.value
AUCTION_END = DemoTransactions.AUCTION_END.value
PLACE_BID = DemoTransactions.PLACE_BID.value
GET_BAL = DemoTransactions.GET_BAL.value
AMOUNT = "amount"
ID = "id"
|
evernym/zeno
|
plenum/test/plugin/demo_plugin/constants.py
|
Python
|
apache-2.0
| 303
|
"""Add Graph and GraphCache models
Revision ID: 654121a84a33
Revises: fc7bc5c66c63
Create Date: 2020-11-16 21:02:36.249989
"""
# revision identifiers, used by Alembic.
revision = '654121a84a33'
down_revision = 'fc7bc5c66c63'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('graph',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('sketch_id', sa.Integer(), nullable=True),
sa.Column('name', sa.UnicodeText(), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.Column('graph_config', sa.UnicodeText(), nullable=True),
sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
sa.Column('graph_thumbnail', sa.UnicodeText(), nullable=True),
sa.Column('num_nodes', sa.Integer(), nullable=True),
sa.Column('num_edges', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graphcache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('sketch_id', sa.Integer(), nullable=True),
sa.Column('graph_plugin', sa.UnicodeText(), nullable=True),
sa.Column('graph_config', sa.UnicodeText(), nullable=True),
sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
sa.Column('num_nodes', sa.Integer(), nullable=True),
sa.Column('num_edges', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graph_comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('comment', sa.UnicodeText(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graph_label',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('label', sa.Unicode(length=255), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('graph_label')
op.drop_table('graph_comment')
op.drop_table('graphcache')
op.drop_table('graph')
# ### end Alembic commands ###
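
# Illustrative sketch: this revision is normally applied with the alembic CLI
# ("alembic upgrade head"), but it can also be driven programmatically. The
# alembic.ini path below is a placeholder and depends on the deployment layout.
def _example_apply_migration():
    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")  # placeholder path
    command.upgrade(cfg, "654121a84a33")  # this revision; use "head" for latest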
|
google/timesketch
|
timesketch/migrations/versions/654121a84a33_.py
|
Python
|
apache-2.0
| 3,278
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" zope.interface.verify unit tests
"""
import unittest
# pylint:disable=inherit-non-class,no-method-argument,no-self-argument
class Test_verifyClass(unittest.TestCase):
verifier = None
def setUp(self):
self.verifier = self._get_FUT()
@classmethod
def _get_FUT(cls):
from zope.interface.verify import verifyClass
return verifyClass
_adjust_object_before_verify = lambda self, x: x
def _callFUT(self, iface, klass, **kwargs):
return self.verifier(iface,
self._adjust_object_before_verify(klass),
**kwargs)
def test_class_doesnt_implement(self):
from zope.interface import Interface
from zope.interface.exceptions import DoesNotImplement
class ICurrent(Interface):
pass
class Current(object):
pass
self.assertRaises(DoesNotImplement, self._callFUT, ICurrent, Current)
def test_class_doesnt_implement_but_classImplements_later(self):
from zope.interface import Interface
from zope.interface import classImplements
class ICurrent(Interface):
pass
class Current(object):
pass
classImplements(Current, ICurrent)
self._callFUT(ICurrent, Current)
def test_class_doesnt_have_required_method_simple(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenImplementation
class ICurrent(Interface):
def method():
pass
@implementer(ICurrent)
class Current(object):
pass
self.assertRaises(BrokenImplementation,
self._callFUT, ICurrent, Current)
def test_class_has_required_method_simple(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method():
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_class_doesnt_have_required_method_derived(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenImplementation
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
@implementer(IDerived)
class Current(object):
pass
self.assertRaises(BrokenImplementation,
self._callFUT, IDerived, Current)
def test_class_has_required_method_derived(self):
from zope.interface import Interface
from zope.interface import implementer
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
@implementer(IDerived)
class Current(object):
def method(self):
raise NotImplementedError()
self._callFUT(IDerived, Current)
def test_method_takes_wrong_arg_names_but_OK(self):
# We no longer require names to match.
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, b):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_not_enough_args(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_doesnt_take_required_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(*args):
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_doesnt_take_required_only_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(**kw):
pass
@implementer(ICurrent)
class Current(object):
def method(self):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_extra_arg(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, b):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_extra_arg_with_default(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, b=None):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_only_positional_args(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, *args):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_only_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, **kw):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_extra_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, *args):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_extra_starargs_and_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, *args, **kw):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_doesnt_take_required_positional_and_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(a, *args):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_takes_required_positional_and_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a, *args):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, *args):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_only_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(a, *args):
pass
@implementer(ICurrent)
class Current(object):
def method(self, *args):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_required_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
def method(**kwargs):
pass
@implementer(ICurrent)
class Current(object):
def method(self, **kw):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_method_takes_positional_plus_required_starargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(*args):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a, *args):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_method_doesnt_take_required_kwargs(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method(**kwargs):
pass
@implementer(ICurrent)
class Current(object):
def method(self, a):
raise NotImplementedError()
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_class_has_method_for_iface_attr(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
attr = Attribute("The foo Attribute")
@implementer(ICurrent)
class Current:
def attr(self):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_class_has_nonmethod_for_method(self):
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenMethodImplementation
class ICurrent(Interface):
def method():
pass
@implementer(ICurrent)
class Current:
method = 1
self.assertRaises(BrokenMethodImplementation,
self._callFUT, ICurrent, Current)
def test_class_has_attribute_for_attribute(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
attr = Attribute("The foo Attribute")
@implementer(ICurrent)
class Current:
attr = 1
self._callFUT(ICurrent, Current)
def test_class_misses_attribute_for_attribute(self):
# This check *passes* for verifyClass
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
class ICurrent(Interface):
attr = Attribute("The foo Attribute")
@implementer(ICurrent)
class Current:
pass
self._callFUT(ICurrent, Current)
def test_w_callable_non_func_method(self):
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface import implementer
class QuasiMethod(Method):
def __call__(self, *args, **kw):
raise NotImplementedError()
class QuasiCallable(object):
def __call__(self, *args, **kw):
raise NotImplementedError()
class ICurrent(Interface):
attr = QuasiMethod('This is callable')
@implementer(ICurrent)
class Current:
attr = QuasiCallable()
self._callFUT(ICurrent, Current)
def test_w_decorated_method(self):
from zope.interface import Interface
from zope.interface import implementer
def decorator(func):
# this is, in fact, zope.proxy.non_overridable
return property(lambda self: func.__get__(self))
class ICurrent(Interface):
def method(a):
pass
@implementer(ICurrent)
class Current(object):
@decorator
def method(self, a):
raise NotImplementedError()
self._callFUT(ICurrent, Current)
def test_dict_IFullMapping(self):
# A dict should be an IFullMapping, but this exposes two
# issues. First, on CPython, methods of builtin types are
# "method_descriptor" objects, and are harder to introspect.
# Second, on PyPy, the signatures can be just plain wrong,
# specifying as required arguments that are actually optional.
# See https://github.com/zopefoundation/zope.interface/issues/118
from zope.interface.common.mapping import IFullMapping
self._callFUT(IFullMapping, dict, tentative=True)
def test_list_ISequence(self):
# As for test_dict_IFullMapping
from zope.interface.common.sequence import ISequence
self._callFUT(ISequence, list, tentative=True)
def test_tuple_IReadSequence(self):
# As for test_dict_IFullMapping
from zope.interface.common.sequence import IReadSequence
self._callFUT(IReadSequence, tuple, tentative=True)
def test_multiple_invalid(self):
from zope.interface.exceptions import MultipleInvalid
from zope.interface.exceptions import DoesNotImplement
from zope.interface.exceptions import BrokenImplementation
from zope.interface import Interface
from zope.interface import classImplements
class ISeveralMethods(Interface):
def meth1(arg1):
"Method 1"
def meth2(arg1):
"Method 2"
class SeveralMethods(object):
pass
with self.assertRaises(MultipleInvalid) as exc:
self._callFUT(ISeveralMethods, SeveralMethods)
ex = exc.exception
self.assertEqual(3, len(ex.exceptions))
self.assertIsInstance(ex.exceptions[0], DoesNotImplement)
self.assertIsInstance(ex.exceptions[1], BrokenImplementation)
self.assertIsInstance(ex.exceptions[2], BrokenImplementation)
# If everything else is correct, only the single error is raised without
# the wrapper.
classImplements(SeveralMethods, ISeveralMethods)
SeveralMethods.meth1 = lambda self, arg1: "Hi"
with self.assertRaises(BrokenImplementation):
self._callFUT(ISeveralMethods, SeveralMethods)
class Test_verifyObject(Test_verifyClass):
@classmethod
def _get_FUT(cls):
from zope.interface.verify import verifyObject
return verifyObject
def _adjust_object_before_verify(self, target):
if isinstance(target, (type, type(OldSkool))):
target = target()
return target
def test_class_misses_attribute_for_attribute(self):
# This check *fails* for verifyObject
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.exceptions import BrokenImplementation
class ICurrent(Interface):
attr = Attribute("The foo Attribute")
@implementer(ICurrent)
class Current:
pass
self.assertRaises(BrokenImplementation,
self._callFUT, ICurrent, Current)
def test_module_hit(self):
from zope.interface.tests.idummy import IDummyModule
from zope.interface.tests import dummy
self._callFUT(IDummyModule, dummy)
def test_module_miss(self):
from zope.interface import Interface
from zope.interface.tests import dummy
from zope.interface.exceptions import DoesNotImplement
# same name, different object
class IDummyModule(Interface):
pass
self.assertRaises(DoesNotImplement,
self._callFUT, IDummyModule, dummy)
def test_staticmethod_hit_on_class(self):
from zope.interface import Interface
from zope.interface import provider
from zope.interface.verify import verifyObject
class IFoo(Interface):
def bar(a, b):
"The bar method"
@provider(IFoo)
class Foo(object):
@staticmethod
def bar(a, b):
raise AssertionError("We're never actually called")
# Don't use self._callFUT, we don't want to instantiate the
# class.
verifyObject(IFoo, Foo)
class OldSkool:
pass
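
# Illustrative sketch: outside a test harness, verifyClass and verifyObject are
# used directly; they return True when the class (or instance) satisfies the
# interface and raise an Invalid subclass otherwise, as the tests above
# exercise case by case. IGreeter/Greeter are made-up names.
def _example_verify_usage():
    from zope.interface import Interface, implementer
    from zope.interface.verify import verifyClass, verifyObject

    class IGreeter(Interface):
        def greet(name):
            "Return a greeting for name."

    @implementer(IGreeter)
    class Greeter(object):
        def greet(self, name):
            return "hello %s" % name

    verifyClass(IGreeter, Greeter)      # signature-level check on the class
    verifyObject(IGreeter, Greeter())   # also checks attributes on the instance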
|
mdworks2016/work_development
|
Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/zope/interface/tests/test_verify.py
|
Python
|
apache-2.0
| 19,156
|
# Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
import nova.privsep.libvirt
from nova import utils
from nova.virt.libvirt.volume import fs
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
VALID_SYSD_STATES = ["starting", "running", "degraded"]
SYSTEMCTL_CHECK_PATH = "/run/systemd/system"
_is_systemd = None
def is_systemd():
"""Checks if the host is running systemd"""
global _is_systemd
if _is_systemd is not None:
return _is_systemd
tmp_is_systemd = False
if psutil.Process(1).name() == "systemd" or os.path.exists(
SYSTEMCTL_CHECK_PATH):
# NOTE(kaisers): exit code might be >1 in theory but in practice this
# is hard coded to 1. Due to backwards compatibility and systemd
# CODING_STYLE this is unlikely to change.
sysdout, sysderr = processutils.execute("systemctl",
"is-system-running",
check_exit_code=[0, 1])
for state in VALID_SYSD_STATES:
if state == sysdout.strip():
tmp_is_systemd = True
break
_is_systemd = tmp_is_systemd
return _is_systemd
def mount_volume(volume, mnt_base, configfile=None):
"""Wraps execute calls for mounting a Quobyte volume"""
fileutils.ensure_tree(mnt_base)
# Note(kaisers): with systemd this requires a separate CGROUP to
# prevent Nova service stop/restarts from killing the mount.
if is_systemd():
LOG.debug('Mounting volume %s at mount point %s via systemd-run',
volume, mnt_base)
nova.privsep.libvirt.systemd_run_qb_mount(volume, mnt_base,
cfg_file=configfile)
else:
LOG.debug('Mounting volume %s at mount point %s via mount.quobyte',
                  volume, mnt_base)
nova.privsep.libvirt.unprivileged_qb_mount(volume, mnt_base,
cfg_file=configfile)
LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
"""Wraps execute calls for unmouting a Quobyte volume"""
try:
if is_systemd():
nova.privsep.libvirt.umount(mnt_base)
else:
nova.privsep.libvirt.unprivileged_umount(mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
def validate_volume(mount_path):
"""Determine if the volume is a valid Quobyte mount.
Runs a number of tests to be sure this is a (working) Quobyte mount
"""
partitions = psutil.disk_partitions(all=True)
for p in partitions:
if mount_path != p.mountpoint:
continue
if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte":
statresult = os.stat(mount_path)
# Note(kaisers): Quobyte always shows mount points with size 0
if statresult.st_size == 0:
# client looks healthy
return # we're happy here
else:
msg = (_("The mount %(mount_path)s is not a "
"valid Quobyte volume. Stale mount?")
% {'mount_path': mount_path})
raise nova_exception.StaleVolumeMount(msg, mount_path=mount_path)
else:
msg = (_("The mount %(mount_path)s is not a valid "
"Quobyte volume according to partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
msg = (_("No matching Quobyte mount entry for %(mount_path)s"
" could be found for validation in partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def _get_mount_point_base(self):
return CONF.libvirt.quobyte_mount_point_base
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = SOURCE_PROTOCOL
conf.source_type = SOURCE_TYPE
conf.driver_cache = DRIVER_CACHE
conf.driver_io = DRIVER_IO
conf.driver_format = data.get('format', 'raw')
conf.source_path = self._get_device_path(connection_info)
return conf
@utils.synchronized('connect_qb_volume')
def connect_volume(self, connection_info, instance):
"""Connect the volume."""
if is_systemd():
LOG.debug("systemd detected.")
else:
LOG.debug("No systemd detected.")
data = connection_info['data']
quobyte_volume = self._normalize_export(data['export'])
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
mounted = True
except nova_exception.StaleVolumeMount:
mounted = False
LOG.info('Fixing previous mount %s which was not '
'unmounted correctly.', mount_path)
umount_volume(mount_path)
except nova_exception.InvalidVolume:
mounted = False
if not mounted:
mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as nex:
LOG.error("Could not mount Quobyte volume: %s", nex)
@utils.synchronized('connect_qb_volume')
def disconnect_volume(self, connection_info, instance):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as exc:
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
else:
umount_volume(mount_path)
def _normalize_export(self, export):
protocol = SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
return export
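
# Illustrative sketch: _normalize_export() only strips the "quobyte://" scheme,
# so both spellings of an export resolve to the same volume string. The driver
# is created with __new__ here purely to call the helper without the libvirt
# connection machinery; registry host and volume name are placeholders.
def _example_normalize_export():
    driver = LibvirtQuobyteVolumeDriver.__new__(LibvirtQuobyteVolumeDriver)
    assert driver._normalize_export(
        "quobyte://registry.example:7861/volume") == "registry.example:7861/volume"
    assert driver._normalize_export(
        "registry.example:7861/volume") == "registry.example:7861/volume"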
|
rahulunair/nova
|
nova/virt/libvirt/volume/quobyte.py
|
Python
|
apache-2.0
| 7,552
|
#!/usr/bin/env python
import json
from tdclient import api, models
class Client:
"""API Client for Treasure Data Service
"""
def __init__(self, *args, **kwargs):
self._api = api.API(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@property
def api(self):
"""
an instance of :class:`tdclient.api.API`
"""
return self._api
@property
def apikey(self):
"""
API key string.
"""
return self._api.apikey
def server_status(self):
"""
Returns:
a string represents current server status.
"""
return self.api.server_status()
def create_database(self, db_name, **kwargs):
"""
Args:
db_name (str): name of a database to create
Returns:
`True` if success
"""
return self.api.create_database(db_name, **kwargs)
def delete_database(self, db_name):
"""
Args:
db_name (str): name of database to delete
Returns:
`True` if success
"""
return self.api.delete_database(db_name)
def databases(self):
"""
Returns:
a list of :class:`tdclient.models.Database`
"""
databases = self.api.list_databases()
return [
models.Database(self, db_name, **kwargs)
for (db_name, kwargs) in databases.items()
]
def database(self, db_name):
"""
Args:
db_name (str): name of a database
Returns:
:class:`tdclient.models.Database`
"""
databases = self.api.list_databases()
for (name, kwargs) in databases.items():
if name == db_name:
return models.Database(self, name, **kwargs)
raise api.NotFoundError("Database '%s' does not exist" % (db_name))
def create_log_table(self, db_name, table_name):
"""
Args:
db_name (str): name of a database
table_name (str): name of a table to create
Returns:
`True` if success
"""
return self.api.create_log_table(db_name, table_name)
def swap_table(self, db_name, table_name1, table_name2):
"""
Args:
db_name (str): name of a database
table_name1 (str): original table name
table_name2 (str): table name you want to rename to
Returns:
`True` if success
"""
return self.api.swap_table(db_name, table_name1, table_name2)
def update_schema(self, db_name, table_name, schema):
"""Updates the schema of a table
Args:
db_name (str): name of a database
table_name (str): name of a table
            schema (list): a list describing the schema definition (will
be converted to JSON)
e.g.
.. code-block:: python
[
["member_id", # column name
"string", # data type
"mem_id", # alias of the column name
],
["row_index", "long", "row_ind"],
...
]
Returns:
`True` if success
"""
return self.api.update_schema(db_name, table_name, json.dumps(schema))
def update_expire(self, db_name, table_name, expire_days):
"""Set expiration date to a table
Args:
db_name (str): name of a database
table_name (str): name of a table
            expire_days (int): expiration date in days from today
Returns:
`True` if success
"""
return self.api.update_expire(db_name, table_name, expire_days)
def delete_table(self, db_name, table_name):
"""Delete a table
Args:
db_name (str): name of a database
table_name (str): name of a table
Returns:
a string represents the type of deleted table
"""
return self.api.delete_table(db_name, table_name)
def tables(self, db_name):
"""List existing tables
Args:
db_name (str): name of a database
Returns:
a list of :class:`tdclient.models.Table`
"""
m = self.api.list_tables(db_name)
return [
models.Table(self, db_name, table_name, **kwargs)
for (table_name, kwargs) in m.items()
]
def table(self, db_name, table_name):
"""
Args:
db_name (str): name of a database
table_name (str): name of a table
Returns:
:class:`tdclient.models.Table`
Raises:
tdclient.api.NotFoundError: if the table doesn't exist
"""
tables = self.tables(db_name)
for table in tables:
if table.table_name == table_name:
return table
raise api.NotFoundError("Table '%s.%s' does not exist" % (db_name, table_name))
def tail(self, db_name, table_name, count, to=None, _from=None, block=None):
"""Get the contents of the table in reverse order based on the registered time
(last data first).
Args:
db_name (str): Target database name.
table_name (str): Target table name.
count (int): Number for record to show up from the end.
to: Deprecated parameter.
_from: Deprecated parameter.
block: Deprecated parameter.
Returns:
[dict]: Contents of the table.
"""
return self.api.tail(db_name, table_name, count, to, _from, block)
def change_database(self, db_name, table_name, new_db_name):
"""Move a target table from it's original database to new destination database.
Args:
db_name (str): Target database name.
table_name (str): Target table name.
new_db_name (str): Destination database name to be moved.
Returns:
bool: `True` if succeeded.
"""
return self.api.change_database(db_name, table_name, new_db_name)
def query(
self,
db_name,
q,
result_url=None,
priority=None,
retry_limit=None,
type="hive",
**kwargs
):
"""Run a query on specified database table.
Args:
db_name (str): name of a database
q (str): a query string
result_url (str): result output URL. e.g.,
``postgresql://<username>:<password>@<hostname>:<port>/<database>/<table>``
priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.)
retry_limit (int): retry limit
type (str): name of a query engine
Returns:
:class:`tdclient.models.Job`
Raises:
ValueError: if unknown query type has been specified
"""
# for compatibility, assume type is hive unless specifically specified
if type not in ["hive", "pig", "impala", "presto"]:
raise ValueError("The specified query type is not supported: %s" % (type))
job_id = self.api.query(
q,
type=type,
db=db_name,
result_url=result_url,
priority=priority,
retry_limit=retry_limit,
**kwargs
)
return models.Job(self, job_id, type, q)
def jobs(self, _from=None, to=None, status=None, conditions=None):
"""List jobs
Args:
_from (int, optional): Gets the Job from the nth index in the list. Default: 0.
to (int, optional): Gets the Job up to the nth index in the list.
By default, the first 20 jobs in the list are displayed
status (str, optional): Filter by given status. {"queued", "running", "success", "error"}
conditions (str, optional): Condition for ``TIMESTAMPDIFF()`` to search for slow queries.
Avoid using this parameter as it can be dangerous.
Returns:
a list of :class:`tdclient.models.Job`
"""
results = self.api.list_jobs(_from, to, status, conditions)
return [job_from_dict(self, d) for d in results]
def job(self, job_id):
"""Get a job from `job_id`
Args:
job_id (str): job id
Returns:
:class:`tdclient.models.Job`
"""
d = self.api.show_job(str(job_id))
return job_from_dict(self, d, job_id=job_id)
def job_status(self, job_id):
"""
Args:
job_id (str): job id
Returns:
a string represents the status of the job ("success", "error", "killed", "queued", "running")
"""
return self.api.job_status(job_id)
def job_result(self, job_id):
"""
Args:
job_id (str): job id
Returns:
            a list of rows in the result set
"""
return self.api.job_result(job_id)
def job_result_each(self, job_id):
"""
Args:
job_id (str): job id
Returns:
an iterator of result set
"""
for row in self.api.job_result_each(job_id):
yield row
def job_result_format(self, job_id, format):
"""
Args:
job_id (str): job id
format (str): output format of result set
Returns:
            a list of rows in the result set
"""
return self.api.job_result_format(job_id, format)
def job_result_format_each(self, job_id, format):
"""
Args:
job_id (str): job id
format (str): output format of result set
Returns:
an iterator of rows in result set
"""
for row in self.api.job_result_format_each(job_id, format):
yield row
def kill(self, job_id):
"""
Args:
job_id (str): job id
Returns:
a string represents the status of killed job ("queued", "running")
"""
return self.api.kill(job_id)
def export_data(self, db_name, table_name, storage_type, params=None):
"""Export data from Treasure Data Service
Args:
db_name (str): name of a database
table_name (str): name of a table
storage_type (str): type of the storage
params (dict): optional parameters. Assuming the following keys:
- access_key_id (str):
ID to access the information to be exported.
- secret_access_key (str):
Password for the `access_key_id`.
- file_prefix (str, optional):
Filename of exported file.
Default: "<database_name>/<table_name>"
- file_format (str, optional):
File format of the information to be
exported. {"jsonl.gz", "tsv.gz", "json.gz"}
- from (int, optional):
From Time of the data to be exported in Unix epoch format.
- to (int, optional):
End Time of the data to be exported in Unix epoch format.
- assume_role (str, optional): Assume role.
- bucket (str):
Name of bucket to be used.
- domain_key (str, optional):
Job domain key.
- pool_name (str, optional):
For Presto only. Pool name to be used, if not
specified, default pool would be used.
Returns:
:class:`tdclient.models.Job`
"""
params = {} if params is None else params
job_id = self.api.export_data(db_name, table_name, storage_type, params)
return models.Job(self, job_id, "export", None)
def partial_delete(self, db_name, table_name, to, _from, params=None):
"""Create a job to partially delete the contents of the table with the given
time range.
Args:
db_name (str): Target database name.
table_name (str): Target table name.
to (int): Time in Unix Epoch format indicating the End date and time of the
data to be deleted. Should be set only by the hour. Minutes and seconds
values will not be accepted.
_from (int): Time in Unix Epoch format indicating the Start date and time of
the data to be deleted. Should be set only by the hour. Minutes and
seconds values will not be accepted.
params (dict, optional): Extra parameters.
- pool_name (str, optional):
Indicates the resource pool to execute this
job. If not provided, the account's default resource pool would be
used.
- domain_key (str, optional):
Domain key that will be assigned to the
partial delete job to be created
Returns:
:class:`tdclient.models.Job`
"""
params = {} if params is None else params
job_id = self.api.partial_delete(db_name, table_name, to, _from, params)
return models.Job(self, job_id, "partialdelete", None)
def create_bulk_import(self, name, database, table, params=None):
"""Create new bulk import session
Args:
name (str): name of new bulk import session
database (str): name of a database
table (str): name of a table
Returns:
:class:`tdclient.models.BulkImport`
"""
params = {} if params is None else params
self.api.create_bulk_import(name, database, table, params)
return models.BulkImport(self, name=name, database=database, table=table)
def delete_bulk_import(self, name):
"""Delete a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.delete_bulk_import(name)
def freeze_bulk_import(self, name):
"""Freeze a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.freeze_bulk_import(name)
def unfreeze_bulk_import(self, name):
"""Unfreeze a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.unfreeze_bulk_import(name)
def perform_bulk_import(self, name):
"""Perform a bulk import session
Args:
name (str): name of a bulk import session
Returns:
:class:`tdclient.models.Job`
"""
job_id = self.api.perform_bulk_import(name)
return models.Job(self, job_id, "bulk_import", None)
def commit_bulk_import(self, name):
"""Commit a bulk import session
Args:
name (str): name of a bulk import session
Returns:
`True` if success
"""
return self.api.commit_bulk_import(name)
def bulk_import_error_records(self, name):
"""
Args:
name (str): name of a bulk import session
Returns:
an iterator of error records
"""
for record in self.api.bulk_import_error_records(name):
yield record
def bulk_import(self, name):
"""Get a bulk import session
Args:
name (str): name of a bulk import session
Returns:
:class:`tdclient.models.BulkImport`
"""
data = self.api.show_bulk_import(name)
return models.BulkImport(self, **data)
def bulk_imports(self):
"""List bulk import sessions
Returns:
a list of :class:`tdclient.models.BulkImport`
"""
return [
models.BulkImport(self, **data) for data in self.api.list_bulk_imports()
]
def bulk_import_upload_part(self, name, part_name, bytes_or_stream, size):
"""Upload a part to a bulk import session
Args:
name (str): name of a bulk import session
part_name (str): name of a part of the bulk import session
bytes_or_stream (file-like): a file-like object contains the part
size (int): the size of the part
"""
return self.api.bulk_import_upload_part(name, part_name, bytes_or_stream, size)
def bulk_import_upload_file(self, name, part_name, format, file, **kwargs):
"""Upload a part to Bulk Import session, from an existing file on filesystem.
Args:
name (str): name of a bulk import session
part_name (str): name of a part of the bulk import session
format (str): format of data type (e.g. "msgpack", "json", "csv", "tsv")
file (str or file-like): the name of a file, or a file-like object,
containing the data
**kwargs: extra arguments.
There is more documentation on `format`, `file` and `**kwargs` at
`file import parameters`_.
In particular, for "csv" and "tsv" data, you can change how data columns
are parsed using the ``dtypes`` and ``converters`` arguments.
* ``dtypes`` is a dictionary used to specify a datatype for individual
columns, for instance ``{"col1": "int"}``. The available datatypes
are ``"bool"``, ``"float"``, ``"int"``, ``"str"`` and ``"guess"``.
If a column is also mentioned in ``converters``, then the function
will be used, NOT the datatype.
* ``converters`` is a dictionary used to specify a function that will
be used to parse individual columns, for instance ``{"col1", int}``.
The default behaviour is ``"guess"``, which makes a best-effort to decide
the column datatype. See `file import parameters`_ for more details.
.. _`file import parameters`:
https://tdclient.readthedocs.io/en/latest/file_import_parameters.html
"""
return self.api.bulk_import_upload_file(name, part_name, format, file, **kwargs)
def bulk_import_delete_part(self, name, part_name):
"""Delete a part from a bulk import session
Args:
name (str): name of a bulk import session
part_name (str): name of a part of the bulk import session
Returns:
`True` if success
"""
return self.api.bulk_import_delete_part(name, part_name)
def list_bulk_import_parts(self, name):
"""List parts of a bulk import session
Args:
name (str): name of a bulk import session
Returns:
a list of string represents the name of parts
"""
return self.api.list_bulk_import_parts(name)
def create_schedule(self, name, params=None):
"""Create a new scheduled query with the specified name.
Args:
name (str): Scheduled query name.
params (dict, optional): Extra parameters.
- type (str):
Query type. {"presto", "hive"}. Default: "hive"
- database (str):
Target database name.
- timezone (str):
Scheduled query's timezone. e.g. "UTC"
For details, see also: https://gist.github.com/frsyuki/4533752
- cron (str, optional):
Schedule of the query.
{``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)}
See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console
- delay (int, optional):
A delay ensures all buffered events are imported
before running the query. Default: 0
- query (str):
Is a language used to retrieve, insert, update and modify
data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries
- priority (int, optional):
Priority of the query.
Range is from -2 (very low) to 2 (very high). Default: 0
- retry_limit (int, optional):
Automatic retry count. Default: 0
- engine_version (str, optional):
Engine version to be used. If none is
specified, the account's default engine version would be set.
{"stable", "experimental"}
- pool_name (str, optional):
For Presto only. Pool name to be used, if not
specified, default pool would be used.
- result (str, optional):
Location where to store the result of the query.
e.g. 'tableau://user:password@host.com:1234/datasource'
Returns:
:class:`datetime.datetime`: Start date time.
"""
if "cron" not in params:
raise ValueError("'cron' option is required")
if "query" not in params:
raise ValueError("'query' option is required")
params = {} if params is None else params
return self.api.create_schedule(name, params)
def delete_schedule(self, name):
"""Delete the scheduled query with the specified name.
Args:
name (str): Target scheduled query name.
Returns:
(str, str): Tuple of cron and query.
"""
return self.api.delete_schedule(name)
def schedules(self):
"""Get the list of all the scheduled queries.
Returns:
[:class:`tdclient.models.Schedule`]
"""
result = self.api.list_schedules()
return [models.Schedule(self, **m) for m in result]
def update_schedule(self, name, params=None):
"""Update the scheduled query.
Args:
name (str): Target scheduled query name.
params (dict): Extra parameters.
- type (str):
Query type. {"presto", "hive"}. Default: "hive"
- database (str):
Target database name.
- timezone (str):
Scheduled query's timezone. e.g. "UTC"
For details, see also: https://gist.github.com/frsyuki/4533752
- cron (str, optional):
Schedule of the query.
{``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)}
See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console
- delay (int, optional):
A delay ensures all buffered events are imported
before running the query. Default: 0
- query (str):
Is a language used to retrieve, insert, update and modify
data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries
- priority (int, optional):
Priority of the query.
Range is from -2 (very low) to 2 (very high). Default: 0
- retry_limit (int, optional):
Automatic retry count. Default: 0
- engine_version (str, optional):
Engine version to be used. If none is
specified, the account's default engine version would be set.
{"stable", "experimental"}
- pool_name (str, optional):
For Presto only. Pool name to be used, if not
specified, default pool would be used.
- result (str, optional):
Location where to store the result of the query.
e.g. 'tableau://user:password@host.com:1234/datasource'
"""
params = {} if params is None else params
self.api.update_schedule(name, params)
def history(self, name, _from=None, to=None):
"""Get the history details of the saved query for the past 90days.
Args:
name (str): Target name of the scheduled query.
_from (int, optional): Indicates from which nth record in the run history
would be fetched.
Default: 0.
Note: Count starts from zero. This means that the first record in the
list has a count of zero.
to (int, optional): Indicates up to which nth record in the run history
would be fetched.
Default: 20
Returns:
[:class:`tdclient.models.ScheduledJob`]
"""
result = self.api.history(name, _from, to)
def scheduled_job(m):
(
scheduled_at,
job_id,
type,
status,
query,
start_at,
end_at,
result_url,
priority,
database,
) = m
job_param = {
"url": None,
"debug": None,
"start_at": start_at,
"end_at": end_at,
"cpu_time": None,
"result_size": None,
"result": None,
"result_url": result_url,
"hive_result_schema": None,
"priority": priority,
"retry_limit": None,
"org_name": None,
"database": database,
}
return models.ScheduledJob(
self, scheduled_at, job_id, type, query, **job_param
)
return [scheduled_job(m) for m in result]
def run_schedule(self, name, time, num):
"""Execute the specified query.
Args:
name (str): Target scheduled query name.
time (int): Time in Unix epoch format that would be set as TD_SCHEDULED_TIME
num (int): Indicates how many times the query will be executed.
Value should be 9 or less.
Returns:
[:class:`tdclient.models.ScheduledJob`]
"""
results = self.api.run_schedule(name, time, num)
def scheduled_job(m):
job_id, type, scheduled_at = m
return models.ScheduledJob(self, scheduled_at, job_id, type, None)
return [scheduled_job(m) for m in results]
def import_data(
self, db_name, table_name, format, bytes_or_stream, size, unique_id=None
):
"""Import data into Treasure Data Service
Args:
db_name (str): name of a database
table_name (str): name of a table
format (str): format of data type (e.g. "msgpack.gz")
bytes_or_stream (str or file-like): a byte string or a file-like object contains the data
size (int): the length of the data
unique_id (str): a unique identifier of the data
Returns:
            a float representing the elapsed time in seconds to import data
"""
return self.api.import_data(
db_name, table_name, format, bytes_or_stream, size, unique_id=unique_id
)
def import_file(self, db_name, table_name, format, file, unique_id=None):
"""Import data into Treasure Data Service, from an existing file on filesystem.
This method will decompress/deserialize records from given file, and then
convert it into format acceptable from Treasure Data Service ("msgpack.gz").
Args:
db_name (str): name of a database
table_name (str): name of a table
format (str): format of data type (e.g. "msgpack", "json")
file (str or file-like): a name of a file, or a file-like object contains the data
unique_id (str): a unique identifier of the data
Returns:
float represents the elapsed time to import data
"""
return self.api.import_file(
db_name, table_name, format, file, unique_id=unique_id
)
def results(self):
"""Get the list of all the available authentications.
Returns:
a list of :class:`tdclient.models.Result`
"""
results = self.api.list_result()
def result(m):
name, url, organizations = m
return models.Result(self, name, url, organizations)
return [result(m) for m in results]
def create_result(self, name, url, params=None):
"""Create a new authentication with the specified name.
Args:
name (str): Authentication name.
url (str): Url of the authentication to be created. e.g. "ftp://test.com/"
params (dict, optional): Extra parameters.
Returns:
bool: True if succeeded.
"""
params = {} if params is None else params
return self.api.create_result(name, url, params)
def delete_result(self, name):
"""Delete the authentication having the specified name.
Args:
name (str): Authentication name.
Returns:
bool: True if succeeded.
"""
return self.api.delete_result(name)
def users(self):
"""List users
Returns:
a list of :class:`tdclient.models.User`
"""
results = self.api.list_users()
def user(m):
name, org, roles, email = m
return models.User(self, name, org, roles, email)
return [user(m) for m in results]
def add_user(self, name, org, email, password):
"""Add a new user
Args:
name (str): name of the user
org (str): organization
            email (str): e-mail address
password (str): password
Returns:
`True` if success
"""
return self.api.add_user(name, org, email, password)
def remove_user(self, name):
"""Remove a user
Args:
name (str): name of the user
Returns:
`True` if success
"""
return self.api.remove_user(name)
def list_apikeys(self, name):
"""
Args:
name (str): name of the user
Returns:
a list of string of API key
"""
return self.api.list_apikeys(name)
def add_apikey(self, name):
"""
Args:
name (str): name of the user
Returns:
`True` if success
"""
return self.api.add_apikey(name)
def remove_apikey(self, name, apikey):
"""
Args:
name (str): name of the user
apikey (str): an API key to remove
Returns:
`True` if success
"""
return self.api.remove_apikey(name, apikey)
def close(self):
"""Close opened API connections.
"""
return self._api.close()
def job_from_dict(client, dd, **values):
d = dict()
d.update(dd)
d.update(values)
return models.Job(
client,
d["job_id"],
d["type"],
d["query"],
status=d.get("status"),
url=d.get("url"),
debug=d.get("debug"),
start_at=d.get("start_at"),
end_at=d.get("end_at"),
created_at=d.get("created_at"),
updated_at=d.get("updated_at"),
cpu_time=d.get("cpu_time"),
result_size=d.get("result_size"),
result=d.get("result"),
result_url=d.get("result_url"),
hive_result_schema=d.get("hive_result_schema"),
priority=d.get("priority"),
retry_limit=d.get("retry_limit"),
org_name=d.get("org_name"),
database=d.get("database"),
num_records=d.get("num_records"),
user_name=d.get("user_name"),
linked_result_export_job_id=d.get("linked_result_export_job_id"),
result_export_target_job_id=d.get("result_export_target_job_id"),
)
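
# Illustrative sketch: typical end-to-end use of Client is to open it as a
# context manager, submit a query, and read the result through the Job model.
# The API key, database name and SQL below are placeholders, and Job.wait() /
# Job.result() are assumed from tdclient.models.
def _example_run_query():
    with Client(apikey="YOUR_API_KEY") as client:
        job = client.query(
            "sample_datasets",
            "SELECT COUNT(1) FROM www_access",
            type="presto",
        )
        job.wait()
        return list(job.result())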
|
treasure-data/td-client-python
|
tdclient/client.py
|
Python
|
apache-2.0
| 32,550
|
#!/usr/bin/env python
# Welcome to Gobbler, the Scapy pcap parser and dump scripts
# Part of the sniffMyPackets suite http://www.sniffmypackets.net
# Written by @catalyst256 / catalyst256@gmail.com
import datetime
from layers.http import *
from layers.BadLayers import *
from auxtools import error_logging
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from collections import OrderedDict
bind_layers(TCP, HTTP)
def rename_layer(x, n):
n = n.lower().replace(' ', '_').replace('-', '_').replace('.', '_') + '_'
return dict((n+k.lower(), f(v) if hasattr(v, 'keys') else v) for k, v in x.items())
def find_layers(pkts, pcap, pcap_id, streamid):
packet = OrderedDict()
count = 1
pcap_id = pcap_id.encode('utf-8')
streamid = streamid.encode('utf-8')
try:
for p in pkts:
header = {"Buffer": {"timestamp": datetime.datetime.fromtimestamp(p.time).strftime('%Y-%m-%d %H:%M:%S.%f'),
"packetnumber": count, "PCAP ID": pcap_id, "pcapfile": pcap, "StreamID": streamid}}
packet.update(header)
counter = 0
while True:
layer = p.getlayer(counter)
if layer != None:
i = int(counter)
x = p[0][i].fields
t = exclude_layers(x, layer.name)
s = rename_layer(t, layer.name)
v = '{"' + layer.name.replace('.', '_') + '[' + str(i) + ']' + '":' + str(s) + '}'
s = eval(v)
try:
del s['HTTP[3]']
del s['HTTP[5]']
except KeyError:
pass
packet.update(s)
else:
break
counter += 1
count += 1
yield packet
packet.clear()
except Exception as e:
error_logging(str(e), 'PacketParser')
pass
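
# Illustrative sketch: find_layers() is a generator that yields one OrderedDict
# per packet, keyed by "<layer>[<index>]", for packets read with scapy's
# rdpcap(). The file name and identifiers below are placeholders.
def _example_parse_pcap():
    pkts = rdpcap('capture.pcap')  # rdpcap comes from the scapy.all import above
    summaries = []
    for record in find_layers(pkts, 'capture.pcap', 'pcap-0001', 'stream-0001'):
        # the generator clears and reuses one dict, so copy what you want to keep
        summaries.append(dict(record))
    return summaries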
|
SneakersInc/sniffmypacketsv2
|
src/sniffmypacketsv2/transforms/common/packetParser.py
|
Python
|
apache-2.0
| 2,024
|
import os
import sys
import textwrap
import warnings
from stripe import error, util
# - Requests is the preferred HTTP library
# - Google App Engine has urlfetch
# - Use Pycurl if it's there (at least it verifies SSL certs)
# - Fall back to urllib2 with a warning if needed
try:
import urllib2
except ImportError:
pass
try:
import pycurl
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
else:
try:
# Require version 0.8.8, but don't want to depend on distutils
version = requests.__version__
major, minor, patch = [int(i) for i in version.split('.')]
except Exception:
# Probably some new-fangled version, so it should support verify
pass
else:
if (major, minor, patch) < (0, 8, 8):
sys.stderr.write(
'Warning: the Stripe library requires that your Python '
'"requests" library be newer than version 0.8.8, but your '
'"requests" library is version %s. Stripe will fall back to '
'an alternate HTTP library so everything should work. We '
'recommend upgrading your "requests" library. If you have any '
'questions, please contact support@stripe.com. (HINT: running '
'"pip install -U requests" should upgrade your requests '
'library to the latest version.)' % (version,))
requests = None
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
def new_default_http_client(*args, **kwargs):
if urlfetch:
impl = UrlFetchClient
elif requests:
impl = RequestsClient
elif pycurl:
impl = PycurlClient
else:
impl = Urllib2Client
warnings.warn(
"Warning: the Stripe library is falling back to urllib2/urllib "
"because neither requests nor pycurl are installed. "
"urllib2's SSL implementation doesn't verify server "
"certificates. For improved security, we suggest installing "
"requests.")
return impl(*args, **kwargs)
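
# Illustrative sketch: new_default_http_client() picks the best available
# transport (urlfetch, then requests, then pycurl, then urllib2) and returns an
# HTTPClient whose request() method yields (body, status_code). The URL and
# header values below are placeholders.
def _example_default_client_request():
    client = new_default_http_client(verify_ssl_certs=True)
    body, status = client.request('get', 'https://api.stripe.com/v1/charges',
                                  {'Authorization': 'Bearer sk_test_xxx'})
    return status, body
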
class HTTPClient(object):
def __init__(self, verify_ssl_certs=True):
self._verify_ssl_certs = verify_ssl_certs
def request(self, method, url, headers, post_data=None):
raise NotImplementedError(
'HTTPClient subclasses must implement `request`')
class RequestsClient(HTTPClient):
name = 'requests'
def request(self, method, url, headers, post_data=None):
kwargs = {}
if self._verify_ssl_certs:
kwargs['verify'] = os.path.join(
os.path.dirname(__file__), 'data/ca-certificates.crt')
else:
kwargs['verify'] = False
try:
try:
result = requests.request(method,
url,
headers=headers,
data=post_data,
timeout=80,
**kwargs)
except TypeError, e:
raise TypeError(
'Warning: It looks like your installed version of the '
'"requests" library is not compatible with Stripe\'s '
'usage thereof. (HINT: The most likely cause is that '
'your "requests" library is out of date. You can fix '
'that by running "pip install -U requests".) The '
'underlying error was: %s' % (e,))
# This causes the content to actually be read, which could cause
# e.g. a socket timeout. TODO: The other fetch methods probably
            # are susceptible to the same and should be updated.
content = result.content
status_code = result.status_code
except Exception, e:
# Would catch just requests.exceptions.RequestException, but can
# also raise ValueError, RuntimeError, etc.
self._handle_request_error(e)
return content, status_code
def _handle_request_error(self, e):
if isinstance(e, requests.exceptions.RequestException):
msg = ("Unexpected error communicating with Stripe. "
"If this problem persists, let us know at "
"support@stripe.com.")
err = "%s: %s" % (type(e).__name__, str(e))
else:
msg = ("Unexpected error communicating with Stripe. "
"It looks like there's probably a configuration "
"issue locally. If this problem persists, let us "
"know at support@stripe.com.")
err = "A %s was raised" % (type(e).__name__,)
if str(e):
err += " with error message %s" % (str(e),)
else:
err += " with no error message"
msg = textwrap.fill(msg) + "\n\n(Network error: %s)" % (err,)
raise error.APIConnectionError(msg)
class UrlFetchClient(HTTPClient):
name = 'urlfetch'
def request(self, method, url, headers, post_data=None):
try:
result = urlfetch.fetch(
url=url,
method=method,
headers=headers,
# Google App Engine doesn't let us specify our own cert bundle.
# However, that's ok because the CA bundle they use recognizes
# api.stripe.com.
validate_certificate=self._verify_ssl_certs,
# GAE requests time out after 60 seconds, so make sure we leave
                # some time for the application to handle a slow Stripe response
deadline=55,
payload=post_data
)
except urlfetch.Error, e:
self._handle_request_error(e, url)
return result.content, result.status_code
def _handle_request_error(self, e, url):
if isinstance(e, urlfetch.InvalidURLError):
msg = ("The Stripe library attempted to fetch an "
"invalid URL (%r). This is likely due to a bug "
"in the Stripe Python bindings. Please let us know "
"at support@stripe.com." % (url,))
elif isinstance(e, urlfetch.DownloadError):
msg = "There was a problem retrieving data from Stripe."
elif isinstance(e, urlfetch.ResponseTooLargeError):
msg = ("There was a problem receiving all of your data from "
"Stripe. This is likely due to a bug in Stripe. "
"Please let us know at support@stripe.com.")
else:
msg = ("Unexpected error communicating with Stripe. If this "
"problem persists, let us know at support@stripe.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
class PycurlClient(HTTPClient):
name = 'pycurl'
def request(self, method, url, headers, post_data=None):
s = util.StringIO.StringIO()
curl = pycurl.Curl()
if method == 'get':
curl.setopt(pycurl.HTTPGET, 1)
elif method == 'post':
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.POSTFIELDS, post_data)
else:
curl.setopt(pycurl.CUSTOMREQUEST, method.upper())
# pycurl doesn't like unicode URLs
curl.setopt(pycurl.URL, util.utf8(url))
curl.setopt(pycurl.WRITEFUNCTION, s.write)
curl.setopt(pycurl.NOSIGNAL, 1)
curl.setopt(pycurl.CONNECTTIMEOUT, 30)
curl.setopt(pycurl.TIMEOUT, 80)
curl.setopt(pycurl.HTTPHEADER, ['%s: %s' % (k, v)
for k, v in headers.iteritems()])
if self._verify_ssl_certs:
curl.setopt(pycurl.CAINFO, os.path.join(
os.path.dirname(__file__), 'data/ca-certificates.crt'))
else:
curl.setopt(pycurl.SSL_VERIFYHOST, False)
try:
curl.perform()
except pycurl.error, e:
self._handle_request_error(e)
rbody = s.getvalue()
rcode = curl.getinfo(pycurl.RESPONSE_CODE)
return rbody, rcode
def _handle_request_error(self, e):
if e[0] in [pycurl.E_COULDNT_CONNECT,
pycurl.E_COULDNT_RESOLVE_HOST,
pycurl.E_OPERATION_TIMEOUTED]:
msg = ("Could not connect to Stripe. Please check your "
"internet connection and try again. If this problem "
"persists, you should check Stripe's service status at "
"https://twitter.com/stripestatus, or let us know at "
"support@stripe.com.")
elif (e[0] in [pycurl.E_SSL_CACERT,
pycurl.E_SSL_PEER_CERTIFICATE]):
msg = ("Could not verify Stripe's SSL certificate. Please make "
"sure that your network is not intercepting certificates. "
"If this problem persists, let us know at "
"support@stripe.com.")
else:
msg = ("Unexpected error communicating with Stripe. If this "
"problem persists, let us know at support@stripe.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + e[1] + ")"
raise error.APIConnectionError(msg)
class Urllib2Client(HTTPClient):
if sys.version_info >= (3, 0):
name = 'urllib.request'
else:
name = 'urllib2'
def request(self, method, url, headers, post_data=None):
        if sys.version_info >= (3, 0) and isinstance(post_data, str):
post_data = post_data.encode('utf-8')
req = urllib2.Request(url, post_data, headers)
if method not in ('get', 'post'):
req.get_method = lambda: method.upper()
try:
response = urllib2.urlopen(req)
rbody = response.read()
rcode = response.code
except urllib2.HTTPError, e:
rcode = e.code
rbody = e.read()
except (urllib2.URLError, ValueError), e:
self._handle_request_error(e)
return rbody, rcode
def _handle_request_error(self, e):
msg = ("Unexpected error communicating with Stripe. "
"If this problem persists, let us know at support@stripe.com.")
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
|
Rio517/pledgeservice
|
lib/stripe/http_client.py
|
Python
|
apache-2.0
| 10,566
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import subprocess
class TachyonServiceCheck(Script):
# Service check for VSFTPD service
def service_check(self, env):
import params
env.set_params(params)
target_host = format("{tachyon_master}")
print ('Service check host is: ' + target_host)
full_command = [ "ssh", target_host, params.base_dir + "/bin/tachyon", "runTest", "Basic", "STORE", "SYNC_PERSIST" ]
proc = subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
response = stdout
if 'Failed' in response:
raise ComponentIsNotRunning()
if __name__ == "__main__":
TachyonServiceCheck().execute()
|
maocorte/ambari-tachyon-service
|
package/scripts/service_check.py
|
Python
|
apache-2.0
| 1,490
|
#! /usr/bin/env python
# -*- Mode: python; py-indent-offset: 4; tab-width: 8; indent-tabs-mode: t; -*-
#
# A script for generating a number of flows.
#
# The output of the script should be saved to a file, and the flows from
# that file should be added by the following command:
#
# web/add_flow.py -f filename
#
# NOTE: Currently, some of the parameters of the flows are hard-coded,
# and all flows are between the same source and destination DPID and ports
# (differentiated by different matchSrcMac and matchDstMac).
#
import copy
import pprint
import os
import sys
import subprocess
import json
import argparse
import io
import time
## Global Var ##
DEBUG=0
pp = pprint.PrettyPrinter(indent=4)
## Worker Functions ##
def log_error(txt):
print '%s' % (txt)
def debug(txt):
if DEBUG:
print '%s' % (txt)
if __name__ == "__main__":
usage_msg = "Generate a number of flows by using a pre-defined template.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "NOTE: This script is work-in-progress. Currently all flows are within same\n"
usage_msg = usage_msg + "pair of switch ports and contain auto-generated MAC-based matching conditions.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "Usage: %s <begin-flow-id> <end-flow-id>\n" % (sys.argv[0])
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + " The output should be saved to a file, and the flows should be installed\n"
usage_msg = usage_msg + " by using the command './add_flow.py -f filename'\n"
# app.debug = False;
# Usage info
if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print(usage_msg)
exit(0)
# Check arguments
if len(sys.argv) < 3:
log_error(usage_msg)
exit(1)
# Extract the arguments
begin_flow_id = int(sys.argv[1], 0)
end_flow_id = int(sys.argv[2], 0)
if begin_flow_id > end_flow_id:
log_error(usage_msg)
exit(1)
#
# Do the work
#
# NOTE: Currently, up to 65536 flows are supported.
    # More flows can be supported by iterating over, say, some of
    # the other bytes of the autogenerated source/destination MAC addresses.
#
flow_id = begin_flow_id
idx = 0
while flow_id <= end_flow_id:
mac3 = idx / 255
mac4 = idx % 255
str_mac3 = "%0.2x" % mac3
str_mac4 = "%0.2x" % mac4
src_mac = "00:00:" + str_mac3 + ":" + str_mac4 + ":00:00";
dst_mac = "00:01:" + str_mac3 + ":" + str_mac4 + ":00:00";
print "%s FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac %s matchDstMac %s" % (flow_id, src_mac, dst_mac)
flow_id = flow_id + 1
idx = idx + 1
|
opennetworkinglab/spring-open
|
scripts/perf-scripts/generate_flows.py
|
Python
|
apache-2.0
| 2,622
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the volume scanner objects."""
import unittest
from dfvfs.lib import errors
from dfvfs.path import fake_path_spec
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.path import raw_path_spec
from dfvfs.path import tsk_partition_path_spec
from dfvfs.path import tsk_path_spec
from dfvfs.helpers import source_scanner
from dfvfs.helpers import volume_scanner
from dfvfs.resolver import resolver
from tests import test_lib as shared_test_lib
class TestVolumeScannerMediator(volume_scanner.VolumeScannerMediator):
"""Class that defines a volume scanner mediator for testing."""
_BDE_PASSWORD = u'bde-TEST'
def GetPartitionIdentifiers(self, unused_volume_system, volume_identifiers):
"""Retrieves partition identifiers.
This method can be used to prompt the user to provide partition identifiers.
Args:
volume_system (TSKVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers.
Returns:
list[str]: selected partition identifiers, such as "p1", or None.
Raises:
ScannerError: if the source cannot be processed.
"""
return volume_identifiers
def GetVSSStoreIdentifiers(self, unused_volume_system, volume_identifiers):
"""Retrieves VSS store identifiers.
This method can be used to prompt the user to provide VSS store identifiers.
Args:
volume_system (VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): volume identifiers.
Returns:
list[int]: selected VSS store numbers or None.
Raises:
ScannerError: if the source cannot be processed.
"""
return [
int(volume_identifier[3:], 10)
for volume_identifier in volume_identifiers]
def UnlockEncryptedVolume(
self, source_scanner_object, scan_context, locked_scan_node,
unused_credentials):
"""Unlocks an encrypted volume.
This method can be used to prompt the user to provide encrypted volume
credentials.
Args:
source_scanner_object (SourceScanner): source scanner.
scan_context (SourceScannerContext): source scanner context.
locked_scan_node (SourceScanNode): locked scan node.
credentials (Credentials): credentials supported by the locked scan node.
Returns:
bool: True if the volume was unlocked.
"""
return source_scanner_object.Unlock(
scan_context, locked_scan_node.path_spec, u'password',
self._BDE_PASSWORD)
class VolumeScannerTest(shared_test_lib.BaseTestCase):
"""Tests for a volume scanner."""
# pylint: disable=protected-access
def _GetTestScanNode(self, scan_context):
"""Retrieves the scan node for testing.
Retrieves the first scan node, from the root upwards, with more or less
than 1 sub node.
Args:
scan_context (ScanContext): scan context.
Returns:
SourceScanNode: scan node.
"""
scan_node = scan_context.GetRootScanNode()
while len(scan_node.sub_nodes) == 1:
scan_node = scan_node.sub_nodes[0]
return scan_node
@shared_test_lib.skipUnlessHasTestFile([u'tsk_volume_system.raw'])
def testGetTSKPartitionIdentifiers(self):
"""Tests the _GetTSKPartitionIdentifiers function."""
# Test with mediator.
test_mediator = TestVolumeScannerMediator()
test_scanner = volume_scanner.VolumeScanner(test_mediator)
test_file = self._GetTestFilePath([u'tsk_volume_system.raw'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
scan_node = self._GetTestScanNode(scan_context)
expected_identifiers = sorted([u'p1', u'p2'])
identifiers = test_scanner._GetTSKPartitionIdentifiers(scan_node)
self.assertEqual(len(identifiers), 2)
self.assertEqual(sorted(identifiers), expected_identifiers)
# Test without mediator.
test_scanner = volume_scanner.VolumeScanner()
test_file = self._GetTestFilePath([u'tsk_volume_system.raw'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
scan_node = self._GetTestScanNode(scan_context)
expected_identifiers = sorted([u'p1', u'p2'])
identifiers = test_scanner._GetTSKPartitionIdentifiers(scan_node)
self.assertEqual(len(identifiers), 2)
self.assertEqual(sorted(identifiers), expected_identifiers)
# Test error conditions.
with self.assertRaises(errors.ScannerError):
test_scanner._GetTSKPartitionIdentifiers(None)
scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._GetTSKPartitionIdentifiers(scan_node)
@shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2'])
def testGetVSSStoreIdentifiers(self):
"""Tests the _GetVSSStoreIdentifiers function."""
# Test with mediator.
test_mediator = TestVolumeScannerMediator()
test_scanner = volume_scanner.VolumeScanner(test_mediator)
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
scan_node = self._GetTestScanNode(scan_context)
expected_identifiers = sorted([1, 2])
identifiers = test_scanner._GetVSSStoreIdentifiers(scan_node.sub_nodes[0])
self.assertEqual(len(identifiers), 2)
self.assertEqual(sorted(identifiers), expected_identifiers)
# Test without mediator.
test_scanner = volume_scanner.VolumeScanner()
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
scan_node = self._GetTestScanNode(scan_context)
with self.assertRaises(errors.ScannerError):
test_scanner._GetVSSStoreIdentifiers(scan_node.sub_nodes[0])
# Test error conditions.
with self.assertRaises(errors.ScannerError):
test_scanner._GetVSSStoreIdentifiers(None)
scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._GetVSSStoreIdentifiers(scan_node)
def testScanFileSystem(self):
"""Tests the _ScanFileSystem function."""
test_scanner = volume_scanner.VolumeScanner()
path_spec = fake_path_spec.FakePathSpec(location=u'/')
scan_node = source_scanner.SourceScanNode(path_spec)
base_path_specs = []
test_scanner._ScanFileSystem(scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 1)
# Test error conditions.
with self.assertRaises(errors.ScannerError):
test_scanner._ScanFileSystem(None, [])
scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanFileSystem(scan_node, [])
@shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd'])
def testScanVolumeRAW(self):
"""Tests the _ScanVolume function on a RAW image."""
test_scanner = volume_scanner.VolumeScanner()
test_file = self._GetTestFilePath([u'ímynd.dd'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = scan_context.GetRootScanNode()
base_path_specs = []
test_scanner._ScanVolume(scan_context, volume_scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 1)
# Test error conditions.
scan_context = source_scanner.SourceScannerContext()
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolume(scan_context, None, [])
volume_scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolume(scan_context, volume_scan_node, [])
@shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2'])
def testScanVolumeVSS(self):
"""Tests the _ScanVolume function on NSS."""
test_mediator = TestVolumeScannerMediator()
test_scanner = volume_scanner.VolumeScanner(test_mediator)
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = self._GetTestScanNode(scan_context)
base_path_specs = []
test_scanner._ScanVolume(
scan_context, volume_scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 3)
@shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd'])
def testScanVolumeScanNodeRAW(self):
"""Tests the _ScanVolumeScanNode function on a RAW image."""
test_scanner = volume_scanner.VolumeScanner()
test_file = self._GetTestFilePath([u'ímynd.dd'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = scan_context.GetRootScanNode()
base_path_specs = []
test_scanner._ScanVolumeScanNode(
scan_context, volume_scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 1)
# Test error conditions.
scan_context = source_scanner.SourceScannerContext()
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolumeScanNode(scan_context, None, [])
volume_scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolumeScanNode(scan_context, volume_scan_node, [])
@shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2'])
def testScanVolumeScanNode(self):
"""Tests the _ScanVolumeScanNode function on VSS."""
test_mediator = TestVolumeScannerMediator()
test_scanner = volume_scanner.VolumeScanner(test_mediator)
# Test VSS root.
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = self._GetTestScanNode(scan_context)
base_path_specs = []
test_scanner._ScanVolumeScanNode(
scan_context, volume_scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 0)
# Test VSS volume.
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = self._GetTestScanNode(scan_context)
base_path_specs = []
test_scanner._ScanVolumeScanNode(
scan_context, volume_scan_node.sub_nodes[0], base_path_specs)
self.assertEqual(len(base_path_specs), 2)
@shared_test_lib.skipUnlessHasTestFile([u'bdetogo.raw'])
def testScanVolumeScanNodeEncrypted(self):
"""Tests the _ScanVolumeScanNodeEncrypted function."""
resolver.Resolver.key_chain.Empty()
test_mediator = TestVolumeScannerMediator()
test_scanner = volume_scanner.VolumeScanner(test_mediator)
test_file = self._GetTestFilePath([u'bdetogo.raw'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = self._GetTestScanNode(scan_context)
base_path_specs = []
test_scanner._ScanVolumeScanNode(
scan_context, volume_scan_node.sub_nodes[0], base_path_specs)
self.assertEqual(len(base_path_specs), 1)
# Test error conditions.
path_spec = fake_path_spec.FakePathSpec(location=u'/')
scan_node = source_scanner.SourceScanNode(path_spec)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolumeScanNodeEncrypted(scan_node, None, [])
volume_scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolumeScanNodeEncrypted(scan_node, volume_scan_node, [])
@shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2'])
def testScanVolumeScanNodeVSS(self):
"""Tests the _ScanVolumeScanNodeVSS function."""
test_mediator = TestVolumeScannerMediator()
test_scanner = volume_scanner.VolumeScanner(test_mediator)
# Test root.
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = scan_context.GetRootScanNode()
base_path_specs = []
test_scanner._ScanVolumeScanNodeVSS(volume_scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 0)
# Test VSS volume.
test_file = self._GetTestFilePath([u'vsstest.qcow2'])
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(test_file)
test_scanner._source_scanner.Scan(scan_context)
volume_scan_node = self._GetTestScanNode(scan_context)
base_path_specs = []
test_scanner._ScanVolumeScanNodeVSS(
volume_scan_node.sub_nodes[0], base_path_specs)
self.assertEqual(len(base_path_specs), 2)
# Test error conditions.
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolumeScanNodeVSS(None, [])
volume_scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanVolumeScanNodeVSS(volume_scan_node, [])
@shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd'])
def testGetBasePathSpecsRAW(self):
"""Tests the GetBasePathSpecs function on a RAW image."""
test_file = self._GetTestFilePath([u'ímynd.dd'])
test_scanner = volume_scanner.VolumeScanner()
test_os_path_spec = os_path_spec.OSPathSpec(location=test_file)
test_raw_path_spec = raw_path_spec.RawPathSpec(parent=test_os_path_spec)
test_tsk_path_spec = tsk_path_spec.TSKPathSpec(
location=u'/', parent=test_raw_path_spec)
expected_base_path_specs = [test_tsk_path_spec.comparable]
base_path_specs = test_scanner.GetBasePathSpecs(test_file)
base_path_specs = [
base_path_spec.comparable for base_path_spec in base_path_specs]
self.assertEqual(base_path_specs, expected_base_path_specs)
# Test error conditions.
with self.assertRaises(errors.ScannerError):
test_scanner.GetBasePathSpecs(None)
with self.assertRaises(errors.ScannerError):
test_scanner.GetBasePathSpecs(u'/bogus')
@shared_test_lib.skipUnlessHasTestFile([u'tsk_volume_system.raw'])
def testGetBasePathSpecsPartitionedImage(self):
"""Tests the GetBasePathSpecs function on a partitioned image."""
test_file = self._GetTestFilePath([u'tsk_volume_system.raw'])
test_scanner = volume_scanner.VolumeScanner()
test_os_path_spec = os_path_spec.OSPathSpec(location=test_file)
test_raw_path_spec = raw_path_spec.RawPathSpec(parent=test_os_path_spec)
test_tsk_partition_path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
location=u'/p2', part_index=6, start_offset=0x0002c000,
parent=test_raw_path_spec)
test_tsk_path_spec = tsk_path_spec.TSKPathSpec(
location=u'/', parent=test_tsk_partition_path_spec)
expected_base_path_specs = [test_tsk_path_spec.comparable]
base_path_specs = test_scanner.GetBasePathSpecs(test_file)
base_path_specs = [
base_path_spec.comparable for base_path_spec in base_path_specs]
self.assertEqual(base_path_specs, expected_base_path_specs)
@shared_test_lib.skipUnlessHasTestFile([u'testdir_os'])
def testGetBasePathSpecsDirectory(self):
"""Tests the GetBasePathSpecs function on a directory."""
test_file = self._GetTestFilePath([u'testdir_os'])
test_scanner = volume_scanner.VolumeScanner()
test_os_path_spec = os_path_spec.OSPathSpec(location=test_file)
expected_base_path_specs = [test_os_path_spec.comparable]
base_path_specs = test_scanner.GetBasePathSpecs(test_file)
base_path_specs = [
base_path_spec.comparable for base_path_spec in base_path_specs]
self.assertEqual(base_path_specs, expected_base_path_specs)
@shared_test_lib.skipUnlessHasTestFile([u'windows_volume.qcow2'])
class WindowsVolumeScannerTest(shared_test_lib.BaseTestCase):
"""Tests for a Windows volume scanner."""
# pylint: disable=protected-access
def testScanFileSystem(self):
"""Tests the _ScanFileSystem function."""
test_scanner = volume_scanner.WindowsVolumeScanner()
test_file = self._GetTestFilePath([u'windows_volume.qcow2'])
test_os_path_spec = os_path_spec.OSPathSpec(location=test_file)
test_qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=test_os_path_spec)
test_tsk_path_spec = tsk_path_spec.TSKPathSpec(
location=u'/', parent=test_qcow_path_spec)
scan_node = source_scanner.SourceScanNode(test_tsk_path_spec)
base_path_specs = []
test_scanner._ScanFileSystem(scan_node, base_path_specs)
self.assertEqual(len(base_path_specs), 1)
# Test error conditions.
with self.assertRaises(errors.ScannerError):
test_scanner._ScanFileSystem(None, [])
scan_node = source_scanner.SourceScanNode(None)
with self.assertRaises(errors.ScannerError):
test_scanner._ScanFileSystem(scan_node, [])
# _ScanFileSystemForWindowsDirectory is tested by testScanFileSystem.
def testOpenFile(self):
"""Tests the OpenFile function."""
test_file = self._GetTestFilePath([u'windows_volume.qcow2'])
test_scanner = volume_scanner.WindowsVolumeScanner()
result = test_scanner.ScanForWindowsVolume(test_file)
self.assertTrue(result)
file_object = test_scanner.OpenFile(
u'C:\\Windows\\System32\\config\\syslog')
self.assertIsNotNone(file_object)
file_object.close()
file_object = test_scanner.OpenFile(u'C:\\bogus')
self.assertIsNone(file_object)
with self.assertRaises(IOError):
test_scanner.OpenFile(u'C:\\Windows\\System32\\config')
@shared_test_lib.skipUnlessHasTestFile([u'tsk_volume_system.raw'])
def testScanForWindowsVolume(self):
"""Tests the ScanForWindowsVolume function."""
test_file = self._GetTestFilePath([u'tsk_volume_system.raw'])
test_scanner = volume_scanner.WindowsVolumeScanner()
result = test_scanner.ScanForWindowsVolume(test_file)
self.assertFalse(result)
test_file = self._GetTestFilePath([u'windows_volume.qcow2'])
test_scanner = volume_scanner.WindowsVolumeScanner()
result = test_scanner.ScanForWindowsVolume(test_file)
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
|
dc3-plaso/dfvfs
|
tests/helpers/volume_scanner.py
|
Python
|
apache-2.0
| 18,673
|
__author__ = 'PaleNeutron'
import os
from urllib.parse import urlparse, unquote
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
class MyMainWindow(QtWidgets.QMainWindow):
file_loaded = QtCore.pyqtSignal(str)
image_loaded = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self):
super(MyMainWindow, self).__init__()
self.windowList = []
self.text_path = ''
self.epub_path = ''
self.win_file_mime = "application/x-qt-windows-mime;value=\"FileNameW\""
self.text_uri_mime = "text/uri-list"
self.create_content_browser()
def create_content_browser(self):
self.content_browser = QtWidgets.QTextBrowser()
self.content_browser.setFontPointSize(12)
self.content_browser.setGeometry(QtCore.QRect(300, 150, 600, 400))
self.windowList.append(self.content_browser)
def dragEnterEvent(self, ev):
ev.accept()
def load_file(self, file_path):
self.file_loaded.emit(file_path)
# def image_loaded(self, file_path):
# with open(file_path, "b") as f:
# r = f.read()
# with open("images/cover.jpg", "wb") as f:
# f.write(r)
# def epub_loaded(self, file_path):
# self.epub_path = file_path
# self.file_loaded.emit(False, )
def uri_to_path(self, uri):
if sys.platform == "win32":
path = unquote(urlparse(uri).path)[1:]
elif sys.platform == "linux":
path = unquote(urlparse(uri).path)
else:
path = None
return path
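    # Illustrative example (not part of the original class): on win32,
    # uri_to_path("file:///C:/books/a.txt") returns "C:/books/a.txt", while on
    # linux uri_to_path("file:///home/user/a.txt") returns "/home/user/a.txt".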
def dropEvent(self, ev):
# formats = ev.mimeData().formats()
# for i in formats:
# print(i)
# if ev.mimeData().hasFormat(self.win_file_mime):
# ev.accept()
# file_path = bytes(ev.mimeData().data(self.win_file_mime).data())[:-2].decode('utf16')
# if file_path.endswith(".txt"):
# self.text_loaded(file_path)
# elif file_path.endswith(".jpg") or file_path.endswith(".jpeg") or file_path.endswith(".png"):
# self.image_loaded(file_path)
# elif file_path.endswith(".epub"):
# self.epub_loaded(file_path)
# print(file_path)
if ev.mimeData().hasImage():
self.image_loaded.emit(ev.mimeData().imageData())
if ev.mimeData().hasFormat(self.text_uri_mime):
uri = ev.mimeData().data(self.text_uri_mime).data().decode("utf8").strip()
file_path = self.uri_to_path(uri)
if uri.lower().endswith(".txt") or uri.lower().endswith(".epub"):
self.load_file(file_path)
elif uri.lower().endswith(".zip"):
                # Open the zip archive and grab the txt file inside
import zipfile
zf = zipfile.ZipFile(file_path)
for filename in zf.namelist():
                    # If a txt file in the archive is larger than 10 KB, extract it to the current folder
if filename.lower().endswith(".txt") and zf.getinfo(filename).file_size > 10 * 1024:
zf.extract(filename)
                        # Emit the file-location signal
self.load_file(os.curdir + os.sep + filename)
break
elif uri.lower().endswith(".rar"):
import rarfile
rf = rarfile.RarFile(file_path)
for filename in rf.namelist():
                    # If a txt file in the archive is larger than 10 KB, extract it to the current folder
if filename.lower().endswith(".txt") and rf.getinfo(filename).file_size > 10 * 1024:
rf.extract(filename)
                        # Emit the file-location signal
self.load_file(os.curdir + os.sep + filename)
break
else:
ev.ignore()
|
PaleNeutron/EpubBuilder
|
my_mainwindow.py
|
Python
|
apache-2.0
| 3,933
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 17:14:41 2016
@author: sdemyanov
"""
import numpy as np
from sklearn import metrics
def get_prob_acc(probs, labels):
return np.mean(np.argmax(probs, axis=1) == labels)
def get_auc_score(scores, labels):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
return metrics.auc(fpr, tpr)
def get_f1_score(confmat):
assert confmat.shape[0] == 2 and confmat.shape[1] == 2
precision = float(confmat[0, 0]) / np.sum(confmat[:, 0])
recall = float(confmat[0, 0]) / np.sum(confmat[0, :])
print 'precision: %f' % precision
print 'recall: %f' % recall
return 2 * precision * recall / (precision + recall)
def get_accuracy(confmat):
correct = np.sum(np.diagonal(confmat))
overall = np.sum(confmat)
return correct.astype(float) / overall
def get_sensitivities(confmat):
correct = np.diagonal(confmat)
overall = np.sum(confmat, 1)
return np.divide(np.array(correct, dtype=np.float), overall)
def get_pred_confmat(classes, preds, labels):
classnum = len(classes)
mat = np.zeros((classnum, classnum), dtype=int)
for pind in range(preds.shape[0]):
labind = np.where(classes == labels[pind])
predind = np.where(classes == preds[pind])
mat[labind[0], predind[0]] += 1
# mat = np.transpose(mat)
return mat
def get_prob_confmat(probs, labels):
classnum = probs.shape[1]
mat = np.zeros((classnum, classnum), dtype=int)
for pind in range(probs.shape[0]):
mat[int(labels[pind]), np.argmax(probs[pind, :])] += 1
#mat = np.transpose(mat)
return mat
def get_block_confmat(confmat, blocks):
assert(confmat.shape[0] == confmat.shape[1])
classnum = confmat.shape[0]
#assert(np.sum(blocks) == classnum)
blocknum = len(blocks)
blockconf = np.zeros((blocknum, blocknum))
for bi in range(blocknum):
for bj in range(blocknum):
blockconf[bi, bj] = 0
for i in blocks[bi]:
for j in blocks[bj]:
blockconf[bi, bj] += confmat[i, j]
assert np.sum(blockconf) == np.sum(confmat), 'Blocks should represent a splitting of confmat'
return blockconf
def get_block_probs_labels(prob, labels, blocks):
# IMPORTANT: blocks must not intersect, otherwise the result is not unique
blocknum = len(blocks)
assert prob.shape[0] == labels.shape[0]
newprob = np.zeros((prob.shape[0], blocknum))
for i in range(blocknum):
newprob[:, i] = np.sum(prob[:, blocks[i]], 1)
#normalize to have sum = 1
mult_coefs = np.sum(newprob, 1, keepdims=True)
newprob /= np.tile(mult_coefs, (1, blocknum))
newlab = np.zeros(prob.shape[0])
missing = []
for i in range(prob.shape[0]):
is_missing = True
for j in range(len(blocks)):
if (labels[i] in blocks[j]):
newlab[i] = j
is_missing = False
break
if (is_missing):
missing.append(i)
newprob = np.delete(newprob, missing, axis=0)
newlab = np.delete(newlab, missing, axis=0)
return newprob, newlab
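# Illustrative usage sketch (not part of the original module): merging classes
# 0 and 1 into one block and keeping class 2 as a second block before scoring.
# `probs` and `labels` below are placeholders for model outputs and true labels:
#
#     blocks = [[0, 1], [2]]
#     block_probs, block_labels = get_block_probs_labels(probs, labels, blocks)
#     block_conf = get_block_confmat(get_prob_confmat(probs, labels), blocks)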
def get_spec_for_sens(scores, labels, sens):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
curind = np.size(tpr) - 1
while (tpr[curind-1] >= sens):
curind -= 1
return tpr[curind], 1 - fpr[curind], thresholds[curind]
def get_sens_for_spec(scores, labels, spec):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
curind = 0
while (1 - fpr[curind+1] >= spec):
curind += 1
return tpr[curind], 1 - fpr[curind], thresholds[curind]
def get_average_precisions(probs, labels):
print 'probshape:', np.shape(probs)
classnum = np.size(probs, 1)
labels_arr = np.zeros_like(probs)
for i in xrange(classnum):
labels_arr[labels == i, i] = 1
print 'macro:', metrics.average_precision_score(labels_arr, probs, average='macro')
print 'weighted:', metrics.average_precision_score(labels_arr, probs, average='weighted')
skap = metrics.average_precision_score(labels_arr, probs, average=None)
return {i: round(skap[i] * 1000) / 10 for i in xrange(classnum)}
|
sdemyanov/tensorflow-worklab
|
classes/stats.py
|
Python
|
apache-2.0
| 3,985
|
#!/usr/bin/env python3
import glob
import numpy as np
import pyboof as pb
# Scene recognition is defined here as the problem where you wish to find multiple views of the same scene
# In this example we will load a set of images that has sets of 3 related images. We will tell it to find the 5
# most similar images so that you can see what it does when it fails to find a good match
# Get a list of all images which we wish to search
list_images = list(glob.glob("../data/example/recognition/scene/*.jpg"))
list_images.sort()
# Create an instance of SceneRecognition. This will take in images as input
recognizer = pb.FactorySceneRecognition(np.uint8).scene_recognition()
# First we need to create a model so that it knows how to describe a scene. BoofCV does provide a
# pre-built model generated from vacation photos. This is fast enough that often it's just easier to train it
# on the images you plan to search.
print("Learning the model. This can take a moment or two.")
recognizer.learn_model(list_images)
# Alternatively you can comment out the model-training code above (the learn_model call) and load
# a pre-built model by uncommenting the line below
# recognizer = pb.download_default_scene_recognition(np.uint8, "saved_models")
# Now add all the images that we wish to look up
print("Adding images to the database")
for image_file in list_images:
boof_gray = pb.load_single_band(image_file, np.uint8)
recognizer.add_image(image_file, boof_gray)
# Let's look one up and see which images are related
print("Making a query: ", list_images[6])
query_image = pb.load_single_band(list_images[6], np.uint8)
found_matches = recognizer.query(query_image, 5)
# We are expecting 3 matches to come first, and the other two to be incorrect/noise
print("len={}".format(len(found_matches)))
print("\nResults:")
for m in found_matches:
print("{:s} error={:f}".format(m["id"], m["error"]))
# Display the results
image_list = [(query_image, "Query")]
for m in found_matches:
image_list.append((pb.load_planar(m["id"], np.uint8), m["id"]))
pb.swing.show_list(image_list, title="Query Results")
input("Press any key to exit")
|
lessthanoptimal/PyBoof
|
examples/scene_recognition.py
|
Python
|
apache-2.0
| 2,120
|
import pymysql.cursors
from model.group import Group
from model.contact import Contact
class DbFixture:
def __init__(self, host, name, user, password):
self.host = host
self.name = name
self.user = user
self.password = password
self.connection = pymysql.connect(host=host, database=name, user=user, password=password)
self.connection.autocommit(True)
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(ident, name, header, footer) = row
list.append(Group(ident=str(ident), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("SELECT id, firstname, middlename, lastname, nickname, title, company, address, "
"home, mobile, work, fax, email, email2, email3, homepage, byear, ayear, address2, "
"phone2, notes FROM addressbook WHERE deprecated='0000-00-00 00:00:00'")
for row in cursor:
(ident, firstname, middlename, lastname, nickname, title, company, company_address,
homephone, mobilephone, workphone, telephone_fax, email, email2, email3,
homepage, birthday_year, anniversary, secondary_address, secondaryphone,
secondary_notes) = row
list.append(Contact(ident=str(ident), firstname=firstname, middlename=middlename, lastname=lastname,
nickname=nickname, title=title, company=company, company_address=company_address,
homephone=homephone, mobilephone=mobilephone, workphone=workphone,
telephone_fax=telephone_fax, email=email, email2=email2, email3=email3,
homepage=homepage, birthday_year=birthday_year, anniversary=anniversary,
secondary_address=secondary_address, secondaryphone=secondaryphone,
secondary_notes=secondary_notes,
all_emails_from_homepage=email + email2 + email3,
all_phones_from_homepage=homephone + mobilephone + workphone + secondaryphone))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close()
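# Illustrative usage sketch (not part of the original fixture); the connection
# parameters below are placeholders:
#
#     db = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#     try:
#         groups = db.get_group_list()
#         contacts = db.get_contact_list()
#     finally:
#         db.destroy()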
|
evgeniy-shorgin/python_training
|
fixture/db.py
|
Python
|
apache-2.0
| 2,681
|
#!/usr/bin/env python
from tdclient.model import Model
class User(Model):
"""User on Treasure Data Service
"""
def __init__(self, client, name, org_name, role_names, email, **kwargs):
super(User, self).__init__(client)
self._name = name
self._org_name = org_name
self._role_names = role_names
self._email = email
@property
def name(self):
"""
Returns: name of the user
"""
return self._name
@property
def org_name(self):
"""
Returns: organization name
"""
return self._org_name
@property
def role_names(self):
"""
        Returns: list of the user's role names
"""
return self._role_names
@property
def email(self):
"""
Returns: e-mail address
"""
return self._email
|
treasure-data/td-client-python
|
tdclient/user_model.py
|
Python
|
apache-2.0
| 860
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cyborg common internal object model"""
import netaddr
from oslo_utils import versionutils
from oslo_versionedobjects import base as object_base
from cyborg import objects
from cyborg.objects import fields as object_fields
class CyborgObjectRegistry(object_base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
# NOTE(jroll): blatantly stolen from nova
# NOTE(danms): This is called when an object is registered,
# and is responsible for maintaining cyborg.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
class CyborgObject(object_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'cyborg_object'
OBJ_PROJECT_NAMESPACE = 'cyborg'
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
}
def as_dict(self):
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k))
@staticmethod
def _from_db_object(obj, db_obj):
"""Converts a database entity to a formal object.
:param obj: An object of the class.
:param db_obj: A DB model of the object
:return: The object of the class with the database entity added
"""
for field in obj.fields:
obj[field] = db_obj[field]
obj.obj_reset_changes()
return obj
@classmethod
def _from_db_object_list(cls, db_objs, context):
"""Converts a list of database entities to a list of formal objects."""
objs = []
for db_obj in db_objs:
objs.append(cls._from_db_object(cls(context), db_obj))
return objs
class CyborgObjectSerializer(object_base.VersionedObjectSerializer):
# Base class to use for object hydration
OBJ_BASE_CLASS = CyborgObject
CyborgObjectDictCompat = object_base.VersionedObjectDictCompat
class CyborgPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
'deleted_at': object_fields.DateTimeField(nullable=True),
'deleted': object_fields.BooleanField(default=False),
}
class ObjectListBase(object_base.ObjectListBase):
@classmethod
def _obj_primitive_key(cls, field):
return 'cyborg_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=object_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == object_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A CyborgObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, CyborgObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
:param:obj1: The first object in the comparison
:param:obj2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['cyborg_object.changes'] + ignore
else:
keys = ['cyborg_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
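# Illustrative example (not part of the original module): because _strip()
# recurses into nested primitives, field names passed via `ignore` are dropped
# wherever they appear, so
#
#     obj_equal_prims(obj_1, obj_2, ignore=['updated_at'])
#
# compares two objects while disregarding their 'updated_at' timestamps.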
class DriverObjectBase(CyborgObject):
@staticmethod
def _from_db_object(obj, db_obj):
fields = obj.fields
fields.pop("updated_at")
fields.pop("created_at")
for field in fields:
obj[field] = db_obj[field]
obj.obj_reset_changes()
return obj
|
openstack/nomad
|
cyborg/objects/base.py
|
Python
|
apache-2.0
| 6,515
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import functools
import warnings
import string
from six.moves import cStringIO
from ..proto import framework_pb2
from ..framework import OpProtoHolder, Variable
from ..layer_helper import LayerHelper
__all__ = [
'deprecated', 'generate_layer_fn', 'generate_layer_fn_noattr', 'autodoc',
'templatedoc'
]
def _convert_(name):
"""
Formatting.
Args:
name: The name/alias
This function takes in a name and converts it to a standard format of
group1_group2. Where as per the regular expression, group1 can have
alphabets and numbers and group2 has capital alphabets.
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
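# For example (illustrative only): _convert_('SigmoidCrossEntropy') returns
# 'sigmoid_cross_entropy', and _convert_('FC') returns 'fc'.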
def _type_to_str_(tp):
return framework_pb2.AttrType.Name(tp)
_two_dollar_pattern_ = re.compile(r"\$\$([^\$]+)\$\$")
_single_dollar_pattern_ = re.compile(r"\$([^\$]+)\$")
_two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
def escape_math(text):
return _two_bang_pattern_.sub(
r'$$\1$$',
_single_dollar_pattern_.sub(r':math:`\1`',
_two_dollar_pattern_.sub(r"!!\1!!", text)))
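# For example (illustrative only): escape_math('$x^2$') yields ':math:`x^2`',
# while escape_math('$$x^2$$') keeps the display-math markers as '$$x^2$$'.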
def _generate_doc_string_(op_proto, additional_args_lines=None):
"""
Generate docstring by OpProto
Args:
op_proto (framework_pb2.OpProto): a protobuf message typed OpProto
Returns:
str: the document string
"""
if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("OpProto should be `framework_pb2.OpProto`")
buf = cStringIO()
buf.write(escape_math(op_proto.comment))
buf.write('\nArgs:\n')
for each_input in op_proto.inputs:
line_begin = ' {0}: '.format(_convert_(each_input.name))
buf.write(line_begin)
buf.write(escape_math(each_input.comment))
if each_input.duplicable:
buf.write(" Duplicatable.")
if each_input.dispensable:
buf.write(" Optional.")
buf.write('\n')
skip_attrs = OpProtoHolder.generated_op_attr_names()
for each_attr in op_proto.attrs:
if each_attr.name in skip_attrs:
continue
buf.write(' ')
buf.write(each_attr.name)
buf.write(' (')
buf.write(_type_to_str_(each_attr.type))
buf.write('): ')
buf.write(escape_math(each_attr.comment))
buf.write('\n')
if additional_args_lines is not None:
for line in additional_args_lines:
line = line.strip()
buf.write(' ')
buf.write(line)
buf.write('\n')
if len(op_proto.outputs) != 0:
buf.write('\nReturns:\n')
buf.write(' ')
for each_opt in op_proto.outputs:
if not each_opt.intermediate:
break
buf.write(escape_math(each_opt.comment))
return buf.getvalue()
def generate_layer_fn(op_type):
"""Register the Python layer for an Operator.
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sigmoid, mean , average etc) and
creates the operator functionality.
"""
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
not_intermediate_outputs = \
[output for output in op_proto.outputs if not output.intermediate]
intermediate_outputs = \
[output for output in op_proto.outputs if output.intermediate]
if len(not_intermediate_outputs) != 1:
raise ValueError("Only one non intermediate output operator can be",
"automatically generated. {0}".format(op_type))
if not_intermediate_outputs[0].duplicable:
raise ValueError(
"Only non duplicable op can be automatically generated.")
for output in intermediate_outputs:
if output.duplicable:
raise ValueError("The op can be automatically generated only when ",
"all intermediate ops are not duplicable.")
o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs]
def infer_and_check_dtype(op_proto, *args, **kwargs):
"""
This function performs the sanity check for dtype and
instance type.
"""
dtype = None
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0:
val = [args[0]]
args = args[1:]
for each in val:
if not isinstance(each, Variable):
raise ValueError("input of {0} must be variable".format(
op_type))
if dtype is None:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError(
"operator {0} must input same dtype. {1} vs {2}".format(
op_type, dtype, each.dtype))
return dtype
def func(*args, **kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_dtype(op_proto, *args, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0 and len(args) != 0:
val = args[0]
args = args[1:]
inputs[ipt.name] = val
outputs = dict()
out = kwargs.pop(_convert_(o_name), [])
if out:
out_var = out[0] if (isinstance(out, list) or
isinstance(out, tuple)) else out
else:
out_var = helper.create_variable_for_type_inference(dtype=dtype)
outputs[o_name] = [out_var]
for name in intermediate_output_names:
outputs[name] = [
helper.create_variable_for_type_inference(dtype=dtype)
]
helper.append_op(
type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
return helper.append_activation(out_var)
func.__name__ = op_type
func.__doc__ = _generate_doc_string_(op_proto)
return func
def generate_layer_fn_noattr(op_type):
"""Register the Python layer for an Operator without Attribute.
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sigmoid, exp , tanh etc) and
creates the operator functionality.
"""
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
def func(x, name=None):
helper = LayerHelper(op_type, **locals())
output = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
return output
func.__name__ = op_type
func.__doc__ = _generate_doc_string_(op_proto)
return func
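# Illustrative usage sketch (not part of this module): these factories are
# meant to be bound to module-level layer names, using operator types such as
# the ones mentioned in the docstrings above, e.g.
#
#     mean = generate_layer_fn('mean')
#     exp = generate_layer_fn_noattr('exp')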
def deprecated(func_or_class):
    """
    Deprecated warning decorator. It will result in a warning message.
    Should be used before a class, function, or member function.
    """
    @functools.wraps(func_or_class)
    def func_wrapper(*args, **kwargs):
        """
        Wrap func_or_class with a deprecation warning
        """
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn(
            "Call to deprecated function {}.".format(func_or_class.__name__),
            category=DeprecationWarning,
            stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func_or_class(*args, **kwargs)
    return func_wrapper
def autodoc(comment=""):
def __impl__(func):
func.__doc__ = _generate_doc_string_(OpProtoHolder.instance(
).get_op_proto(func.__name__)) + comment
return func
return __impl__
def templatedoc(op_type=None):
"""
Decorator of layer function. It will use the docstring from the layer
function as the template. The template arguments are:
* ${comment}: The operator comment written in CPP.
* ${{name}_comment}: The comment of ${name} written with AddAttr, AddOutput,
and AddInput. The ${name} is Python snake style. i.e., xxx_xxx.
* ${{name}_type}: The type of ${name}.
Returns:
Decorated function.
"""
def trim_ending_dot(msg):
return msg.rstrip('.')
def __impl__(func):
if op_type is None:
op_type_name = func.__name__
else:
op_type_name = op_type
op_proto = OpProtoHolder.instance().get_op_proto(op_type_name)
tmpl = string.Template(func.__doc__)
comment_lines = op_proto.comment.split("\n")
comment = ""
for line in comment_lines:
line = line.strip()
if len(line) != 0:
comment += escape_math(line)
comment += " "
elif len(comment) != 0:
comment += "\n \n "
args = {"comment": trim_ending_dot(comment)}
for each_input in op_proto.inputs:
input_name = _convert_(each_input.name)
args["{0}_comment".format(input_name)] = trim_ending_dot(
each_input.comment)
args["{0}_type".format(input_name)] = "Variable"
for each_attr in op_proto.attrs:
input_name = _convert_(each_attr.name)
args["{0}_comment".format(input_name)] = trim_ending_dot(
each_attr.comment)
args["{0}_type".format(input_name)] = _type_to_str_(each_attr.type)
for each_opt in op_proto.outputs:
output_name = _convert_(each_opt.name)
args["{0}_comment".format(output_name)] = trim_ending_dot(
each_opt.comment)
args["{0}_type".format(output_name)] = "Variable"
func.__doc__ = tmpl.substitute(args)
return func
return __impl__
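# Illustrative sketch (not part of the original module) of a docstring template
# that templatedoc() would fill in, using the placeholders described above.
# `my_layer` is hypothetical; its name (or the op_type argument) must match a
# registered operator:
#
#     @templatedoc()
#     def my_layer(x, name=None):
#         """
#         ${comment}
#         Args:
#             x (${x_type}): ${x_comment}
#         """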
|
reyoung/Paddle
|
python/paddle/fluid/layers/layer_function_generator.py
|
Python
|
apache-2.0
| 10,724
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
def adaptive_start_index(index, input_size, output_size):
return int(np.floor(index * input_size / output_size))
def adaptive_end_index(index, input_size, output_size):
return int(np.ceil((index + 1) * input_size / output_size))
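# Worked example (illustrative only): with input_size=5 and output_size=3,
# output bin 1 covers input indices [adaptive_start_index(1, 5, 3),
# adaptive_end_index(1, 5, 3)) = [1, 4), since floor(1 * 5 / 3) = 1 and
# ceil(2 * 5 / 3) = 4.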
def max_pool3D_forward_naive(x,
ksize,
strides,
paddings,
global_pool=0,
ceil_mode=False,
exclusive=True,
adaptive=False):
N, C, D, H, W = x.shape
if global_pool == 1:
ksize = [D, H, W]
if adaptive:
D_out, H_out, W_out = ksize
else:
        D_out = (D - ksize[0] + 2 * paddings[0] + strides[0] - 1
                 ) // strides[0] + 1 if ceil_mode else (
                     D - ksize[0] + 2 * paddings[0]) // strides[0] + 1
        H_out = (H - ksize[1] + 2 * paddings[1] + strides[1] - 1
                 ) // strides[1] + 1 if ceil_mode else (
                     H - ksize[1] + 2 * paddings[1]) // strides[1] + 1
        W_out = (W - ksize[2] + 2 * paddings[2] + strides[2] - 1
                 ) // strides[2] + 1 if ceil_mode else (
                     W - ksize[2] + 2 * paddings[2]) // strides[2] + 1
out = np.zeros((N, C, D_out, H_out, W_out))
for k in range(D_out):
if adaptive:
d_start = adaptive_start_index(k, D, ksize[0])
d_end = adaptive_end_index(k, D, ksize[0])
else:
d_start = np.max((k * strides[0] - paddings[0], 0))
d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D))
for i in range(H_out):
if adaptive:
h_start = adaptive_start_index(i, H, ksize[1])
h_end = adaptive_end_index(i, H, ksize[1])
else:
h_start = np.max((i * strides[1] - paddings[1], 0))
h_end = np.min((i * strides[1] + ksize[1] - paddings[1], H))
for j in range(W_out):
if adaptive:
w_start = adaptive_start_index(j, W, ksize[2])
w_end = adaptive_end_index(j, W, ksize[2])
else:
w_start = np.max((j * strides[2] - paddings[2], 0))
w_end = np.min((j * strides[2] + ksize[2] - paddings[2], W))
x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end]
out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))
return out
def avg_pool3D_forward_naive(x,
ksize,
strides,
paddings,
global_pool=0,
ceil_mode=False,
exclusive=True,
adaptive=False):
N, C, D, H, W = x.shape
if global_pool == 1:
ksize = [D, H, W]
if adaptive:
D_out, H_out, W_out = ksize
else:
        D_out = (D - ksize[0] + 2 * paddings[0] + strides[0] - 1
                 ) // strides[0] + 1 if ceil_mode else (
                     D - ksize[0] + 2 * paddings[0]) // strides[0] + 1
        H_out = (H - ksize[1] + 2 * paddings[1] + strides[1] - 1
                 ) // strides[1] + 1 if ceil_mode else (
                     H - ksize[1] + 2 * paddings[1]) // strides[1] + 1
        W_out = (W - ksize[2] + 2 * paddings[2] + strides[2] - 1
                 ) // strides[2] + 1 if ceil_mode else (
                     W - ksize[2] + 2 * paddings[2]) // strides[2] + 1
out = np.zeros((N, C, D_out, H_out, W_out))
for k in range(D_out):
if adaptive:
d_start = adaptive_start_index(k, D, ksize[0])
d_end = adaptive_end_index(k, D, ksize[0])
else:
d_start = np.max((k * strides[0] - paddings[0], 0))
d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D))
for i in range(H_out):
if adaptive:
h_start = adaptive_start_index(i, H, ksize[1])
h_end = adaptive_end_index(i, H, ksize[1])
else:
h_start = np.max((i * strides[1] - paddings[1], 0))
h_end = np.min((i * strides[1] + ksize[1] - paddings[1], H))
for j in range(W_out):
if adaptive:
w_start = adaptive_start_index(j, W, ksize[2])
w_end = adaptive_end_index(j, W, ksize[2])
else:
w_start = np.max((j * strides[2] - paddings[2], 0))
w_end = np.min((j * strides[2] + ksize[2] - paddings[2], W))
x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end]
field_size = (d_end - d_start) * (h_end - h_start) * (w_end - w_start) \
if (exclusive or adaptive) else ksize[0] * ksize[1] * ksize[2]
out[:, :, k, i, j] = np.sum(x_masked, axis=(2, 3,
4)) / field_size
return out
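# Minimal usage sketch (an editorial illustration, not part of the original
# test file): both naive references take an NCDHW tensor and return the
# pooled tensor; with a 3x3x3 window, stride 1 and no padding a 5x5x5 volume
# shrinks to 3x3x3 because (5 - 3 + 2*0) // 1 + 1 == 3.
def _naive_pool3d_example():
    data = np.random.random((2, 3, 5, 5, 5)).astype(np.float32)
    pooled = max_pool3D_forward_naive(data, ksize=[3, 3, 3],
                                      strides=[1, 1, 1], paddings=[0, 0, 0])
    assert pooled.shape == (2, 3, 3, 3, 3)
    return pooled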
class TestPool3d_Op(OpTest):
def setUp(self):
self.op_type = "pool3d"
self.use_cudnn = False
self.dtype = np.float32
self.init_test_case()
self.init_global_pool()
self.init_kernel_type()
self.init_pool_type()
self.init_ceil_mode()
self.init_exclusive()
self.init_adaptive()
if self.global_pool:
self.paddings = [0 for _ in range(len(self.paddings))]
input = np.random.random(self.shape).astype(self.dtype)
output = self.pool3D_forward_naive(
input, self.ksize, self.strides, self.paddings, self.global_pool,
self.ceil_mode, self.exclusive, self.adaptive).astype(self.dtype)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
self.attrs = {
'strides': self.strides,
'paddings': self.paddings,
'ksize': self.ksize,
'pooling_type': self.pool_type,
'global_pooling': self.global_pool,
'use_cudnn': self.use_cudnn,
'ceil_mode': self.ceil_mode,
'data_format':
'AnyLayout', # TODO(dzhwinter): should be fixed later
'exclusive': self.exclusive,
'adaptive': self.adaptive
}
self.outputs = {'Out': output}
def testcudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self):
if self.testcudnn():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5)
else:
self.check_output()
def test_check_grad(self):
if self.dtype == np.float16:
return
if self.testcudnn() and self.pool_type != "max":
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, set(['X']), 'Out', max_relative_error=0.07)
elif self.pool_type != "max":
self.check_grad(set(['X']), 'Out', max_relative_error=0.07)
def init_test_case(self):
self.shape = [2, 3, 5, 5, 5]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [0, 0, 0]
def init_kernel_type(self):
pass
def init_pool_type(self):
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
def init_global_pool(self):
self.global_pool = True
def init_ceil_mode(self):
self.ceil_mode = False
def init_exclusive(self):
self.exclusive = True
def init_adaptive(self):
self.adaptive = False
class TestCase1(TestPool3d_Op):
def init_test_case(self):
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [0, 0, 0]
def init_pool_type(self):
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
def init_global_pool(self):
self.global_pool = False
class TestCase2(TestPool3d_Op):
def init_test_case(self):
self.shape = [2, 3, 7, 7, 7]
self.ksize = [3, 3, 3]
self.strides = [1, 1, 1]
self.paddings = [1, 1, 1]
def init_pool_type(self):
self.pool_type = "avg"
self.pool3D_forward_naive = avg_pool3D_forward_naive
def init_global_pool(self):
self.global_pool = False
class TestCase3(TestPool3d_Op):
def init_pool_type(self):
self.pool_type = "max"
self.pool3D_forward_naive = max_pool3D_forward_naive
class TestCase4(TestCase1):
def init_pool_type(self):
self.pool_type = "max"
self.pool3D_forward_naive = max_pool3D_forward_naive
class TestCase5(TestCase2):
def init_pool_type(self):
self.pool_type = "max"
self.pool3D_forward_naive = max_pool3D_forward_naive
#--------------------test pool3d--------------------
class TestCUDNNCase1(TestPool3d_Op):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNNCase1(TestPool3d_Op):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
class TestCUDNNCase2(TestCase1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNNCase2(TestCase1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
class TestCUDNNCase3(TestCase2):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNNCase3(TestCase2):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
class TestCUDNNCase4(TestCase3):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNNCase4(TestCase3):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
class TestCUDNNCase5(TestCase4):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNNCase5(TestCase4):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
class TestCUDNNCase6(TestCase5):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNNCase6(TestCase5):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
class TestCeilModeCase1(TestCUDNNCase1):
def init_ceil_mode(self):
self.ceil_mode = True
class TestCeilModeCase2(TestCUDNNCase2):
def init_ceil_mode(self):
self.ceil_mode = True
class TestCeilModeCase3(TestCase1):
def init_ceil_mode(self):
self.ceil_mode = True
class TestCeilModeCase4(TestCase2):
def init_ceil_mode(self):
self.ceil_mode = True
class TestAvgInclude(TestCase2):
def init_exclusive(self):
self.exclusive = False
class TestCUDNNAvgInclude(TestCUDNNCase3):
def init_exclusive(self):
self.exclusive = False
class TestAvgPoolAdaptive(TestCase1):
def init_adaptive(self):
self.adaptive = True
if __name__ == '__main__':
unittest.main()
|
baidu/Paddle
|
python/paddle/fluid/tests/unittests/test_pool3d_op.py
|
Python
|
apache-2.0
| 13,221
|
"""
.. module: lemur.certificate.service
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import arrow
from flask import current_app
from sqlalchemy import func, or_, not_, cast, Integer
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from lemur import database
from lemur.extensions import metrics, signals
from lemur.plugins.base import plugins
from lemur.common.utils import generate_private_key, truthiness
from lemur.roles.models import Role
from lemur.domains.models import Domain
from lemur.authorities.models import Authority
from lemur.destinations.models import Destination
from lemur.certificates.models import Certificate
from lemur.notifications.models import Notification
from lemur.pending_certificates.models import PendingCertificate
from lemur.certificates.schemas import CertificateOutputSchema, CertificateInputSchema
from lemur.roles import service as role_service
csr_created = signals.signal('csr_created', "CSR generated")
csr_imported = signals.signal('csr_imported', "CSR imported from external source")
certificate_issued = signals.signal('certificate_issued', "Authority issued a certificate")
certificate_imported = signals.signal('certificate_imported', "Certificate imported from external source")
def get(cert_id):
"""
Retrieves certificate by its ID.
:param cert_id:
:return:
"""
return database.get(Certificate, cert_id)
def get_by_name(name):
"""
Retrieves certificate by its Name.
:param name:
:return:
"""
return database.get(Certificate, name, field='name')
def get_by_serial(serial):
"""
Retrieves certificate by its serial.
:param serial:
:return:
"""
if isinstance(serial, int):
# although serial is a number, the DB column is String(128)
serial = str(serial)
return Certificate.query.filter(Certificate.serial == serial).all()
def delete(cert_id):
"""
Deletes a certificate.
:param cert_id:
"""
database.delete(get(cert_id))
def get_all_certs():
"""
Retrieves all certificates within Lemur.
:return:
"""
return Certificate.query.all()
def get_all_pending_cleaning(source):
"""
Retrieves all certificates that are available for cleaning.
:param source:
:return:
"""
return Certificate.query.filter(Certificate.sources.any(id=source.id))\
.filter(not_(Certificate.endpoints.any())).all()
def get_all_pending_reissue():
"""
Retrieves all certificates that need to be rotated.
A certificate must be within X days of expiration; the certificate's rotation
policy determines how many days from expiration the certificate must be
for rotation to be pending.
:return:
"""
return Certificate.query.filter(Certificate.rotation == True)\
.filter(not_(Certificate.replaced.any()))\
.filter(Certificate.in_rotation_window == True).all() # noqa
def find_duplicates(cert):
"""
Finds certificates that already exist within Lemur. We do this by looking for
certificate bodies that are the same. This is the most reliable way to determine
if a certificate is already being tracked by Lemur.
:param cert:
:return:
"""
if cert['chain']:
return Certificate.query.filter_by(body=cert['body'].strip(), chain=cert['chain'].strip()).all()
else:
return Certificate.query.filter_by(body=cert['body'].strip(), chain=None).all()
def export(cert, export_plugin):
"""
Exports a certificate to the requested format. This format
may be a binary format.
:param export_plugin:
:param cert:
:return:
"""
plugin = plugins.get(export_plugin['slug'])
return plugin.export(cert.body, cert.chain, cert.private_key, export_plugin['pluginOptions'])
def update(cert_id, **kwargs):
"""
Updates a certificate
:param cert_id:
:return:
"""
cert = get(cert_id)
for key, value in kwargs.items():
setattr(cert, key, value)
return database.update(cert)
def create_certificate_roles(**kwargs):
# create a role for the owner and assign it
owner_role = role_service.get_by_name(kwargs['owner'])
if not owner_role:
owner_role = role_service.create(
kwargs['owner'],
description="Auto generated role based on owner: {0}".format(kwargs['owner'])
)
# ensure that the authority's owner is also associated with the certificate
if kwargs.get('authority'):
authority_owner_role = role_service.get_by_name(kwargs['authority'].owner)
return [owner_role, authority_owner_role]
return [owner_role]
def mint(**kwargs):
"""
Minting is slightly different for each authority.
Support for multiple authorities is handled by individual plugins.
"""
authority = kwargs['authority']
issuer = plugins.get(authority.plugin_name)
# allow the CSR to be specified by the user
if not kwargs.get('csr'):
csr, private_key = create_csr(**kwargs)
csr_created.send(authority=authority, csr=csr)
else:
csr = str(kwargs.get('csr'))
private_key = None
csr_imported.send(authority=authority, csr=csr)
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
return cert_body, private_key, cert_chain, external_id, csr
def import_certificate(**kwargs):
"""
Uploads already minted certificates and pulls the required information into Lemur.
This is to be used for certificates that are created outside of Lemur but
should still be tracked.
Internally this is used to bootstrap Lemur with external
certificates, and used when certificates are 'discovered' through various
discovery techniques.
:param kwargs:
"""
if not kwargs.get('owner'):
kwargs['owner'] = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')[0]
return upload(**kwargs)
def upload(**kwargs):
"""
Allows for pre-made certificates to be imported into Lemur.
"""
roles = create_certificate_roles(**kwargs)
if kwargs.get('roles'):
kwargs['roles'] += roles
else:
kwargs['roles'] = roles
if kwargs.get('private_key'):
private_key = kwargs['private_key']
if not isinstance(private_key, bytes):
kwargs['private_key'] = private_key.encode('utf-8')
cert = Certificate(**kwargs)
cert = database.create(cert)
kwargs['creator'].certificates.append(cert)
cert = database.update(cert)
certificate_imported.send(certificate=cert, authority=cert.authority)
return cert
def create(**kwargs):
"""
Creates a new certificate.
"""
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
kwargs['body'] = cert_body
kwargs['private_key'] = private_key
kwargs['chain'] = cert_chain
kwargs['external_id'] = external_id
kwargs['csr'] = csr
roles = create_certificate_roles(**kwargs)
if kwargs.get('roles'):
kwargs['roles'] += roles
else:
kwargs['roles'] = roles
if cert_body:
cert = Certificate(**kwargs)
kwargs['creator'].certificates.append(cert)
else:
cert = PendingCertificate(**kwargs)
kwargs['creator'].pending_certificates.append(cert)
cert.authority = kwargs['authority']
database.commit()
if isinstance(cert, Certificate):
certificate_issued.send(certificate=cert, authority=cert.authority)
metrics.send('certificate_issued', 'counter', 1, metric_tags=dict(owner=cert.owner, issuer=cert.issuer))
return cert
def render(args):
"""
Helper function that allows us to render our REST API.
:param args:
:return:
"""
query = database.session_query(Certificate)
time_range = args.pop('time_range')
destination_id = args.pop('destination_id')
notification_id = args.pop('notification_id', None)
show = args.pop('show')
# owner = args.pop('owner')
# creator = args.pop('creator') # TODO we should enabling filtering by owner
filt = args.pop('filter')
if filt:
terms = filt.split(';')
term = '%{0}%'.format(terms[1])
# Exact matches for quotes. Only applies to name, issuer, and cn
if terms[1].startswith('"') and terms[1].endswith('"'):
term = terms[1][1:-1]
if 'issuer' in terms:
# we can't rely on issuer being correct in the cert directly so we combine queries
sub_query = database.session_query(Authority.id)\
.filter(Authority.name.ilike(term))\
.subquery()
query = query.filter(
or_(
Certificate.issuer.ilike(term),
Certificate.authority_id.in_(sub_query)
)
)
elif 'destination' in terms:
query = query.filter(Certificate.destinations.any(Destination.id == terms[1]))
elif 'notify' in filt:
query = query.filter(Certificate.notify == truthiness(terms[1]))
elif 'active' in filt:
query = query.filter(Certificate.active == truthiness(terms[1]))
elif 'cn' in terms:
query = query.filter(
or_(
Certificate.cn.ilike(term),
Certificate.domains.any(Domain.name.ilike(term))
)
)
elif 'id' in terms:
query = query.filter(Certificate.id == cast(terms[1], Integer))
elif 'name' in terms:
query = query.filter(
or_(
Certificate.name.ilike(term),
Certificate.domains.any(Domain.name.ilike(term)),
Certificate.cn.ilike(term),
)
)
else:
query = database.filter(query, Certificate, terms)
if show:
sub_query = database.session_query(Role.name).filter(Role.user_id == args['user'].id).subquery()
query = query.filter(
or_(
Certificate.user_id == args['user'].id,
Certificate.owner.in_(sub_query)
)
)
if destination_id:
query = query.filter(Certificate.destinations.any(Destination.id == destination_id))
if notification_id:
query = query.filter(Certificate.notifications.any(Notification.id == notification_id))
if time_range:
to = arrow.now().replace(weeks=+time_range).format('YYYY-MM-DD')
now = arrow.now().format('YYYY-MM-DD')
query = query.filter(Certificate.not_after <= to).filter(Certificate.not_after >= now)
return database.sort_and_page(query, Certificate, args)
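# Editorial note on the `filter` argument handled above (illustration only):
# the client sends it as "<field>;<term>", which is split on ';', e.g.
#   >>> 'cn;example.com'.split(';')
#   ['cn', 'example.com']
# Wrapping the term in double quotes (e.g. 'cn;"example.com"') switches the
# name/issuer/cn branches from a wildcard ILIKE match to a non-wildcard match.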
def create_csr(**csr_config):
"""
Given a list of domains, create the appropriate CSR
for those domains.
:param csr_config:
"""
private_key = generate_private_key(csr_config.get('key_type'))
builder = x509.CertificateSigningRequestBuilder()
name_list = [x509.NameAttribute(x509.OID_COMMON_NAME, csr_config['common_name'])]
if current_app.config.get('LEMUR_OWNER_EMAIL_IN_SUBJECT', True):
name_list.append(x509.NameAttribute(x509.OID_EMAIL_ADDRESS, csr_config['owner']))
if 'organization' in csr_config and csr_config['organization'].strip():
name_list.append(x509.NameAttribute(x509.OID_ORGANIZATION_NAME, csr_config['organization']))
if 'organizational_unit' in csr_config and csr_config['organizational_unit'].strip():
name_list.append(x509.NameAttribute(x509.OID_ORGANIZATIONAL_UNIT_NAME, csr_config['organizational_unit']))
if 'country' in csr_config and csr_config['country'].strip():
name_list.append(x509.NameAttribute(x509.OID_COUNTRY_NAME, csr_config['country']))
if 'state' in csr_config and csr_config['state'].strip():
name_list.append(x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, csr_config['state']))
if 'location' in csr_config and csr_config['location'].strip():
name_list.append(x509.NameAttribute(x509.OID_LOCALITY_NAME, csr_config['location']))
builder = builder.subject_name(x509.Name(name_list))
extensions = csr_config.get('extensions', {})
critical_extensions = ['basic_constraints', 'sub_alt_names', 'key_usage']
noncritical_extensions = ['extended_key_usage']
for k, v in extensions.items():
if v:
if k in critical_extensions:
current_app.logger.debug('Adding Critical Extension: {0} {1}'.format(k, v))
if k == 'sub_alt_names':
if v['names']:
builder = builder.add_extension(v['names'], critical=True)
else:
builder = builder.add_extension(v, critical=True)
if k in noncritical_extensions:
current_app.logger.debug('Adding Extension: {0} {1}'.format(k, v))
builder = builder.add_extension(v, critical=False)
ski = extensions.get('subject_key_identifier', {})
if ski.get('include_ski', False):
builder = builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(private_key.public_key()),
critical=False
)
request = builder.sign(
private_key, hashes.SHA256(), default_backend()
)
# serialize our private key and CSR
private_key = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL, # would like to use PKCS8 but AWS ELBs don't like it
encryption_algorithm=serialization.NoEncryption()
)
if isinstance(private_key, bytes):
private_key = private_key.decode('utf-8')
csr = request.public_bytes(
encoding=serialization.Encoding.PEM
).decode('utf-8')
return csr, private_key
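# Minimal usage sketch (editorial illustration; the key type and addresses
# are assumptions, and a Flask application context must be active because
# the helper reads current_app.config):
def _create_csr_example():
    csr_pem, key_pem = create_csr(
        key_type='RSA2048',            # assumed Lemur key-type string
        common_name='example.com',
        owner='secteam@example.com',
        extensions={},                 # no SAN/key-usage extensions requested
    )
    return csr_pem, key_pem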
def stats(**kwargs):
"""
Helper that defines some useful statistics about certificates.
:param kwargs:
:return:
"""
if kwargs.get('metric') == 'not_after':
start = arrow.utcnow()
end = start.replace(weeks=+32)
items = database.db.session.query(Certificate.issuer, func.count(Certificate.id))\
.group_by(Certificate.issuer)\
.filter(Certificate.not_after <= end.format('YYYY-MM-DD')) \
.filter(Certificate.not_after >= start.format('YYYY-MM-DD')).all()
else:
attr = getattr(Certificate, kwargs.get('metric'))
query = database.db.session.query(attr, func.count(attr))
items = query.group_by(attr).all()
keys = []
values = []
for key, count in items:
keys.append(key)
values.append(count)
return {'labels': keys, 'values': values}
def get_account_number(arn):
"""
Extract the account number from an arn.
:param arn: IAM SSL arn
:return: account number associated with ARN
"""
return arn.split(":")[4]
def get_name_from_arn(arn):
"""
Extract the certificate name from an arn.
:param arn: IAM SSL arn
:return: name of the certificate as uploaded to AWS
"""
return arn.split("/", 1)[1]
def calculate_reissue_range(start, end):
"""
Determine what the new validity_start and validity_end dates should be.
:param start:
:param end:
:return:
"""
span = end - start
new_start = arrow.utcnow()
new_end = new_start + span
return new_start, arrow.get(new_end)
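# Worked example (editorial note): if the original certificate ran from
# 2017-01-01 to 2018-01-01 (a 365 day span), calculate_reissue_range() keeps
# that span but anchors it at the present, returning roughly
# (utcnow(), utcnow() + 365 days).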
def get_certificate_primitives(certificate):
"""
Retrieve the key primitives from a certificate so that the certificate
can be recreated with a new expiration date or used to build upon.
:param certificate:
:return: dict of certificate primitives; should be enough to effectively re-issue
the certificate via `create`.
"""
start, end = calculate_reissue_range(certificate.not_before, certificate.not_after)
data = CertificateInputSchema().load(CertificateOutputSchema().dump(certificate).data).data
# we can't quite tell if we are using a custom name, as this is an automated process (typically)
# we will rely on the Lemur generated name
data.pop('name', None)
# TODO this can be removed once we migrate away from cn
data['cn'] = data['common_name']
# needed until we move off not_*
data['not_before'] = start
data['not_after'] = end
data['validity_start'] = start
data['validity_end'] = end
return data
def reissue_certificate(certificate, replace=None, user=None):
"""
Reissue certificate with the same properties of the given certificate.
:param certificate:
:param replace:
:param user:
:return:
"""
primitives = get_certificate_primitives(certificate)
if not user:
primitives['creator'] = certificate.user
else:
primitives['creator'] = user
if replace:
primitives['replaces'] = [certificate]
new_cert = create(**primitives)
return new_cert
|
kevgliss/lemur
|
lemur/certificates/service.py
|
Python
|
apache-2.0
| 17,189
|
from django.test import TestCase
from score.templatetags.score_tags import display_score
class TestDisplayScore(TestCase):
def test_string(self):
assert "?" in display_score("")
def test_none(self):
assert "?" in display_score(None)
def test_danger(self):
assert "danger" in display_score("30")
def test_warning(self):
assert "warning" in display_score("50")
def test_primary(self):
assert "primary" in display_score("65")
def test_info(self):
assert "info" in display_score("79")
def test_success(self):
assert "success" in display_score("81")
|
kencochrane/scorinator
|
scorinator/score/tests/tags.py
|
Python
|
apache-2.0
| 638
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Logging system
Log level
---------
======= ======
Level number
------- ------
DEBUG4 9
DEBUG3 8
DEBUG2 7
DEBUG1 6
DEBUG 5
INFO 4
NOTE 3
WARN 2
ERROR 1
QUIET 0
======= ======
Large value means more noise in the output file.
.. note::
Error and warning messages are written to stderr.
Each Logger object has its own output destination and verbose level, so
multiple Logger objects can be created to manage the message system without
affecting each other.
The methods provided by the Logger class map directly to the log levels.
E.g. :func:`info` prints messages if the verbose level >= 4 (INFO):
>>> import sys
>>> from pyscf import lib
>>> log = lib.logger.Logger(sys.stdout, 4)
>>> log.info('info level')
info level
>>> log.verbose = 3
>>> log.info('info level')
>>> log.note('note level')
note level
timer
-----
The Logger object provides a timer method for timing. Set :attr:`TIMER_LEVEL`
to control at which verbose level the timing information should be output.
It is 5 (DEBUG) by default.
>>> import sys, time
>>> from pyscf import lib
>>> log = lib.logger.Logger(sys.stdout, 4)
>>> t0 = logger.process_clock()
>>> log.timer('test', t0)
>>> lib.logger.TIMER_LEVEL = 4
>>> log.timer('test', t0)
CPU time for test 0.00 sec
'''
import sys
import time
if sys.version_info < (3, 0):
process_clock = time.clock
perf_counter = time.time
else:
process_clock = time.process_time
perf_counter = time.perf_counter
from pyscf.lib import parameters as param
import pyscf.__config__
DEBUG4 = param.VERBOSE_DEBUG + 4
DEBUG3 = param.VERBOSE_DEBUG + 3
DEBUG2 = param.VERBOSE_DEBUG + 2
DEBUG1 = param.VERBOSE_DEBUG + 1
DEBUG = param.VERBOSE_DEBUG
INFO = param.VERBOSE_INFO
NOTE = param.VERBOSE_NOTICE
NOTICE = NOTE
WARN = param.VERBOSE_WARN
WARNING = WARN
ERR = param.VERBOSE_ERR
ERROR = ERR
QUIET = param.VERBOSE_QUIET
CRIT = param.VERBOSE_CRIT
ALERT = param.VERBOSE_ALERT
PANIC = param.VERBOSE_PANIC
TIMER_LEVEL = getattr(pyscf.__config__, 'TIMER_LEVEL', DEBUG)
sys.verbose = NOTE
def flush(rec, msg, *args):
rec.stdout.write(msg%args)
rec.stdout.write('\n')
rec.stdout.flush()
def log(rec, msg, *args):
if rec.verbose > QUIET:
flush(rec, msg, *args)
def error(rec, msg, *args):
if rec.verbose >= ERROR:
flush(rec, '\nERROR: '+msg+'\n', *args)
sys.stderr.write('ERROR: ' + (msg%args) + '\n')
def warn(rec, msg, *args):
if rec.verbose >= WARN:
flush(rec, '\nWARN: '+msg+'\n', *args)
if rec.stdout is not sys.stdout:
sys.stderr.write('WARN: ' + (msg%args) + '\n')
def info(rec, msg, *args):
if rec.verbose >= INFO:
flush(rec, msg, *args)
def note(rec, msg, *args):
if rec.verbose >= NOTICE:
flush(rec, msg, *args)
def debug(rec, msg, *args):
if rec.verbose >= DEBUG:
flush(rec, msg, *args)
def debug1(rec, msg, *args):
if rec.verbose >= DEBUG1:
flush(rec, msg, *args)
def debug2(rec, msg, *args):
if rec.verbose >= DEBUG2:
flush(rec, msg, *args)
def debug3(rec, msg, *args):
if rec.verbose >= DEBUG3:
flush(rec, msg, *args)
def debug4(rec, msg, *args):
if rec.verbose >= DEBUG4:
flush(rec, msg, *args)
def stdout(rec, msg, *args):
if rec.verbose >= DEBUG:
flush(rec, msg, *args)
sys.stdout.write('>>> %s\n' % msg)
def timer(rec, msg, cpu0=None, wall0=None):
if cpu0 is None:
cpu0 = rec._t0
if wall0:
rec._t0, rec._w0 = process_clock(), perf_counter()
if rec.verbose >= TIMER_LEVEL:
flush(rec, ' CPU time for %s %9.2f sec, wall time %9.2f sec'
% (msg, rec._t0-cpu0, rec._w0-wall0))
return rec._t0, rec._w0
else:
rec._t0 = process_clock()
if rec.verbose >= TIMER_LEVEL:
flush(rec, ' CPU time for %s %9.2f sec' % (msg, rec._t0-cpu0))
return rec._t0
def timer_debug1(rec, msg, cpu0=None, wall0=None):
if rec.verbose >= DEBUG1:
return timer(rec, msg, cpu0, wall0)
elif wall0:
rec._t0, rec._w0 = process_clock(), perf_counter()
return rec._t0, rec._w0
else:
rec._t0 = process_clock()
return rec._t0
class Logger(object):
'''
Attributes:
stdout : file object or sys.stdout
The file to dump output message.
verbose : int
Large value means more noise in the output file.
'''
def __init__(self, stdout=sys.stdout, verbose=NOTE):
self.stdout = stdout
self.verbose = verbose
self._t0 = process_clock()
self._w0 = perf_counter()
log = log
error = error
warn = warn
note = note
info = info
debug = debug
debug1 = debug1
debug2 = debug2
debug3 = debug3
debug4 = debug4
timer = timer
timer_debug1 = timer_debug1
def new_logger(rec=None, verbose=None):
'''Create and return a :class:`Logger` object
Args:
rec : An object which carries the attributes stdout and verbose
verbose : a Logger object, or integer or None
The verbose level. If verbose is a Logger object, the Logger
object is returned. If verbose is not specified (None),
rec.verbose will be used in the new Logger object.
'''
if isinstance(verbose, Logger):
log = verbose
elif isinstance(verbose, int):
if getattr(rec, 'stdout', None):
log = Logger(rec.stdout, verbose)
else:
log = Logger(sys.stdout, verbose)
else:
log = Logger(rec.stdout, rec.verbose)
return log
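# Editorial usage sketch (mirrors the doctest in the module docstring above):
#
#   >>> log = new_logger(verbose=INFO)          # plain stdout logger
#   >>> log.info('converged in %d cycles', 12)  # printed, INFO <= verbose
#   >>> log.debug('integral screening details') # suppressed at this verbosity
#
# Passing an object that carries .stdout and .verbose as `rec` (with
# verbose=None) reuses that object's output stream and verbosity instead.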
|
sunqm/pyscf
|
pyscf/lib/logger.py
|
Python
|
apache-2.0
| 6,296
|
#!/usr/bin/python
from __future__ import division
import math
import signal
import sys
import numpy as np
from scipy.spatial import distance
from munkres import munkres
from . import Matcher
from itertools import izip
from scipy.stats import kendalltau
from matteautils.base import printd
import matteautils.config as conf
from sklearn.neighbors import kneighbors_graph
from scipy.sparse.csgraph import minimum_spanning_tree
from skidmarks import wald_wolfowitz
def munkres_handler(signum, frame):
printd("Can't keep waiting...")
print frame
raise Exception("ran out of time...")
#a = np.array([[.1]*5,[.2]*5,[.3]*5])
#b = np.array([[.1]*5,[.2,.2,.2,.2,.3],[0,0,0,0,1]])
#c = 1 - cdist(a,b,'cosine')
def multiCdist(s1, s2, metric):
if s1.ndims == 1:
return metric(s1["vector"], s2["vector"])
else:
return sum([metric(x, y) * a for x, y, a in izip(s1["vector"], s2["vector"], conf.alphas)])
def getMetric(metric):
if isinstance(metric, basestring):
if metric in locals():
return locals()[metric]
else:
return lambda x, y: distance.cdist(x["vector"], y["vector"], metric=metric)
else:
return metric
# Lots of possible fns:
# http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.spatial.distance.cdist.html
def pairdist(s1, s2, distfn=getMetric('cosine')):
if type(s1[0]) == np.ndarray:
if len(s1) == 0 or len(s2) == 0:
return np.ndarray(0)
return distfn(s1, s2)
else:
return distfn(s1["vector"], s2["vector"])
#return distfn([x.vector for x in s1], [x.vector for x in s2])
class VecSim(Matcher):
def __init__(self, df=None, metric='cosine'):
self.metric = getMetric(metric)
@classmethod
def normalize(self, vec, df=None):
return vec, np.sum(vec, axis=0)
def match(self, pair):
self.pair = pair
self.s1 = pair.s1
self.s2 = pair.s2
self.tsim = 1 - self.metric([self.s1["vec_sum"]], [self.s2["vec_sum"]])[0, 0]
self.nmatches = -1
self.start = -1
self.end = len(self.s2["vector"]) - 1
return self.tsim
class MinDistSim(Matcher):
def __init__(self, df=None, metric='cosine', maxsent=20, ngram=1, recurse=False, dimfeatures=True):
#self.dist = ndist(s1, s2)
#s1 = s1["vector"]
#s2 = s2["vector"]
self.metric = getMetric(metric)
self._names = ["MDS_" + x for x in ["tsim", "lsim", "kdist", "kldist", "ldist", "kt", "tmax", "tmin", "tsum", "tstd", "tmaxidf", "tsumidf"]]
maxsent = maxsent - ngram + 1
if dimfeatures:
self._names.extend(["MDS_w%03d" % x for x in range(maxsent)])
self.maxsent = maxsent
self.ngram = ngram
self.recurse = recurse
self.vocab = df
self.wordcount = df.total
self.dimfeatures = dimfeatures
def match(self, pair):
s1l = len(pair.s1["vector"])
s2l = len(pair.s2["vector"])
self.tsim = float('-9999')
self.lsim = float('-9999')
self.minlen = min(s1l, s2l)
self.maxlen = max(s1l, s2l)
self.nmatches = 0
self.start = -1
self.end = -1
if (self.minlen == 0 or
self.maxlen >= 100):
return self.tsim
# For simplicity in later code, make the shorter one first
if s1l < s2l:
self.s1 = pair.s1
self.s2 = pair.s2
s1l = len(pair.s1["vector"])
s2l = len(pair.s2["vector"])
else:
self.s1 = pair.s2
self.s2 = pair.s1
wc = self.wordcount
if "wv_idfs" not in self.s1:
self.s1["wv_idfs"] = [math.log(wc / self.vocab[x], 2) for x in self.s1["wv_tokens"]]
if "wv_idfs" not in self.s2:
self.s2["wv_idfs"] = [math.log(wc / self.vocab[x], 2) for x in self.s2["wv_tokens"]]
if self.ngram > 1:
ng = self.ngram
v1 = self.s1["vector"]
v2 = self.s2["vector"]
t1 = self.s1["wv_tokens"]
t2 = self.s2["wv_tokens"]
#idf1 = self.s1["wv_idfs"]
#idf2 = self.s2["wv_idfs"]
weights1 = self.s1["weights"]
weights2 = self.s2["weights"]
nv1 = [sum(v1[i:i + ng]) for i in range(max(1, len(v1) - ng + 1))]
nv2 = [sum(v2[i:i + ng]) for i in range(max(1, len(v2) - ng + 1))]
nt1 = ["_".join(t1[i:i + ng]) for i in range(max(1, len(t1) - ng + 1))]
nt2 = ["_".join(t2[i:i + ng]) for i in range(max(1, len(t2) - ng + 1))]
#nidf1 = [max(idf1[i:i + ng]) for i in range(max(1, len(idf1) - ng + 1))]
#nidf2 = [max(idf2[i:i + ng]) for i in range(max(1, len(idf2) - ng + 1))]
nweights1 = [max(weights1[i:i + ng]) for i in range(max(1, len(weights1) - ng + 1))]
nweights2 = [max(weights2[i:i + ng]) for i in range(max(1, len(weights2) - ng + 1))]
#self.s1 = {"vector": nv1, "wv_tokens": nt1, "wv_idfs": nidf1}
#self.s2 = {"vector": nv2, "wv_tokens": nt2, "wv_idfs": nidf2}
self.s1 = {"vector": nv1, "wv_tokens": nt1, "weights": nweights1}
self.s2 = {"vector": nv2, "wv_tokens": nt2, "weights": nweights2}
self.minlen = max(self.minlen - ng + 1, 1)
self.maxlen = max(self.maxlen - ng + 1, 1)
self.dists = [1] * self.minlen
self.pair = pair
#self.dist = pairdist(self.s1["vector"], self.s2["vector"], fn=self.metric)
#self.dist = pairdist(self.s1, self.s2, fn=self.metric)
dist = self.metric(self.s1, self.s2)
# scale by max of idf
#for i in range(dist.shape[0]):
# for j in range(dist.shape[1]):
# dist[i][j] *= max(self.s1["wv_idfs"][i], self.s2["wv_idfs"][j])
self.matchv = np.zeros(dist.shape, int)
np.fill_diagonal(self.matchv, 1)
if np.sum(dist) == 0:
self.tsim = 1
self.nmatches = min(dist.shape)
self.start = 0
self.end = dist.shape[1] - 1
return self.tsim
if (dist == dist[0]).all():
self.tsim = 1 - sum(dist[0])
self.nmatches = min(dist.shape)
self.start = 0
self.end = dist.shape[1] - 1
return self.tsim
if (dist.T == dist[:, 0]).all():
self.tsim = 1 - sum(dist[:, 0])
self.nmatches = min(dist.shape)
self.start = 0
self.end = dist.shape[1] - 1
return self.tsim
signal.signal(signal.SIGALRM, munkres_handler)
signal.alarm(10)
try:
matches = munkres(dist)
except Exception, e:
printd(e)
printd("dist: " + dist.shape)
printd(dist)
return self.tsim
signal.alarm(0)
self.matchv = matches
tdist = 0
tmaxidf = 0
tsumidf = 0
nmatches = 0
mstart = dist.shape[1]
mend = 0
#print self.s1["text"]
#print self.s2["text"]
#print " ".join(self.s1["wv_tokens"])
#print " ".join(self.s2["wv_tokens"])
s1tok = self.s1["wv_tokens"]
s2tok = self.s2["wv_tokens"]
matcharr = [0] * matches.shape[0]
dists = [0] * matches.shape[0]
matchedy = [0] * matches.shape[1]
for i in range(matches.shape[0]):
for j in range(matches.shape[1]):
if matches[i, j]:
matchedy[j] = 1
tdist += dist[i, j]
#tmaxidf += dist[i, j] * max(self.s1["wv_idfs"][i], self.s2["wv_idfs"][j])
#tsumidf += dist[i, j] * sum((self.s1["wv_idfs"][i], self.s2["wv_idfs"][j]))
wi = self.s1["weights"][i]
wj = self.s2["weights"][j]
tmaxidf += dist[i, j] * max(wi, wj)
tsumidf += dist[i, j] * sum((wi, wj))
printd("%s\t%s\t%0.4f\t%0.4f\t%0.4f" % (s1tok[i], s2tok[j], dist[i, j], wi, wj), level=1, sock=sys.stdout)
nmatches += 1
matcharr[i] = j
dists[i] = dist[i, j]
if j < mstart:
mstart = j
if j > mend:
mend = j
ldist = tdist
tdist = tdist * max(dist.shape) / pow(min(dist.shape), 2)
tmaxidf = tmaxidf * max(dist.shape) / pow(min(dist.shape), 2)
tsumidf = tsumidf * max(dist.shape) / pow(min(dist.shape), 2)
kt, ktp = kendalltau(range(len(matcharr)), matcharr)
printd("Score: %0.4f\t%0.4f\t%0.4f\tLabel: %g\n" % (tdist, tmaxidf, tsumidf, pair.label), level=1, sock=sys.stdout)
if self.recurse:
# Remove matches from dist array, and rerun munkres
# Repeat until dist array is empty
pass
else:
for i in range(matches.shape[1]):
if not matchedy[i]:
ldist += min(matches[:, i])
ldist /= max(dist.shape)
# TODO:
# Dist penalty is at most beta
# The problem with this is that there may be a better pairing between the two sentences
# if you optimize for mindist with dist penalty.
# Also could add a weight to each pairing like IDF, most important for the
# summing, but a different sum would again potentially affect the optimal
# match.
beta = 1
self.kdist = tdist * (1 + beta * (kt + 1) / 2)
self.kldist = ldist * (1 + beta * (kt + 1) / 2)
self.ldist = ldist
#print "Score: %g" % tsim
#print "Label: %g" % self.pair.label
self.tsim = 1 - tdist
self.tmaxidf = tmaxidf
self.tsumidf = tsumidf
self.nmatches = nmatches
self.start = mstart
self.end = mend
self.kt = kt
self.dists = sorted(dists, reverse=True)
self.lsim = tdist + (max(dists) * (self.maxlen - self.minlen))
self.tmax = max(dists)
self.tmin = min(dists)
self.tsum = sum(dists)
self.tstd = np.std(dists)
return self.tsim
def features(self):
fs = [self.tsim, self.lsim, self.kdist, self.kldist, self.ldist, self.kt, self.tmax, self.tmin, self.tsum, self.tstd, self.tmaxidf, self.tsumidf]
if self.dimfeatures:
distarr = [0] * self.maxsent
dists = self.dists
distarr[0:len(dists)] = dists
fs += distarr
return fs
class InfSim(Matcher):
#def __init__(self, df, metric='cosine'):
# #self.metric = getMetric(metric)
# self.df = df
# self._vectorsums = dict()
def __init__(self, data, wordvec, metric='cosine', dimfeatures=False):
self.df = wordvec.logdf(data.wv_vocab())
data.normalize(self, self.df)
self._vectorsums = dict()
@classmethod
def normalize(cls, s, df):
if len(s) == 0:
return s, 0
if np.any(np.isnan(df)):
printd("Hmm, nan for df %0.4f")
printd("df:\n" + str(df))
sys.exit(1)
# TODO: This should be a weighted sum with IDF
# As a result of this sum, different length sentences naturally receive a
# penalty, as the sum is naturally larger than the min.
# Also, we aren't looking at euclidean distance, so we may be losing out on scale information
# But if we did, longer sentences would be harder to match together (as distances would compound).
# Maybe should divide by min sentence length or something of the sort...
# This is an average, not a sum, which probably causes all sorts of weirdness
ps = np.sum(s, axis=0) / np.sum(s)
if np.any(np.isnan(ps)):
printd("Hmm, nan for ps %0.4f" % np.sum(s))
printd("ps:\n" + str(ps))
printd("s:\n" + str(s))
printd("df:\n" + str(df))
sys.exit(1)
ts = np.sum(np.multiply(ps, df))
if ts == 0:
printd("Hmm, 0 for ts")
printd("ps:\n" + str(ps))
printd("df:\n" + str(df))
sys.exit(1)
return ps, ts
def match(self, pair):
self.pair = pair
self.s1 = pair.s1["vector"]
self.s2 = pair.s2["vector"]
self.ts1 = pair.s1["vector_sum"]
self.ts2 = pair.s2["vector_sum"]
return self.domatch()
def domatch(self):
self.nmatches = -1
if self.ts1 == 0 or self.ts2 == 0:
self.tsim = 0.0
self.start = -1
self.end = -1
return self.tsim
self.tsim = 2 * sum([min(s1i, s2i) * dfi for s1i, s2i, dfi in izip(self.s1, self.s2, self.df)]) / (self.ts1 + self.ts2)
self.start = -1
self.end = len(self.s2) - 1
return self.tsim
def pairwisedist(self, s1, s2):
#Must create the "vector" and "vector_sum" for each word, rather than for each sentence
dists = np.zeros((len(s1["wv_tokens"]), len(s2["wv_tokens"])))
for wi1, (w1, v1) in enumerate(izip(s1["wv_tokens"], s1["vector"])):
for wi2, (w2, v2) in enumerate(izip(s2["wv_tokens"], s2["vector"])):
self.s1 = v1
self.s2 = v2
self.ts1 = self.vectorsum(w1, v1)
self.ts2 = self.vectorsum(w2, v2)
dists[wi1, wi2] = 1 - self.domatch()
#TODO could multiply by term based on wi1/wi2 (distance penalty)...
if dists[wi1, wi2] < 0:
dists[wi1, wi2] = 0
#printd("Hmm, negative dist %g" % dists[wi1, wi2])
# Annoying rounding errors, e.g. -2.22045e-16
return dists
def vectorsum(self, word, wv):
if word not in self._vectorsums:
self._vectorsums[word] = np.sum(np.multiply(wv, self.df))
return self._vectorsums[word]
class InfRankSim(Matcher):
def __init__(self, data, wordvec, df=None, metric='cosine', dimfeatures=True):
self._vectorsums = dict()
self.vocabvec = self.sortwords(data.wv_vocab(), wordvec)
self._names = ["IRS_" + x for x in ["asim", "mdim", "msent"]]
if dimfeatures:
self._names.extend(["IRS_d%03d" % x for x in range(wordvec.size)])
self.dimfeatures = dimfeatures
def sortwords(self, vocab, wordvec):
vvecs = [list() for _ in range(wordvec.size)]
ftot = 0
for t, tc in vocab.iteritems():
try:
tv = wordvec[t]
ftot += tc
for d in range(len(tv)):
vvecs[d].append((tv[d], t))
except KeyError:
pass
lookupvecs = [dict() for _ in range(wordvec.size)]
for d, vvec in enumerate(vvecs):
vvec.sort()
cdf = 0
#vtot = len(vvec)
#vitm = 1 / vtot
lookup = lookupvecs[d]
for tv, t in vvec:
# Should the CDF be based on TF? or just on the word existence?
cdf += tv / ftot
#cdf += vitm
lookup[t] = cdf
return lookupvecs
def match(self, pair):
wvlen = len(pair.s1["vector"][0])
m = len(pair.s1["wv_tokens"])
n = len(pair.s2["wv_tokens"])
self._features = []
# Take the average of all distances
asim = 0
mdim = 0
msent = 0
for d in range(wvlen):
mindimp = 1
for t1 in pair.s1["wv_tokens"]:
minsentp = 1
for t2 in pair.s2["wv_tokens"]:
p = abs(self.vocabvec[d][t1] - self.vocabvec[d][t2])
asim += simP(p)
if p < mindimp:
mindimp = p
if p < minsentp:
minsentp = p
# Take the minimum across one sentences
msent += simP(minsentp)
# Take the minimum across dimensions
mdim += simP(mindimp)
asim /= m * n * wvlen
self._features.append(asim)
self._features.append(mdim / wvlen)
msent /= n * wvlen
self._features.append(msent)
if self.dimfeatures:
for d in range(wvlen):
combvec = ([(self.vocabvec[d][t1], 0) for t1 in pair.s1["wv_tokens"]] +
[(self.vocabvec[d][t2], 1) for t2 in pair.s1["wv_tokens"]])
combvec.sort()
combval = multicdf(combvec)
self._features.append(simP(combval))
self.tsim = asim
self.start = 0
self.end = m
def simP(p):
if p == 0:
return 1
elif p == 1:
return 0
else:
return 1 / (1 + (1 / (math.log(1 / p, 2))))
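# Worked example (editorial note): simP maps a CDF gap p to a similarity in
# [0, 1]:
#   simP(0)    == 1                            (identical rank positions)
#   simP(0.5)  == 1 / (1 + 1/log2(2))  == 0.5
#   simP(0.25) == 1 / (1 + 1/log2(4))  == 2/3
#   simP(1)    == 0                            (opposite ends of the CDF)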
# TODO: Should add slack, so not all values must match. Would require a lot
# more bookkeeping
def multicdf(vec):
lastvs = [[], []]
cdf = 0
for v, senti in vec:
vs = lastvs[senti]
ovs = lastvs[senti - 1]
if len(ovs) != 0:
if len(vs) == 0:
cdf += v - ovs[0]
del ovs[:]
else:
back = None
forward = None
prevv = vs[0]
# If expecting large set, could do binary search...
for ov in ovs:
if (ov - prevv) < (v - ov):
back = ov
else:
forward = ov
break
if back is not None:
cdf += back - prevv
if forward is not None:
cdf += v - forward
del vs[:]
del ovs[:]
vs.append(v)
#if lasti is not None:
# cdf += (v - lastv)
# if senti != lasti:
# lasti = None
#else:
# lasti = senti
#lastv = v
return cdf
def ndist(s1, s2, fn='cosine'):
rd = []
for w1 in s1:
rr = []
for w2 in s2:
rr.append(w1.dist(w2, fn=fn))
rd.append(rr)
return np.array(rd)
# Wald-Wolfowitz test
# Adapted from:
# Monaco, John V.
# "Classification and authentication of one-dimensional behavioral biometrics."
# Biometrics (IJCB), 2014 IEEE International Joint Conference on. IEEE, 2014.
# https://gist.github.com/vmonaco/e9ff0ac61fcb3b1b60ba
class WWSim(Matcher):
def __init__(self, wordvec, df=None, metric='cosine', k=10, dimfeatures=True):
self.k = k
self._names = ["WWS_base"]
if dimfeatures:
self._names.extend(["WWS_%03d" % x for x in range(wordvec.size)])
self.dimfeatures = dimfeatures
def match(self, pair):
self.pair = pair
v1 = pair.s1["vector"]
v2 = pair.s2["vector"]
m = len(v1)
n = len(v2)
N = m + n
k = min(N - 1, self.k)
if m == 0 or n == 0 or np.linalg.norm(v1) == 0 or np.linalg.norm(v2) == 0:
return 0
vs = np.concatenate((v1, v2))
g = kneighbors_graph(vs, mode='distance', n_neighbors=k)
mst = minimum_spanning_tree(g, overwrite=True)
edges = np.array(mst.nonzero()).T
labels = np.array([0] * m + [1] * n)
c = labels[edges]
runs_edges = edges[c[:, 0] == c[:, 1]]
# number of runs is the total number of observations minus edges within each run
R = N - len(runs_edges)
# expected value of R
e_R = ((2.0 * m * n) / N) + 1
# variance of R is _numer/_denom
v = 2 * m * n * (2 * m * n - N) / (N ** 2 * (N - 1))
# see Eq. 1 in Friedman 1979
# W approaches a standard normal distribution
W = (R - e_R) / np.sqrt(v)
self.tsim = -1 if np.isnan(W) else W
bydim = []
for d in range(len(v1[0])):
sorteddim = np.argsort(vs[:, d])
wd = wald_wolfowitz(labels[sorteddim])
bydim.append(wd['z'])
self._features = [self.tsim]
if self.dimfeatures:
self._features += bydim
return self.tsim
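# Worked example (editorial note): for two 5-token sentences (m = n = 5,
# N = 10) the expected number of runs is e_R = 2*5*5/10 + 1 = 6 with
# variance 2*5*5*(2*5*5 - 10) / (10**2 * (10 - 1)) ~= 2.22, so an observed
# R = 6 gives W ~= 0 (tokens from the two sentences are well mixed), while
# R = 2 gives W ~= -2.68 (the sentences occupy separate regions of the
# embedding space).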
class PairFeatures(Matcher):
def __init__(self, dimfeatures=True):
self.dimfeatures = dimfeatures
self._names = ["PF_" + x for x in ["Len", "TextLen", "TextDiff"]]
def match(self, pair):
fs = []
self._features = fs
self.tsim = abs(len(pair.s1["vector"]) - len(pair.s2["vector"]))
fs.append(self.tsim)
fs.append(abs(len(pair.s1["text"]) - len(pair.s2["text"])))
fs.append(abs(len(pair.s1["text"]) + len(pair.s2["text"]) - len(" ".join(pair.s1["wv_tokens"])) - len(" ".join(pair.s2["wv_tokens"]))))
return self.tsim
|
mattea/mattea-utils
|
matteautils/match/vectorsim.py
|
Python
|
apache-2.0
| 17,233
|
#!/usr/bin/env python
from groupflow_shared import *
from mininet.net import *
from mininet.node import OVSSwitch, UserSwitch
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.node import Node, RemoteController
from scipy.stats import truncnorm
from numpy.random import randint, uniform
from subprocess import *
import sys
import signal
from time import sleep, time
from datetime import datetime
from multiprocessing import Process, Pipe
import numpy as np
import traceback
import os.path
# Hardcoded purely for testing / debug, these will be moved once functionality is stable
ARRIVAL_RATE = 5 * (1.0 / 60)
SERVICE_RATE = 1.0 / 60
TRIAL_DURATION_SECONDS = 60.0 * 3
RECEIVERS_AT_TRIAL_START = 5
STATS_RECORDING_INTERVAL = 5
MEDIA_DURATION_SECONDS = 72
def mcastTestDynamic(topo, hosts = [], log_file_name = 'test_log.log', replacement_mode='none', link_weight_type = 'linear', number_of_groups = 30, pipe = None):
test_groups = []
test_success = True
# First, check if the log file already exists, and stop the test if it does
# (This is primarily in place to allow continuing an interrupted trial set by running the same console command)
if os.path.isfile(log_file_name):
print 'Skipping trial, log file already exists: ' + str(log_file_name)
if pipe is not None:
pipe.send(test_success)
pipe.close()
return
# Launch the external controller
pox_link_weight_type = link_weight_type
static_link_weight = 0
util_link_weight = 1
if link_weight_type == 'linear': # Linear link weights
pox_link_weight_type = 'linear'
static_link_weight = 0
util_link_weight = 1
elif link_weight_type == 'sh': # Shortest-hop routing
pox_link_weight_type = 'linear'
static_link_weight = 1
util_link_weight = 0
elif link_weight_type == 'exponential': # Exponential link weights
pox_link_weight_type = 'exponential'
static_link_weight = 0
util_link_weight = 1
pox_arguments = []
static_link_weight = 0
if util_link_weight == 0:
static_link_weight = 1
if 'periodic' in replacement_mode:
pox_arguments = ['pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
'openflow.groupflow', '--static_link_weight=' + str(static_link_weight), '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
'--flow_replacement_interval=10',
'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
else:
pox_arguments = ['pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
'openflow.groupflow', '--static_link_weight=' + str(static_link_weight), '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
'--flow_replacement_interval=10',
'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
print 'Launching external controller: ' + str(pox_arguments[0])
print 'Launch arguments:'
print ' '.join(pox_arguments)
with open(os.devnull, "w") as fnull:
pox_process = Popen(pox_arguments, stdout=fnull, stderr=fnull, shell=False, close_fds=True)
# Allow time for the log file to be generated
sleep(1)
# Determine the flow tracker log file
pox_log_file = open('./pox.log', 'r')
flow_log_path = None
event_log_path = None
got_flow_log_path = False
got_event_log_path = False
while (not got_flow_log_path) or (not got_event_log_path):
pox_log = pox_log_file.readline()
if 'Writing flow tracker info to file:' in pox_log:
pox_log_split = pox_log.split()
flow_log_path = pox_log_split[-1]
got_flow_log_path = True
if 'Writing event trace info to file:' in pox_log:
pox_log_split = pox_log.split()
event_log_path = pox_log_split[-1]
got_event_log_path = True
print 'Got flow tracker log file: ' + str(flow_log_path)
print 'Got event trace log file: ' + str(event_log_path)
print 'Controller initialized'
pox_log_offset = pox_log_file.tell()
pox_log_file.close()
# External controller launched
# Launch Mininet
net = Mininet(topo, controller=RemoteController, switch=OVSSwitch, link=TCLink, build=False, autoSetMacs=True)
# pox = RemoteController('pox', '127.0.0.1', 6633)
net.addController('pox', RemoteController, ip = '127.0.0.1', port = 6633)
net.start()
for switch_name in topo.get_switch_list():
#print switch_name + ' route add -host 127.0.0.1 dev lo'
net.get(switch_name).controlIntf = net.get(switch_name).intf('lo')
net.get(switch_name).cmd('route add -host 127.0.0.1 dev lo')
#print 'pox' + ' route add -host ' + net.get(switch_name).IP() + ' dev lo'
net.get('pox').cmd('route add -host ' + net.get(switch_name).IP() + ' dev lo')
#print net.get(switch_name).cmd('ifconfig')
topo.mcastConfig(net)
# Wait for controller topology discovery
controller_init_sleep_time = 10
print 'Waiting ' + str(controller_init_sleep_time) + ' seconds to allow for controller topology discovery.'
sleep(controller_init_sleep_time)
# Mininet launched
# Generate the test groups, and launch the sender applications
rand_seed = int(time())
print 'Using random seed: ' + str(rand_seed)
np.random.seed(rand_seed)
trial_start_time = time() + MEDIA_DURATION_SECONDS + 10 # Assume generation of test group events will take no more than 10 seconds
trial_end_time = trial_start_time + TRIAL_DURATION_SECONDS
mcast_group_last_octet = 1
mcast_port = 5010
for i in range(0, number_of_groups):
mcast_ip = '224.1.1.{last_octet}'.format(last_octet = str(mcast_group_last_octet))
test_group = DynamicMulticastGroupDefinition(net.hosts, mcast_ip, mcast_port, mcast_port + 1)
print 'Generating events for group: ' + mcast_ip
test_group.generate_receiver_events(trial_start_time, TRIAL_DURATION_SECONDS, RECEIVERS_AT_TRIAL_START, ARRIVAL_RATE, SERVICE_RATE)
test_groups.append(test_group)
mcast_group_last_octet += 1
mcast_port += 2
test_group_start_times = []
for i in range(0, number_of_groups):
test_group_start_times.append(uniform(0, MEDIA_DURATION_SECONDS))
test_group_start_times.sort()
# Test groups generated
# Launch initial receiver applications
group_launch_index = 0
launch_start_time = time()
while len(test_group_start_times) > 0:
cur_time = time() - launch_start_time
if cur_time >= test_group_start_times[0]:
test_group_start_times.pop(0)
print 'Launching test group ' + str(group_launch_index) + ' at launch time: ' + str(cur_time)
test_groups[group_launch_index].launch_sender_application()
test_groups[group_launch_index].update_receiver_applications(trial_start_time)
group_launch_index += 1
else:
sleep_time = test_group_start_times[0] - cur_time
sleep(sleep_time)
# Wait for trial run start time
sleep_time = trial_start_time - time()
if sleep_time < 0:
print 'WARNING: sleep_time is negative!'
else:
print 'Waiting ' + str(sleep_time) + ' seconds to allow for group initialization.'
sleep(sleep_time) # Wait until the trial start time
# Trial has started at this point
try:
while True:
cur_time = time()
if cur_time > trial_end_time:
print 'Reached trial end at time: ' + str(cur_time)
break
next_event_time = trial_end_time
for group in test_groups:
group.update_receiver_applications(cur_time)
next_event = group.get_next_receiver_event()
if next_event is not None and next_event[0] < next_event_time:
next_event_time = next_event[0]
sleep_time = next_event_time - time()
if sleep_time < 0:
print 'WARNING: sleep_time (' + str(sleep_time) + ') is negative!'
else:
#print 'Waiting ' + str(sleep_time) + ' for next event.\n'
sleep(sleep_time)
print 'Terminating network applications'
for group in test_groups:
group.terminate_group()
print 'Network applications terminated'
print 'Terminating controller'
pox_process.send_signal(signal.SIGINT)
sleep(1)
print 'Waiting for controller termination...'
pox_process.send_signal(signal.SIGKILL)
pox_process.wait()
print 'Controller terminated'
pox_process = None
net.stop()
sleep(3)
# Print packet loss statistics
recv_packets = sum(group.get_total_recv_packets() for group in test_groups)
lost_packets = sum(group.get_total_lost_packets() for group in test_groups)
packet_loss = 0
if (recv_packets + lost_packets) != 0:
packet_loss = (float(lost_packets) / (float(recv_packets) + float(lost_packets))) * 100
print 'RecvPackets: ' + str(recv_packets) + ' LostPackets: ' + str(lost_packets) + ' PacketLoss: ' + str(packet_loss) + '%'
# Calculate mean service time (sanity check to see that exponential service time generation is working as intended)
num_apps = 0
total_service_time = 0
for group in test_groups:
for recv_app in group.receiver_applications:
num_apps += 1
total_service_time += recv_app.service_time
print 'Average Service Time: ' + str(total_service_time / num_apps)
# Delete the log files if the test encountered an error, or write the statistics log file if the run was successful
if not test_success:
call('rm -rf ' + str(flow_log_path), shell=True)
call('rm -rf ' + str(event_log_path), shell=True)
else:
write_dynamic_stats_log(log_file_name, flow_log_path, event_log_path, test_groups, topo, ARRIVAL_RATE, SERVICE_RATE,
RECEIVERS_AT_TRIAL_START, trial_start_time, trial_end_time, STATS_RECORDING_INTERVAL)
except BaseException as e:
traceback.print_exc()
test_success = False
if pipe is not None:
pipe.send(test_success)
pipe.close()
topos = { 'mcast_test': ( lambda: MulticastTestTopo() ) }
def print_usage_text():
print 'GroupFlow Multicast Testing with Mininet'
print 'Usage - Automated Benchmarking:'
print '> mininet_multicast_pox <topology_path> <iterations_to_run> <log_file_prefix> <index_of_first_log_file> <parameter_sets (number is variable and unlimited)>'
print 'Parameter sets have the form: flow_replacement_mode,link_weight_type,number_of_groups'
print 'The topology path "manhattan" is currently hardcoded to generate a 20 Mbps, 5x5 Manhattan grid topology'
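# Example invocation (editorial illustration; the prefix and parameter values
# are made up):
#   > mininet_multicast_pox manhattan 3 dyn_test 0 none,linear,30 periodic,sh,30
# runs 3 iterations of the two parameter sets on the hardcoded Manhattan grid
# and writes logs named dyn_test_<replacement,weight,groups>_<index>.log
# starting at index 0.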
if __name__ == '__main__':
setLogLevel( 'info' )
# Uncomment for easy debug testing
#topo = ManhattanGridTopo(5, 4, 20, 1, False)
#hosts = topo.get_host_list()
#mcastTestDynamic(topo, hosts, 'test.log', 10, 'linear', 'none')
#sys.exit()
if len(sys.argv) >= 2:
if '-h' in str(sys.argv[1]) or 'help' in str(sys.argv[1]):
print_usage_text()
sys.exit()
if len(sys.argv) >= 6:
# Automated simulations - Differing link usage weights in Groupflow Module
log_prefix = sys.argv[3]
num_iterations = int(sys.argv[2])
first_index = int(sys.argv[4])
util_params = []
for param_index in range(5, len(sys.argv)):
param_split = sys.argv[param_index].split(',')
util_params.append((param_split[0], param_split[1], int(param_split[2])))
topo = None
if 'manhattan' in sys.argv[1]:
print 'Generating Manhattan Grid Topology'
topo = ManhattanGridTopo(5, 4, 20, 1, False)
else:
print 'Generating BRITE Specified Topology'
topo = BriteTopo(sys.argv[1])
hosts = topo.get_host_list()
start_time = time()
num_success = 0
num_failure = 0
print 'Simulations started at: ' + str(datetime.now())
for i in range(0,num_iterations):
for util_param in util_params:
test_success = False
while not test_success:
parent_pipe, child_pipe = Pipe()
p = Process(target=mcastTestDynamic, args=(topo, hosts, log_prefix + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + '_' + str(i + first_index) + '.log', util_param[0], util_param[1], util_param[2], child_pipe))
sim_start_time = time()
p.start()
p.join()
sim_end_time = time()
# Make extra sure the network terminated cleanly
call(['python', 'kill_running_test.py'])
test_success = parent_pipe.recv()
parent_pipe.close()
print 'Test Success: ' + str(test_success)
if test_success:
num_success += 1
else:
num_failure += 1
print 'Simulation ' + str(i+1) + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + ' completed at: ' + str(datetime.now()) + ' (runtime: ' + str(sim_end_time - sim_start_time) + ' seconds)'
end_time = time()
print ' '
print 'Simulations completed at: ' + str(datetime.now())
print 'Total runtime: ' + str(end_time - start_time) + ' seconds'
print 'Average runtime per sim: ' + str((end_time - start_time) / (num_iterations * len(util_params))) + ' seconds'
print 'Number of failed sims: ' + str(num_failure)
print 'Number of successful sims: ' + str(num_success)
|
kvasukib/groupflow_simulator
|
groupflow_scripts/mininet_multicast_dynamic.py
|
Python
|
apache-2.0
| 15,080
|
import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
@pytest.fixture()
def task_vars():
return dict(
openshift=dict(
common=dict(
service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(),
),
openshift_deployment_type='origin',
openshift_image_tag='',
group_names=['nodes', 'masters'],
)
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
("openshift-enterprise", False, [], False),
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
task_vars['openshift_deployment_type'] = deployment_type
task_vars['openshift']['common']['is_containerized'] = is_containerized
task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
(True, True),
(False, False),
(True, False),
(False, True),
])
def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
assert module_args['name']
return {
'images': [module_args['name']],
}
task_vars['openshift']['common']['is_containerized'] = is_containerized
task_vars['openshift']['common']['is_atomic'] = is_atomic
result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@pytest.mark.parametrize("available_locally", [
False,
True,
])
def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
return {}
task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
task_vars['openshift_image_tag'] = 'v3.4'
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
result = check.run()
assert not result.get('failed', False)
def test_all_images_unavailable(task_vars):
def execute_module(module_name=None, *args):
if module_name == "wait_for":
return {}
elif module_name == "command":
return {'failed': True}
return {} # docker_image_facts failure
task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
task_vars['openshift_image_tag'] = 'latest'
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
actual = check.run()
assert actual['failed']
assert "required Docker images are not available" in actual['msg']
def test_no_known_registries():
def execute_module(module_name=None, *_):
if module_name == "command":
return {
'failed': True,
}
return {
'changed': False,
}
def mock_known_docker_registries():
return []
dia = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
is_containerized=False,
is_atomic=False,
),
docker=dict(additional_registries=["docker.io"]),
),
openshift_deployment_type="openshift-enterprise",
openshift_image_tag='latest',
group_names=['nodes', 'masters'],
))
dia.known_docker_registries = mock_known_docker_registries
actual = dia.run()
assert actual['failed']
assert "Unable to retrieve any docker registries." in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
(
"docker image update failure",
["docker image update failure"],
),
(
"No package matching 'skopeo' found available, installed or updated",
["dependencies can be installed via `yum`"]
),
])
def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
}
return {}
task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
task_vars['openshift_deployment_type'] = "openshift-enterprise"
check = DockerImageAvailability(execute_module, task_vars)
check._module_retry_interval = 0
actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
@pytest.mark.parametrize(
"image, registries, connection_test_failed, skopeo_failed, "
"expect_success, expect_registries_reached", [
(
"spam/eggs:v1", ["test.reg"],
True, True,
False,
{"test.reg": False},
),
(
"spam/eggs:v1", ["test.reg"],
False, True,
False,
{"test.reg": True},
),
(
"eggs.reg/spam/eggs:v1", ["test.reg"],
False, False,
True,
{"eggs.reg": True},
),
])
def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
if module_name == "wait_for":
return dict(msg="msg", failed=connection_test_failed)
elif module_name == "command":
return dict(msg="msg", failed=skopeo_failed)
check = DockerImageAvailability(execute_module, task_vars())
check._module_retry_interval = 0
available = check.is_available_skopeo_image(image, registries)
assert available == expect_success
assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
"origin", False, ['nodes'], None,
set([
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
'openshift/origin-docker-registry:vtest',
'openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes', # origin version of registry-console
])
),
( # set a different URL for images
"origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
set([
'foo.io/openshift/origin-pod:vtest',
'foo.io/openshift/origin-deployer:vtest',
'foo.io/openshift/origin-docker-registry:vtest',
'foo.io/openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes', # AFAICS this is not built from the URL
])
),
(
"origin", True, ['nodes', 'masters', 'etcd'], None,
set([
# images running on top of openshift
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
'openshift/origin-docker-registry:vtest',
'openshift/origin-haproxy-router:vtest',
'cockpit/kubernetes',
# containerized component images
'openshift/origin:vtest',
'openshift/node:vtest',
'openshift/openvswitch:vtest',
'registry.access.redhat.com/rhel7/etcd',
])
),
( # enterprise images
"openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'foo.io/openshift3/ose-pod:f13ac45',
'foo.io/openshift3/ose-deployer:f13ac45',
'foo.io/openshift3/ose-docker-registry:f13ac45',
'foo.io/openshift3/ose-haproxy-router:f13ac45',
# registry-console is not constructed/versioned the same as the others.
'registry.access.redhat.com/openshift3/registry-console',
# containerized images aren't built from oreg_url
'openshift3/node:vtest',
'openshift3/openvswitch:vtest',
])
),
(
"openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'registry.access.redhat.com/rhel7/etcd',
# lb does not yet come in a containerized version
])
),
])
def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
task_vars = dict(
openshift=dict(
common=dict(
is_containerized=is_containerized,
is_atomic=False,
),
),
openshift_deployment_type=deployment_type,
group_names=groups,
oreg_url=oreg_url,
openshift_image_tag='vtest',
)
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
task_vars = dict(
openshift=dict(
common=dict(
is_containerized=True,
),
),
openshift_deployment_type="origin",
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
|
detiber/openshift-ansible
|
roles/openshift_health_checker/test/docker_image_availability_test.py
|
Python
|
apache-2.0
| 9,879
|
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
try:
from urllib import urlencode # noqa
except ImportError:
from urllib.parse import urlencode # noqa
from manilaclient import base
from manilaclient.openstack.common.apiclient import base as common_base
RESOURCES_PATH = '/share-servers'
RESOURCE_PATH = '/share-servers/%s'
RESOURCES_NAME = 'share_servers'
RESOURCE_NAME = 'share_server'
class ShareServer(common_base.Resource):
def __repr__(self):
return "<ShareServer: %s>" % self.id
def __getattr__(self, attr):
if attr == 'share_network':
attr = 'share_network_name'
return super(ShareServer, self).__getattr__(attr)
class ShareServerManager(base.Manager):
"""Manage :class:`ShareServer` resources."""
resource_class = ShareServer
def get(self, server_id):
"""Get a share server.
:param server_id: The ID of the share server to get.
:rtype: :class:`ShareServer`
"""
server = self._get("%s/%s" % (RESOURCES_PATH, server_id),
RESOURCE_NAME)
# Split big dict 'backend_details' to separated strings
# as next:
# +---------------------+------------------------------------+
# | Property | Value |
# +---------------------+------------------------------------+
# | details:instance_id |35203a78-c733-4b1f-b82c-faded312e537|
# +---------------------+------------------------------------+
for k, v in six.iteritems(server._info["backend_details"]):
server._info["details:%s" % k] = v
return server
def details(self, server_id):
"""Get a share server details.
:param server_id: The ID of the share server to get details from.
        :rtype: list of :class:`ShareServerBackendDetails`
"""
return self._get("%s/%s/details" % (RESOURCES_PATH, server_id),
"details")
def delete(self, server_id):
"""Delete share server.
:param server_id: id of share server to be deleted.
"""
self._delete(RESOURCE_PATH % server_id)
def list(self, search_opts=None):
"""Get a list of share servers.
:rtype: list of :class:`ShareServer`
"""
query_string = ''
if search_opts:
opts = sorted(
[(k, v) for (k, v) in six.iteritems(search_opts) if v])
query_string = urlencode(opts)
query_string = '?' + query_string if query_string else ''
return self._list(RESOURCES_PATH + query_string, RESOURCES_NAME)
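As a quick illustration of how ShareServerManager.list builds its request, the sketch below (not part of manilaclient; the search options are hypothetical) reproduces the query-string construction: empty values are dropped, the remaining options are sorted, and the result is URL-encoded before being appended to /share-servers.

import six
try:
    from urllib import urlencode  # noqa
except ImportError:
    from urllib.parse import urlencode  # noqa

search_opts = {'host': 'backend1@generic', 'status': 'active', 'project_id': None}
opts = sorted([(k, v) for (k, v) in six.iteritems(search_opts) if v])
query_string = '?' + urlencode(opts) if opts else ''
assert query_string == '?host=backend1%40generic&status=active'
# The manager would then GET '/share-servers' + query_string.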
|
sniperganso/python-manilaclient
|
manilaclient/v2/share_servers.py
|
Python
|
apache-2.0
| 3,246
|
def add(a, b):
"""
This function adds two numbers
"""
return a + b
def sub(a,b):
"""
This function subtracts two numbers
"""
return a - b
# print "The first number you want to subtract?"
# a = int(raw_input("First no: "))
# print "What's the second number you want to subtract"
# b = int(raw_input("Second no: "))
# """
# This function subtracts two numbers
# """
# result = sub(a,b)
# """
# this prints the results
# """
# print "The result is: %r." % result
def opp(a):
return a * -1
# print "Number you want to change"
# a = int(raw_input("Number to change: "))
# result = opp(a)
# """
# This function changes the sign of the number
# """
# print "The result is: %r." % result
# """
# this prints the results
# """
|
alveyworld-dev/calculator
|
team6.py
|
Python
|
apache-2.0
| 738
|
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import netaddr
from oslo_config import cfg
from manila.common import constants
from manila import context
from manila import exception
from manila.network import standalone_network_plugin as plugin
from manila import test
from manila.tests import utils as test_utils
CONF = cfg.CONF
fake_context = context.RequestContext(
user_id='fake user', project_id='fake project', is_admin=False)
fake_share_server = dict(id='fake_share_server_id')
fake_share_network = dict(id='fake_share_network_id')
@ddt.ddt
class StandaloneNetworkPluginTest(test.TestCase):
@ddt.data('custom_config_group_name', 'DEFAULT')
def test_init_only_with_required_data_v4(self, group_name):
data = {
group_name: {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '24',
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin(
config_group_name=group_name)
self.assertEqual('10.0.0.1', instance.gateway)
self.assertEqual('24', instance.mask)
self.assertIsNone(instance.segmentation_id)
self.assertIsNone(instance.allowed_ip_ranges)
self.assertEqual(4, instance.ip_version)
self.assertEqual(netaddr.IPNetwork('10.0.0.1/24'), instance.net)
self.assertEqual(['10.0.0.1/24'], instance.allowed_cidrs)
self.assertEqual(
('10.0.0.0', '10.0.0.1', '10.0.0.255'),
instance.reserved_addresses)
@ddt.data('custom_config_group_name', 'DEFAULT')
def test_init_with_all_data_v4(self, group_name):
data = {
group_name: {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '255.255.0.0',
'standalone_network_plugin_segmentation_id': '1001',
'standalone_network_plugin_allowed_ip_ranges': (
'10.0.0.3-10.0.0.7,10.0.0.69-10.0.0.157,10.0.0.213'),
'standalone_network_plugin_ip_version': 4,
},
}
allowed_cidrs = [
'10.0.0.3/32', '10.0.0.4/30', '10.0.0.69/32', '10.0.0.70/31',
'10.0.0.72/29', '10.0.0.80/28', '10.0.0.96/27', '10.0.0.128/28',
'10.0.0.144/29', '10.0.0.152/30', '10.0.0.156/31', '10.0.0.213/32',
]
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin(
config_group_name=group_name)
self.assertEqual(4, instance.ip_version)
self.assertEqual('10.0.0.1', instance.gateway)
self.assertEqual('255.255.0.0', instance.mask)
self.assertEqual('1001', instance.segmentation_id)
self.assertEqual(allowed_cidrs, instance.allowed_cidrs)
self.assertEqual(
['10.0.0.3-10.0.0.7', '10.0.0.69-10.0.0.157', '10.0.0.213'],
instance.allowed_ip_ranges)
self.assertEqual(
netaddr.IPNetwork('10.0.0.1/255.255.0.0'), instance.net)
self.assertEqual(
('10.0.0.0', '10.0.0.1', '10.0.255.255'),
instance.reserved_addresses)
@ddt.data('custom_config_group_name', 'DEFAULT')
def test_init_only_with_required_data_v6(self, group_name):
data = {
group_name: {
'standalone_network_plugin_gateway': (
'2001:cdba::3257:9652'),
'standalone_network_plugin_mask': '48',
'standalone_network_plugin_ip_version': 6,
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin(
config_group_name=group_name)
self.assertEqual(
'2001:cdba::3257:9652', instance.gateway)
self.assertEqual('48', instance.mask)
self.assertIsNone(instance.segmentation_id)
self.assertIsNone(instance.allowed_ip_ranges)
self.assertEqual(6, instance.ip_version)
self.assertEqual(
netaddr.IPNetwork('2001:cdba::3257:9652/48'),
instance.net)
self.assertEqual(
['2001:cdba::3257:9652/48'], instance.allowed_cidrs)
self.assertEqual(
('2001:cdba::', '2001:cdba::3257:9652',
'2001:cdba:0:ffff:ffff:ffff:ffff:ffff'),
instance.reserved_addresses)
@ddt.data('custom_config_group_name', 'DEFAULT')
def test_init_with_all_data_v6(self, group_name):
data = {
group_name: {
'standalone_network_plugin_gateway': '2001:db8::0001',
'standalone_network_plugin_mask': '88',
'standalone_network_plugin_segmentation_id': '3999',
'standalone_network_plugin_allowed_ip_ranges': (
'2001:db8::-2001:db8:0000:0000:0000:007f:ffff:ffff'),
'standalone_network_plugin_ip_version': 6,
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin(
config_group_name=group_name)
self.assertEqual(6, instance.ip_version)
self.assertEqual('2001:db8::0001', instance.gateway)
self.assertEqual('88', instance.mask)
self.assertEqual('3999', instance.segmentation_id)
self.assertEqual(['2001:db8::/89'], instance.allowed_cidrs)
self.assertEqual(
['2001:db8::-2001:db8:0000:0000:0000:007f:ffff:ffff'],
instance.allowed_ip_ranges)
self.assertEqual(
netaddr.IPNetwork('2001:db8::0001/88'), instance.net)
self.assertEqual(
('2001:db8::', '2001:db8::0001', '2001:db8::ff:ffff:ffff'),
instance.reserved_addresses)
@ddt.data('custom_config_group_name', 'DEFAULT')
def test_invalid_init_without_any_config_definitions(self, group_name):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.StandaloneNetworkPlugin,
config_group_name=group_name)
@ddt.data(
{},
{'gateway': '20.0.0.1'},
{'mask': '8'},
{'gateway': '20.0.0.1', 'mask': '33'},
{'gateway': '20.0.0.256', 'mask': '16'})
def test_invalid_init_required_data_improper(self, data):
group_name = 'custom_group_name'
if 'gateway' in data:
data['standalone_network_plugin_gateway'] = data.pop('gateway')
if 'mask' in data:
data['standalone_network_plugin_mask'] = data.pop('mask')
data = {group_name: data}
with test_utils.create_temp_config_with_opts(data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.StandaloneNetworkPlugin,
config_group_name=group_name)
@ddt.data(
'fake',
'11.0.0.0-11.0.0.5-11.0.0.11',
'11.0.0.0-11.0.0.5',
'10.0.10.0-10.0.10.5',
'10.0.0.0-10.0.0.5,fake',
'10.0.10.0-10.0.10.5,10.0.0.0-10.0.0.5',
'10.0.10.0-10.0.10.5,10.0.0.10-10.0.10.5',
'10.0.0.0-10.0.0.5,10.0.10.0-10.0.10.5')
def test_invalid_init_incorrect_allowed_ip_ranges_v4(self, ip_range):
group_name = 'DEFAULT'
data = {
group_name: {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '255.255.255.0',
'standalone_network_plugin_allowed_ip_ranges': ip_range,
},
}
with test_utils.create_temp_config_with_opts(data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.StandaloneNetworkPlugin,
config_group_name=group_name)
@ddt.data(
{'gateway': '2001:db8::0001', 'vers': 4},
{'gateway': '10.0.0.1', 'vers': 6})
@ddt.unpack
def test_invalid_init_mismatch_of_versions(self, gateway, vers):
group_name = 'DEFAULT'
data = {
group_name: {
'standalone_network_plugin_gateway': gateway,
'standalone_network_plugin_ip_version': vers,
'standalone_network_plugin_mask': '25',
},
}
with test_utils.create_temp_config_with_opts(data):
self.assertRaises(
exception.NetworkBadConfigurationException,
plugin.StandaloneNetworkPlugin,
config_group_name=group_name)
def test_deallocate_network(self):
share_server_id = 'fake_share_server_id'
data = {
'DEFAULT': {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '24',
},
}
fake_allocations = [{'id': 'fake1'}, {'id': 'fake2'}]
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin()
self.mock_object(
instance.db, 'network_allocations_get_for_share_server',
mock.Mock(return_value=fake_allocations))
self.mock_object(instance.db, 'network_allocation_delete')
instance.deallocate_network(fake_context, share_server_id)
instance.db.network_allocations_get_for_share_server.\
assert_called_once_with(fake_context, share_server_id)
instance.db.network_allocation_delete.\
assert_has_calls([
mock.call(fake_context, 'fake1'),
mock.call(fake_context, 'fake2'),
])
def test_allocate_network_zero_addresses_ipv4(self):
data = {
'DEFAULT': {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '24',
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin()
self.mock_object(instance.db, 'share_network_update')
allocations = instance.allocate_network(
fake_context, fake_share_server, fake_share_network, count=0)
self.assertEqual([], allocations)
instance.db.share_network_update.assert_called_once_with(
fake_context, fake_share_network['id'],
dict(segmentation_id=None, cidr=instance.net.cidr, ip_version=4))
def test_allocate_network_zero_addresses_ipv6(self):
data = {
'DEFAULT': {
'standalone_network_plugin_gateway': '2001:db8::0001',
'standalone_network_plugin_mask': '64',
'standalone_network_plugin_ip_version': 6,
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin()
self.mock_object(instance.db, 'share_network_update')
allocations = instance.allocate_network(
fake_context, fake_share_server, fake_share_network, count=0)
self.assertEqual([], allocations)
instance.db.share_network_update.assert_called_once_with(
fake_context, fake_share_network['id'],
dict(segmentation_id=None, cidr=instance.net.cidr, ip_version=6))
def test_allocate_network_one_ip_address_ipv4_no_usages_exist(self):
data = {
'DEFAULT': {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '24',
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin()
self.mock_object(instance.db, 'share_network_update')
self.mock_object(instance.db, 'network_allocation_create')
self.mock_object(
instance.db, 'network_allocations_get_by_ip_address',
mock.Mock(return_value=[]))
allocations = instance.allocate_network(
fake_context, fake_share_server, fake_share_network)
self.assertEqual(1, len(allocations))
instance.db.share_network_update.assert_called_once_with(
fake_context, fake_share_network['id'],
dict(segmentation_id=None, cidr=instance.net.cidr, ip_version=4))
instance.db.network_allocations_get_by_ip_address.assert_has_calls(
[mock.call(fake_context, '10.0.0.2')])
instance.db.network_allocation_create.assert_called_once_with(
fake_context,
dict(share_server_id=fake_share_server['id'],
ip_address='10.0.0.2', status=constants.STATUS_ACTIVE))
def test_allocate_network_two_ip_addresses_ipv4_two_usages_exist(self):
ctxt = type('FakeCtxt', (object,), {'fake': ['10.0.0.2', '10.0.0.4']})
def fake_get_allocations_by_ip_address(context, ip_address):
if ip_address not in context.fake:
context.fake.append(ip_address)
return []
else:
return context.fake
data = {
'DEFAULT': {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '24',
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin()
self.mock_object(instance.db, 'share_network_update')
self.mock_object(instance.db, 'network_allocation_create')
self.mock_object(
instance.db, 'network_allocations_get_by_ip_address',
mock.Mock(side_effect=fake_get_allocations_by_ip_address))
allocations = instance.allocate_network(
ctxt, fake_share_server, fake_share_network, count=2)
self.assertEqual(2, len(allocations))
instance.db.share_network_update.assert_called_once_with(
ctxt, fake_share_network['id'],
dict(segmentation_id=None, cidr=instance.net.cidr, ip_version=4))
instance.db.network_allocations_get_by_ip_address.assert_has_calls(
[mock.call(ctxt, '10.0.0.2'), mock.call(ctxt, '10.0.0.3'),
mock.call(ctxt, '10.0.0.4'), mock.call(ctxt, '10.0.0.5')])
instance.db.network_allocation_create.assert_has_calls([
mock.call(
ctxt,
dict(share_server_id=fake_share_server['id'],
ip_address='10.0.0.3', status=constants.STATUS_ACTIVE)),
mock.call(
ctxt,
dict(share_server_id=fake_share_server['id'],
ip_address='10.0.0.5', status=constants.STATUS_ACTIVE)),
])
def test_allocate_network_no_available_ipv4_addresses(self):
data = {
'DEFAULT': {
'standalone_network_plugin_gateway': '10.0.0.1',
'standalone_network_plugin_mask': '30',
},
}
with test_utils.create_temp_config_with_opts(data):
instance = plugin.StandaloneNetworkPlugin()
self.mock_object(instance.db, 'share_network_update')
self.mock_object(instance.db, 'network_allocation_create')
self.mock_object(
instance.db, 'network_allocations_get_by_ip_address',
mock.Mock(return_value=['not empty list']))
self.assertRaises(
exception.NetworkBadConfigurationException,
instance.allocate_network,
fake_context, fake_share_server, fake_share_network)
instance.db.share_network_update.assert_called_once_with(
fake_context, fake_share_network['id'],
dict(segmentation_id=None, cidr=instance.net.cidr, ip_version=4))
instance.db.network_allocations_get_by_ip_address.assert_has_calls(
[mock.call(fake_context, '10.0.0.2')])
|
jcsp/manila
|
manila/tests/network/test_standalone_network_plugin.py
|
Python
|
apache-2.0
| 16,379
|
import cmd
class HelloWorld(cmd.Cmd):
prompt = 'prompt: '
intro = "Simple command processor example."
doc_header = 'doc_header'
misc_header = 'misc_header'
undoc_header = 'undoc_header'
ruler = '-'
def do_prompt(self, line):
"Change the interactive prompt"
self.prompt = line + ': '
def do_EOF(self, line):
return True
if __name__ == '__main__':
HelloWorld().cmdloop()
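Beyond cmdloop(), the same class can be driven programmatically, which is handy for testing. The sketch below is not part of the original example; it uses cmd.Cmd.onecmd, which runs a single command line and returns whatever the matching do_* method returns.

hw = HelloWorld()
hw.onecmd('prompt demo')          # dispatches to do_prompt('demo')
assert hw.prompt == 'demo: '
assert hw.onecmd('EOF') is True   # do_EOF returns True, which would end cmdloop()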
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_application_building_blocks/cmd_attributes.py
|
Python
|
apache-2.0
| 438
|
import sys
def fix(element):
if element and element[0] == '<':
new = '<'
for c in element[1:-1]:
if c == '<':
new += '<'
elif c == '>':
new += '>'
else:
new += c
new += '>'
return new
else:
return element
for line in open(sys.argv[1]):
cols = line.strip().split('\t')
if len(cols) != 4 or cols[3] != '.':
        sys.stderr.write('Ignoring malformed line: ' + line)
else:
# for now only touch subject for efficiency reasons.
cols[0] = fix(cols[0])
print '\t'.join(cols)
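A quick check of what fix() does (not in the original script; the IRIs are made up): angle brackets inside an IRI are escaped while the enclosing pair is kept, and non-IRI columns pass through untouched.

assert fix('<http://example.org/a<b>c>') == '<http://example.org/a&lt;b&gt;c>'
assert fix('"plain literal"') == '"plain literal"'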
|
Buchhold/QLever
|
misc/fix_nt_file.py
|
Python
|
apache-2.0
| 554
|
import synapse.common as s_common
import synapse.cryotank as s_cryotank
import synapse.lib.const as s_const
import synapse.tests.utils as s_t_utils
from synapse.tests.utils import alist
logger = s_cryotank.logger
cryodata = (('foo', {'bar': 10}), ('baz', {'faz': 20}))
class CryoTest(s_t_utils.SynTest):
async def test_cryo_cell_async(self):
async with self.getTestCryo() as cryo:
async with cryo.getLocalProxy() as prox:
self.true(await prox.init('foo'))
self.eq([], await alist(prox.rows('foo', 0, 1)))
async def test_cryo_cell(self):
with self.getTestDir() as dirn:
async with self.getTestCryoAndProxy(dirn=dirn) as (cryo, prox):
self.eq((), await prox.list())
self.true(await prox.init('foo'))
self.eq('foo', (await prox.list())[0][0])
self.none(await prox.last('foo'))
self.eq([], await alist(prox.rows('foo', 0, 1)))
self.true(await prox.puts('foo', cryodata))
info = await prox.list()
self.eq('foo', info[0][0])
self.eq(2, info[0][1].get('stat').get('entries'))
self.true(await prox.puts('foo', cryodata))
items = await alist(prox.slice('foo', 1, 3))
self.eq(items[0][1][0], 'baz')
metrics = await alist(prox.metrics('foo', 0, 9999))
self.len(2, metrics)
self.eq(2, metrics[0][1]['count'])
self.eq(3, (await prox.last('foo'))[0])
self.eq('baz', (await prox.last('foo'))[1][0])
iden = s_common.guid()
self.eq(0, await prox.offset('foo', iden))
items = await alist(prox.slice('foo', 0, 1000, iden=iden))
self.eq(0, await prox.offset('foo', iden))
items = await alist(prox.slice('foo', 4, 1000, iden=iden))
self.eq(4, await prox.offset('foo', iden))
# test the direct tank share....
async with cryo.getLocalProxy(share='cryotank/foo') as lprox:
items = await alist(lprox.slice(1, 3))
self.eq(items[0][1][0], 'baz')
self.len(4, await alist(lprox.slice(0, 9999)))
await lprox.puts(cryodata)
self.len(6, await alist(lprox.slice(0, 9999)))
# test offset storage and updating
iden = s_common.guid()
self.eq(0, await lprox.offset(iden))
self.eq(2, await lprox.puts(cryodata, seqn=(iden, 0)))
self.eq(2, await lprox.offset(iden))
# test the new open share
async with cryo.getLocalProxy(share='cryotank/lulz') as lprox:
self.len(0, await alist(lprox.slice(0, 9999)))
await lprox.puts(cryodata)
self.len(2, await alist(lprox.slice(0, 9999)))
self.len(1, await alist(lprox.metrics(0)))
# Delete apis
self.false(await prox.delete('newp'))
self.true(await prox.delete('lulz'))
# Re-open the tank and ensure that the deleted tank is not present.
async with self.getTestCryoAndProxy(dirn=dirn) as (cryo, prox):
tanks = await prox.list()
self.len(1, tanks)
self.eq('foo', tanks[0][0])
async def test_cryo_init(self):
with self.getTestDir() as dirn:
async with self.getTestCryo(dirn) as cryo:
# test passing conf data in through init directly
tank = await cryo.init('conftest', conf={'map_size': s_const.mebibyte * 64})
self.eq(tank.slab.mapsize, s_const.mebibyte * 64)
_, conf = await cryo.hive.get(('cryo', 'names', 'conftest'))
self.eq(conf, {'map_size': s_const.mebibyte * 64})
# And the data was persisted
async with self.getTestCryo(dirn) as cryo:
tank = cryo.tanks.get('conftest')
self.eq(tank.slab.mapsize, s_const.mebibyte * 64)
_, conf = await cryo.hive.get(('cryo', 'names', 'conftest'))
self.eq(conf, {'map_size': s_const.mebibyte * 64})
|
vertexproject/synapse
|
synapse/tests/test_cryotank.py
|
Python
|
apache-2.0
| 4,379
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import static_lsp
class static_lsps(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/static-lsps. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: statically configured LSPs, without dynamic
signaling
"""
__slots__ = ("_path_helper", "_extmethods", "__static_lsp")
_yang_name = "static-lsps"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__static_lsp = YANGDynClass(
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="static-lsp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances", "network-instance", "mpls", "lsps", "static-lsps"
]
def _get_static_lsp(self):
"""
Getter method for static_lsp, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp (list)
YANG Description: list of defined static LSPs
"""
return self.__static_lsp
def _set_static_lsp(self, v, load=False):
"""
Setter method for static_lsp, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_static_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_static_lsp() directly.
YANG Description: list of defined static LSPs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="static-lsp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """static_lsp must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("name",static_lsp.static_lsp, yang_name="static-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="static-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__static_lsp = t
if hasattr(self, "_set"):
self._set()
def _unset_static_lsp(self):
self.__static_lsp = YANGDynClass(
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="static-lsp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
static_lsp = __builtin__.property(_get_static_lsp, _set_static_lsp)
_pyangbind_elements = OrderedDict([("static_lsp", static_lsp)])
from . import static_lsp
class static_lsps(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/static-lsps. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: statically configured LSPs, without dynamic
signaling
"""
__slots__ = ("_path_helper", "_extmethods", "__static_lsp")
_yang_name = "static-lsps"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__static_lsp = YANGDynClass(
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="static-lsp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances", "network-instance", "mpls", "lsps", "static-lsps"
]
def _get_static_lsp(self):
"""
Getter method for static_lsp, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp (list)
YANG Description: list of defined static LSPs
"""
return self.__static_lsp
def _set_static_lsp(self, v, load=False):
"""
Setter method for static_lsp, mapped from YANG variable /network_instances/network_instance/mpls/lsps/static_lsps/static_lsp (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_static_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_static_lsp() directly.
YANG Description: list of defined static LSPs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="static-lsp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """static_lsp must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("name",static_lsp.static_lsp, yang_name="static-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="static-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__static_lsp = t
if hasattr(self, "_set"):
self._set()
def _unset_static_lsp(self):
self.__static_lsp = YANGDynClass(
base=YANGListType(
"name",
static_lsp.static_lsp,
yang_name="static-lsp",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="name",
extensions=None,
),
is_container="list",
yang_name="static-lsp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
static_lsp = __builtin__.property(_get_static_lsp, _set_static_lsp)
_pyangbind_elements = OrderedDict([("static_lsp", static_lsp)])
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/static_lsps/__init__.py
|
Python
|
apache-2.0
| 13,426
|
import numpy as np
import pandas
import scipy, scipy.spatial
import sklearn
import sys
from sklearn import linear_model
from sklearn.metrics import precision_score, recall_score, f1_score
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('train', help='Training Data')
parser.add_argument('labels', help='Training Labels')
parser.add_argument('test', help='Test Data')
parser.add_argument('data_cv', help='Data for CrossValidation')
parser.add_argument('label_cv', help='Labels for CrossValidation')
parser.add_argument('plab', type=int, help='The class to be predicted')
parser.add_argument('cost', type=float, help='The cost variable (C)')
parser.add_argument('out', help='Output file name')
args = parser.parse_args()
y_all = pandas.read_table(args.labels, header=None, sep=' ')
print(y_all.head())
ndim = pandas.read_table(args.train, sep=' ', header=None, nrows=3).shape[1]
featstat = pandas.read_csv('data/feat_stats.csv')
print(featstat.head())
# ## Logistic Regression
clf = linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.00001, C=args.cost, \
fit_intercept=True, intercept_scaling=1, class_weight=None, \
random_state=None, solver='liblinear', max_iter=10000)
y = np.empty(shape=y_all.shape[0], dtype=int)
ic = args.plab
y[np.where(y_all[0] != ic)[0]] = -1
y[np.where(y_all[0] == ic)[0]] = 1
print('Training set: %d Pos: %d Neg: %d'%(y.shape[0], np.sum(y==1), np.sum(y==-1)))
chunks=500000
for i in range(1):
sys.stdout.write('%d '%(i))
n = 0
for df in pandas.read_table(args.train, sep=' ', header=None, iterator=True, chunksize=chunks):
n0, n1 = n*chunks, (n+1)*chunks
if n1 > y.shape[0]:
                n1 = y.shape[0]  # clamp the slice end to the number of labels
ysub = y[n0:n1]
#sys.stdout.write('%d (%d-%d) %d\t'%(n, n0, n1, ysub.shape[0]))
df = (df - featstat['mean']) / featstat['sigma']
clf.fit(df, ysub)
n += 1
break
### Reading cross-validation set
Xcv = pandas.read_table(args.data_cv, sep=' ', header=None)
print(ic, Xcv.shape)
ycv = pandas.read_table(args.label_cv, sep=' ', header=None)[0].values
ycv[np.where(ycv != ic)[0]] = -1
ycv[np.where(ycv == ic)[0]] = 1
print('CrossValidation %d %d for label=%d ==>\tPos: %d Neg: %d' \
%(Xcv.shape[0], ycv.shape[0], ic, np.sum(ycv == 1), np.sum(ycv == -1)))
ypred_cv = clf.predict(Xcv)
prec = precision_score(ycv, ypred_cv)
rec = recall_score(ycv, ypred_cv)
f1score = f1_score(ycv, ypred_cv)
print('Precision=%.3f Recall=%.3f F1Score=%.3f'%(prec, rec, f1score))
print('CrossVal: ==> TP+FP=%d \t TP+FN=%d'%(np.sum(ypred_cv == 1), np.sum(ycv == 1)))
n = 0
for Xtest in pandas.read_table(args.test, sep=' ', header=None, iterator=True, chunksize=10000):
ypred = clf.predict(Xtest)
print('TestSet part %d ==> pos-predicted=%d '%(n, np.sum(ypred == 1)))
if n==0:
mode='w'
else:
mode = 'a'
pandas.DataFrame({'pred':ypred}).to_csv(args.out, mode=mode, header='%.3f %.3f %.3f'%(prec, rec, f1score))
n += 1
if __name__ == '__main__':
main()
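Note that the training loop above appears to fit only the first chunk (the break exits after one pass), and LogisticRegression.fit is not incremental, so refitting per chunk would simply overwrite earlier chunks. If true out-of-core training were wanted, one option is scikit-learn's SGDClassifier with partial_fit. The sketch below is an alternative, not the author's approach; the file paths and the positive class are hypothetical stand-ins for args.train, args.labels and args.plab.

import numpy as np
import pandas
from sklearn.linear_model import SGDClassifier

y_all = pandas.read_table('labels.txt', header=None, sep=' ')  # hypothetical path (args.labels)
y = np.where(y_all[0] == 1, 1, -1)                             # one-vs-rest labels for a hypothetical plab=1
clf_ooc = SGDClassifier(loss='log_loss', alpha=1e-4)           # use loss='log' on older scikit-learn
chunks = 500000
n = 0
for df in pandas.read_table('train.txt', sep=' ', header=None, iterator=True, chunksize=chunks):
    ysub = y[n * chunks:n * chunks + len(df)]
    # classes must be supplied on the first partial_fit call
    clf_ooc.partial_fit(df, ysub, classes=np.array([-1, 1]))
    n += 1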
|
mirjalil/ml-visual-recognition
|
codes/logisticRegression.py
|
Python
|
apache-2.0
| 3,262
|
#!/usr/bin/env python3
class Employee:
num_of_emps = 0
raise_amount = 1.04
    def __init__(self, first, last, pay=50000):
        self.first = first
        self.last = last
        self.pay = pay  # used by apply_raise, __repr__ and __add__ below; default is an arbitrary placeholder
        self.email = first + '.' + last + '@kellynoah.com'
        Employee.num_of_emps += 1
def fullname(self):
return '{} {}'.format(self.first,self.last)
def apply_raise(self):
self.pay = int(self.pay * self.raise_amount)
def __repr__(self):
return "Employee('{}', '{}', '{}')".format(self.first, self.last, self.pay)
def __str__(self):
return '{} - {}'.format(self.fullname(), self.email)
def __add__(self, other):
return self.pay + other.pay
def __len__(self):
return len(self.fullname())
emp_1 = Employee('John', 'Smith')
print(emp_1.first)
print(emp_1.email)
print(emp_1.fullname())
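With the pay attribute wired into the constructor above (the default is an arbitrary placeholder), the remaining dunder methods can be exercised too. This usage sketch is not part of the original tutorial file.

emp_2 = Employee('Jane', 'Doe', 60000)
print(repr(emp_2))    # Employee('Jane', 'Doe', '60000')
print(str(emp_2))     # Jane Doe - Jane.Doe@kellynoah.com
print(emp_1 + emp_2)  # 110000, via __add__ (50000 default + 60000)
print(len(emp_2))     # 8, the length of 'Jane Doe'
emp_2.apply_raise()
print(emp_2.pay)      # 62400 after the 4% raise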
|
lmascare/utils
|
python/tutorials/oop6a.py
|
Python
|
artistic-2.0
| 811
|
import numpy as np
from collections import Counter  # needed by ConvexNN._sample below
from scipy import sparse
from . import Mapper
from . import samplers
class VolumeMapper(Mapper):
@classmethod
def _cache(cls, filename, subject, xfmname, **kwargs):
from .. import db
masks = []
xfm = db.get_xfm(subject, xfmname, xfmtype='coord')
pia = db.get_surf(subject, "pia", merge=False, nudge=False)
wm = db.get_surf(subject, "wm", merge=False, nudge=False)
#iterate over hemispheres
for (wpts, polys), (ppts, _) in zip(pia, wm):
masks.append(cls._getmask(xfm(ppts), xfm(wpts), polys, xfm.shape, **kwargs))
_savecache(filename, masks[0], masks[1], xfm.shape)
return cls(masks[0], masks[1], xfm.shape, subject, xfmname)
@classmethod
    def _getmask(cls, pia, wm, polys, shape, npts=1024, **kwargs):
        from .. import mp
        from .. import polyutils
rand = np.random.rand(npts, 3)
csrshape = len(wm), np.prod(shape)
def func(pts):
if len(pts) > 0:
#generate points within the bounding box
samples = rand * (pts.max(0) - pts.min(0)) + pts.min(0)
#check which points are inside the polyhedron
inside = polyutils.inside_convex_poly(pts)(samples)
return cls._sample(samples[inside], shape, np.sum(inside))
surf = polyutils.Surface(pia, polys)
samples = mp.map(func, surf.polyconvex(wm))
#samples = map(func, surf.polyconvex(wm)) ## For debugging
ij, data = [], []
for i, sample in enumerate(samples):
if sample is not None:
idx = np.zeros((2, len(sample[0])))
idx[0], idx[1] = i, sample[0]
ij.append(idx)
data.append(sample[1])
return sparse.csr_matrix((np.hstack(data), np.hstack(ij)), shape=csrshape)
class PolyConstMapper(VolumeMapper):
patchsize = 0.5
class PolyLinMapper(VolumeMapper):
patchsize = 1
class Polyhedral(VolumeMapper):
'''Uses an actual (likely concave) polyhedra betwen the pial and white surfaces
to estimate the thickness'''
@staticmethod
def _getmask(pia, wm, polys, shape):
from .. import polyutils
mask = sparse.csr_matrix((len(wm), np.prod(shape)))
from tvtk.api import tvtk
measure = tvtk.MassProperties()
planes = tvtk.PlaneCollection()
for norm in np.vstack([-np.eye(3), np.eye(3)]):
planes.append(tvtk.Plane(normal=norm))
ccs = tvtk.ClipClosedSurface(clipping_planes=planes)
feats = tvtk.FeatureEdges(boundary_edges=1, non_manifold_edges=0, manifold_edges=0, feature_edges=0)
feats.set_input(ccs.output)
surf = polyutils.Surface(pia, polys)
for i, (pts, faces) in enumerate(surf.polyhedra(wm)):
if len(pts) > 0:
poly = tvtk.PolyData(points=pts, polys=faces)
measure.set_input(poly)
measure.update()
totalvol = measure.volume
ccs.set_input(poly)
measure.set_input(ccs.output)
bmin = pts.min(0).round().astype(int)
bmax = (pts.max(0).round() + 1).astype(int)
vidx = np.mgrid[bmin[0]:bmax[0], bmin[1]:bmax[1], bmin[2]:bmax[2]]
for vox in vidx.reshape(3, -1).T:
try:
idx = np.ravel_multi_index(vox[::-1], shape)
for plane, m in zip(planes, [.5, .5, .5, -.5, -.5, -.5]):
plane.origin = vox+m
ccs.update()
if ccs.output.number_of_cells > 2:
measure.update()
mask[i, idx] = measure.volume
except ValueError:
print('Voxel not in volume: (%d, %d, %d)'%tuple(vox))
mask.data[mask.indptr[i]:mask.indptr[i+1]] /= mask[i].sum()
return mask
class ConvexPolyhedra(VolumeMapper):
@classmethod
def _getmask(cls, pia, wm, polys, shape, npts=1024):
from .. import mp
from .. import polyutils
rand = np.random.rand(npts, 3)
csrshape = len(wm), np.prod(shape)
def func(pts):
if len(pts) > 0:
#generate points within the bounding box
samples = rand * (pts.max(0) - pts.min(0)) + pts.min(0)
#check which points are inside the polyhedron
inside = polyutils.inside_convex_poly(pts)(samples)
return cls._sample(samples[inside], shape, np.sum(inside))
surf = polyutils.Surface(pia, polys)
samples = mp.map(func, surf.polyconvex(wm))
#samples = map(func, surf.polyconvex(wm)) ## For debugging
ij, data = [], []
for i, sample in enumerate(samples):
if sample is not None:
idx = np.zeros((2, len(sample[0])))
idx[0], idx[1] = i, sample[0]
ij.append(idx)
data.append(sample[1])
return sparse.csr_matrix((np.hstack(data), np.hstack(ij)), shape=csrshape)
class ConvexNN(VolumeMapper):
@staticmethod
def _sample(pts, shape, norm):
coords = pts.round().astype(int)[:,::-1]
d1 = np.logical_and(0 <= coords[:,0], coords[:,0] < shape[0])
d2 = np.logical_and(0 <= coords[:,1], coords[:,1] < shape[1])
d3 = np.logical_and(0 <= coords[:,2], coords[:,2] < shape[2])
valid = np.logical_and(d1, np.logical_and(d2, d3))
if valid.any():
idx = np.ravel_multi_index(coords[valid].T, shape)
            j, data = np.array(list(Counter(idx).items())).T  # list() keeps this working on Python 3
return j, data / float(norm)
class ConvexTrilin(VolumeMapper):
@staticmethod
def _sample(pts, shape, norm):
(x, y, z), floor = np.modf(pts.T)
floor = floor.astype(int)
ceil = floor + 1
x[x < 0] = 0
y[y < 0] = 0
z[z < 0] = 0
i000 = np.ravel_multi_index((floor[2], floor[1], floor[0]), shape, mode='clip')
i100 = np.ravel_multi_index((floor[2], floor[1], ceil[0]), shape, mode='clip')
i010 = np.ravel_multi_index((floor[2], ceil[1], floor[0]), shape, mode='clip')
i001 = np.ravel_multi_index(( ceil[2], floor[1], floor[0]), shape, mode='clip')
i101 = np.ravel_multi_index(( ceil[2], floor[1], ceil[0]), shape, mode='clip')
i011 = np.ravel_multi_index(( ceil[2], ceil[1], floor[0]), shape, mode='clip')
i110 = np.ravel_multi_index((floor[2], ceil[1], ceil[0]), shape, mode='clip')
i111 = np.ravel_multi_index(( ceil[2], ceil[1], ceil[0]), shape, mode='clip')
v000 = (1-x)*(1-y)*(1-z)
v100 = x*(1-y)*(1-z)
v010 = (1-x)*y*(1-z)
v110 = x*y*(1-z)
v001 = (1-x)*(1-y)*z
v101 = x*(1-y)*z
v011 = (1-x)*y*z
v111 = x*y*z
allj = np.vstack([i000, i100, i010, i001, i101, i011, i110, i111]).T.ravel()
data = np.vstack([v000, v100, v010, v001, v101, v011, v110, v111]).T.ravel()
uniquej = np.unique(allj)
uniquejdata = np.array([data[allj==j].sum() for j in uniquej])
return uniquej, uniquejdata / float(norm)
class ConvexLanczos(VolumeMapper):
def _sample(self, pts):
raise NotImplementedError
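As a sanity check on the trilinear weighting used in ConvexTrilin._sample, the standalone sketch below (not pycortex code; the points are random) confirms that the eight per-voxel weights computed from the fractional offsets always sum to one, i.e. each sample is fully distributed over its surrounding voxels.

import numpy as np

pts = np.random.rand(10, 3) * 5.0            # hypothetical sample coordinates
(x, y, z), floor = np.modf(pts.T)            # fractional offsets, as in _sample above
weights = np.vstack([(1 - x) * (1 - y) * (1 - z), x * (1 - y) * (1 - z),
                     (1 - x) * y * (1 - z),       x * y * (1 - z),
                     (1 - x) * (1 - y) * z,       x * (1 - y) * z,
                     (1 - x) * y * z,             x * y * z])
assert np.allclose(weights.sum(axis=0), 1.0)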
|
gallantlab/pycortex
|
cortex/mapper/volume.py
|
Python
|
bsd-2-clause
| 7,345
|
"""
pygments.lexers.other
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer
from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \
TcshLexer
from pygments.lexers.robotframework import RobotFrameworkLexer
from pygments.lexers.testing import GherkinLexer
from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer
from pygments.lexers.prolog import LogtalkLexer
from pygments.lexers.snobol import SnobolLexer
from pygments.lexers.rebol import RebolLexer
from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer
from pygments.lexers.modeling import ModelicaLexer
from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \
HybrisLexer
from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \
AsymptoteLexer, PovrayLexer
from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \
GoodDataCLLexer, MaqlLexer
from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer
from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \
MscgenLexer, VGLLexer
from pygments.lexers.basic import CbmBasicV2Lexer
from pygments.lexers.pawn import SourcePawnLexer, PawnLexer
from pygments.lexers.ecl import ECLLexer
from pygments.lexers.urbi import UrbiscriptLexer
from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer
from pygments.lexers.installers import NSISLexer, RPMSpecLexer
from pygments.lexers.textedit import AwkLexer
from pygments.lexers.smv import NuSMVLexer
__all__ = []
|
dscorbett/pygments
|
pygments/lexers/other.py
|
Python
|
bsd-2-clause
| 1,744
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('simple09.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Test data out of range. These should be ignored.
worksheet.write('A0', 'foo')
worksheet.write(-1, -1, 'foo')
worksheet.write(0, -1, 'foo')
worksheet.write(-1, 0, 'foo')
worksheet.write(1048576, 0, 'foo')
worksheet.write(0, 16384, 'foo')
workbook.close()
self.assertExcelEqual()
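In current XlsxWriter releases the out-of-range writes above are reported through the return value rather than an exception, so a test can also assert on that directly. The sketch below uses the public API and a hypothetical filename rather than the comparison harness.

import xlsxwriter

workbook = xlsxwriter.Workbook('out_of_range_demo.xlsx')  # hypothetical filename
worksheet = workbook.add_worksheet()
assert worksheet.write(0, 0, 'ok') == 0            # in range
assert worksheet.write(1048576, 0, 'foo') == -1    # past the last row: ignored
assert worksheet.write(0, 16384, 'foo') == -1      # past the last column: ignored
workbook.close()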
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/comparison/test_simple09.py
|
Python
|
bsd-2-clause
| 1,048
|
from UtmpHead import UtmpHead
import Config
from Log import Log
class Login:
def __eq__(self, other):
if (other == None):
return (self._loginid == 0)
return (self._loginid == other._loginid)
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, loginid):
self._loginid = loginid
def get_loginid(self):
return self._loginid
def get_userid(self):
from Utmp import Utmp
return Utmp.GetUserId(self._loginid - 1)
# UtmpHead.LIST
@staticmethod
def list_head():
listhead = UtmpHead.GetListHead()
return Login(listhead)
def list_next(self):
return Login(UtmpHead.GetListNext(self._loginid - 1))
def list_prev(self):
return Login(UtmpHead.GetListPrev(self._loginid - 1))
def set_listnext(self, listnext):
return UtmpHead.SetListNext(self._loginid - 1, listnext._loginid)
def set_listprev(self, listprev):
return UtmpHead.SetListPrev(self._loginid - 1, listprev._loginid)
def list_remove(self):
if (Login.list_head() == self):
UtmpHead.SetListHead(self.list_next()._loginid)
self.list_prev().set_listnext(self.list_next())
self.list_next().set_listprev(self.list_prev())
def list_add(self, userid = None):
if (userid == None):
userid = self.get_userid()
if (userid == None or userid == ''):
raise Exception("illegal call to list_add")
node = Login.list_head()
if (node == None):
# empty list -> single element
self.set_listprev(self)
self.set_listnext(self)
UtmpHead.SetListHead(self._loginid)
return True
if (node.get_userid().lower() >= userid.lower()):
# insert at head
self.set_listprev(node.list_prev())
self.set_listnext(node)
node.set_listprev(self)
self.list_prev().set_listnext(self)
UtmpHead.SetListHead(self._loginid)
return True
count = 0
node = node.list_next()
while ((node.get_userid().lower() < userid.lower()) and (node != Login.list_head())):
node = node.list_next()
count += 1
if (count > Config.USHM_SIZE):
UtmpHead.SetListHead(0)
from Utmp import Utmp
Utmp.RebuildList()
return False
self.set_listprev(node.list_prev())
self.set_listnext(node)
node.set_listprev(self)
self.list_prev().set_listnext(self)
return True
# UtmpHead.HASH
@staticmethod
def hash_head(userid):
from Utmp import Utmp
hashkey = Utmp.Hash(userid)
hashhead = UtmpHead.GetHashHead(hashkey)
return hashkey, Login(hashhead)
def set_hashnext(self, hashnext):
UtmpHead.SetNext(self._loginid - 1, hashnext._loginid)
def hash_next(self):
nextid = UtmpHead.GetNext(self._loginid - 1)
return Login(nextid)
def hash_remove(self, userid = None): # userid: for debugging
if (userid == None):
from Utmp import Utmp
userid = Utmp.GetUserId(self._loginid - 1)
hashkey, pos = Login.hash_head(userid)
if (pos == None):
Log.error("Login.hash_remove: hash list empty!")
return False
if (pos == self):
UtmpHead.SetHashHead(hashkey, self.hash_next()._loginid)
else:
while (pos.hash_next() != None and pos.hash_next() != self):
pos = pos.hash_next()
if (pos.hash_next() == None):
Log.error("Login.hash_remove: can't find in hash list")
return False
else:
pos.set_hashnext(self.hash_next())
# add to free list
self.set_hashnext(Login.free_list())
Login.set_freelist(self)
return True
def hash_add(self, userid = None):
if (userid == None):
userid = self.get_userid()
if (userid == None or userid == ''):
raise Exception("illegal call to hash_add")
# remove from free list
Login.set_freelist(self.hash_next())
hashkey, node = Login.hash_head(userid)
self.set_hashnext(node)
UtmpHead.SetHashHead(hashkey, self._loginid)
@staticmethod
def free_list():
hashhead = UtmpHead.GetHashHead(0)
return Login(hashhead)
@staticmethod
def set_freelist(login):
UtmpHead.SetHashHead(0, login._loginid)
|
HenryHu/pybbs
|
Login.py
|
Python
|
bsd-2-clause
| 4,609
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
from indra.java_vm import autoclass, JavaException
logger = logging.getLogger('reach_reader')
class ReachReader(object):
"""The ReachReader wraps a singleton instance of the REACH reader.
This allows calling the reader many times without having to wait for it to
start up each time.
Attributes
----------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
def __init__(self):
self.api_ruler = None
def get_api_ruler(self):
"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException:
# This second autoclass is needed because of a jnius
# issue in which the first JavaException is not raised.
try:
autoclass('java.lang.String')
except JavaException as e:
logger.error(e)
pass
return None
return self.api_ruler
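Typical usage is sketched below, purely as an illustration: it requires pyjnius and the REACH jar on the Java classpath, neither of which is assumed to be set up here.

reader = ReachReader()
ruler = reader.get_api_ruler()          # starts REACH on the first call, which can take a while
assert ruler is reader.get_api_ruler()  # later calls reuse the cached instance (or None if startup failed)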
|
jmuhlich/indra
|
indra/reach/reach_reader.py
|
Python
|
bsd-2-clause
| 1,451
|
# -*- coding: UTF-8 -*-
# Copyright 2016 Luc Saffre
# License: BSD (see file COPYING for details)
from lino.api import dd, _
class PartnerEvents(dd.ChoiceList):
verbose_name = _("Observed event")
verbose_name_plural = _("Observed events")
max_length = 50
|
khchine5/xl
|
lino_xl/lib/contacts/choicelists.py
|
Python
|
bsd-2-clause
| 272
|
# -*- coding: utf-8 -*-
"""Remove team domain
Revision ID: 07f975f81f03
Revises: 4e206c5ddabd
Create Date: 2017-08-04 15:12:11.992856
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '07f975f81f03'
down_revision = '4e206c5ddabd'
branch_labels = None
depends_on = None
def upgrade():
op.drop_index('ix_team_domain', table_name='team')
op.drop_column('team', 'domain')
def downgrade():
op.add_column(
'team',
sa.Column('domain', sa.VARCHAR(length=253), autoincrement=False, nullable=True),
)
op.create_index('ix_team_domain', 'team', ['domain'], unique=False)
|
hasgeek/lastuser
|
migrations/versions/07f975f81f03_remove_team_domain.py
|
Python
|
bsd-2-clause
| 655
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from StackSmash.apps.uploader.models import Document
from StackSmash.apps.uploader.forms import DocumentForm
def list(request):
# Make sure the user is authenticated and able to modify the blog
if not request.user.is_superuser:
raise Http404
# Handle file upload
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Document(docfile=request.FILES['docfile'])
newdoc.save()
# Redirect to the document list after POST
return HttpResponseRedirect(reverse('upload:list'))
else:
        form = DocumentForm()  # An empty, unbound form
# Load documents for the list page
documents = Document.objects.all()
# Render list page with the documents and the form
return render_to_response(
'uploader/list.html',
{'documents': documents, 'form': form},
context_instance=RequestContext(request)
)
def delete(request, pk):
# Make sure the user is authenticated and able to modify the blog
if not request.user.is_superuser:
raise Http404
Document.objects.filter(pk=pk).delete()
    # Redirect back to the namespaced list view, matching the redirect used in list() above.
    return HttpResponseRedirect(reverse('upload:list'))
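The list view above relies on DocumentForm exposing a file field named docfile (it reads request.FILES['docfile']). The real form lives in StackSmash.apps.uploader.forms; a minimal stand-in consistent with this view might look like the following sketch (hypothetical, not the project's actual code).

from django import forms

class DocumentForm(forms.Form):
    # Field name must match the key the view reads from request.FILES.
    docfile = forms.FileField(label='Select a file')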
|
Justasic/StackSmash
|
StackSmash/apps/uploader/views.py
|
Python
|
bsd-2-clause
| 1,444
|