| code (string, 2-1.05M) | repo_name (string, 5-104) | path (string, 4-251) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# (C) British Crown Copyright 2013 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for Robinson projection.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
import cartopy.crs as ccrs
from .helpers import check_proj_params
_CRS_PC = ccrs.PlateCarree()
_CRS_ROB = ccrs.Robinson()
# Increase tolerance if using older proj releases
_TOL = -1 if ccrs.PROJ4_VERSION < (4, 9) else 7
_LIMIT_TOL = -1 # if ccrs.PROJ4_VERSION < (5, 2, 0) else 7
def test_default():
robin = ccrs.Robinson()
other_args = {'a=6378137.0', 'lon_0=0'}
check_proj_params('robin', robin, other_args)
assert_almost_equal(robin.x_limits,
[-17005833.3305252, 17005833.3305252])
assert_almost_equal(robin.y_limits,
[-8625154.6651000, 8625154.6651000], _LIMIT_TOL)
def test_sphere_globe():
globe = ccrs.Globe(semimajor_axis=1000, ellipse=None)
robin = ccrs.Robinson(globe=globe)
other_args = {'a=1000', 'lon_0=0'}
check_proj_params('robin', robin, other_args)
assert_almost_equal(robin.x_limits, [-2666.2696851, 2666.2696851])
assert_almost_equal(robin.y_limits, [-1352.3000000, 1352.3000000],
_LIMIT_TOL)
def test_ellipse_globe():
globe = ccrs.Globe(ellipse='WGS84')
with pytest.warns(UserWarning,
match='does not handle elliptical globes.') as w:
robin = ccrs.Robinson(globe=globe)
assert len(w) == 1
other_args = {'ellps=WGS84', 'lon_0=0'}
check_proj_params('robin', robin, other_args)
# Limits are the same as default since ellipses are not supported.
assert_almost_equal(robin.x_limits, [-17005833.3305252, 17005833.3305252])
assert_almost_equal(robin.y_limits, [-8625154.6651000, 8625154.6651000],
_LIMIT_TOL)
def test_eccentric_globe():
globe = ccrs.Globe(semimajor_axis=1000, semiminor_axis=500,
ellipse=None)
with pytest.warns(UserWarning,
match='does not handle elliptical globes.') as w:
robin = ccrs.Robinson(globe=globe)
assert len(w) == 1
other_args = {'a=1000', 'b=500', 'lon_0=0'}
check_proj_params('robin', robin, other_args)
# Limits are the same as spheres since ellipses are not supported.
assert_almost_equal(robin.x_limits, [-2666.2696851, 2666.2696851])
assert_almost_equal(robin.y_limits, [-1352.3000000, 1352.3000000],
_LIMIT_TOL)
def test_offset():
crs = ccrs.Robinson()
crs_offset = ccrs.Robinson(false_easting=1234, false_northing=-4321)
other_args = {'a=6378137.0', 'lon_0=0', 'x_0=1234', 'y_0=-4321'}
check_proj_params('robin', crs_offset, other_args)
assert tuple(np.array(crs.x_limits) + 1234) == crs_offset.x_limits
assert tuple(np.array(crs.y_limits) - 4321) == crs_offset.y_limits
@pytest.mark.parametrize('lon', [-10.0, 10.0])
def test_central_longitude(lon):
robin = ccrs.Robinson(central_longitude=lon)
other_args = {'a=6378137.0', 'lon_0={}'.format(lon)}
check_proj_params('robin', robin, other_args)
assert_almost_equal(robin.x_limits,
[-17005833.3305252, 17005833.3305252],
decimal=5)
assert_almost_equal(robin.y_limits,
[-8625154.6651000, 8625154.6651000], _LIMIT_TOL)
def test_transform_point():
"""
Mostly tests the workaround for a specific problem.
Problem report in: https://github.com/SciTools/cartopy/issues/23
Fix covered in: https://github.com/SciTools/cartopy/pull/277
"""
# this way has always worked
result = _CRS_ROB.transform_point(35.0, 70.0, _CRS_PC)
assert_array_almost_equal(result, (2376187.27182751, 7275317.81573085),
_TOL)
# this always did something, but result has altered
result = _CRS_ROB.transform_point(np.nan, 70.0, _CRS_PC)
assert np.all(np.isnan(result))
# this used to crash + is now fixed
result = _CRS_ROB.transform_point(35.0, np.nan, _CRS_PC)
assert np.all(np.isnan(result))
def test_transform_points():
"""
Mostly tests the workaround for a specific problem.
Problem report in: https://github.com/SciTools/cartopy/issues/23
Fix covered in: https://github.com/SciTools/cartopy/pull/277
"""
# these always worked
result = _CRS_ROB.transform_points(_CRS_PC,
np.array([35.0]),
np.array([70.0]))
assert_array_almost_equal(result,
[[2376187.27182751, 7275317.81573085, 0]], _TOL)
result = _CRS_ROB.transform_points(_CRS_PC,
np.array([35.0]),
np.array([70.0]),
np.array([0.0]))
assert_array_almost_equal(result,
[[2376187.27182751, 7275317.81573085, 0]], _TOL)
# this always did something, but result has altered
result = _CRS_ROB.transform_points(_CRS_PC,
np.array([np.nan]),
np.array([70.0]))
assert np.all(np.isnan(result))
# this used to crash + is now fixed
result = _CRS_ROB.transform_points(_CRS_PC,
np.array([35.0]),
np.array([np.nan]))
assert np.all(np.isnan(result))
# multipoint case
x = np.array([10.0, 21.0, 0.0, 77.7, np.nan, 0.0])
y = np.array([10.0, np.nan, 10.0, 77.7, 55.5, 0.0])
z = np.array([10.0, 0.0, 0.0, np.nan, 55.5, 0.0])
expect_result = np.array(
[[9.40422591e+05, 1.06952091e+06, 1.00000000e+01],
[11.1, 11.2, 11.3],
[0.0, 1069520.91213902, 0.0],
[22.1, 22.2, 22.3],
[33.1, 33.2, 33.3],
[0.0, 0.0, 0.0]])
result = _CRS_ROB.transform_points(_CRS_PC, x, y, z)
assert result.shape == (6, 3)
assert np.all(np.isnan(result[[1, 3, 4], :]))
result[[1, 3, 4], :] = expect_result[[1, 3, 4], :]
assert not np.any(np.isnan(result))
assert np.allclose(result, expect_result)
| pelson/cartopy | lib/cartopy/tests/crs/test_robinson.py | Python | lgpl-3.0 | 6,942 |
# This program calculates the L2 error for a given velocity file
#
# Usage : python L2ErrorUCouette.py <velocity file> <nx>
#
# Author : Bruno Blais
# Last modified : December 3rd
#Python imports
import os
import math
import numpy
import matplotlib.pyplot as plt
import sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
#***********************************
# Parameters for analytical solution
#***********************************
omega = 0.1000
R=0.1
k = 1./4.;
analyseShear=False
analysePseudo=True
#===========================
# Main program
#===========================
fname = sys.argv[1]
nx = int(sys.argv[2])
# read the file
#print "R-> Velocity file has been read"
if analysePseudo:
[x,y,z,u,v,w,p,V,pseudo] = numpy.loadtxt(fname, unpack=True)
elif analyseShear:
[x,y,z,u,v,w,p,V,shear] = numpy.loadtxt(fname, unpack=True)
else:
[x,y,z,u,v,w,p,V] = numpy.loadtxt(fname, unpack=True)
r = numpy.sqrt(x**2 + y**2)
ut = numpy.sqrt(u**2 + v**2)
#Analytical solution for theta velocity
rplot=[]
errU =[]
errV=[]
errS=[]
for i in range(0,len(r)):
if (r[i]>k*R and r[i] < R):
uth = omega *k* R * (-r[i]/(R) + (R)/r[i]) / (1/k - k)
rplot.append([r[i]])
errU.append([V[i]*(ut[i]-uth)**2])
errV.append([V[i]])
if(analyseShear):
shearth=2*(-2 * omega * (R/r[i])**2 * (k**2/(1-k**2)))**2
errS.append([V[i]*(shear[i]-shearth)**2])
if(analysePseudo):
pseudoth=16 * 2*(-2 * omega * (R)**2 * (k**2/(1-k**2)))**2 * r[i]**(-6)
errS.append([V[i]*(pseudo[i]-pseudoth)**2])
#print "Pseudo, pseudo th : ", pseudo[i], pseudoth, r[i]
nt = len(errU)
L2errU = numpy.sqrt(numpy.sum(errU) / numpy.sum(errV)) / omega / R
if (analyseShear or analysePseudo):
L2errS = numpy.sqrt(numpy.sum(errS) / numpy.sum(errV)) / omega
if (analyseShear or analysePseudo):
print "%i %5.5e %5.5e" %(nx, L2errU,L2errS)
else:
print "%i %5.5e" %(nx, L2errU)
| blaisb/cfdemUtilities | couette/L2ErrorUCouette.py | Python | lgpl-3.0 | 2,061 |
import os
import sys
import os.path
import pickle
import log
from platform import Platform
from ..Platforms import LINUX
#from opal.core.modelstructure import ModelEvaluator
__docformat__ = 'restructuredtext'
class Model:
def __init__(self, modelData=None,
modelStructure=None,
evaluatingOptions=None,
dataFile='blackbox.dat',
**kwargs):
"""
A `BlackBoxModel` encapsulates the
information of a parameter optimization problem.
From the parameter problem point of view, this class has
two components: model data and model structure.
Example::
blackbox = Model(modelStructure, modelData)
An object of this class must contain a link to a solver.
The link to a solver is created by the solver and added to the
BlackBoxModel object upon solving the problem.
"""
self.data = modelData
self.structure = modelStructure
#self.runFileName = runFileName
self.data_file = dataFile
# The evaluating_options attribute accepts only
# options of simple types such as boolean, integer
# or string; in general, any picklable type.
self.evaluating_options = {}
# Update the running options from data
self.evaluating_options.update(modelData.running_options)
# If there is an option with the same name, it is overwritten by
# the setting in model
if evaluatingOptions is not None:
self.evaluating_options.update(evaluatingOptions)
self.evaluating_options.update(kwargs)
# Get the information about the platform in use. Normally, a platform
# object is not picklable, so we extract its description, save it
# along with the model data, and use it to reconstruct the platform
# object at run time.
# By default, LINUX is used
self.platform_description = {'name':'LINUX',
'settings':{}}
if 'platform' in self.evaluating_options.keys():
platform = self.evaluating_options['platform']
if type(platform) == type('a platform name'):
self.platform_description['name'] = platform
elif isinstance(platform, Platform): # A Platform object
self.platform_description['name'] = platform.name
self.platform_description['settings'] = platform.settings
else: # Unable to recognize the specified platform
pass # Do nothing and use the default platform
del self.evaluating_options['platform'] # Remove platform setting
self.initialize()
return
def initialize(self):
# Transformation to information of an optimization model
# The variables are the parameters
self.variables = self.data.parameters
# Reformulate the constraints
self.inequality_constraints = [] # c_i(x) <= 0
self.equality_constraints = [] # c_e(x) = 0
for constraint in self.structure.constraints:
if constraint.lower_bound == constraint.upper_bound:
self.equality_constraints.append(constraint)
else:
if constraint.lower_bound is not None:
self.inequality_constraints.append(constraint)
if constraint.upper_bound is not None:
self.inequality_constraints.append(constraint)
self.bounds = [var.bound for var in self.variables]
# The initial points contain at least one point: the default values
# of the parameters.
self.initial_points = [[var.value for var in self.variables]]
# The "simple constraints" contain only functions of the
# parameters. These constraints are verified before running
# the test.
# In the future, the bound constraints will be considered
# simple_constraints too.
self.simple_constraints = []
pass
# The methods to get information of a general model
def get_n_variable(self):
return len(self.variables)
def get_n_constraints(self):
return len(self.inequality_constraints) + len(self.equality_constraints)
def get_initial_points(self):
return self.initial_points
def add_initial_point(self, point):
converters = {'real':float,
'integer':int,
'categorical':str}
initialPoint = []
for param, val in map(None, self.variables, point):
if param is None: # The point is longer
pass # do nothing
elif val is None: # Set to default value
initialPoint.append(param.get_default())
else: # Convert automatically to the corresponding type
initialPoint.append(converters[param.kind](val))
self.initial_points.append(initialPoint)
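# For example (illustrative, not from the original repo): if the model has a
# 'real' parameter followed by an 'integer' parameter, then
# add_initial_point(["0.5", "3"]) stores [0.5, 3], converting each value to
# the declared parameter kind via the converters table above.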
def get_bound_constraints(self):
return self.bounds
# The methods to get information of a OPAL model
def get_algorithm(self):
return self.data.get_algorithm()
def get_parameters(self):
return self.data.get_parameters()
def get_problems(self):
return self.data.get_problems()
def get_measures(self):
return self.data.get_measures()
def get_structure(self):
return self.structure
| dpo/opal | opal/core/model.py | Python | lgpl-3.0 | 5,479 |
import requests
from bs4 import BeautifulSoup
import operator
def start(url):
word_list = []
source_code = requests.get(url).text
soup = BeautifulSoup(source_code)
for post_text in soup.findAll('a', {'class': 'post-title'}):
content = post_text.string
# Split sentences into words with split
words = content.lower().split()
for each_word in words:
# print(each_word)
word_list.append(each_word)
clean_up_list(word_list)
def clean_up_list(word_list):
clean_word_list = []
for word in word_list:
symbols = "!@#$%^&*()_+<>?:\",./`~{}[]_+|-=\]["
for i in range(0, len(symbols)):
word = word.replace(symbols[i], "")
if len(word) > 0:
# print(word)
clean_word_list.append(word)
create_dictionary(clean_word_list)
def create_dictionary(clean_word_list):
word_count = {}
for word in clean_word_list:
if word in word_count:
word_count[word] += 1
else:
word_count[word] = 1
for key, value in sorted(word_count.items(), key=operator.itemgetter(1)):
print(key, value)
start('https://www.thenewboston.com/forum/')
| washbz250/LearnPythonTheHardWay | Python3/TNB/oldFiles/36.py | Python | unlicense | 1,220 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_agents = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'agents': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'agent_id': {'type': ['integer', 'string']},
'hypervisor': {'type': 'string'},
'os': {'type': 'string'},
'architecture': {'type': 'string'},
'version': {'type': 'string'},
'url': {'type': 'string', 'format': 'uri'},
'md5hash': {'type': 'string'}
},
'required': ['agent_id', 'hypervisor', 'os',
'architecture', 'version', 'url', 'md5hash']
}
}
},
'required': ['agents']
}
}
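# A minimal sketch (illustrative values, not from the original file) of a
# response body that this schema would accept:
#
# {
#     "agents": [{
#         "agent_id": 1,
#         "hypervisor": "kvm",
#         "os": "linux",
#         "architecture": "x86_64",
#         "version": "7.0",
#         "url": "http://example.com/agents/agent-1.tar.gz",
#         "md5hash": "add6bb58e139be103324d04d82d8f545"
#     }]
# }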
| vmahuli/tempest | tempest/api_schema/compute/agents.py | Python | apache-2.0 | 1,566 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
from test_collective_base import TestDistBase
paddle.enable_static()
class TestIdentityOp(TestDistBase):
def _setup_config(self):
pass
def test_identity(self, col_type="identity"):
self.check_with_place("collective_identity_op.py", col_type)
if __name__ == '__main__':
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_c_identity.py | Python | apache-2.0 | 1,020 |
# -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import util
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
__only_on__ = "mssql"
__backend__ = True
@testing.provide_metadata
def test_basic_reflection(self):
meta = self.metadata
users = Table(
"engine_users",
meta,
Column("user_id", types.INT, primary_key=True),
Column("user_name", types.VARCHAR(20), nullable=False),
Column("test1", types.CHAR(5), nullable=False),
Column("test2", types.Float(5), nullable=False),
Column("test2.5", types.Float(), nullable=False),
Column("test3", types.Text()),
Column("test4", types.Numeric, nullable=False),
Column("test4.5", types.Numeric(10, 2), nullable=False),
Column("test5", types.DateTime),
Column(
"parent_user_id",
types.Integer,
ForeignKey("engine_users.user_id"),
),
Column("test6", types.DateTime, nullable=False),
Column("test7", types.Text()),
Column("test8", types.LargeBinary()),
Column("test_passivedefault2", types.Integer, server_default="5"),
Column("test9", types.BINARY(100)),
Column("test_numeric", types.Numeric()),
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", types.Integer, primary_key=True),
Column(
"remote_user_id", types.Integer, ForeignKey(users.c.user_id)
),
Column("email_address", types.String(20)),
)
meta.create_all()
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload=True, autoload_with=testing.db
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload=True,
autoload_with=testing.db,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.provide_metadata
def _test_specific_type(self, type_obj, ddl):
metadata = self.metadata
table = Table("type_test", metadata, Column("col1", type_obj))
table.create()
m2 = MetaData()
table2 = Table("type_test", m2, autoload_with=testing.db)
self.assert_compile(
schema.CreateTable(table2),
"CREATE TABLE type_test (col1 %s NULL)" % ddl,
)
def test_xml_type(self):
self._test_specific_type(mssql.XML, "XML")
def test_image_type(self):
self._test_specific_type(mssql.IMAGE, "IMAGE")
def test_money_type(self):
self._test_specific_type(mssql.MONEY, "MONEY")
def test_numeric_prec_scale(self):
self._test_specific_type(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)")
def test_float(self):
self._test_specific_type(mssql.FLOAT, "FLOAT(53)")
def test_real(self):
self._test_specific_type(mssql.REAL, "REAL")
def test_float_as_real(self):
# FLOAT(5) comes back as REAL
self._test_specific_type(mssql.FLOAT(5), "REAL")
@testing.provide_metadata
def test_identity(self):
metadata = self.metadata
table = Table(
"identity_test",
metadata,
Column(
"col1",
Integer,
mssql_identity_start=2,
mssql_identity_increment=3,
primary_key=True,
),
)
table.create()
meta2 = MetaData(testing.db)
table2 = Table("identity_test", meta2, autoload=True)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], 2)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_increment"], 3)
@testing.emits_warning("Did not recognize")
@testing.provide_metadata
def test_skip_types(self):
metadata = self.metadata
testing.db.execute(
"""
create table foo (id integer primary key, data xml)
"""
)
with mock.patch.object(
testing.db.dialect, "ischema_names", {"int": mssql.INTEGER}
):
t1 = Table("foo", metadata, autoload=True)
assert isinstance(t1.c.id.type, Integer)
assert isinstance(t1.c.data.type, types.NullType)
@testing.provide_metadata
def test_cross_schema_fk_pk_name_overlaps(self):
# test for issue #4228
metadata = self.metadata
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema,
)
Table(
"referrer",
metadata,
Column("id", Integer, primary_key=True),
Column(
"sid",
ForeignKey(
"%s.subject.id" % testing.config.test_schema,
name="fk_subject",
),
),
schema=testing.config.test_schema,
)
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema_2,
)
metadata.create_all()
insp = inspect(testing.db)
eq_(
insp.get_foreign_keys("referrer", testing.config.test_schema),
[
{
"name": "fk_subject",
"constrained_columns": ["sid"],
"referred_schema": "test_schema",
"referred_table": "subject",
"referred_columns": ["id"],
}
],
)
@testing.provide_metadata
def test_table_name_that_is_greater_than_16_chars(self):
metadata = self.metadata
Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Index("foo_idx", "foo"),
)
metadata.create_all()
t = Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", MetaData(), autoload_with=testing.db
)
eq_(t.name, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
@testing.provide_metadata
def test_db_qualified_items(self):
metadata = self.metadata
Table("foo", metadata, Column("id", Integer, primary_key=True))
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
)
metadata.create_all()
dbname = testing.db.scalar("select db_name()")
owner = testing.db.scalar("SELECT user_name()")
referred_schema = "%(dbname)s.%(owner)s" % {
"dbname": dbname,
"owner": owner,
}
inspector = inspect(testing.db)
bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
eq_(
bar_via_db,
[
{
"referred_table": "foo",
"referred_columns": ["id"],
"referred_schema": referred_schema,
"name": "fkfoo",
"constrained_columns": ["foo_id"],
}
],
)
assert testing.db.has_table("bar", schema=referred_schema)
m2 = MetaData()
Table(
"bar",
m2,
schema=referred_schema,
autoload=True,
autoload_with=testing.db,
)
eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
@testing.provide_metadata
def test_indexes_cols(self):
metadata = self.metadata
t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
@testing.provide_metadata
def test_indexes_cols_with_commas(self):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x, col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
@testing.provide_metadata
def test_indexes_cols_with_spaces(self):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
@testing.provide_metadata
def test_max_ident_in_varchar_not_present(self):
"""test [ticket:3504].
Here we are testing not just that the "max" token comes back
as None, but also that these types accept "max" as the value
of "length" on construction, which isn't a directly documented
pattern but is likely in common use.
"""
metadata = self.metadata
Table(
"t",
metadata,
Column("t1", types.String),
Column("t2", types.Text("max")),
Column("t3", types.Text("max")),
Column("t4", types.LargeBinary("max")),
Column("t5", types.VARBINARY("max")),
)
metadata.create_all()
for col in inspect(testing.db).get_columns("t"):
is_(col["type"].length, None)
in_("max", str(col["type"].compile(dialect=testing.db.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)("a string")
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect,
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
"CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect,
)
class ReflectHugeViewTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
# crashes on freetds 0.91, not worth it
__skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)
def setup(self):
self.col_num = 150
self.metadata = MetaData(testing.db)
t = Table(
"base_table",
self.metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(self.col_num)
]
)
self.view_str = view_str = (
"CREATE VIEW huge_named_view AS SELECT %s FROM base_table"
% (
",".join(
"long_named_column_number_%d" % i
for i in range(self.col_num)
)
)
)
assert len(view_str) > 4000
event.listen(t, "after_create", DDL(view_str))
event.listen(t, "before_drop", DDL("DROP VIEW huge_named_view"))
self.metadata.create_all()
def teardown(self):
self.metadata.drop_all()
def test_inspect_view_definition(self):
inspector = Inspector.from_engine(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
class OwnerPlusDBTest(fixtures.TestBase):
def test_default_schema_name_not_interpreted_as_tokenized(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2014_VERSION
mock_connection = mock.Mock(scalar=lambda sql: "Jonah.The.Whale")
schema_name = dialect._get_default_schema_name(mock_connection)
eq_(schema_name, "Jonah.The.Whale")
eq_(
base._owner_plus_db(dialect, schema_name),
(None, "Jonah.The.Whale"),
)
def test_owner_database_pairs_dont_use_for_same_db(self):
dialect = mssql.dialect()
identifier = "my_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect, scalar=mock.Mock(return_value="my_db")
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(mock_connection.mock_calls, [mock.call.scalar("select db_name()")])
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs_switch_for_different_db(self):
dialect = mssql.dialect()
identifier = "my_other_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect, scalar=mock.Mock(return_value="my_db")
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(
mock_connection.mock_calls,
[
mock.call.scalar("select db_name()"),
mock.call.execute("use my_other_db"),
mock.call.execute("use my_db"),
],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs(self):
dialect = mssql.dialect()
for identifier, expected_schema, expected_owner, use_stmt in [
("foo", None, "foo", "use foo"),
("foo.bar", "foo", "bar", "use foo"),
("Foo.Bar", "Foo", "Bar", "use [Foo]"),
("[Foo.Bar]", None, "Foo.Bar", "use [Foo].[Bar]"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat", "use [Foo].[Bar]"),
(
"[foo].]do something; select [foo",
"foo",
"do something; select foo",
"use foo",
),
(
"something; select [foo].bar",
"something; select foo",
"bar",
"use [something; select foo]",
),
]:
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
mock_connection = mock.Mock(
dialect=dialect,
scalar=mock.Mock(return_value="Some ] Database"),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
if schema is None:
eq_(mock_connection.mock_calls, [])
else:
eq_(
mock_connection.mock_calls,
[
mock.call.scalar("select db_name()"),
mock.call.execute(use_stmt),
mock.call.execute("use [Some Database]"),
],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
| cloudera/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/test/dialect/mssql/test_reflection.py | Python | apache-2.0 | 16,960 |
#!/usr/bin/env python3
import argparse
import codecs
import sys
def transform(i,o):
for line in i:
key, trans = line.strip().split(None, 1)
print("{}\t{}".format(" ".join(list(key)), trans), file=o)
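# For example (assumed input, not from the original repo), a lexicon line
# "cat k ae t" is split into the key "cat" and the translation "k ae t",
# and written out as "c a t<TAB>k ae t": the key is exploded into
# space-separated characters while the translation is kept as-is.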
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=codecs.getreader('utf-8')(sys.stdin.buffer))
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=codecs.getwriter('utf-8')(sys.stdout.buffer))
args = parser.parse_args()
transform(args.infile, args.outfile)
| phsmit/kaldi-recipes | common/lex2mtmlex.py | Python | apache-2.0 | 670 |
# -*- coding: utf-8 -*-
__version__ = '0.1'
from flask import Flask
from lib.AfricasTalkingGateway import (
AfricasTalkingGateway, AfricasTalkingGatewayException)
from datetime import timedelta
import logging
import settings
import redis
app = Flask('app')
app.config.from_pyfile('settings.py', silent=True)
red = redis.StrictRedis(host='localhost', port=6379, db=0)
app.config['ONLINE_LAST_MINUTES'] = settings.ONLINE_LAST_MINUTES
app.secret_key = settings.SECRET_KEY
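# 5760 minutes == 4 days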
app.permanent_session_lifetime = timedelta(minutes=5760)
logging.basicConfig(filename='octopus.log', level=logging.DEBUG)
"""
def db_setup():
connection = r.connect(host=settings.RDB_HOST, port=settings.RDB_PORT,
auth_key=settings.rethinkdb_auth)
try:
r.db_create(settings.TARGET_DB).run(connection)
r.db(settings.TARGET_DB).table_create('User').run(connection)
logging.info('Database setup completed')
except RqlError:
logging.info('App database already exists')
except RqlRuntimeError:
logging.info('App database already exists')
finally:
connection.close()
@app.before_request
def before_request():
try:
logging.info('before_request')
g.rdb_conn = r.connect(host=settings.RDB_HOST, port=settings.RDB_PORT,
db=settings.TARGET_DB, auth_key=settings.rethinkdb_auth)
except RqlDriverError:
logging.info('DB Connect Failed')
abort(503, "No database connection could be established")
@app.teardown_request
def teardown_request(exception):
try:
logging.info('teardown_request')
g.rdb_conn.close()
except AttributeError:
logging.info('Database failure - check your connection', exception)
"""
from api import routes
from api import voice
from api import ussd
from api import short_code
from api import airtime
| AfricasTalkingLtd/Outreach | content/tutorials/python/intermediate/octopus/app/__init__.py | Python | apache-2.0 | 1,890 |
import asyncio
from asyncio import coroutine as coro
import random
from broadway import Actor, Props, ActorSystem
class DummyActor(Actor):
def __init__(self, name, partner=None):
super().__init__()
self.name = name
self.partner = partner
@coro
def receive(self, message):
delayed = random.random() / 100
yield from asyncio.sleep(delayed)
print("%s %s delayed %2.1f ms" % (self.name, message, delayed * 1000))
if self.partner:
yield from self.partner.tell(message)
class EchoActor(Actor):
def __init__(self, name, partner=None):
super().__init__()
self.name = name
@coro
def receive(self, message):
yield from self.sender.tell("%s %s" % (self.name, message))
@coro
def task(system, forwarder, dummy, echoer):
for count in range(1, 101):
seed = random.random()
if seed < 0.3:
yield from forwarder.tell("actor %s" % count)
elif seed < 0.6:
yield from dummy.tell("actor %s" % count)
else:
message = yield from echoer.ask("actor %s" % count)
print(message)
yield from asyncio.sleep(0.001)
yield from asyncio.sleep(0.1)
yield from system.stop()
def main():
system = ActorSystem()
forwardee = system.actor_of(Props(DummyActor, "forwardee"))
forwarder = system.actor_of(Props(DummyActor, "forwarder", forwardee))
dummy = system.actor_of(Props(DummyActor, "dummy "))
echoer = system.actor_of(Props(EchoActor, "echoer "))
coro = task(system, forwarder, dummy, echoer)
system.run_until_stop([coro], exit_after=True)
if __name__ == "__main__":
main()
| greencase/broadway | example/basic.py | Python | apache-2.0 | 1,709 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ, too-many-lines
"""Convolutional neural network layers."""
__all__ = ['Conv1D', 'Conv2D', 'Conv3D',
'Conv1DTranspose', 'Conv2DTranspose', 'Conv3DTranspose',
'MaxPool1D', 'MaxPool2D', 'MaxPool3D',
'AvgPool1D', 'AvgPool2D', 'AvgPool3D',
'GlobalMaxPool1D', 'GlobalMaxPool2D', 'GlobalMaxPool3D',
'GlobalAvgPool1D', 'GlobalAvgPool2D', 'GlobalAvgPool3D',
'ReflectionPad2D']
from ..block import HybridBlock
from ... import symbol
from ...base import numeric_types
from .activations import Activation
def _infer_weight_shape(op_name, data_shape, kwargs):
op = getattr(symbol, op_name)
sym = op(symbol.var('data', shape=data_shape), **kwargs)
return sym.infer_shape_partial()[0]
class _Conv(HybridBlock):
"""Abstract nD convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of outputs.
If `use_bias` is `True`, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
Parameters
----------
channels : int
The dimensionality of the output space
i.e. the number of output channels in the convolution.
kernel_size : int or tuple/list of n ints
Specifies the dimensions of the convolution window.
strides: int or tuple/list of n ints,
Specifies the strides of the convolution.
padding : int or tuple/list of n ints,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation: int or tuple/list of n ints,
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two convolution
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str,
Dimension ordering of data and weight. Can be 'NCW', 'NWC', 'NCHW',
'NHWC', 'NCDHW', 'NDHWC', etc. 'N', 'C', 'H', 'W', 'D' stands for
batch, channel, height, width and depth dimensions respectively.
Convolution is performed over 'D', 'H', and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
"""
def __init__(self, channels, kernel_size, strides, padding, dilation,
groups, layout, in_channels=0, activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
op_name='Convolution', adj=None, prefix=None, params=None):
super(_Conv, self).__init__(prefix=prefix, params=params)
with self.name_scope():
self._channels = channels
self._in_channels = in_channels
if isinstance(strides, numeric_types):
strides = (strides,)*len(kernel_size)
if isinstance(padding, numeric_types):
padding = (padding,)*len(kernel_size)
if isinstance(dilation, numeric_types):
dilation = (dilation,)*len(kernel_size)
self._op_name = op_name
self._kwargs = {
'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
'pad': padding, 'num_filter': channels, 'num_group': groups,
'no_bias': not use_bias, 'layout': layout}
if adj is not None:
self._kwargs['adj'] = adj
dshape = [0]*(len(kernel_size) + 2)
dshape[layout.find('N')] = 1
dshape[layout.find('C')] = in_channels
wshapes = _infer_weight_shape(op_name, dshape, self._kwargs)
self.weight = self.params.get('weight', shape=wshapes[1],
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=wshapes[2],
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
if bias is None:
act = getattr(F, self._op_name)(x, weight, name='fwd', **self._kwargs)
else:
act = getattr(F, self._op_name)(x, weight, bias, name='fwd', **self._kwargs)
if self.act is not None:
act = self.act(act)
return act
def _alias(self):
return 'conv'
def __repr__(self):
s = '{name}({mapping}, kernel_size={kernel}, stride={stride}'
len_kernel_size = len(self._kwargs['kernel'])
if self._kwargs['pad'] != (0,) * len_kernel_size:
s += ', padding={pad}'
if self._kwargs['dilate'] != (1,) * len_kernel_size:
s += ', dilation={dilate}'
if hasattr(self, 'out_pad') and self.out_pad != (0,) * len_kernel_size:
s += ', output_padding={out_pad}'.format(out_pad=self.out_pad)
if self._kwargs['num_group'] != 1:
s += ', groups={num_group}'
if self.bias is None:
s += ', bias=False'
if self.act:
s += ', {}'.format(self.act)
s += ')'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
mapping='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]),
**self._kwargs)
class Conv1D(_Conv):
r"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size :int or tuple/list of 1 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 1 int,
Specify the strides of the convolution.
padding : int or a tuple/list of 1 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation : int or tuple/list of 1 int
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout: str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Convolution is applied on the 'W' dimension.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = floor((width+2*padding-dilation*(kernel_size-1)-1)/stride)+1
"""
def __init__(self, channels, kernel_size, strides=1, padding=0, dilation=1,
groups=1, layout='NCW', activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)
assert len(kernel_size) == 1, "kernel_size must be a number or a list of 1 ints"
super(Conv1D, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer, **kwargs)
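# A quick numeric check of the out_width formula in the Conv1D docstring,
# using assumed example values (not from the original module): for an input
# of width 16 with kernel_size=3, strides=2, padding=1, dilation=1,
#   out_width = floor((16 + 2*1 - 1*(3-1) - 1)/2) + 1 = floor(15/2) + 1 = 8
# so Conv1D(channels=4, kernel_size=3, strides=2, padding=1) maps an
# (N, in_channels, 16) input to an (N, 4, 8) output in 'NCW' layout.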
class Conv2D(_Conv):
r"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size :int or tuple/list of 2 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 2 int,
Specify the strides of the convolution.
padding : int or a tuple/list of 2 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation : int or tuple/list of 2 int
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' and 'NHWC'
layout for now. 'N', 'C', 'H', 'W' stands for batch, channel, height,
and width dimensions respectively. Convolution is applied on the 'H' and
'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
out_width = floor((width+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/stride[1])+1
"""
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints"
super(Conv2D, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer, **kwargs)
class Conv3D(_Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is `True`,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size :int or tuple/list of 3 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 3 int,
Specify the strides of the convolution.
padding : int or a tuple/list of 3 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation : int or tuple/list of 3 int
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' and 'NDHWC'
layout for now. 'N', 'C', 'H', 'W', 'D' stands for batch, channel, height,
width and depth dimensions respectively. Convolution is applied on the 'D',
'H' and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = floor((depth+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
out_height = floor((height+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/stride[1])+1
out_width = floor((width+2*padding[2]-dilation[2]*(kernel_size[2]-1)-1)/stride[2])+1
"""
def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0),
dilation=(1, 1, 1), groups=1, layout='NCDHW', activation=None,
use_bias=True, weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
assert layout in ('NCDHW', 'NDHWC'), "Only supports 'NCDHW' and 'NDHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*3
assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints"
super(Conv3D, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer, **kwargs)
class Conv1DTranspose(_Conv):
"""Transposed 1D convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size :int or tuple/list of 1 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 1 int
Specify the strides of the convolution.
padding : int or a tuple/list of 1 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
output_padding: int or a tuple/list of 1 int
Controls the amount of implicit zero-paddings on both sides of the
output for output_padding number of points for each dimension.
dilation : int or tuple/list of 1 int
Controls the spacing between the kernel points; also known as the
a trous algorithm
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Convolution is applied on the 'W' dimension.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = (width-1)*strides-2*padding+kernel_size+output_padding
"""
def __init__(self, channels, kernel_size, strides=1, padding=0, output_padding=0,
dilation=1, groups=1, layout='NCW', activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)
if isinstance(output_padding, numeric_types):
output_padding = (output_padding,)
assert len(kernel_size) == 1, "kernel_size must be a number or a list of 1 ints"
assert len(output_padding) == 1, "output_padding must be a number or a list of 1 ints"
super(Conv1DTranspose, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer,
bias_initializer, op_name='Deconvolution', adj=output_padding, **kwargs)
self.outpad = output_padding
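# A quick numeric check of the transposed out_width formula in the
# Conv1DTranspose docstring, using assumed example values: for an input of
# width 8 with kernel_size=3, strides=2, padding=1, output_padding=1,
#   out_width = (8 - 1)*2 - 2*1 + 3 + 1 = 16
# which undoes the Conv1D example above (width 16 -> 8 -> 16).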
class Conv2DTranspose(_Conv):
"""Transposed 2D convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size :int or tuple/list of 2 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 2 int
Specify the strides of the convolution.
padding : int or a tuple/list of 2 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
output_padding: int or a tuple/list of 2 int
Controls the amount of implicit zero-paddings on both sides of the
output for output_padding number of points for each dimension.
dilation : int or tuple/list of 2 int
Controls the spacing between the kernel points; also known as the
a trous algorithm
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' and 'NHWC'
layout for now. 'N', 'C', 'H', 'W' stands for batch, channel, height,
and width dimensions respectively. Convolution is applied on the 'H' and
'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = (height-1)*strides[0]-2*padding[0]+kernel_size[0]+output_padding[0]
out_width = (width-1)*strides[1]-2*padding[1]+kernel_size[1]+output_padding[1]
"""
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
output_padding=(0, 0), dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
if isinstance(output_padding, numeric_types):
output_padding = (output_padding,)*2
assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints"
assert len(output_padding) == 2, "output_padding must be a number or a list of 2 ints"
super(Conv2DTranspose, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer,
bias_initializer, op_name='Deconvolution', adj=output_padding, **kwargs)
self.outpad = output_padding
class Conv3DTranspose(_Conv):
"""Transposed 3D convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size : int or tuple/list of 3 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 3 int
Specify the strides of the convolution.
padding : int or a tuple/list of 3 int
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points.
output_padding : int or a tuple/list of 3 int
Controls the amount of implicit zero-paddings on both sides of the
output for output_padding number of points for each dimension.
dilation : int or tuple/list of 3 int
Controls the spacing between the kernel points; also known as the
a trous algorithm.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' and 'NDHWC'
layout for now. 'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth,
height and width dimensions respectively. Convolution is applied on the 'D',
'H' and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(i.e. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = (depth-1)*strides[0]-2*padding[0]+kernel_size[0]+output_padding[0]
out_height = (height-1)*strides[1]-2*padding[1]+kernel_size[1]+output_padding[1]
out_width = (width-1)*strides[2]-2*padding[2]+kernel_size[2]+output_padding[2]
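Example (an illustrative sketch added for clarity; it assumes `mxnet` is
imported as `mx`). With an 8x8x8 input, `strides=2` and `kernel_size=3` give
a 17x17x17 output according to the formula above:
>>> layer = Conv3DTranspose(4, kernel_size=3, strides=2)
>>> layer.initialize()
>>> x = mx.nd.random.normal(shape=(1, 2, 8, 8, 8))
>>> layer(x).shape
(1, 4, 17, 17, 17)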
"""
def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0),
output_padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, layout='NCDHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
assert layout in ('NCDHW', 'NDHWC'), "Only supports 'NCDHW' and 'NDHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*3
if isinstance(output_padding, numeric_types):
output_padding = (output_padding,)*3
assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints"
assert len(output_padding) == 3, "output_padding must be a number or a list of 3 ints"
super(Conv3DTranspose, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer,
op_name='Deconvolution', adj=output_padding, **kwargs)
self.outpad = output_padding
class _Pooling(HybridBlock):
"""Abstract class for different pooling layers."""
def __init__(self, pool_size, strides, padding, ceil_mode, global_pool,
pool_type, layout, count_include_pad=None, **kwargs):
super(_Pooling, self).__init__(**kwargs)
if strides is None:
strides = pool_size
if isinstance(strides, numeric_types):
strides = (strides,)*len(pool_size)
if isinstance(padding, numeric_types):
padding = (padding,)*len(pool_size)
self._kwargs = {
'kernel': pool_size, 'stride': strides, 'pad': padding,
'global_pool': global_pool, 'pool_type': pool_type,
'layout': layout,
'pooling_convention': 'full' if ceil_mode else 'valid'}
if count_include_pad is not None:
self._kwargs['count_include_pad'] = count_include_pad
def _alias(self):
return 'pool'
def hybrid_forward(self, F, x):
return F.Pooling(x, name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}(size={kernel}, stride={stride}, padding={pad}, ceil_mode={ceil_mode}'
s += ', global_pool={global_pool}, pool_type={pool_type}, layout={layout})'
return s.format(name=self.__class__.__name__,
ceil_mode=self._kwargs['pooling_convention'] == 'full',
**self._kwargs)
class MaxPool1D(_Pooling):
"""Max pooling operation for one dimensional data.
Parameters
----------
pool_size: int
Size of the max pooling windows.
strides: int, or None
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCW'
Dimension ordering of data and out ('NCW' or 'NWC').
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Pooling is applied on the W dimension.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = floor((width+2*padding-pool_size)/strides)+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
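Example (an illustrative sketch added for clarity; it assumes `mxnet` is
imported as `mx`). With the default `pool_size=2` and matching stride, the
width is halved:
>>> pool = MaxPool1D()
>>> pool(mx.nd.zeros((1, 3, 10))).shape
(1, 3, 5)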
"""
def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
ceil_mode=False, **kwargs):
assert layout in ('NCW', 'NWC'),\
"Only NCW and NWC layouts are valid for 1D Pooling"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)
assert len(pool_size) == 1, "pool_size must be a number or a list of 1 ints"
super(MaxPool1D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'max', layout, **kwargs)
class MaxPool2D(_Pooling):
"""Max pooling operation for two dimensional (spatial) data.
Parameters
----------
pool_size: int or list/tuple of 2 ints,
Size of the max pooling windows.
strides: int, list/tuple of 2 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 2 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCHW'
Dimension ordering of data and out ('NCHW' or 'NHWC').
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively. Padding is applied on the 'H' and 'W' dimensions.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
out_width = floor((width+2*padding[1]-pool_size[1])/strides[1])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
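Example (an illustrative sketch added for clarity; it assumes `mxnet` is
imported as `mx`). The default 2x2 window with stride 2 halves both spatial
dimensions:
>>> pool = MaxPool2D()
>>> pool(mx.nd.zeros((1, 3, 32, 32))).shape
(1, 3, 16, 16)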
"""
def __init__(self, pool_size=(2, 2), strides=None, padding=0, layout='NCHW',
ceil_mode=False, **kwargs):
assert layout in ('NCHW', 'NHWC'),\
"Only NCHW and NHWC layouts are valid for 2D Pooling"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*2
assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
super(MaxPool2D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'max', layout, **kwargs)
class MaxPool3D(_Pooling):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
pool_size: int or list/tuple of 3 ints,
Size of the max pooling windows.
strides: int, list/tuple of 3 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 3 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCDHW'
Dimension ordering of data and out ('NCDHW' or 'NDHWC').
'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height and width
dimensions respectively. Padding is applied on the 'D', 'H' and 'W'
dimensions.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
out_height = floor((height+2*padding[1]-pool_size[1])/strides[1])+1
out_width = floor((width+2*padding[2]-pool_size[2])/strides[2])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
ceil_mode=False, layout='NCDHW', **kwargs):
assert layout in ('NCDHW', 'NDHWC'),\
"Only NCDHW and NDHWC layouts are valid for 3D Pooling"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*3
assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
super(MaxPool3D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'max', layout, **kwargs)
class AvgPool1D(_Pooling):
"""Average pooling operation for temporal data.
Parameters
----------
pool_size: int
Size of the average pooling windows.
strides: int, or None
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCW'
Dimension ordering of data and out ('NCW' or 'NWC').
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Padding is applied on the 'W' dimension.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
count_include_pad : bool, default True
When 'False', will exclude padding elements when computing the average value.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = floor((width+2*padding-pool_size)/strides)+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
ceil_mode=False, count_include_pad=True, **kwargs):
assert layout in ('NCW', 'NWC'),\
"Only NCW and NWC layouts are valid for 1D Pooling"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)
assert len(pool_size) == 1, "pool_size must be a number or a list of 1 ints"
super(AvgPool1D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'avg', layout, count_include_pad,
**kwargs)
class AvgPool2D(_Pooling):
"""Average pooling operation for spatial data.
Parameters
----------
pool_size: int or list/tuple of 2 ints,
Size of the average pooling windows.
strides: int, list/tuple of 2 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 2 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCHW'
Dimension ordering of data and out ('NCHW' or 'NHWC').
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively. Padding is applied on the 'H' and 'W' dimensions.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
count_include_pad : bool, default True
When 'False', will exclude padding elements when computing the average value.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
out_width = floor((width+2*padding[1]-pool_size[1])/strides[1])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
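Example (an illustrative sketch added for clarity; it assumes `mxnet` is
imported as `mx`). With `padding=1` the padded border is included in the
averages by default; pass `count_include_pad=False` to exclude it. The output
shape follows the formula above either way:
>>> pool = AvgPool2D(pool_size=2, strides=2, padding=1)
>>> pool(mx.nd.ones((1, 1, 4, 4))).shape
(1, 1, 3, 3)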
"""
def __init__(self, pool_size=(2, 2), strides=None, padding=0,
ceil_mode=False, layout='NCHW', count_include_pad=True, **kwargs):
assert layout in ('NCHW', 'NHWC'),\
"Only NCHW and NHWC layouts are valid for 2D Pooling"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*2
assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
super(AvgPool2D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'avg', layout, count_include_pad,
**kwargs)
class AvgPool3D(_Pooling):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
pool_size: int or list/tuple of 3 ints,
Size of the average pooling windows.
strides: int, list/tuple of 3 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 3 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCDHW'
Dimension ordering of data and out ('NCDHW' or 'NDHWC').
'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height and width
dimensions respectively. Padding is applied on the 'D', 'H' and 'W'
dimensions.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
count_include_pad : bool, default True
When 'False', will exclude padding elements when computing the average value.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
out_height = floor((height+2*padding[1]-pool_size[1])/strides[1])+1
out_width = floor((width+2*padding[2]-pool_size[2])/strides[2])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
ceil_mode=False, layout='NCDHW', count_include_pad=True, **kwargs):
assert layout in ('NCDHW', 'NDHWC'),\
"Only NCDHW and NDHWC layouts are valid for 3D Pooling"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*3
assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
super(AvgPool3D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'avg', layout, count_include_pad,
**kwargs)
class GlobalMaxPool1D(_Pooling):
"""Gloabl max pooling operation for one dimensional (temporal) data.
Parameters
----------
layout : str, default 'NCW'
Dimension ordering of data and out ('NCW' or 'NWC').
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Pooling is applied on the W dimension.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, 1)`
when `layout` is `NCW`.
"""
def __init__(self, layout='NCW', **kwargs):
assert layout in ('NCW', 'NWC'),\
"Only NCW and NWC layouts are valid for 1D Pooling"
super(GlobalMaxPool1D, self).__init__(
(1,), None, 0, True, True, 'max', layout, **kwargs)
class GlobalMaxPool2D(_Pooling):
"""Global max pooling operation for two dimensional (spatial) data.
Parameters
----------
layout : str, default 'NCHW'
Dimension ordering of data and out ('NCHW' or 'NHWC').
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively. Padding is applied on the 'H' and 'W' dimensions.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, 1, 1)` when `layout` is `NCHW`.
"""
def __init__(self, layout='NCHW', **kwargs):
assert layout in ('NCHW', 'NHWC'),\
"Only NCHW and NHWC layouts are valid for 2D Pooling"
super(GlobalMaxPool2D, self).__init__(
(1, 1), None, 0, True, True, 'max', layout, **kwargs)
class GlobalMaxPool3D(_Pooling):
"""Global max pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
layout : str, default 'NCDHW'
Dimension ordering of data and out ('NCDHW' or 'NDHWC').
'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height and width
dimensions respectively. Padding is applied on the 'D', 'H' and 'W'
dimensions.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, 1, 1, 1)` when `layout` is `NCDHW`.
"""
def __init__(self, layout='NCDHW', **kwargs):
assert layout in ('NCDHW', 'NDHWC'),\
"Only NCDHW and NDHWC layouts are valid for 3D Pooling"
super(GlobalMaxPool3D, self).__init__(
(1, 1, 1), None, 0, True, True, 'max', layout, **kwargs)
class GlobalAvgPool1D(_Pooling):
"""Global average pooling operation for temporal data.
Parameters
----------
layout : str, default 'NCW'
Dimension ordering of data and out ('NCW' or 'NWC').
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Padding is applied on the 'W' dimension.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, 1)`.
"""
def __init__(self, layout='NCW', **kwargs):
assert layout in ('NCW', 'NWC'),\
"Only NCW and NWC layouts are valid for 1D Pooling"
super(GlobalAvgPool1D, self).__init__(
(1,), None, 0, True, True, 'avg', layout, **kwargs)
class GlobalAvgPool2D(_Pooling):
"""Global average pooling operation for spatial data.
Parameters
----------
layout : str, default 'NCHW'
Dimension ordering of data and out ('NCHW' or 'NHWC').
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, 1, 1)` when `layout` is `NCHW`.
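Example (an illustrative sketch added for clarity; it assumes `mxnet` is
imported as `mx`). Global pooling collapses the spatial dimensions to 1x1:
>>> gap = GlobalAvgPool2D()
>>> gap(mx.nd.ones((2, 64, 7, 7))).shape
(2, 64, 1, 1)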
"""
def __init__(self, layout='NCHW', **kwargs):
assert layout in ('NCHW', 'NHWC'),\
"Only NCHW and NHWC layouts are valid for 2D Pooling"
super(GlobalAvgPool2D, self).__init__(
(1, 1), None, 0, True, True, 'avg', layout, **kwargs)
class GlobalAvgPool3D(_Pooling):
"""Global average pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
layout : str, default 'NCDHW'
Dimension ordering of data and out ('NCDHW' or 'NDHWC').
'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height and width
dimensions respectively. Padding is applied on the 'D', 'H' and 'W'
dimensions.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, 1, 1, 1)` when `layout` is `NCDHW`.
"""
def __init__(self, layout='NCDHW', **kwargs):
assert layout in ('NCDHW', 'NDHWC'),\
"Only NCDHW and NDHWC layouts are valid for 3D Pooling"
super(GlobalAvgPool3D, self).__init__(
(1, 1, 1), None, 0, True, True, 'avg', layout, **kwargs)
class ReflectionPad2D(HybridBlock):
r"""Pads the input tensor using the reflection of the input boundary.
Parameters
----------
padding: int
An integer padding size
Inputs:
- **data**: input tensor with the shape :math:`(N, C, H_{in}, W_{in})`.
Outputs:
- **out**: output tensor with the shape :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = H_{in} + 2 \cdot padding
W_{out} = W_{in} + 2 \cdot padding
Examples
--------
>>> m = nn.ReflectionPad2D(3)
>>> input = mx.nd.random.normal(shape=(16, 3, 224, 224))
>>> output = m(input)
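Per the padding formula above, the padded output is 230x230 for a 224x224
input (editor's illustrative addition):
>>> output.shape
(16, 3, 230, 230)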
"""
def __init__(self, padding=0, **kwargs):
super(ReflectionPad2D, self).__init__(**kwargs)
if isinstance(padding, numeric_types):
padding = (0, 0, 0, 0, padding, padding, padding, padding)
assert(len(padding) == 8)
self._padding = padding
def hybrid_forward(self, F, x):
return F.pad(x, mode='reflect', pad_width=self._padding)
|
tlby/mxnet
|
python/mxnet/gluon/nn/conv_layers.py
|
Python
|
apache-2.0
| 54,234
|
import datetime
import json
import io
import os
import re
import shutil
import socket
import tempfile
import threading
import time
import unittest
import docker
from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
from . import fake_api
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
res.raw = raw
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
def fake_read_from_socket(self, response, stream, tty=False):
return six.binary_type()
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
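# Editor's note (illustrative, not part of the original test module): the
# fake_resp() helper above dispatches on the keys registered in
# fake_api.fake_responses, which are either a bare URL or a (url, method)
# tuple. For example, the canned /events payload used later in this module
# can be looked up directly:
#
#     status, content = fake_api.fake_responses[url_prefix + 'events']()
#     resp = response(status_code=status, content=content)
#
# which mirrors what fake_resp('GET', url_prefix + 'events') returns.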
class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.api.client.APIClient',
get=fake_get,
post=fake_post,
put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
self.patcher.stop()
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
APIClient(version=1.12)
self.assertEqual(
str(excinfo.value),
'Version parameter must be a string or None. Found float'
)
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
)
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
)
url = self.client._url('/hello/{0}/world', 'some?name')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
)
url = self.client._url("/images/{0}/push", "localhost:5000/image")
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
self.assertEqual(
url, '{0}{1}'.format(url_base, 'hello/somename/world')
)
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = APIClient(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True,
timeout=None
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True,
timeout=None
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True,
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
TypeError, self.client.create_host_config, security_opt='wrong'
)
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
if six.PY3:
content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
self.assertEqual(result, content_str)
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
self.assertEqual(result, content)
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
self.assertEqual(result, content_str.decode('utf-8'))
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
self.assertEqual(result, content)
class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with APIClient(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
self.mock_send = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_default_user_agent(self):
client = APIClient()
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-sdk-python/%s' % docker.__version__
self.assertEqual(headers['User-Agent'], expected)
def test_custom_user_agent(self):
client = APIClient(user_agent='foo/bar')
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
self.assertEqual(headers['User-Agent'], 'foo/bar')
class DisableSocketTest(unittest.TestCase):
class DummySocket(object):
def __init__(self, timeout=60):
self.timeout = timeout
def settimeout(self, timeout):
self.timeout = timeout
def gettimeout(self):
return self.timeout
def setUp(self):
self.client = APIClient()
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
socket = self.DummySocket()
self.client._disable_socket_timeout(socket)
self.assertEqual(socket.timeout, None)
def test_disable_socket_timeout2(self):
"""Test that the timeouts are disabled on a generic socket object
and its _sock object if present."""
socket = self.DummySocket()
socket._sock = self.DummySocket()
self.client._disable_socket_timeout(socket)
self.assertEqual(socket.timeout, None)
self.assertEqual(socket._sock.timeout, None)
def test_disable_socket_timout_non_blocking(self):
"""Test that a non-blocking socket does not get set to blocking."""
socket = self.DummySocket()
socket._sock = self.DummySocket(0.0)
self.client._disable_socket_timeout(socket)
self.assertEqual(socket.timeout, None)
self.assertEqual(socket._sock.timeout, 0.0)
|
vpetersson/docker-py
|
tests/unit/api_test.py
|
Python
|
apache-2.0
| 16,548
|
"""Support for Modbus."""
from __future__ import annotations
import asyncio
from collections import namedtuple
import logging
from pymodbus.client.sync import ModbusSerialClient, ModbusTcpClient, ModbusUdpClient
from pymodbus.constants import Defaults
from pymodbus.exceptions import ModbusException
from pymodbus.transaction import ModbusRtuFramer
from homeassistant.const import (
CONF_DELAY,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TIMEOUT,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CALLBACK_TYPE, callback
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.event import async_call_later
from .const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_STATE,
ATTR_UNIT,
ATTR_VALUE,
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_WRITE_COIL,
CALL_TYPE_WRITE_COILS,
CALL_TYPE_WRITE_REGISTER,
CALL_TYPE_WRITE_REGISTERS,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_CLOSE_COMM_ON_ERROR,
CONF_MSG_WAIT,
CONF_PARITY,
CONF_RETRIES,
CONF_RETRY_ON_EMPTY,
CONF_STOPBITS,
DEFAULT_HUB,
MODBUS_DOMAIN as DOMAIN,
PLATFORMS,
RTUOVERTCP,
SERIAL,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
TCP,
UDP,
)
_LOGGER = logging.getLogger(__name__)
ConfEntry = namedtuple("ConfEntry", "call_type attr func_name")
RunEntry = namedtuple("RunEntry", "attr func")
PYMODBUS_CALL = [
ConfEntry(
CALL_TYPE_COIL,
"bits",
"read_coils",
),
ConfEntry(
CALL_TYPE_DISCRETE,
"bits",
"read_discrete_inputs",
),
ConfEntry(
CALL_TYPE_REGISTER_HOLDING,
"registers",
"read_holding_registers",
),
ConfEntry(
CALL_TYPE_REGISTER_INPUT,
"registers",
"read_input_registers",
),
ConfEntry(
CALL_TYPE_WRITE_COIL,
"value",
"write_coil",
),
ConfEntry(
CALL_TYPE_WRITE_COILS,
"count",
"write_coils",
),
ConfEntry(
CALL_TYPE_WRITE_REGISTER,
"value",
"write_register",
),
ConfEntry(
CALL_TYPE_WRITE_REGISTERS,
"count",
"write_registers",
),
]
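# Editor's note (illustrative): each ConfEntry ties a hub call type to the
# pymodbus client method of the same name and to the attribute the hub reads
# back from its result. For example, CALL_TYPE_REGISTER_HOLDING resolves to
# client.read_holding_registers(address, count, unit=unit), whose result
# exposes `.registers` as listed above.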
async def async_modbus_setup(
hass, config, service_write_register_schema, service_write_coil_schema
):
"""Set up Modbus component."""
hass.data[DOMAIN] = hub_collect = {}
for conf_hub in config[DOMAIN]:
my_hub = ModbusHub(hass, conf_hub)
hub_collect[conf_hub[CONF_NAME]] = my_hub
# modbus needs to be activated before components are loaded
# to avoid a race condition
if not await my_hub.async_setup():
return False
# load platforms
for component, conf_key in PLATFORMS:
if conf_key in conf_hub:
hass.async_create_task(
async_load_platform(hass, component, DOMAIN, conf_hub, config)
)
async def async_stop_modbus(event):
"""Stop Modbus service."""
for client in hub_collect.values():
await client.async_close()
del client
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_modbus)
async def async_write_register(service):
"""Write Modbus registers."""
unit = int(float(service.data[ATTR_UNIT]))
address = int(float(service.data[ATTR_ADDRESS]))
value = service.data[ATTR_VALUE]
client_name = (
service.data[ATTR_HUB] if ATTR_HUB in service.data else DEFAULT_HUB
)
if isinstance(value, list):
await hub_collect[client_name].async_pymodbus_call(
unit, address, [int(float(i)) for i in value], CALL_TYPE_WRITE_REGISTERS
)
else:
await hub_collect[client_name].async_pymodbus_call(
unit, address, int(float(value)), CALL_TYPE_WRITE_REGISTER
)
hass.services.async_register(
DOMAIN,
SERVICE_WRITE_REGISTER,
async_write_register,
schema=service_write_register_schema,
)
async def async_write_coil(service):
"""Write Modbus coil."""
unit = service.data[ATTR_UNIT]
address = service.data[ATTR_ADDRESS]
state = service.data[ATTR_STATE]
client_name = (
service.data[ATTR_HUB] if ATTR_HUB in service.data else DEFAULT_HUB
)
if isinstance(state, list):
await hub_collect[client_name].async_pymodbus_call(
unit, address, state, CALL_TYPE_WRITE_COILS
)
else:
await hub_collect[client_name].async_pymodbus_call(
unit, address, state, CALL_TYPE_WRITE_COIL
)
hass.services.async_register(
DOMAIN, SERVICE_WRITE_COIL, async_write_coil, schema=service_write_coil_schema
)
return True
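# Illustrative sketch (editor's addition): assuming the ATTR_* constants map
# to the usual "hub"/"unit"/"address"/"value" service data keys, a call with
#
#     service data: {"hub": "hub1", "unit": 1, "address": 128, "value": [1, 2, 3]}
#
# is dispatched by async_write_register() above as CALL_TYPE_WRITE_REGISTERS,
# while a scalar "value" uses CALL_TYPE_WRITE_REGISTER; "hub" is optional and
# falls back to DEFAULT_HUB.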
class ModbusHub:
"""Thread safe wrapper class for pymodbus."""
name: str
def __init__(self, hass, client_config):
"""Initialize the Modbus hub."""
# generic configuration
self._client = None
self.entity_timers: list[CALLBACK_TYPE] = []
self._async_cancel_listener = None
self._in_error = False
self._lock = asyncio.Lock()
self.hass = hass
self.name = client_config[CONF_NAME]
self._config_type = client_config[CONF_TYPE]
self._config_delay = client_config[CONF_DELAY]
self._pb_call = {}
self._pb_class = {
SERIAL: ModbusSerialClient,
TCP: ModbusTcpClient,
UDP: ModbusUdpClient,
RTUOVERTCP: ModbusTcpClient,
}
self._pb_params = {
"port": client_config[CONF_PORT],
"timeout": client_config[CONF_TIMEOUT],
"reset_socket": client_config[CONF_CLOSE_COMM_ON_ERROR],
"retries": client_config[CONF_RETRIES],
"retry_on_empty": client_config[CONF_RETRY_ON_EMPTY],
}
if self._config_type == SERIAL:
# serial configuration
self._pb_params.update(
{
"method": client_config[CONF_METHOD],
"baudrate": client_config[CONF_BAUDRATE],
"stopbits": client_config[CONF_STOPBITS],
"bytesize": client_config[CONF_BYTESIZE],
"parity": client_config[CONF_PARITY],
}
)
else:
# network configuration
self._pb_params["host"] = client_config[CONF_HOST]
if self._config_type == RTUOVERTCP:
self._pb_params["framer"] = ModbusRtuFramer
Defaults.Timeout = client_config[CONF_TIMEOUT]
if CONF_MSG_WAIT in client_config:
self._msg_wait = client_config[CONF_MSG_WAIT] / 1000
elif self._config_type == SERIAL:
self._msg_wait = 30 / 1000
else:
self._msg_wait = 0
def _log_error(self, text: str, error_state=True):
log_text = f"Pymodbus: {self.name}: {text}"
if self._in_error:
_LOGGER.debug(log_text)
else:
_LOGGER.error(log_text)
self._in_error = error_state
async def async_setup(self):
"""Set up pymodbus client."""
try:
self._client = self._pb_class[self._config_type](**self._pb_params)
except ModbusException as exception_error:
self._log_error(str(exception_error), error_state=False)
return False
for entry in PYMODBUS_CALL:
func = getattr(self._client, entry.func_name)
self._pb_call[entry.call_type] = RunEntry(entry.attr, func)
await self.async_connect_task()
return True
async def async_connect_task(self):
"""Try to connect, and retry if needed."""
async with self._lock:
if not await self.hass.async_add_executor_job(self._pymodbus_connect):
err = f"{self.name} connect failed, retry in pymodbus"
self._log_error(err, error_state=False)
return
# Start counting down to allow modbus requests.
if self._config_delay:
self._async_cancel_listener = async_call_later(
self.hass, self._config_delay, self.async_end_delay
)
@callback
def async_end_delay(self, args):
"""End startup delay."""
self._async_cancel_listener = None
self._config_delay = 0
async def async_close(self):
"""Disconnect client."""
if self._async_cancel_listener:
self._async_cancel_listener()
self._async_cancel_listener = None
for call in self.entity_timers:
call()
self.entity_timers = []
async with self._lock:
if self._client:
try:
self._client.close()
except ModbusException as exception_error:
self._log_error(str(exception_error))
self._client = None
def _pymodbus_connect(self):
"""Connect client."""
try:
return self._client.connect()
except ModbusException as exception_error:
self._log_error(str(exception_error), error_state=False)
return False
def _pymodbus_call(self, unit, address, value, use_call):
"""Call sync. pymodbus."""
kwargs = {"unit": unit} if unit else {}
entry = self._pb_call[use_call]
try:
result = entry.func(address, value, **kwargs)
except ModbusException as exception_error:
self._log_error(str(exception_error))
return None
if not hasattr(result, entry.attr):
self._log_error(str(result))
return None
self._in_error = False
return result
async def async_pymodbus_call(self, unit, address, value, use_call):
"""Convert async to sync pymodbus call."""
if self._config_delay:
return None
async with self._lock:
if not self._client:
return None
result = await self.hass.async_add_executor_job(
self._pymodbus_call, unit, address, value, use_call
)
if self._msg_wait:
# small delay until next request/response
await asyncio.sleep(self._msg_wait)
return result
|
Danielhiversen/home-assistant
|
homeassistant/components/modbus/modbus.py
|
Python
|
apache-2.0
| 10,557
|
from slamon.afm.tables import Task
from slamon.afm.afm_app import app
from slamon.slamon_logging import getSLAMonLogger
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import IntegrityError, ProgrammingError
from bottle import request, HTTPError
from slamon.afm.database import create_session
import jsonschema
import json
logger = getSLAMonLogger(__name__)
post_task_schema = {
'type': 'object',
'properties': {
'task_id': {
'type': 'string',
'pattern': '^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'
},
'task_type': {
'type': 'string'
},
'task_version': {
'type': 'integer'
},
'task_data': {
'type': 'object'
},
'test_id': {
'type': 'string',
'pattern': '^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'
}
},
'required': ['task_id', 'task_type', 'task_version', 'test_id'],
'additionalProperties': False
}
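# Editor's example (illustrative): a minimal payload accepted by the schema
# above; the UUID and task_data values are placeholders echoing the
# get_task() docstring below.
#
#     jsonschema.validate({
#         'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
#         'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013',
#         'task_type': 'wait',
#         'task_version': 1,
#         'task_data': {'time': 5},
#     }, post_task_schema)  # raises ValidationError only for invalid payloads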
@app.post('/task')
@app.post('/task/')
def post_task():
data = request.json
if data is None:
raise HTTPError(400)
try:
jsonschema.validate(data, post_task_schema)
except jsonschema.ValidationError:
raise HTTPError(400)
session = create_session()
task_uuid = str(data['task_id'])
task_type = str(data['task_type'])
task_test_id = data['test_id']
task_data = ""
task = Task(
uuid=task_uuid,
type=task_type,
version=int(data['task_version']),
test_id=task_test_id
)
if 'task_data' in data:
task_data = json.dumps(data['task_data'])
task.data = task_data
try:
session.add(task)
except IntegrityError:
session.rollback()
raise HTTPError(400)
try:
session.commit()
except (IntegrityError, ProgrammingError):
session.rollback()
logger.error("Failed to commit database changes for BPMS task POST")
raise HTTPError(400)
finally:
session.close()
logger.info("Task posted by BPMS - Task's type: {}, test process id: {}, uuid: {}, parameters: {}"
.format(task_type, task_test_id, task_uuid, task_data))
@app.get('/task/<task_uuid:re:[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}>')
@app.get('/task/<task_uuid:re:[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}>/')
def get_task(task_uuid: str):
"""
Gets information about a single task with uuid task_uuid
:param task_uuid: uuid of the task
:return: dict in following format
{
'task_id': 'de305d54-75b4-431b-adb2-eb6b9e546013', # UUID of the task (str)
'test_id': 'de305d54-75b4-431b-adb2-eb6b9e546013', # UUID of the test (str)
'task_type': 'wait', # type of the task (str)
'task_version': 1, # Version number of the task
'task_data': {}, # Dict containing data passed to the task (if any)
'task_completed': '31-03-2015:12:12:12', # Time when task was completed (if completed)
'task_result': {}, # Dict containing task's results (if completed)
'task_failed': '31-03-2015:12:12:12', # Time when task failed (if failed)
'task_error': 'Something went wrong' # Error that caused task to fail (if failed)
}
"""
try:
session = create_session()
except Exception:
raise HTTPError(500)
try:
query = session.query(Task)
task = query.filter(Task.uuid == str(task_uuid)).one()
except NoResultFound:
raise HTTPError(404)
finally:
session.close()
task_desc = {
'task_id': task.uuid,
'test_id': task.test_id,
'task_type': task.type,
'task_version': task.version
}
if task.data is not None:
task_desc['task_data'] = json.loads(task.data)
if task.failed:
task_desc['task_failed'] = str(task.failed)
task_desc['task_error'] = str(task.error)
elif task.completed:
task_desc['task_completed'] = str(task.completed)
task_desc['task_result'] = json.loads(task.result_data)
return task_desc
|
SLAMon/SLAMon
|
slamon/afm/routes/bpms_routes.py
|
Python
|
apache-2.0
| 4,408
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import logging
import logging.config
import os
import shutil
import subprocess
import sys
import tempfile
from platforms.chocolatey import build_chocolatey, publish_chocolatey
from platforms.common import ReleaseException, docker, run
from platforms.debian import build_deb
from platforms.homebrew import (
build_bottle,
log_about_manual_tap_push,
publish_tap_changes,
validate_tap,
)
from releases import (
add_assets,
create_new_release,
get_all_releases,
get_current_user,
get_release_for_tag,
get_token,
)
TARGET_MACOS_VERSION = "yosemite"
TARGET_MACOS_VERSION_SPEC = TARGET_MACOS_VERSION
def parse_args(args):
parser = argparse.ArgumentParser("Publish releases of buck to github")
parser.add_argument(
"--valid-git-upstreams",
default=(
"git@github.com:facebook/buck.git",
"https://github.com/facebook/buck.git",
),
nargs="+",
help="List of valid upstreams for the git repository in order to publish",
)
parser.add_argument(
"--github-token-file",
default=os.path.expanduser("~/.buck-github-token"),
help="A file containing the github token to use",
)
parser.add_argument(
"--github-token",
help="If provided, use this github token instead of the one in `--github-token-file`",
)
parser.add_argument(
"--repository",
default="facebook/buck",
help="The github repository to operate on",
)
parser.add_argument(
"--tap-repository", default="facebook/fb", help="The tap to use for homebrew"
)
parser.add_argument(
"--version",
default=datetime.datetime.now().strftime("%Y.%m.%d.01"),
help=(
"Version to use in git tags and github releases. This is generated "
"by default"
),
)
parser.add_argument(
"--use-existing-release",
action="store_true",
help=(
"If specified, use an existing release (specified by --version), rather "
"than pushing tags and creating a new release"
),
)
parser.add_argument(
"--release-message",
help=(
"If specified, use this for the release message. If not specified, "
"and a new release is created, user will be prompted for a message"
),
)
parser.add_argument(
"--no-prompt-for-message",
help="If set, use a default message rather than prompting for a message",
action="store_false",
dest="prompt_for_message",
)
parser.add_argument(
"--no-build-deb",
dest="build_deb",
action="store_false",
help="Do not build deb packages for this release",
)
parser.add_argument(
"--no-build-homebrew",
dest="build_homebrew",
action="store_false",
help="Do not build homebrew packages for this release",
)
parser.add_argument(
"--no-build-chocolatey",
dest="build_chocolatey",
action="store_false",
help="Do not build chocolatey packages for this release",
)
parser.add_argument(
"--deb-file",
help="Upload this file as the deb for this release. Implies --no-build-deb",
)
parser.add_argument(
"--homebrew-file",
help="Upload this file as the bottle for this release. Implies --no-build-homebrew",
)
parser.add_argument(
"--chocolatey-file",
help="Upload this file as the nupkg for this release. Implies --no-build-chocolatey",
)
parser.add_argument(
"--docker-linux-host",
help="If provided, the docker:port to connect to to build linux images",
)
parser.add_argument(
"--docker-windows-host",
help="If provided, the docker:port to connect to to build windows images",
)
parser.add_argument(
"--docker-windows-memory",
default="4g",
help="The memory argument to pass to docker for windows containers",
)
parser.add_argument(
"--docker-windows-isolation",
default="process",
help="The --isolation= argument for windows docker commands",
)
parser.add_argument(
"--keep-temp-files",
action="store_true",
help="Keep temporary files regardless of success/failure",
)
parser.add_argument(
"--no-upload-assets",
dest="upload_assets",
action="store_false",
help="Do not upload assets",
)
parser.add_argument(
"--homebrew-target-macos-version",
default=TARGET_MACOS_VERSION,
help="The target macos version to use in homebrew specs",
)
parser.add_argument(
"--homebrew-target-macos-version-spec",
default=TARGET_MACOS_VERSION_SPEC,
help="The target macos version spec to use in homebrew specs",
)
parser.add_argument(
"--no-homebrew-push-tap",
dest="homebrew_push_tap",
action="store_false",
help="Do not push the homebrew tap. A manual commit will have to be made",
)
parser.add_argument(
"--no-chocolatey-publish",
dest="chocolatey_publish",
action="store_false",
help="Do not publish to chocolatey's community stream",
)
parser.add_argument(
"--chocolatey-token",
help="If provided, use this chocolatey token instead of the one in `--chocolatey-token-file`",
)
parser.add_argument(
"--chocolatey-token-file",
default=os.path.expanduser("~/.buck-chocolatey-token"),
help="A file containing the chocolatey token to use",
)
parser.add_argument(
"--output-dir",
help=(
"If specified, artifacts will be written to this directory, instead of "
"a temporary one"
),
)
parser.add_argument(
"--homebrew-dir",
help=(
"Where homebrew is (e.g. /usr/local). If not specified, homebrew will be "
"installed in a separate, temporary directory that gets cleaned up after "
"building (unless --keep-temp-files is specified). If --output-dir is "
"specified, homebrew will be installed in a subdirectory there. This can "
"be useful to ensure that tap directories are preserved and can be "
"validated and pushed to github if a first run fails, or if a "
"--no-upload-asset run is done"
),
)
parser.add_argument(
"--insecure-chocolatey-upload",
action="store_true",
help=(
"Do less certificate verification when uploading to chocolatey. "
"This is a workaround for "
"https://github.com/chocolatey/chocolatey.org/issues/584"
),
)
parsed_kwargs = dict(parser.parse_args(args)._get_kwargs())
if parsed_kwargs["deb_file"]:
parsed_kwargs["build_deb"] = False
if parsed_kwargs["homebrew_file"]:
parsed_kwargs["build_homebrew"] = False
if parsed_kwargs["chocolatey_file"]:
parsed_kwargs["build_chocolatey"] = False
return argparse.Namespace(**parsed_kwargs)
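# Illustrative invocation sketch (editor's addition); all flags are defined
# in parse_args() above and the version string is a placeholder:
#
#     python publish_release.py \
#         --version 2019.01.01.01 \
#         --use-existing-release \
#         --no-upload-assets \
#         --keep-temp-files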
def configure_logging():
# Bold message
TTY_LOGGING = " publish_release => \033[1m%(message)s\033[0m"
NOTTY_LOGGING = " publish_release => %(message)s"
msg_format = TTY_LOGGING if sys.stderr.isatty() else NOTTY_LOGGING
# Red message for errors
TTY_ERROR_LOGGING = " publish_release => \033[1;31mERROR: %(message)s\033[0m"
NOTTY_ERROR_LOGGING = " publish_release => ERROR: %(message)s"
error_msg_format = TTY_ERROR_LOGGING if sys.stderr.isatty() else NOTTY_ERROR_LOGGING
class LevelFilter(logging.Filter):
def filter(self, record):
return record.levelno < logging.ERROR
logging.config.dictConfig(
{
"version": 1,
"filters": {"lower_than_error": {"()": LevelFilter}},
"formatters": {
"info": {"format": msg_format},
"error": {"format": error_msg_format},
},
"handlers": {
"info": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "info",
"filters": ["lower_than_error"],
},
"error": {
"level": "ERROR",
"class": "logging.StreamHandler",
"formatter": "error",
},
},
"loggers": {"": {"handlers": ["info", "error"], "level": "INFO"}},
}
)
def validate_repo_upstream(args):
""" Make sure we're in the right repository, not a fork """
output = subprocess.check_output(
["git", "remote", "get-url", "origin"], encoding="utf-8"
).strip()
if output not in args.valid_git_upstreams:
raise ReleaseException(
"Releases may only be published from the upstream OSS buck repository"
)
def validate_environment(args):
""" Make sure we can build """
validate_repo_upstream(args)
if args.build_deb:
ret = docker(
args.docker_linux_host,
["info", "-f", "{{.OSType}}"],
check=False,
capture_output=True,
)
host = args.docker_linux_host or "localhost"
if ret.returncode != 0:
raise ReleaseException(
"docker info on linux host {} failed. debs cannot be built", host
)
host_os = ret.stdout.decode("utf-8").strip()
if host_os != "linux":
raise ReleaseException(
"docker info on host {} returned type '{}' not 'linux'. debs cannot be built",
host,
host_os,
)
if args.build_chocolatey:
ret = docker(
args.docker_windows_host,
["info", "-f", "{{.OSType}}"],
check=False,
capture_output=True,
)
host = args.docker_windows_host or "localhost"
if ret.returncode != 0:
raise ReleaseException(
"docker info on windows host {} failed. chocolatey nupkgs cannot be built",
host,
)
host_os = ret.stdout.decode("utf-8").strip()
if host_os != "windows":
raise ReleaseException(
"docker info on host {} returned type '{}' not 'windows'. chocolatey nupkgs cannot be built",
host,
host_os,
)
if args.build_homebrew:
if args.homebrew_dir:
if not os.path.exists(args.homebrew_dir):
raise ReleaseException(
"Specified homebrew path, {}, does not exist", args.homebrew_dir
)
brew_path = os.path.join(args.homebrew_dir, "bin", "brew")
try:
ret = run([brew_path, "--version"])
except Exception:
raise ReleaseException(
"{} --version failed. bottles cannot be created", brew_path
)
def build(args, output_dir, release, github_token, homebrew_dir):
deb_file = args.deb_file
chocolatey_file = args.chocolatey_file
homebrew_file = args.homebrew_file
if args.build_deb:
user = get_current_user(github_token)
releases = get_all_releases(args.repository, github_token)
deb_file = build_deb(
args.repository, release, user, releases, args.docker_linux_host, output_dir
)
if args.build_homebrew:
homebrew_file = build_bottle(
homebrew_dir,
release,
args.repository,
args.tap_repository,
args.homebrew_target_macos_version,
args.homebrew_target_macos_version_spec,
output_dir,
)
if args.build_chocolatey:
chocolatey_file = build_chocolatey(
args.repository,
release,
args.docker_windows_host,
args.docker_windows_memory,
args.docker_windows_isolation,
output_dir,
)
return deb_file, homebrew_file, chocolatey_file
def publish(
args,
release,
github_token,
chocolatey_token,
deb_file,
homebrew_file,
homebrew_dir,
chocolatey_file,
):
if args.upload_assets:
if deb_file:
add_assets(release, github_token, deb_file)
if chocolatey_file:
add_assets(release, github_token, chocolatey_file)
if args.chocolatey_publish:
publish_chocolatey(
chocolatey_file, chocolatey_token, args.insecure_chocolatey_upload
)
if homebrew_file:
add_assets(release, github_token, homebrew_file)
validate_tap(homebrew_dir, args.tap_repository, args.version)
if args.homebrew_push_tap:
publish_tap_changes(homebrew_dir, args.tap_repository, args.version, github_token)
else:
log_about_manual_tap_push(args.tap_repository)
def main():
args = parse_args(sys.argv[1:])
configure_logging()
version_tag = "v" + args.version
github_token = (
args.github_token if args.github_token else get_token(args.github_token_file)
)
if args.chocolatey_publish:
chocolatey_token = (
args.chocolatey_token
if args.chocolatey_token
else get_token(args.chocolatey_token_file)
)
else:
chocolatey_token = None
temp_dir = None
temp_homebrew_dir = None
homebrew_file = None
try:
validate_environment(args)
if args.use_existing_release:
release = get_release_for_tag(args.repository, github_token, version_tag)
else:
release = create_new_release(
args.repository,
github_token,
version_tag,
args.release_message,
args.prompt_for_message,
)
if args.output_dir:
output_dir = args.output_dir
if not os.path.exists(output_dir):
logging.info("{} does not exist. Creating it".format(output_dir))
os.makedirs(output_dir, exist_ok=True)
else:
temp_dir = tempfile.mkdtemp()
output_dir = temp_dir
if args.homebrew_dir:
homebrew_dir = args.homebrew_dir
elif args.output_dir:
homebrew_dir = os.path.abspath(
os.path.join(output_dir, "homebrew_" + version_tag)
)
else:
temp_homebrew_dir = tempfile.mkdtemp()
homebrew_dir = temp_homebrew_dir
deb_file, homebrew_file, chocolatey_file = build(
args, output_dir, release, github_token, homebrew_dir
)
publish(
args,
release,
github_token,
chocolatey_token,
deb_file,
homebrew_file,
homebrew_dir,
chocolatey_file,
)
except ReleaseException as e:
logging.error(str(e))
finally:
if not args.keep_temp_files:
def remove(path):
try:
shutil.rmtree(path)
except Exception:
logging.error("Could not remove temp dir at {}".format(path))
if temp_dir:
remove(temp_dir)
if temp_homebrew_dir:
# Keep the temporary tap checkout if a bottle was built but the tap changes
# were not pushed, so the user can push them manually later.
if not homebrew_file or args.homebrew_push_tap:
remove(temp_homebrew_dir)
if __name__ == "__main__":
main()
|
nguyentruongtho/buck
|
tools/release/publish_release.py
|
Python
|
apache-2.0
| 16,394
|
# coding:UTF-8
"""
Django settings for own project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9v^z8-bchk!wo57m*3h%7w*(mkljzxs#)zerrsdrt92*#tcy87'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'own.middle.LoginCheck',
)
ROOT_URLCONF = 'own.urls'
WSGI_APPLICATION = 'own.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'own',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
SESSION_COOKIE_AGE = 3600 * 24 * 30
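# SESSION_COOKIE_AGE above is 3600 s * 24 h * 30 days = 2,592,000 seconds,
# i.e. session cookies for the file-backed sessions stay valid for about one month.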
# In debug mode, log the SQL statements that Django executes
if DEBUG:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'propagate': True,
'level':'DEBUG',
},
}
}
# Qiniu cloud storage settings
QINIU_HOST = ""
QINIU_KEY = ""
QINIU_TOKEN = ""
QINIU_BUCKET = ""
|
wangjun/own
|
own/settings.py
|
Python
|
apache-2.0
| 2,871
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.transactional_multi_map_message_type import *
REQUEST_TYPE = TRANSACTIONALMULTIMAP_SIZE
RESPONSE_TYPE = 102
RETRYABLE = False
def calculate_size(name, txn_id, thread_id):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_str(txn_id)
data_size += LONG_SIZE_IN_BYTES
return data_size
def encode_request(name, txn_id, thread_id):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, txn_id, thread_id))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_str(txn_id)
client_message.append_long(thread_id)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_int()
return parameters
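# Illustrative usage sketch (the invocation object below is an assumption for
# demonstration purposes and is not part of this generated codec module):
#
#   request = encode_request(name, txn_id, thread_id)
#   response_message = invocation_service.invoke(request)    # hypothetical call
#   multimap_size = decode_response(response_message)['response']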
|
cangencer/hazelcast-python-client
|
hazelcast/protocol/codec/transactional_multi_map_size_codec.py
|
Python
|
apache-2.0
| 1,288
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Facilities for user prompting for request context."""
import abc
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.credentials import gce as c_gce
def _GetGCERegion():
if properties.VALUES.core.check_gce_metadata.GetBool():
return c_gce.Metadata().Region()
return None
def _GetGCEZone():
if properties.VALUES.core.check_gce_metadata.GetBool():
return c_gce.Metadata().Zone()
return None
GCE_SUGGESTION_SOURCES = {
'zone': _GetGCEZone,
'region': _GetGCERegion
}
class Error(core_exceptions.Error):
"""Exceptions for the scope prompter."""
pass
class _InvalidPromptInvocation(Error):
"""Exception for invoking prompt with invalid parameters."""
pass
class ScopePrompter(object):
"""A mixin class prompting in the case of ambiguous resource context."""
__metaclass__ = abc.ABCMeta
def FetchChoiceResources(self, attribute, service, flag_names,
prefix_filter=None):
"""Returns a list of choices used to prompt with."""
if prefix_filter:
filter_expr = 'name eq {0}.*'.format(prefix_filter)
else:
filter_expr = None
errors = []
global_resources = lister.GetGlobalResources(
service=service,
project=self.project,
filter_expr=filter_expr,
http=self.http,
batch_url=self.batch_url,
errors=errors)
choices = [resource for resource in global_resources]
if errors or not choices:
punctuation = ':' if errors else '.'
utils.RaiseToolException(
errors,
'Unable to fetch a list of {0}s. Specifying [{1}] may fix this '
'issue{2}'.format(attribute, ', or '.join(flag_names), punctuation))
return choices
def _PromptForScope(self, ambiguous_names,
attributes, services, resource_type,
flag_names, prefix_filter):
"""Prompts user to specify a scope for ambiguous resources.
Args:
ambiguous_names: list(tuple(name, params, collection)),
list of parameters which can be fed into resources.Parse.
attributes: list(str), list of scopes to prompt over.
services: list(apitool.base.py.base_api.BaseApiService), service for each
attribute/scope.
resource_type: str, collection name without api name.
flag_names: list(str), flag names which can be used to specify scopes.
prefix_filter: str, used to filter retrieved resources on backend.
Returns:
List of fully resolved names for provided ambiguous_names parameter.
Raises:
_InvalidPromptInvocation: if number of attributes does not match number of
services.
"""
def RaiseOnPromptFailure():
"""Call this to raise an exn when prompt cannot read from input stream."""
phrases = ('one of ', 'flags') if len(flag_names) > 1 else ('', 'flag')
raise calliope_exceptions.ToolException(
'Unable to prompt. Specify {0}the [{1}] {2}.'.format(
phrases[0], ', '.join(flag_names), phrases[1]))
# one service per attribute
if len(attributes) != len(services):
raise _InvalidPromptInvocation()
# Update selected_resource_name in response to user prompts
selected_resource_name = None
selected_attribute = None
# Try first to give a precise suggestion based on current VM zone/region.
if len(attributes) == 1:
gce_suggestor = (
GCE_SUGGESTION_SOURCES.get(attributes[0]) or (lambda: None))
gce_suggested_resource = gce_suggestor()
if gce_suggested_resource:
selected_attribute = attributes[0]
selected_resource_name = self._PromptDidYouMeanScope(
ambiguous_names, attributes[0], resource_type,
gce_suggested_resource, RaiseOnPromptFailure)
# If the user said "no" fall back to a generic prompt.
if selected_resource_name is None:
choice_resources = {}
for service, attribute in zip(services, attributes):
choice_resources[attribute] = (
self.FetchChoiceResources(
attribute, service, flag_names, prefix_filter))
selected_attribute, selected_resource_name = self._PromptForScopeList(
ambiguous_names, attributes, resource_type, choice_resources,
RaiseOnPromptFailure)
# _PromptForScopeList ensures this.
assert selected_resource_name is not None
assert selected_attribute is not None
result = []
for ambigous_name, params, collection in ambiguous_names:
new_params = params.copy()
new_params[selected_attribute] = selected_resource_name
try:
resource_ref = self.resources.Parse(
ambigous_name, params=new_params, collection=collection)
except (resources.UnknownFieldException,
properties.RequiredPropertyError):
pass
else:
if hasattr(resource_ref, selected_attribute):
result.append(resource_ref)
return result
def _PromptDidYouMeanScope(self, ambiguous_refs, attribute, resource_type,
suggested_resource, raise_on_prompt_failure):
"""Prompts "did you mean <scope>". Returns str or None, or raises."""
# targetInstances -> target instances
resource_name = utils.CamelCaseToOutputFriendly(resource_type)
names = [name for name, _, _ in ambiguous_refs]
message = 'Did you mean {0} [{1}] for {2}: [{3}]?'.format(
attribute, suggested_resource, resource_name, ', '.join(names))
try:
if console_io.PromptContinue(message=message, default=True,
throw_if_unattended=True):
return suggested_resource
else:
return None
except console_io.UnattendedPromptError:
raise_on_prompt_failure()
def _PromptForScopeList(self, ambiguous_refs, attributes,
resource_type, choice_resources,
raise_on_prompt_failure):
"""Prompt to resolve abiguous resources. Either returns str or throws."""
# targetInstances -> target instances
resource_name = utils.CamelCaseToOutputFriendly(resource_type)
# Resource names should be surrounded by brackets while choices should not
names = ['[{0}]'.format(name) for name, _, _ in ambiguous_refs]
# Print deprecation state for choices.
choice_names = []
choice_mapping = []
for attribute in attributes:
for choice_resource in choice_resources[attribute]:
deprecated = choice_resource.deprecated
if deprecated:
choice_name = '{0} ({1})'.format(
choice_resource.name, deprecated.state)
else:
choice_name = choice_resource.name
if len(attributes) > 1:
choice_name = '{0}: {1}'.format(attribute, choice_name)
choice_mapping.append((attribute, choice_resource.name))
choice_names.append(choice_name)
title = utils.ConstructList(
'For the following {0}:'.format(resource_name), names)
idx = console_io.PromptChoice(
options=choice_names,
message='{0}choose a {1}:'.format(title, ' or '.join(attributes)))
if idx is None:
raise_on_prompt_failure()
else:
return choice_mapping[idx]
def PromptForMultiScopedReferences(
self, resource_names, scope_names, scope_services, resource_types,
flag_names):
"""Prompt for resources, which can be placed in several different scopes."""
# one service and resource type per scope
if len(scope_names) != len(scope_services) or (
len(scope_names) != len(resource_types)):
raise _InvalidPromptInvocation()
resource_refs = []
ambiguous_names = []
for resource_name in resource_names:
for resource_type in resource_types:
collection = utils.GetApiCollection(resource_type)
try:
resource_ref = self.resources.Parse(
resource_name, collection=collection, params={})
except resources.WrongResourceCollectionException:
pass
except (resources.UnknownFieldException,
properties.RequiredPropertyError):
ambiguous_names.append((resource_name, {}, collection))
else:
resource_refs.append(resource_ref)
if ambiguous_names:
resource_refs += self._PromptForScope(
ambiguous_names=ambiguous_names,
attributes=scope_names,
services=scope_services,
resource_type=resource_types[0],
flag_names=flag_names,
prefix_filter=None)
return resource_refs
def CreateScopedReferences(self, resource_names, scope_name, scope_arg,
scope_service, resource_type, flag_names,
prefix_filter=None):
"""Returns a list of resolved resource references for scoped resources."""
resource_refs = []
ambiguous_names = []
resource_type = resource_type or self.resource_type
collection = utils.GetApiCollection(resource_type)
for resource_name in resource_names:
params = {scope_name: scope_arg}
try:
resource_ref = self.resources.Parse(
resource_name,
collection=collection,
params=params)
except (resources.UnknownFieldException,
properties.RequiredPropertyError):
ambiguous_names.append((resource_name, params, collection))
else:
resource_refs.append(resource_ref)
has_default = utils.HasApiParamDefaultValue(
self.resources, resource_type, scope_name)
if ambiguous_names and not scope_arg and not has_default:
# We need to prompt.
resource_refs += self._PromptForScope(
ambiguous_names=ambiguous_names,
attributes=[scope_name],
services=[scope_service],
resource_type=resource_type,
flag_names=flag_names,
prefix_filter=prefix_filter)
return resource_refs
def CreateZonalReferences(self, resource_names, zone_arg, resource_type=None,
flag_names=None, region_filter=None):
"""Returns a list of resolved zonal resource references."""
if flag_names is None:
flag_names = ['--zone']
if zone_arg:
zone_ref = self.resources.Parse(zone_arg, collection='compute.zones')
zone_name = zone_ref.Name()
else:
zone_name = None
return self.CreateScopedReferences(
resource_names,
scope_name='zone',
scope_arg=zone_name,
scope_service=self.compute.zones,
resource_type=resource_type,
flag_names=flag_names,
prefix_filter=region_filter)
def CreateZonalReference(self, resource_name, zone_arg, resource_type=None,
flag_names=None, region_filter=None):
return self.CreateZonalReferences(
[resource_name], zone_arg, resource_type, flag_names, region_filter)[0]
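# Typical call pattern from a compute surface command (illustrative only; the
# `self` and `args` objects come from the calling command class, not shown here):
#
#   instance_ref = self.CreateZonalReference(args.name, args.zone,
#                                            resource_type='instances')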
def CreateRegionalReferences(self, resource_names, region_arg,
flag_names=None, resource_type=None):
"""Returns a list of resolved regional resource references."""
if flag_names is None:
flag_names = ['--region']
if region_arg:
region_ref = self.resources.Parse(
region_arg, collection='compute.regions')
region_name = region_ref.Name()
else:
region_name = None
return self.CreateScopedReferences(
resource_names,
scope_name='region',
scope_arg=region_name,
scope_service=self.compute.regions,
flag_names=flag_names,
resource_type=resource_type)
def CreateRegionalReference(self, resource_name, region_arg,
flag_names=None, resource_type=None):
return self.CreateRegionalReferences(
[resource_name], region_arg, flag_names, resource_type)[0]
def CreateGlobalReferences(self, resource_names, resource_type=None):
"""Returns a list of resolved global resource references."""
resource_refs = []
for resource_name in resource_names:
resource_refs.append(self.resources.Parse(
resource_name,
collection=utils.GetApiCollection(
resource_type or self.resource_type)))
return resource_refs
def CreateGlobalReference(self, resource_name, resource_type=None):
return self.CreateGlobalReferences([resource_name], resource_type)[0]
|
KaranToor/MA450
|
google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/scope_prompter.py
|
Python
|
apache-2.0
| 13,248
|
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.vision.v1 ImageAnnotator API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.vision.v1 import enums
from google.cloud.proto.vision.v1 import image_annotator_pb2
class ImageAnnotatorClient(object):
"""Service that performs Google Cloud Vision API detection tasks over
client images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
SERVICE_ADDRESS = 'vision.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
An ImageAnnotatorClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'google-cloud-vision', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'image_annotator_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.cloud.vision.v1.ImageAnnotator',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.image_annotator_stub = config.create_stub(
image_annotator_pb2.ImageAnnotatorStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._batch_annotate_images = api_callable.create_api_call(
self.image_annotator_stub.BatchAnnotateImages,
settings=defaults['batch_annotate_images'])
# Service calls
def batch_annotate_images(self, requests, options=None):
"""
Run image detection and annotation for a batch of images.
Example:
>>> from google.cloud.gapic.vision.v1 import image_annotator_client
>>> client = image_annotator_client.ImageAnnotatorClient()
>>> requests = []
>>> response = client.batch_annotate_images(requests)
Args:
requests (list[:class:`google.cloud.proto.vision.v1.image_annotator_pb2.AnnotateImageRequest`]): Individual image annotation requests for this batch.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.vision.v1.image_annotator_pb2.BatchAnnotateImagesResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = image_annotator_pb2.BatchAnnotateImagesRequest(
requests=requests)
return self._batch_annotate_images(request, options)
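# A slightly fuller usage sketch than the docstring example above. The image
# path, feature type, and max_results below are illustrative assumptions:
#
#   from google.cloud.gapic.vision.v1 import enums, image_annotator_client
#   from google.cloud.proto.vision.v1 import image_annotator_pb2
#
#   client = image_annotator_client.ImageAnnotatorClient()
#   with open('photo.jpg', 'rb') as f:                      # hypothetical file
#       image = image_annotator_pb2.Image(content=f.read())
#   feature = image_annotator_pb2.Feature(
#       type=enums.Feature.Type.LABEL_DETECTION, max_results=5)
#   request = image_annotator_pb2.AnnotateImageRequest(
#       image=image, features=[feature])
#   response = client.batch_annotate_images([request])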
|
calpeyser/google-cloud-python
|
vision/google/cloud/gapic/vision/v1/image_annotator_client.py
|
Python
|
apache-2.0
| 7,551
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import collections
import copy
import datetime
import uuid
import fixtures
import iso8601
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack import compute
from nova.api.openstack.compute import disk_config
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import ips
from nova.api.openstack.compute import keypairs
from nova.api.openstack.compute.schemas import servers as servers_schema
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import views
from nova.api.openstack import extensions
from nova.api.openstack import wsgi as os_wsgi
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.image import glance
from nova.network import manager
from nova.network.neutronv2 import api as neutron_api
from nova import objects
from nova.objects import instance as instance_obj
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova import utils as nova_utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
def fake_gen_uuid():
return FAKE_UUID
def return_servers_empty(context, *args, **kwargs):
return objects.InstanceList(objects=[])
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, values):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return inst
def fake_compute_api(cls, req, id):
return True
def fake_start_stop_not_ready(self, context, instance):
raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_start_stop_invalid_state(self, context, instance):
raise exception.InstanceInvalidState(
instance_uuid=instance['uuid'], attr='fake_attr',
method='fake_method', state='fake_state')
def fake_instance_get_by_uuid_not_found(context, uuid,
columns_to_join, use_slave=False):
raise exception.InstanceNotFound(instance_id=uuid)
def fake_instance_get_all_with_locked(context, list_locked, **kwargs):
obj_list = []
s_id = 0
for locked in list_locked:
uuid = fakes.get_fake_uuid(locked)
s_id = s_id + 1
kwargs['locked_by'] = None if locked == 'not_locked' else locked
server = fakes.stub_instance_obj(context, id=s_id, uuid=uuid, **kwargs)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password
class Base64ValidationTest(test.TestCase):
def setUp(self):
super(Base64ValidationTest, self).setUp()
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def test_decode_base64(self):
value = "A random string"
result = self.controller._decode_base64(base64.b64encode(value))
self.assertEqual(result, value)
def test_decode_base64_binary(self):
value = "\x00\x12\x75\x99"
result = self.controller._decode_base64(base64.b64encode(value))
self.assertEqual(result, value)
def test_decode_base64_whitespace(self):
value = "A random string"
encoded = base64.b64encode(value)
white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
result = self.controller._decode_base64(white)
self.assertEqual(result, value)
def test_decode_base64_invalid(self):
invalid = "A random string"
result = self.controller._decode_base64(invalid)
self.assertIsNone(result)
def test_decode_base64_illegal_bytes(self):
value = "A random string"
encoded = base64.b64encode(value)
white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
result = self.controller._decode_base64(white)
self.assertIsNone(result)
class NeutronV2Subclass(neutron_api.API):
"""Used to ensure that API handles subclasses properly."""
pass
class ControllerTest(test.TestCase):
def setUp(self):
super(ControllerTest, self).setUp()
self.flags(verbose=True, use_ipv6=False)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
return_server = fakes.fake_compute_get()
return_servers = fakes.fake_compute_get_all()
# Server sort keys extension is enabled in v21 so sort data is passed
# to the instance API and the sorted DB API is invoked
self.stubs.Set(compute_api.API, 'get_all',
lambda api, *a, **k: return_servers(*a, **k))
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: return_server(*a, **k))
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update_and_get_original)
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
self.ips_controller = ips.IPsController()
policy.reset()
policy.init()
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
class ServersControllerTest(ControllerTest):
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def setUp(self):
super(ServersControllerTest, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
def test_requested_networks_prefix(self):
uuid = 'br-00000000-0000-0000-0000-000000000000'
requested_networks = [{'uuid': uuid}]
res = self.controller._get_requested_networks(requested_networks)
self.assertIn((uuid, None), res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_port(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_network(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_with_duplicate_networks(self):
# duplicate networks are allowed only for nova neutron v2.0
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}, {'uuid': network}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_with_neutronv2_and_duplicate_networks(self):
# duplicate networks are allowed only for nova neutron v2.0
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}, {'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None, None),
(network, None, None, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
addr = '10.0.0.1'
requested_networks = [{'uuid': network,
'fixed_ip': addr,
'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_neutronv2_disabled_with_port(self):
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_api_enabled_with_v2_subclass(self):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_neutronv2_subclass_with_port(self):
cls = ('nova.tests.unit.api.openstack.compute.test_serversV21.'
'NeutronV2Subclass')
self.flags(network_api_class=cls)
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_get_server_by_uuid(self):
req = self.req('/fake/servers/%s' % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
def test_get_server_joins_pci_devices(self):
def fake_get(_self, *args, **kwargs):
expected_attrs = kwargs['expected_attrs']
self.assertEqual(['flavor', 'info_cache', 'metadata',
'pci_devices'], expected_attrs)
ctxt = context.RequestContext('fake', 'fake')
return fake_instance.fake_instance_obj(
ctxt, expected_attrs=expected_attrs)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = self.req('/fake/servers/%s' % FAKE_UUID)
self.controller.show(req, FAKE_UUID)
def test_unique_host_id(self):
"""Create two servers with the same host and different
project_ids and check that the host_ids are unique.
"""
def return_instance_with_host(context, *args, **kwargs):
project_id = str(uuid.uuid4())
return fakes.stub_instance_obj(context, id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
self.stubs.Set(compute_api.API, 'get',
return_instance_with_host)
req = self.req('/fake/servers/%s' % FAKE_UUID)
with mock.patch.object(compute_api.API, 'get') as mock_get:
mock_get.side_effect = return_instance_with_host
server1 = self.controller.show(req, FAKE_UUID)
server2 = self.controller.show(req, FAKE_UUID)
self.assertNotEqual(server1['server']['hostId'],
server2['server']['hostId'])
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
return {
"server": {
"id": uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": progress,
"name": "server2",
"status": status,
"hostId": '',
"image": {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "2",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {
"seq": "2",
},
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/servers/%s" % uuid,
},
{
"rel": "bookmark",
"href": "http://localhost/fake/servers/%s" % uuid,
},
],
}
}
def test_get_server_by_id(self):
self.flags(use_ipv6=True)
image_bookmark = "http://localhost/fake/images/10"
flavor_bookmark = "http://localhost/fake/flavors/2"
uuid = FAKE_UUID
req = self.req('/v2/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status="BUILD",
progress=0)
expected_server['server']['name'] = 'server1'
expected_server['server']['metadata']['seq'] = '1'
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost/fake/images/10"
flavor_bookmark = "http://localhost/fake/flavors/2"
new_return_server = fakes.fake_compute_get(
id=2, vm_state=vm_states.ACTIVE, progress=100)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: new_return_server(*a, **k))
uuid = FAKE_UUID
req = self.req('/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_id_image_ref_by_id(self):
image_ref = "10"
image_bookmark = "http://localhost/fake/images/10"
flavor_id = "1"
flavor_bookmark = "http://localhost/fake/flavors/2"
new_return_server = fakes.fake_compute_get(
id=2, vm_state=vm_states.ACTIVE, image_ref=image_ref,
flavor_id=flavor_id, progress=100)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: new_return_server(*a, **k))
uuid = FAKE_UUID
req = self.req('/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_addresses_from_cache(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
pub1 = ('1.2.3.4',)
pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
priv0 = ('192.168.0.3', '192.168.0.4',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'public',
'subnets': [{'cidr': '172.19.0.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': '1.2.3.0/16',
'ips': [_ip(ip) for ip in pub1]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub2]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br1',
'id': 2,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip(ip) for ip in priv0]}]}}]
return_server = fakes.fake_compute_get(nw_cache=nw_cache)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: return_server(*a, **k))
req = self.req('/fake/servers/%s/ips' % FAKE_UUID)
res_dict = self.ips_controller.index(req, FAKE_UUID)
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3'},
{'version': 4, 'addr': '192.168.0.4'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1'},
{'version': 4, 'addr': '172.19.0.2'},
{'version': 4, 'addr': '1.2.3.4'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
],
},
}
self.assertThat(res_dict, matchers.DictMatches(expected))
# Make sure we kept the addresses in order
self.assertIsInstance(res_dict['addresses'], collections.OrderedDict)
labels = [vif['network']['label'] for vif in nw_cache]
for index, label in enumerate(res_dict['addresses'].keys()):
self.assertEqual(label, labels[index])
def test_get_server_addresses_nonexistent_network(self):
url = '/v2/fake/servers/%s/ips/network_0' % FAKE_UUID
req = self.req(url)
self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
req, FAKE_UUID, 'network_0')
def test_get_server_addresses_nonexistent_server(self):
def fake_instance_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'get', fake_instance_get)
server_id = str(uuid.uuid4())
req = self.req('/fake/servers/%s/ips' % server_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, server_id)
def test_get_server_list_empty(self):
self.stubs.Set(compute_api.API, 'get_all',
return_servers_empty)
req = self.req('/fake/servers')
res_dict = self.controller.index(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_list_with_reservation_id(self):
req = self.req('/fake/servers?reservation_id=foo')
res_dict = self.controller.index(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_empty(self):
req = self.req('/fake/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_details(self):
req = self.req('/fake/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list(self):
req = self.req('/fake/servers')
res_dict = self.controller.index(req)
self.assertEqual(len(res_dict['servers']), 5)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertIsNone(s.get('image', None))
expected_links = [
{
"rel": "self",
"href": "http://localhost/v2/fake/servers/%s" % s['id'],
},
{
"rel": "bookmark",
"href": "http://localhost/fake/servers/%s" % s['id'],
},
]
self.assertEqual(s['links'], expected_links)
def test_get_servers_with_limit(self):
req = self.req('/fake/servers?limit=3')
res_dict = self.controller.index(req)
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = self.req('/fake/servers?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_server_details_empty(self):
self.stubs.Set(compute_api.API, 'get_all',
return_servers_empty)
req = self.req('/fake/servers/detail')
res_dict = self.controller.detail(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_details_with_limit(self):
req = self.req('/fake/servers/detail?limit=3')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = self.req('/fake/servers/detail?limit=aaa')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail, req)
def test_get_server_details_with_limit_and_other_params(self):
req = self.req('/fake/servers/detail'
'?limit=3&blah=2:t'
'&sort_key=id1&sort_dir=asc')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'blah': ['2:t'],
'sort_key': ['id1'], 'sort_dir': ['asc'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = self.req('/fake/servers?limit=30')
res_dict = self.controller.index(req)
self.assertNotIn('servers_links', res_dict)
def test_get_servers_with_bad_limit(self):
req = self.req('/fake/servers?limit=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_marker(self):
url = '/v2/fake/servers?marker=%s' % fakes.get_fake_uuid(2)
req = self.req(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
def test_get_servers_with_limit_and_marker(self):
url = ('/v2/fake/servers?limit=2&marker=%s' %
fakes.get_fake_uuid(1))
req = self.req(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
def test_get_servers_with_bad_marker(self):
req = self.req('/fake/servers?limit=2&marker=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_bad_option(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?unknownoption=whee')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_image(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('image', search_opts)
self.assertEqual(search_opts['image'], '12345')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?image=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_tenant_id_filter_no_admin_context(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotEqual(search_opts, None)
self.assertEqual(search_opts['project_id'], 'fake')
return [fakes.stub_instance_obj(100)]
req = self.req('/fake/servers?tenant_id=newfake')
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_normal(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('project_id', search_opts)
return [fakes.stub_instance_obj(100)]
req = self.req('/fake/servers?all_tenants',
use_admin_context=True)
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_one(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('project_id', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=1',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_zero(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=0',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_false(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=false',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_invalid(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=xxx',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_admin_restricted_tenant(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertEqual(search_opts['project_id'], 'fake')
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_pass_policy(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('project_id', search_opts)
self.assertTrue(context.is_admin)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
rules = {
"os_compute_api:servers:index": "project_id:fake",
"os_compute_api:servers:index:get_all_tenants": "project_id:fake"
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
req = self.req('/fake/servers?all_tenants=1')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_fail_policy(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
return [fakes.stub_instance_obj(100)]
rules = {
"os_compute_api:servers:index:get_all_tenants":
"project_id:non_fake",
"os_compute_api:servers:get_all": "project_id:fake",
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=1')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_get_servers_allows_flavor(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('flavor', search_opts)
# flavor is an integer ID
self.assertEqual(search_opts['flavor'], '12345')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?flavor=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_with_bad_flavor(self):
req = self.req('/fake/servers?flavor=abcde')
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.return_value = objects.InstanceList(objects=[])
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_server_details_with_bad_flavor(self):
req = self.req('/fake/servers?flavor=abcde')
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.return_value = objects.InstanceList(objects=[])
servers = self.controller.detail(req)['servers']
self.assertThat(servers, testtools.matchers.HasLength(0))
def test_get_servers_allows_status(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?status=active')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_task_status(self):
server_uuid = str(uuid.uuid4())
task_state = task_states.REBOOTING
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('task_state', search_opts)
self.assertEqual([task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING],
search_opts['task_state'])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid,
task_state=task_state)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?status=reboot')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_resize_status(self):
# Test when resize status, it maps list of vm states.
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'],
[vm_states.ACTIVE, vm_states.STOPPED])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?status=resize')
servers = self.controller.detail(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_invalid_status(self):
# Test getting servers by invalid status.
req = self.req('/fake/servers?status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_servers_deleted_status_as_user(self):
req = self.req('/fake/servers?status=deleted',
use_admin_context=False)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.detail, req)
def test_get_servers_deleted_status_as_admin(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'], ['deleted'])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?status=deleted',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_deleted_filter_str_to_bool(self, mock_get_all):
server_uuid = str(uuid.uuid4())
db_list = objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid,
vm_state='deleted')])
mock_get_all.return_value = db_list
req = self.req('/fake/servers?deleted=true',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(server_uuid, servers[0]['id'])
        # Assert that the 'deleted' filter value is converted to a boolean
        # before get_all() is called.
expected_search_opts = {'deleted': True, 'project_id': 'fake'}
mock_get_all.assert_called_once_with(
mock.ANY, search_opts=expected_search_opts, limit=mock.ANY,
expected_attrs=['flavor', 'info_cache', 'metadata', 'pci_devices'],
marker=mock.ANY, want_objects=mock.ANY,
sort_keys=mock.ANY, sort_dirs=mock.ANY)
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_deleted_filter_invalid_str(self, mock_get_all):
server_uuid = str(uuid.uuid4())
db_list = objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
mock_get_all.return_value = db_list
req = fakes.HTTPRequest.blank('/fake/servers?deleted=abc',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(server_uuid, servers[0]['id'])
        # Assert that an invalid 'deleted' filter value is converted to
        # boolean False before get_all() is called.
expected_search_opts = {'deleted': False, 'project_id': 'fake'}
mock_get_all.assert_called_once_with(
mock.ANY, search_opts=expected_search_opts, limit=mock.ANY,
expected_attrs=['flavor', 'info_cache', 'metadata', 'pci_devices'],
marker=mock.ANY, want_objects=mock.ANY,
sort_keys=mock.ANY, sort_dirs=mock.ANY)
def test_get_servers_allows_name(self):
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('name', search_opts)
self.assertEqual(search_opts['name'], 'whee.*')
self.assertEqual(['pci_devices'], expected_attrs)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?name=whee.*')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_flavor_not_found(self, get_all_mock):
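        # A FlavorNotFound raised by compute_api.get_all() should result in
        # an empty server list rather than an error response.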
get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
req = fakes.HTTPRequest.blank(
'/fake/servers?status=active&flavor=abc')
servers = self.controller.index(req)['servers']
self.assertEqual(0, len(servers))
def test_get_servers_allows_changes_since(self):
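        # The changes-since parameter should be parsed into a timezone-aware
        # datetime, and no 'deleted' filter should be added implicitly.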
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('changes-since', search_opts)
changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertEqual(search_opts['changes-since'], changes_since)
self.assertNotIn('deleted', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
params = 'changes-since=2011-01-24T17:08:01Z'
req = self.req('/fake/servers?%s' % params)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since_bad_value(self):
params = 'changes-since=asdf'
req = self.req('/fake/servers?%s' % params)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
def test_get_servers_admin_filters_as_user(self):
"""Test getting servers by admin-only or unknown options when
context is not admin. Make sure the admin and unknown options
are stripped before they get to compute_api.get_all()
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
self.assertIn('ip', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
            # Unknown options are stripped for non-admin requests
            self.assertNotIn('unknown_option', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str)
res = self.controller.index(req)
servers = res['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_admin_options_as_admin(self):
"""Test getting servers by admin-only or unknown options when
context is admin. All options should be passed
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
            # All other options are passed through for admin requests
            self.assertIn('ip', search_opts)
            self.assertIn('unknown_option', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = self.req('/fake/servers?%s' % query_str,
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_ip(self):
"""Test getting servers by ip."""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('ip', search_opts)
self.assertEqual(search_opts['ip'], '10\..*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?ip=10\..*')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_admin_allows_ip6(self):
"""Test getting servers by ip6 with admin_api enabled and
admin context
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('ip6', search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?ip6=ffff.*',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_ip6_with_new_version(self):
"""Test getting servers by ip6 with new version requested
and no admin context
"""
server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('ip6', search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?ip6=ffff.*')
req.api_version_request = api_version_request.APIVersionRequest('2.5')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_all_server_details(self):
expected_flavor = {
"id": "2",
"links": [
{
"rel": "bookmark",
"href": 'http://localhost/fake/flavors/2',
},
],
}
expected_image = {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": 'http://localhost/fake/images/10',
},
],
}
req = self.req('/fake/servers/detail')
res_dict = self.controller.detail(req)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertEqual(s['image'], expected_image)
self.assertEqual(s['flavor'], expected_flavor)
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], str(i + 1))
def test_get_all_server_details_with_host(self):
"""We want to make sure that if two instances are on the same host,
then they return the same hostId. If two instances are on different
hosts, they should return different hostIds. In this test,
there are 5 instances - 2 on one host and 3 on another.
"""
def return_servers_with_host(*args, **kwargs):
return objects.InstanceList(
objects=[fakes.stub_instance_obj(None,
id=i + 1,
user_id='fake',
project_id='fake',
host=i % 2,
uuid=fakes.get_fake_uuid(i))
for i in range(5)])
self.stubs.Set(compute_api.API, 'get_all', return_servers_with_host)
req = self.req('/fake/servers/detail')
res_dict = self.controller.detail(req)
server_list = res_dict['servers']
host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
self.assertTrue(host_ids[0] and host_ids[1])
self.assertNotEqual(host_ids[0], host_ids[1])
for i, s in enumerate(server_list):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % (i + 1))
def test_get_servers_joins_pci_devices(self):
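        # The index call should ask the compute API to join pci_devices via
        # expected_attrs when loading instances.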
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None, want_objects=False,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertEqual(['pci_devices'], expected_attrs)
return []
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers', use_admin_context=True)
self.assertIn('servers', self.controller.index(req))
class ServersControllerTestV29(ServersControllerTest):
wsgi_api_version = '2.9'
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
server_dict = super(ServersControllerTestV29,
self)._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status,
progress)
server_dict['server']['locked'] = False
return server_dict
@mock.patch.object(compute_api.API, 'get')
def _test_get_server_with_lock(self, locked_by, get_mock):
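        # Microversion 2.9 adds a 'locked' field to the server view; it
        # should be True whenever locked_by is set and False otherwise.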
image_bookmark = "http://localhost/fake/images/10"
flavor_bookmark = "http://localhost/fake/flavors/2"
uuid = FAKE_UUID
get_mock.side_effect = fakes.fake_compute_get(id=2,
locked_by=locked_by,
uuid=uuid)
req = self.req('/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status="BUILD",
progress=0)
        expected_server['server']['locked'] = bool(locked_by)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
return res_dict
def test_get_server_with_locked_by_admin(self):
res_dict = self._test_get_server_with_lock('admin')
self.assertTrue(res_dict['server']['locked'])
def test_get_server_with_locked_by_owner(self):
res_dict = self._test_get_server_with_lock('owner')
self.assertTrue(res_dict['server']['locked'])
def test_get_server_not_locked(self):
res_dict = self._test_get_server_with_lock(None)
self.assertFalse(res_dict['server']['locked'])
@mock.patch.object(compute_api.API, 'get_all')
def _test_list_server_detail_with_lock(self,
s1_locked,
s2_locked,
get_all_mock):
get_all_mock.return_value = fake_instance_get_all_with_locked(
context, [s1_locked, s2_locked])
req = self.req('/fake/servers/detail')
servers_list = self.controller.detail(req)
        # Check that each returned server reports the 'locked' value it was
        # created with.
for locked in [s1_locked, s2_locked]:
server = next(server for server in servers_list['servers']
if (server['id'] == fakes.get_fake_uuid(locked)))
            expected = locked != 'not_locked'
self.assertEqual(expected, server['locked'])
def test_list_server_detail_with_locked_s1_admin_s2_owner(self):
self._test_list_server_detail_with_lock('admin', 'owner')
def test_list_server_detail_with_locked_s1_owner_s2_admin(self):
self._test_list_server_detail_with_lock('owner', 'admin')
def test_list_server_detail_with_locked_s1_admin_s2_admin(self):
self._test_list_server_detail_with_lock('admin', 'admin')
def test_list_server_detail_with_locked_s1_admin_s2_not_locked(self):
self._test_list_server_detail_with_lock('admin', 'not_locked')
def test_list_server_detail_with_locked_s1_s2_not_locked(self):
self._test_list_server_detail_with_lock('not_locked',
'not_locked')
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_remove_non_search_options(self, get_all_mock):
req = fakes.HTTPRequestV21.blank('/servers'
'?sort_key=id1&sort_dir=asc'
'&sort_key=id2&sort_dir=desc'
'&limit=1&marker=123',
use_admin_context=True)
self.controller.index(req)
kwargs = get_all_mock.call_args[1]
search_opts = kwargs['search_opts']
for key in ('sort_key', 'sort_dir', 'limit', 'marker'):
self.assertNotIn(key, search_opts)
class ServersControllerDeleteTest(ControllerTest):
def setUp(self):
super(ServersControllerDeleteTest, self).setUp()
self.server_delete_called = False
def fake_delete(api, context, instance):
if instance.uuid == 'non-existent-uuid':
raise exception.InstanceNotFound(instance_id=instance.uuid)
self.server_delete_called = True
self.stubs.Set(compute_api.API, 'delete', fake_delete)
def _create_delete_request(self, uuid):
fakes.stub_out_instance_quota(self.stubs, 0, 10)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % uuid)
req.method = 'DELETE'
return req
def _delete_server_instance(self, uuid=FAKE_UUID):
req = self._create_delete_request(uuid)
fake_get = fakes.fake_compute_get(uuid=uuid,
vm_state=vm_states.ACTIVE)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: fake_get(*a, **k))
self.controller.delete(req, uuid)
def test_delete_server_instance(self):
self._delete_server_instance()
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self._delete_server_instance,
uuid='non-existent-uuid')
def test_delete_server_instance_while_building(self):
req = self._create_delete_request(FAKE_UUID)
self.controller.delete(req, FAKE_UUID)
self.assertTrue(self.server_delete_called)
def test_delete_locked_server(self):
req = self._create_delete_request(FAKE_UUID)
self.stubs.Set(compute_api.API, 'soft_delete',
fakes.fake_actions_to_locked_server)
self.stubs.Set(compute_api.API, 'delete',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
req, FAKE_UUID)
def test_delete_server_instance_while_resize(self):
req = self._create_delete_request(FAKE_UUID)
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_PREP)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: fake_get(*a, **k))
self.controller.delete(req, FAKE_UUID)
def test_delete_server_instance_if_not_launched(self):
self.flags(reclaim_instance_interval=3600)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE'
self.server_delete_called = False
fake_get = fakes.fake_compute_get(launched_at=None)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: fake_get(*a, **k))
def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
deleted_at = timeutils.utcnow()
return fake_instance.fake_db_instance(deleted_at=deleted_at)
self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
self.controller.delete(req, FAKE_UUID)
        # delete() should be called for an instance which has never been
        # active, even if reclaim_instance_interval has been set.
        self.assertTrue(self.server_delete_called)
class ServersControllerRebuildInstanceTest(ControllerTest):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
def setUp(self):
super(ServersControllerRebuildInstanceTest, self).setUp()
def fake_get(ctrl, ctxt, uuid):
if uuid == 'test_inst':
raise webob.exc.HTTPNotFound(explanation='fakeout')
return fakes.stub_instance_obj(None,
vm_state=vm_states.ACTIVE)
self.useFixture(
fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
'ServersController._get_instance',
fake_get))
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: fake_get(*a, **k))
self.body = {
'rebuild': {
'name': 'new_name',
'imageRef': self.image_href,
'metadata': {
'open': 'stack',
},
},
}
self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def test_rebuild_instance_name_with_spaces_in_the_middle(self):
self.body['rebuild']['name'] = 'abc def'
self.req.body = jsonutils.dumps(self.body)
self.controller._action_rebuild(self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_name_with_leading_trailing_spaces(self):
self.body['rebuild']['name'] = ' abc def '
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_name_with_leading_trailing_spaces_compat_mode(
self):
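        # In legacy v2 compatibility mode the name is not rejected; leading
        # and trailing whitespace is stripped before the rebuild is performed.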
self.body['rebuild']['name'] = ' abc def '
self.req.body = jsonutils.dumps(self.body)
self.req.set_legacy_v2()
def fake_rebuild(*args, **kwargs):
self.assertEqual('abc def', kwargs['display_name'])
with mock.patch.object(compute_api.API, 'rebuild') as mock_rebuild:
mock_rebuild.side_effect = fake_rebuild
self.controller._action_rebuild(self.req, FAKE_UUID,
body=self.body)
def test_rebuild_instance_with_blank_metadata_key(self):
self.body['rebuild']['metadata'][''] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_key_too_long(self):
self.body['rebuild']['metadata'][('a' * 260)] = 'world'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_too_long(self):
self.body['rebuild']['metadata']['key1'] = ('a' * 260)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_not_string(self):
self.body['rebuild']['metadata']['key1'] = 1
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_fails_when_min_ram_too_small(self):
# make min_ram larger than our instance ram size
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="4096", min_disk="10")
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_fails_when_min_disk_too_small(self):
# make min_disk larger than our instance disk size
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="128", min_disk="100000")
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_image_too_large(self):
# make image size larger than our instance disk size
size = str(1000 * (1024 ** 3))
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', size=size)
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_name_all_blank(self):
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True, status='active')
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.body['rebuild']['name'] = ' '
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_deleted_image(self):
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='DELETED')
self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_onset_file_limit_over_quota(self):
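        # OnsetFileLimitExceeded (injected file quota exceeded) raised by the
        # compute API's rebuild() should be translated into a 403 Forbidden.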
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True, status='active')
with test.nested(
mock.patch.object(fake._FakeImageService, 'show',
side_effect=fake_get_image),
mock.patch.object(self.controller.compute_api, 'rebuild',
side_effect=exception.OnsetFileLimitExceeded)
) as (
show_mock, rebuild_mock
):
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": self.image_href,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": self.image_href,
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertNotIn('personality', body['server'])
def test_start(self):
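        # A plain 'start' action should call compute_api.API.start.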
self.mox.StubOutWithMock(compute_api.API, 'start')
compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.controller._start_server(req, FAKE_UUID, body)
def test_start_policy_failed(self):
rules = {
"os_compute_api:servers:start": "project_id:non_fake"
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._start_server,
req, FAKE_UUID, body)
self.assertIn("os_compute_api:servers:start", exc.format_message())
def test_start_not_ready(self):
self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_start_locked_server(self):
self.stubs.Set(compute_api.API, 'start',
fakes.fake_actions_to_locked_server)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_start_invalid(self):
self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
def test_stop(self):
self.mox.StubOutWithMock(compute_api.API, 'stop')
compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.controller._stop_server(req, FAKE_UUID, body)
def test_stop_policy_failed(self):
rules = {
"os_compute_api:servers:stop": "project_id:non_fake"
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(stop='')
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._stop_server,
req, FAKE_UUID, body)
self.assertIn("os_compute_api:servers:stop", exc.format_message())
def test_stop_not_ready(self):
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_stop_locked_server(self):
self.stubs.Set(compute_api.API, 'stop',
fakes.fake_actions_to_locked_server)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_stop_invalid_state(self):
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
        body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
def test_start_with_bogus_id(self):
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._start_server, req, 'test_inst', body)
def test_stop_with_bogus_id(self):
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action')
body = dict(stop="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
class ServersControllerUpdateTest(ControllerTest):
def _get_request(self, body=None, options=None):
if options:
fake_get = fakes.fake_compute_get(**options)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: fake_get(*a, **k))
req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = jsonutils.dumps(body)
return req
def test_update_server_all_attributes(self):
body = {'server': {
'name': 'server_test',
}}
req = self._get_request(body, {'name': 'server_test'})
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_name(self):
body = {'server': {'name': 'server_test'}}
req = self._get_request(body, {'name': 'server_test'})
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_name_too_long(self):
body = {'server': {'name': 'x' * 256}}
req = self._get_request(body, {'name': 'server_test'})
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_name_all_blank_spaces(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' ' * 64}}
req.body = jsonutils.dumps(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_name_with_spaces_in_the_middle(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': 'abc def'}}
req.body = jsonutils.dumps(body)
self.controller.update(req, FAKE_UUID, body=body)
def test_update_server_name_with_leading_trailing_spaces(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' abc def '}}
req.body = jsonutils.dumps(body)
self.assertRaises(exception.ValidationError,
self.controller.update, req, FAKE_UUID, body=body)
def test_update_server_name_with_leading_trailing_spaces_compat_mode(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' abc def '}}
req.body = jsonutils.dumps(body)
req.set_legacy_v2()
self.controller.update(req, FAKE_UUID, body=body)
def test_update_server_admin_password_extra_arg(self):
inst_dict = dict(name='server_test', admin_password='bacon')
body = dict(server=inst_dict)
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dumps(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_host_id(self):
inst_dict = dict(host_id='123')
body = dict(server=inst_dict)
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dumps(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_not_found(self):
def fake_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'get', fake_get)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_not_found_on_update(self):
def fake_update(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_policy_fail(self):
rule = {'compute:update': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
body = {'server': {'name': 'server_test'}}
req = self._get_request(body, {'name': 'server_test'})
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.update, req, FAKE_UUID, body=body)
class ServerStatusTest(test.TestCase):
def setUp(self):
super(ServerStatusTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def _get_with_state(self, vm_state, task_state=None):
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_state,
task_state=task_state))
request = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
return self.controller.show(request, FAKE_UUID)
def test_active(self):
response = self._get_with_state(vm_states.ACTIVE)
self.assertEqual(response['server']['status'], 'ACTIVE')
def test_reboot(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING)
self.assertEqual(response['server']['status'], 'REBOOT')
def test_reboot_hard(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING_HARD)
self.assertEqual(response['server']['status'], 'HARD_REBOOT')
def test_reboot_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:reboot': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_reboot, req, '1234',
body={'reboot': {'type': 'HARD'}})
def test_rebuild(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBUILDING)
self.assertEqual(response['server']['status'], 'REBUILD')
def test_rebuild_error(self):
response = self._get_with_state(vm_states.ERROR)
self.assertEqual(response['server']['status'], 'ERROR')
def test_resize(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.RESIZE_PREP)
self.assertEqual(response['server']['status'], 'RESIZE')
def test_confirm_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:confirm_resize': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_confirm_resize, req, '1234', {})
def test_verify_resize(self):
response = self._get_with_state(vm_states.RESIZED, None)
self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
def test_revert_resize(self):
response = self._get_with_state(vm_states.RESIZED,
task_states.RESIZE_REVERTING)
self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
def test_revert_resize_policy_fail(self):
def fake_get_server(context, req, id):
return fakes.stub_instance(id)
self.stubs.Set(self.controller, '_get_server', fake_get_server)
rule = {'compute:revert_resize': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_revert_resize, req, '1234', {})
def test_password_update(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.UPDATING_PASSWORD)
self.assertEqual(response['server']['status'], 'PASSWORD')
def test_stopped(self):
response = self._get_with_state(vm_states.STOPPED)
self.assertEqual(response['server']['status'], 'SHUTOFF')
class ServersControllerCreateTest(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self.stubs)
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/fake/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update_and_get_original(
context, instance_uuid, params, columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update_and_get_original)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'personality': [
{
"path": "/etc/banner.txt",
"contents": "MQ==",
},
],
},
}
self.bdm = [{'delete_on_termination': 1,
'device_name': 123,
'volume_size': 1,
'volume_id': '11111111-1111-1111-1111-111111111111'}]
self.req = fakes.HTTPRequest.blank('/fake/servers')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("adminPass", server_dict)
def _test_create_instance(self, flavor=2):
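        # Helper: create a server with the given flavor and verify the
        # generated adminPass length and the returned server id.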
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
self.body['server']['imageRef'] = image_uuid
self.body['server']['flavorRef'] = flavor
self.req.body = jsonutils.dumps(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_private_flavor(self):
values = {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': '1324',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': False,
}
db.flavor_create(context.get_admin_context(), values)
self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
flavor=1324)
def test_create_server_bad_image_href(self):
image_href = 1
self.body['server']['min_count'] = 1
        self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_server_with_invalid_networks_parameter(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# flavor_ref = 'http://localhost/123/flavors/3'
# body = {
# 'server': {
# 'name': 'server_test',
# 'imageRef': image_href,
# 'flavorRef': flavor_ref,
# 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
# }
# }
# req = fakes.HTTPRequest.blank('/fake/servers')
# req.method = 'POST'
# req.body = jsonutils.dumps(body)
# req.headers["content-type"] = "application/json"
# self.assertRaises(webob.exc.HTTPBadRequest,
# self.controller.create,
# req,
# body)
def test_create_server_with_deleted_image(self):
# Get the fake image service so we can set the status to deleted
(image_service, image_id) = glance.get_remote_image_service(
context, '')
image_service.update(context, self.image_uuid, {'status': 'DELETED'})
self.addCleanup(image_service.update, context, self.image_uuid,
{'status': 'active'})
self.body['server']['flavorRef'] = 2
self.req.body = jsonutils.dumps(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
self.controller.create(self.req, body=self.body)
def test_create_server_image_too_large(self):
# Get the fake image service so we can update the size of the image
(image_service, image_id) = glance.get_remote_image_service(
context, self.image_uuid)
image = image_service.show(context, image_id)
orig_size = image['size']
new_size = str(1000 * (1024 ** 3))
image_service.update(context, self.image_uuid, {'size': new_size})
self.addCleanup(image_service.update, context, self.image_uuid,
{'size': orig_size})
self.body['server']['flavorRef'] = 2
self.req.body = jsonutils.dumps(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
"Flavor's disk is too small for requested image."):
self.controller.create(self.req, body=self.body)
def test_create_instance_image_ref_is_bookmark(self):
image_href = 'http://localhost/fake/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_image_ref_is_invalid(self):
image_uuid = 'this_is_not_a_valid_uuid'
image_href = 'http://localhost/fake/images/%s' % image_uuid
flavor_ref = 'http://localhost/fake/flavors/3'
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, body=self.body)
def test_create_instance_no_key_pair(self):
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
self._test_create_instance()
def _test_create_extra(self, params, no_image=False):
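        # Helper: merge extra parameters into the server create body
        # (optionally dropping imageRef) and issue the create request.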
self.body['server']['flavorRef'] = 2
if no_image:
self.body['server'].pop('imageRef', None)
self.body['server'].update(params)
self.req.body = jsonutils.dumps(self.body)
self.req.headers["content-type"] = "application/json"
self.controller.create(self.req, body=self.body).obj['server']
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-keypairs extension tests
# def test_create_instance_with_keypairs_enabled(self):
# self.ext_mgr.extensions = {'os-keypairs': 'fake'}
# key_name = 'green'
#
# params = {'key_name': key_name}
# old_create = compute_api.API.create
#
# # NOTE(sdague): key pair goes back to the database,
# # so we need to stub it out for tests
# def key_pair_get(context, user_id, name):
# return {'public_key': 'FAKE_KEY',
# 'fingerprint': 'FAKE_FINGERPRINT',
# 'name': name}
#
# def create(*args, **kwargs):
# self.assertEqual(kwargs['key_name'], key_name)
# return old_create(*args, **kwargs)
#
# self.stubs.Set(db, 'key_pair_get', key_pair_get)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
#
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_instance_with_networks_enabled(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# requested_networks = [{'uuid': net_uuid}]
# params = {'networks': requested_networks}
# old_create = compute_api.API.create
# def create(*args, **kwargs):
# result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
# self.assertEqual(kwargs['requested_networks'], result)
# return old_create(*args, **kwargs)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
def test_create_instance_with_port_with_no_fixed_ips(self):
port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port_id}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortRequiresFixedIP(port_id=port_id)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_raise_user_data_too_large(self, mock_create):
mock_create.side_effect = exception.InstanceUserDataTooLarge(
maxsize=1, length=2)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
def test_create_instance_with_network_with_no_subnet(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.NetworkRequiresSubnet(network_uuid=network)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_non_unique_secgroup_name(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
def fake_create(*args, **kwargs):
raise exception.NoUniqueMatch("No Unique match found for ...")
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
def test_create_instance_secgroup_leading_trailing_spaces(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': ' sg '}]}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
def test_create_instance_secgroup_leading_trailing_spaces_compat_mode(
self):
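        # In legacy v2 compatibility mode strict name validation is skipped,
        # so the security group name reaches compute_api.create() with its
        # surrounding spaces intact.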
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': ' sg '}]}
def fake_create(*args, **kwargs):
self.assertEqual([' sg '], kwargs['security_group'])
return (objects.InstanceList(objects=[fakes.stub_instance_obj(
self.req.environ['nova.context'])]), None)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.req.set_legacy_v2()
self._test_create_extra(params)
def test_create_instance_with_networks_disabled_neutronv2(self):
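        # With the neutronv2 network API the requested network uuid should
        # be passed to compute_api.create() as a NetworkRequestList; its
        # as_tuples() output carries the uuid and no other request details.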
self.flags(network_api_class='nova.network.neutronv2.api.API')
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
None, None)]
self.assertEqual(result, kwargs['requested_networks'].as_tuples())
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_networks_disabled(self):
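        # In this configuration the 'networks' parameter should be dropped,
        # so compute_api.create() is called with requested_networks=None.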
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['requested_networks'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_pass_disabled(self):
        # Test with admin passwords disabled; see lp bug 921814.
        self.flags(enable_instance_password=False)
        # proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_name_too_long(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = 'X' * 256
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError, self.controller.create,
self.req, body=self.body)
def test_create_instance_name_with_spaces_in_the_middle(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = 'abc def'
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.controller.create(self.req, body=self.body)
def test_create_instance_name_with_leading_trailing_spaces(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = ' abc def '
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_name_with_leading_trailing_spaces_in_compat_mode(
self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = ' abc def '
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.req.set_legacy_v2()
self.controller.create(self.req, body=self.body)
def test_create_instance_name_all_blank_spaces(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/images/%s' % image_uuid
flavor_ref = 'http://localhost/fake/flavors/3'
body = {
'server': {
'name': ' ' * 64,
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_az_with_leading_trailing_spaces(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['availability_zone'] = ' zone1 '
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_az_with_leading_trailing_spaces_in_compat_mode(
self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['name'] = ' abc def '
self.body['server']['imageRef'] = image_href
        self.body['server']['availability_zone'] = ' zone1 '
self.req.body = jsonutils.dumps(self.body)
self.req.set_legacy_v2()
with mock.patch.object(availability_zones, 'get_availability_zones',
return_value=[' zone1 ']):
self.controller.create(self.req, body=self.body)
def test_create_instance(self):
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_extension_create_exception(self):
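        # An unexpected error (here a KeyError) raised by an extension's
        # server_create hook should surface as a 500 Internal Server Error.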
def fake_keypair_server_create(self, server_dict,
create_kwargs):
raise KeyError
self.stubs.Set(keypairs.Keypairs, 'server_create',
fake_keypair_server_create)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/images/%s' % image_uuid
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequestV21.blank('/fake/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.create, req, body=body)
def test_create_instance_pass_disabled(self):
self.flags(enable_instance_password=False)
# proper local hrefs must start with 'http://localhost/v2/'
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
@mock.patch('nova.virt.hardware.numa_get_constraints')
def _test_create_instance_numa_topology_wrong(self, exc,
numa_constraints_mock):
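        # Each ImageNUMATopology* error raised while building the NUMA
        # constraints should be reported to the caller as a 400 Bad Request.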
numa_constraints_mock.side_effect = exc(**{'name': None,
'cpunum': 0,
'cpumax': 0,
'cpuset': None,
'memsize': 0,
'memtotal': 0})
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_numa_topology_wrong(self):
for exc in [exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyForbidden,
exception.ImageNUMATopologyAsymmetric,
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
exception.ImageNUMATopologyMemoryOutOfRange]:
self._test_create_instance_numa_topology_wrong(exc)
def test_create_instance_too_much_metadata(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata']['vote'] = 'fiddletown'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_too_long(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {('a' * 260): '12345'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_too_long(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {'key1': ('a' * 260)}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_blank(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {'': 'abcd'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_not_dict(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = 'string'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_not_string(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {1: 'test'}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_not_string(self):
self.flags(quota_metadata_items=1)
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.body['server']['metadata'] = {'test': ['a', 'list']}
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_user_data_malformed_bad_request(self):
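        # user_data must be base64 encoded; a malformed value should fail
        # schema validation before reaching the compute API.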
params = {'user_data': 'u1234'}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
def test_create_instance_invalid_key_name(self):
image_href = 'http://localhost/v2/images/2'
self.body['server']['imageRef'] = image_href
self.body['server']['key_name'] = 'nonexistentkey'
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_valid_key_name(self):
self.body['server']['key_name'] = 'key'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_instance_invalid_flavor_href(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = 'http://localhost/v2/flavors/asdf'
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_int(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = -1
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_empty(self):
flavor_ref = ""
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_bad_flavor_href(self):
image_href = 'http://localhost/v2/images/2'
flavor_ref = 'http://localhost/v2/flavors/17'
self.body['server']['imageRef'] = image_href
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_bad_href(self):
image_href = 'asdf'
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_local_href(self):
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_admin_password(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(server['adminPass'],
self.body['server']['adminPass'])
def test_create_instance_admin_password_pass_disabled(self):
self.flags(enable_instance_password=False)
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dumps(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertIn('server', res)
self.assertIn('adminPass', self.body['server'])
def test_create_instance_admin_password_empty(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = ''
self.req.body = jsonutils.dumps(self.body)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body)
def test_create_location(self):
selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
self.req.body = jsonutils.dumps(self.body)
robj = self.controller.create(self.req, body=self.body)
self.assertEqual(robj['Location'], selfhref)
def _do_test_create_instance_above_quota(self, resource, allowed, quota,
expected_msg):
fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
self.body['server']['flavorRef'] = 3
self.req.body = jsonutils.dumps(self.body)
try:
self.controller.create(self.req, body=self.body).obj['server']
self.fail('expected quota to be exceeded')
except webob.exc.HTTPForbidden as e:
self.assertEqual(e.explanation, expected_msg)
def test_create_instance_above_quota_instances(self):
msg = ('Quota exceeded for instances: Requested 1, but'
' already used 10 of 10 instances')
self._do_test_create_instance_above_quota('instances', 0, 10, msg)
def test_create_instance_above_quota_ram(self):
msg = ('Quota exceeded for ram: Requested 4096, but'
' already used 8192 of 10240 ram')
self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
def test_create_instance_above_quota_cores(self):
msg = ('Quota exceeded for cores: Requested 2, but'
' already used 9 of 10 cores')
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
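# Note on the helper above, inferred from the three expected messages: the
# fakes.stub_out_instance_quota(stubs, allowed, quota, resource) stub leaves
# quota - allowed of the resource already consumed, so the flavor-3 request
# in each test pushes that resource over its limit and the API answers 403.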
def test_create_instance_above_quota_server_group_members(self):
ctxt = self.req.environ['nova.context']
fake_group = objects.InstanceGroup(ctxt)
fake_group.project_id = ctxt.project_id
fake_group.user_id = ctxt.user_id
fake_group.create()
def fake_count(context, name, group, user_id):
self.assertEqual(name, "server_group_members")
self.assertEqual(group.uuid, fake_group.uuid)
self.assertEqual(user_id,
self.req.environ['nova.context'].user_id)
return 10
def fake_limit_check(context, **kwargs):
if 'server_group_members' in kwargs:
raise exception.OverQuota(overs={})
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
self.req.body = jsonutils.dumps(self.body)
expected_msg = "Quota exceeded, too many servers in group"
try:
self.controller.create(self.req, body=self.body).obj
self.fail('expected quota to be exceeded')
except webob.exc.HTTPForbidden as e:
self.assertEqual(e.explanation, expected_msg)
def test_create_instance_with_group_hint(self):
ctxt = self.req.environ['nova.context']
test_group = objects.InstanceGroup(ctxt)
test_group.project_id = ctxt.project_id
test_group.user_id = ctxt.user_id
test_group.create()
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': test_group.uuid}
self.req.body = jsonutils.dumps(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
test_group = objects.InstanceGroup.get_by_uuid(ctxt, test_group.uuid)
self.assertIn(server['id'], test_group.members)
def test_create_instance_with_neutronv2_port_in_use(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortInUse(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_public_network_non_admin(self, mock_create):
public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
params = {'networks': [{'uuid': public_network_uuid}]}
self.req.body = jsonutils.dumps(self.body)
mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
network_uuid=public_network_uuid)
self.assertRaises(webob.exc.HTTPForbidden,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_multiple_instance_with_specified_ip_neutronv2(self,
_api_mock):
_api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
reason="")
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
address = '10.0.0.1'
requested_networks = [{'uuid': network, 'fixed_ip': address,
'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_multiple_instance_with_neutronv2_port(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
def fake_create(*args, **kwargs):
msg = ("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_neutronv2_not_found_network(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.NetworkNotFound(network_id=network)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_neutronv2_network_duplicated(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}, {'uuid': network}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.NetworkDuplicated(network_id=network)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_neutronv2_port_not_found(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortNotFound(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_network_ambiguous(self, mock_create):
mock_create.side_effect = exception.NetworkAmbiguous()
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InstanceExists(
name='instance-name'))
def test_create_instance_raise_instance_exists(self, mock_create):
self.assertRaises(webob.exc.HTTPConflict,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMEphemeralSize)
def test_create_instance_raise_invalid_bdm_ephsize(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMFormat(details=''))
def test_create_instance_raise_invalid_bdm_format(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMSwapSize)
def test_create_instance_raise_invalid_bdm_swapsize(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.ImageBadRequest(
image_id='dummy', response='dummy'))
def test_create_instance_raise_image_bad_request(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.FixedIpNotFoundForAddress(
address='dummy'))
def test_create_instance_raise_fixed_ip_not_found_bad_request(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_invalid_personality(self, mock_create):
codec = 'utf8'
content = 'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA=='
start_position = 19
end_position = 20
msg = 'invalid start byte'
mock_create.side_effect = UnicodeDecodeError(codec, content,
start_position,
end_position, msg)
self.body['server']['personality'] = [
{
"path": "/etc/banner.txt",
"contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
},
]
self.req.body = jsonutils.dumps(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_with_extra_personality_arg(self):
self.body['server']['personality'] = [
{
"path": "/etc/banner.txt",
"contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
"extra_arg": "extra value"
},
]
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
class ServersControllerCreateTestWithMock(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestWithMock, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
self.body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
self.req = fakes.HTTPRequest.blank('/fake/servers')
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def _test_create_extra(self, params, no_image=False):
self.body['server']['flavorRef'] = 2
if no_image:
self.body['server'].pop('imageRef', None)
self.body['server'].update(params)
self.req.body = jsonutils.dumps(self.body)
self.req.headers["content-type"] = "application/json"
self.controller.create(self.req, body=self.body).obj['server']
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
create_mock):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.2.3'
requested_networks = [{'uuid': network, 'fixed_ip': address}]
params = {'networks': requested_networks}
create_mock.side_effect = exception.FixedIpAlreadyInUse(
address=address,
instance_uuid=network)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
self.assertEqual(1, len(create_mock.call_args_list))
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_neutronv2_invalid_fixed_ip(self,
create_mock):
self.flags(network_api_class='nova.network.neutronv2.api.API')
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '999.0.2.3'
requested_networks = [{'uuid': network, 'fixed_ip': address}]
params = {'networks': requested_networks}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
self.assertFalse(create_mock.called)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidVolume(reason='error'))
def test_create_instance_with_invalid_volume_error(self, create_mock):
# Tests that InvalidVolume is translated to a 400 error.
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
class ServersViewBuilderTest(test.TestCase):
def setUp(self):
super(ServersViewBuilderTest, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
self.flags(use_ipv6=True)
nw_cache_info = self._generate_nw_cache_info()
db_inst = fakes.stub_instance(
id=1,
image_ref="5",
uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
display_name="test_server",
include_fake_metadata=False,
nw_cache=nw_cache_info)
privates = ['172.19.0.1']
publics = ['192.168.0.3']
public6s = ['b33f::fdee:ddff:fecc:bbaa']
def nw_info(*args, **kwargs):
return [(None, {'label': 'public',
'ips': [dict(ip=ip) for ip in publics],
'ip6s': [dict(ip=ip) for ip in public6s]}),
(None, {'label': 'private',
'ips': [dict(ip=ip) for ip in privates]})]
fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
self.uuid = db_inst['uuid']
self.view_builder = views.servers.ViewBuilderV21()
self.request = fakes.HTTPRequestV21.blank("/fake")
self.request.context = context.RequestContext('fake', 'fake')
self.instance = fake_instance.fake_instance_obj(
self.request.context,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
**db_inst)
self.self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid
def _generate_nw_cache_info(self):
fixed_ipv4 = ('192.168.1.100', '192.168.2.100', '192.168.3.100')
fixed_ipv6 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(fixed_ipv4[0])]},
{'cidr': 'b33f::/64',
'ips': [_ip(fixed_ipv6[0])]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.2.0/24',
'ips': [_ip(fixed_ipv4[1])]}]}},
{'address': 'cc:cc:cc:cc:cc:cc',
'id': 3,
'network': {'bridge': 'br0',
'id': 2,
'label': 'test2',
'subnets': [{'cidr': '192.168.3.0/24',
'ips': [_ip(fixed_ipv4[2])]}]}}]
return nw_cache
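# For orientation (restating the fixture above, not new behavior): this cache
# is what produces the 'addresses' sections asserted in the detail tests
# below -- network 'test1' carries 192.168.1.100 and 2001:db8:0:1::1 on mac
# aa:aa:aa:aa:aa:aa plus 192.168.2.100 on bb:bb:bb:bb:bb:bb, and 'test2'
# carries 192.168.3.100 on cc:cc:cc:cc:cc:cc.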
def test_get_flavor_valid_instance_type(self):
flavor_bookmark = "http://localhost/fake/flavors/1"
expected = {"id": "1",
"links": [{"rel": "bookmark",
"href": flavor_bookmark}]}
result = self.view_builder._get_flavor(self.request, self.instance)
self.assertEqual(result, expected)
def test_build_server(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail(self):
image_bookmark = "http://localhost/fake/images/5"
flavor_bookmark = "http://localhost/fake/flavors/1"
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
image_bookmark = "http://localhost/fake/images/5"
flavor_bookmark = "http://localhost/fake/flavors/1"
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"name": "test_server",
"status": "ERROR",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"fault": {
"code": 404,
"created": "2010-10-10T12:00:00Z",
"message": "HTTPNotFound",
"details": "Stock details for test",
},
}
}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_that_has_been_deleted(self):
self.instance['deleted'] = 1
self.instance['vm_state'] = vm_states.ERROR
fault = fake_instance.fake_fault_obj(self.request.context,
self.uuid, code=500,
message="No valid host was found")
self.instance['fault'] = fault
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "No valid host was found"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
# Regardless of vm_state, deleted servers should be reported as DELETED
self.assertEqual("DELETED", output['server']['status'])
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error",
'details': 'Stock details for test'}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error',
details='')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
output = self.view_builder.show(self.request, self.instance)
self.assertNotIn('fault', output['server'])
def test_build_server_detail_active_status(self):
# set the power state of the instance to running
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
image_bookmark = "http://localhost/fake/images/5"
flavor_bookmark = "http://localhost/fake/flavors/1"
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_metadata(self):
metadata = []
metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
metadata = nova_utils.metadata_to_dict(metadata)
self.instance['metadata'] = metadata
image_bookmark = "http://localhost/fake/images/5"
flavor_bookmark = "http://localhost/fake/flavors/1"
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "BUILD",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {"Open": "Stack"},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
class ServersAllExtensionsTestCase(test.TestCase):
"""Servers tests using default API router with all extensions enabled.
The intent here is to catch cases where extensions end up throwing
an exception because of a malformed request before the core API
gets a chance to validate the request and return a 400 response.
For example, AccessIPsController extends servers.Controller::
| @wsgi.extends
| def create(self, req, resp_obj, body):
| context = req.environ['nova.context']
| if authorize(context) and 'server' in resp_obj.obj:
| resp_obj.attach(xml=AccessIPTemplate())
| server = resp_obj.obj['server']
| self._extend_server(req, server)
we want to ensure that the extension isn't barfing on an invalid
body.
"""
def setUp(self):
super(ServersAllExtensionsTestCase, self).setUp()
self.app = compute.APIRouterV21()
def test_create_missing_server(self):
# Test create with malformed body.
def fake_create(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
self.stubs.Set(compute_api.API, 'create', fake_create)
req = fakes.HTTPRequestV21.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
def test_update_missing_server(self):
# Test update with malformed body.
req = fakes.HTTPRequestV21.blank('/fake/servers/1')
req.method = 'PUT'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dumps(body)
with mock.patch('nova.objects.Instance.save') as mock_save:
res = req.get_response(self.app)
self.assertFalse(mock_save.called)
self.assertEqual(400, res.status_int)
class ServersInvalidRequestTestCase(test.TestCase):
"""Tests of places we throw 400 Bad Request from."""
def setUp(self):
super(ServersInvalidRequestTestCase, self).setUp()
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
def _invalid_server_create(self, body):
req = fakes.HTTPRequestV21.blank('/fake/servers')
req.method = 'POST'
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_server_no_body(self):
self._invalid_server_create(body=None)
def test_create_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._invalid_server_create(body=body)
def test_create_server_malformed_entity(self):
body = {'server': 'string'}
self._invalid_server_create(body=body)
def _unprocessable_server_update(self, body):
req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, FAKE_UUID, body=body)
def test_update_server_no_body(self):
self._invalid_server_create(body=None)
def test_update_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._invalid_server_create(body=body)
def test_create_update_malformed_entity(self):
body = {'server': 'string'}
self._invalid_server_create(body=body)
class FakeExt(extensions.V21APIExtensionBase):
name = "DiskConfig"
alias = 'os-disk-config'
version = 1
fake_schema = {'fake_ext_attr': {'type': 'string'}}
def fake_extension_point(self, *args, **kwargs):
pass
def fake_schema_extension_point(self, version):
if version == '2.1':
return self.fake_schema
elif version == '2.0':
return {}
# This fake method should return the schema for the expected version.
# Returning None will make the tests fail, which means there is a bug
# somewhere in the code.
return None
def get_controller_extensions(self):
return []
def get_resources(self):
return []
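# With this fake registered (see the extensions_whitelist overrides below),
# the v2.1 create/update/rebuild/resize schemas built by ServersController
# should gain a 'fake_ext_attr' string property; that is what the
# TestServersExtensionSchema cases assert.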
class TestServersExtensionPoint(test.NoDBTestCase):
def setUp(self):
super(TestServersExtensionPoint, self).setUp()
CONF.set_override('extensions_whitelist', ['os-disk-config'],
'osapi_v21')
self.stubs.Set(disk_config, 'DiskConfig', FakeExt)
def _test_load_extension_point(self, name):
setattr(FakeExt, 'server_%s' % name,
FakeExt.fake_extension_point)
ext_info = extension_info.LoadedExtensionInfo()
controller = servers.ServersController(extension_info=ext_info)
self.assertEqual(
'os-disk-config',
list(getattr(controller,
'%s_extension_manager' % name))[0].obj.alias)
delattr(FakeExt, 'server_%s' % name)
def test_load_update_extension_point(self):
self._test_load_extension_point('update')
def test_load_rebuild_extension_point(self):
self._test_load_extension_point('rebuild')
def test_load_create_extension_point(self):
self._test_load_extension_point('create')
def test_load_resize_extension_point(self):
self._test_load_extension_point('resize')
class TestServersExtensionSchema(test.NoDBTestCase):
def setUp(self):
super(TestServersExtensionSchema, self).setUp()
CONF.set_override('extensions_whitelist', ['os-disk-config'],
'osapi_v21')
self.stubs.Set(disk_config, 'DiskConfig', FakeExt)
def _test_load_extension_schema(self, name):
setattr(FakeExt, 'get_server_%s_schema' % name,
FakeExt.fake_schema_extension_point)
ext_info = extension_info.LoadedExtensionInfo()
controller = servers.ServersController(extension_info=ext_info)
self.assertTrue(hasattr(controller, '%s_schema_manager' % name))
delattr(FakeExt, 'get_server_%s_schema' % name)
return getattr(controller, 'schema_server_%s' % name)
def test_load_create_extension_point(self):
# The expected schema is the combination of the base create schema and
# the FakeExt (os-disk-config) schema, because of the above
# extensions_whitelist.
expected_schema = copy.deepcopy(servers_schema.base_create)
expected_schema['properties']['server']['properties'].update(
FakeExt.fake_schema)
actual_schema = self._test_load_extension_schema('create')
self.assertEqual(expected_schema, actual_schema)
def test_load_update_extension_point(self):
# The fake os-disk-config extension provides an update schema, so its
# attributes are expected to be merged into the base update schema.
expected_schema = copy.deepcopy(servers_schema.base_update)
expected_schema['properties']['server']['properties'].update(
FakeExt.fake_schema)
actual_schema = self._test_load_extension_schema('update')
self.assertEqual(expected_schema, actual_schema)
def test_load_rebuild_extension_point(self):
# The fake os-disk-config extension provides a rebuild schema, so its
# attributes are expected to be merged into the base rebuild schema.
expected_schema = copy.deepcopy(servers_schema.base_rebuild)
expected_schema['properties']['rebuild']['properties'].update(
FakeExt.fake_schema)
actual_schema = self._test_load_extension_schema('rebuild')
self.assertEqual(expected_schema, actual_schema)
def test_load_resize_extension_point(self):
# The fake os-disk-config extension provides a resize schema, so its
# attributes are expected to be merged into the base resize schema.
expected_schema = copy.deepcopy(servers_schema.base_resize)
expected_schema['properties']['resize']['properties'].update(
FakeExt.fake_schema)
actual_schema = self._test_load_extension_schema('resize')
self.assertEqual(expected_schema, actual_schema)
# TODO(alex_xu): There isn't a dedicated file for the ips extension. Most of
# the unit tests related to the ips extension live in this file, so keep the
# ips policy enforcement tests here until a dedicated file exists.
class IPsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(IPsPolicyEnforcementV21, self).setUp()
self.controller = ips.IPsController()
self.req = fakes.HTTPRequest.blank("/v2/fake")
def test_index_policy_failed(self):
rule_name = "os_compute_api:ips:index"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:ips:show"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class ServersPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServersPolicyEnforcementV21, self).setUp()
ext_info = extension_info.LoadedExtensionInfo()
ext_info.extensions.update({'os-networks': 'fake'})
self.controller = servers.ServersController(extension_info=ext_info)
self.req = fakes.HTTPRequest.blank('')
self.image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
self.policy.set_rules(rules)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch.object(servers.ServersController, '_get_instance')
def test_start_policy_failed(self, _get_instance_mock):
_get_instance_mock.return_value = None
rule_name = "os_compute_api:servers:start"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._start_server,
self.req, FAKE_UUID, body={})
@mock.patch.object(servers.ServersController, '_get_instance')
def test_stop_policy_failed(self, _get_instance_mock):
_get_instance_mock.return_value = None
rule_name = "os_compute_api:servers:stop"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._stop_server,
self.req, FAKE_UUID, body={})
def test_index_policy_failed(self):
rule_name = "os_compute_api:servers:index"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.index, self.req)
def test_detail_policy_failed(self):
rule_name = "os_compute_api:servers:detail"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.detail, self.req)
def test_detail_get_tenants_policy_failed(self):
req = fakes.HTTPRequest.blank('')
req.GET["all_tenants"] = "True"
rule_name = "os_compute_api:servers:detail:get_all_tenants"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._get_servers, req, True)
def test_index_get_tenants_policy_failed(self):
req = fakes.HTTPRequest.blank('')
req.GET["all_tenants"] = "True"
rule_name = "os_compute_api:servers:index:get_all_tenants"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._get_servers, req, False)
@mock.patch.object(common, 'get_instance')
def test_show_policy_failed(self, get_instance_mock):
get_instance_mock.return_value = None
rule_name = "os_compute_api:servers:show"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.show, self.req, FAKE_UUID)
def test_delete_policy_failed(self):
rule_name = "os_compute_api:servers:delete"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
def test_update_policy_failed(self):
rule_name = "os_compute_api:servers:update"
rule = {rule_name: "project:non_fake"}
body = {'server': {'name': 'server_test'}}
self._common_policy_check(
rule, rule_name, self.controller.update, self.req,
FAKE_UUID, body=body)
def test_confirm_resize_policy_failed(self):
rule_name = "os_compute_api:servers:confirm_resize"
rule = {rule_name: "project:non_fake"}
body = {'server': {'name': 'server_test'}}
self._common_policy_check(
rule, rule_name, self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_policy_failed(self):
rule_name = "os_compute_api:servers:revert_resize"
rule = {rule_name: "project:non_fake"}
body = {'server': {'name': 'server_test'}}
self._common_policy_check(
rule, rule_name, self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_reboot_policy_failed(self):
rule_name = "os_compute_api:servers:reboot"
rule = {rule_name: "project:non_fake"}
body = {'reboot': {'type': 'HARD'}}
self._common_policy_check(
rule, rule_name, self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_resize_policy_failed(self):
rule_name = "os_compute_api:servers:resize"
rule = {rule_name: "project:non_fake"}
flavor_id = 1
self._common_policy_check(
rule, rule_name, self.controller._resize, self.req,
FAKE_UUID, flavor_id)
def test_create_image_policy_failed(self):
rule_name = "os_compute_api:servers:create_image"
rule = {rule_name: "project:non_fake"}
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self._common_policy_check(
rule, rule_name, self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'is_volume_backed_instance',
return_value=True)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(servers.ServersController, '_get_server')
def test_create_vol_backed_img_snapshotting_policy_blocks_project(self,
mock_is_vol_back,
mock_get_uuidi,
mock_get_server):
"""Don't permit a snapshot of a volume backed instance if configured
not to based on project
"""
rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
rules = {
rule_name: "project:non_fake",
"os_compute_api:servers:create_image": "",
}
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self._common_policy_check(
rules, rule_name, self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'is_volume_backed_instance',
return_value=True)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(servers.ServersController, '_get_server')
def test_create_vol_backed_img_snapshotting_policy_blocks_role(self,
mock_is_vol_back,
mock_get_uuidi,
mock_get_server):
"""Don't permit a snapshot of a volume backed instance if configured
not to based on role
"""
rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
rules = {
rule_name: "role:non_fake",
"os_compute_api:servers:create_image": "",
}
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self._common_policy_check(
rules, rule_name, self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def _create_policy_check(self, rules, rule_name):
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': flavor_ref,
'availability_zone': "zone1:host1:node1",
'block_device_mapping': [{'device_name': "/dev/sda1"}],
'networks': [{'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}],
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
self._common_policy_check(
rules, rule_name, self.controller.create, self.req, body=body)
def test_create_policy_failed(self):
rule_name = "os_compute_api:servers:create"
rules = {rule_name: "project:non_fake"}
self._create_policy_check(rules, rule_name)
def test_create_forced_host_policy_failed(self):
rule_name = "os_compute_api:servers:create:forced_host"
rule = {"os_compute_api:servers:create": "@",
rule_name: "project:non_fake"}
self._create_policy_check(rule, rule_name)
def test_create_attach_volume_policy_failed(self):
rule_name = "os_compute_api:servers:create:attach_volume"
rules = {"os_compute_api:servers:create": "@",
"os_compute_api:servers:create:forced_host": "@",
rule_name: "project:non_fake"}
self._create_policy_check(rules, rule_name)
def test_create_attach_network_policy_failed(self):
rule_name = "os_compute_api:servers:create:attach_network"
rules = {"os_compute_api:servers:create": "@",
"os_compute_api:servers:create:forced_host": "@",
"os_compute_api:servers:create:attach_volume": "@",
rule_name: "project:non_fake"}
self._create_policy_check(rules, rule_name)
| apporc/nova | nova/tests/unit/api/openstack/compute/test_serversV21.py | Python | apache-2.0 | 173,256 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import JYTHON
from .argumentmapper import DefaultValue
from .argumentparser import (PythonArgumentParser, UserKeywordArgumentParser,
DynamicArgumentParser, JavaArgumentParser)
from .argumentspec import ArgumentSpec
from .embedded import EmbeddedArguments
if JYTHON:
from .javaargumentcoercer import JavaArgumentCoercer
else:
JavaArgumentCoercer = None
| snyderr/robotframework | src/robot/running/arguments/__init__.py | Python | apache-2.0 | 1,015 |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import networkx as nx
from cloudify.workflows import api
from cloudify.workflows import tasks
class TaskDependencyGraph(object):
"""
A task graph builder
:param workflow_context: A WorkflowContext instance (used for logging)
"""
def __init__(self, workflow_context,
default_subgraph_task_config=None):
self.ctx = workflow_context
self.graph = nx.DiGraph()
default_subgraph_task_config = default_subgraph_task_config or {}
self._default_subgraph_task_config = default_subgraph_task_config
def add_task(self, task):
"""Add a WorkflowTask to this graph
:param task: The task
"""
self.ctx.logger.debug('adding task: {0}'.format(task))
self.graph.add_node(task.id, task=task)
def get_task(self, task_id):
"""Get a task instance that was inserted to this graph by its id
:param task_id: the task id
:return: a WorkflowTask instance for the requested task if found.
None, otherwise.
"""
data = self.graph.node.get(task_id)
return data['task'] if data is not None else None
def remove_task(self, task):
"""Remove the provided task from the graph
:param task: The task
"""
self.graph.remove_node(task.id)
# src depends on dst
def add_dependency(self, src_task, dst_task):
"""
Add a dependency between tasks.
The source task will only be executed after the target task terminates.
A task may depend on several tasks, in which case it will only be
executed after all its 'destination' tasks terminate
:param src_task: The source task
:param dst_task: The target task
"""
self.ctx.logger.debug('adding dependency: {0} -> {1}'.format(src_task,
dst_task))
if not self.graph.has_node(src_task.id):
raise RuntimeError('source task {0} is not in graph (task id: '
'{1})'.format(src_task, src_task.id))
if not self.graph.has_node(dst_task.id):
raise RuntimeError('destination task {0} is not in graph (task '
'id: {1})'.format(dst_task, dst_task.id))
self.graph.add_edge(src_task.id, dst_task.id)
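# Illustrative sketch, not part of the original module (t1 and t2 are
# hypothetical WorkflowTask instances already added via add_task): the call
#
#     graph.add_dependency(t1, t2)
#
# adds the edge t1.id -> t2.id, so _task_has_dependencies(t1) stays True and
# t1 is not picked up by _executable_tasks() until t2 reaches a terminated
# state.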
def sequence(self):
"""
:return: a new TaskSequence for this graph
"""
return TaskSequence(self)
def subgraph(self, name):
task = SubgraphTask(name, self, **self._default_subgraph_task_config)
self.add_task(task)
return task
def execute(self):
"""
Start executing the graph based on tasks and dependencies between
them.
Calling this method will block until one of the following occurs:
1. all tasks terminated
2. a task failed
3. an unhandled exception is raised
4. the execution is cancelled
Note: This method will raise an api.ExecutionCancelled error if the
execution has been cancelled. When catching errors raised from this
method, make sure to re-raise the error if it's
api.ExecutionCancelled in order to allow the execution to be set in
cancelled mode properly.
Also note that for the time being, if such a cancelling event
occurs, the method might return even while there's some operations
still being executed.
"""
while True:
if self._is_execution_cancelled():
raise api.ExecutionCancelled()
self._check_dump_request()
# handle all terminated tasks
# it is important this happens before handling
# executable tasks so we get to make tasks executable
# and then execute them in this iteration (otherwise, it would
# be the next one)
for task in self._terminated_tasks():
self._handle_terminated_task(task)
# handle all executable tasks
for task in self._executable_tasks():
self._handle_executable_task(task)
# no more tasks to process, time to move on
if len(self.graph.node) == 0:
return
# sleep some and do it all over again
else:
time.sleep(0.1)
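# A minimal caller-side sketch of the re-raise pattern the execute()
# docstring asks for (graph construction and the error handler are assumed):
#
#     try:
#         graph.execute()
#     except api.ExecutionCancelled:
#         raise  # let the engine flip the execution into cancelled state
#     except RuntimeError:
#         handle_workflow_failure()  # hypothetical follow-up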
@staticmethod
def _is_execution_cancelled():
return api.has_cancel_request()
def _executable_tasks(self):
"""
A task is executable if it is in the pending state,
it has no dependencies at the moment (i.e. all of its dependencies
have already terminated), and its execution timestamp is not later
than the current timestamp
:return: An iterator for executable tasks
"""
now = time.time()
return (task for task in self.tasks_iter()
if task.get_state() == tasks.TASK_PENDING and
task.execute_after <= now and
not (task.containing_subgraph and
task.containing_subgraph.get_state() ==
tasks.TASK_FAILED) and
not self._task_has_dependencies(task))
def _terminated_tasks(self):
"""
A task is terminated if it is in 'succeeded' or 'failed' state
:return: An iterator for terminated tasks
"""
return (task for task in self.tasks_iter()
if task.get_state() in tasks.TERMINATED_STATES)
def _task_has_dependencies(self, task):
"""
:param task: The task
:return: Does this task have any dependencies
"""
return (len(self.graph.succ.get(task.id, {})) > 0 or
(task.containing_subgraph and self._task_has_dependencies(
task.containing_subgraph)))
def tasks_iter(self):
"""
An iterator on tasks added to the graph
"""
return (data['task'] for _, data in self.graph.nodes_iter(data=True))
def _handle_executable_task(self, task):
"""Handle executable task"""
task.set_state(tasks.TASK_SENDING)
task.apply_async()
def _handle_terminated_task(self, task):
"""Handle terminated task"""
handler_result = task.handle_task_terminated()
if handler_result.action == tasks.HandlerResult.HANDLER_FAIL:
if isinstance(task, SubgraphTask) and task.failed_task:
task = task.failed_task
raise RuntimeError(
"Workflow failed: Task failed '{0}' -> {1}".format(task.name,
task.error))
dependents = self.graph.predecessors(task.id)
removed_edges = [(dependent, task.id)
for dependent in dependents]
self.graph.remove_edges_from(removed_edges)
self.graph.remove_node(task.id)
if handler_result.action == tasks.HandlerResult.HANDLER_RETRY:
new_task = handler_result.retried_task
self.add_task(new_task)
added_edges = [(dependent, new_task.id)
for dependent in dependents]
self.graph.add_edges_from(added_edges)
def _check_dump_request(self):
task_dump = os.environ.get('WORKFLOW_TASK_DUMP')
if not (task_dump and os.path.exists(task_dump)):
return
os.remove(task_dump)
task_dump_path = '{0}.{1}'.format(task_dump, time.time())
with open(task_dump_path, 'w') as f:
f.write(json.dumps({
'tasks': [task.dump() for task in self.tasks_iter()],
'edges': [[s, t] for s, t in self.graph.edges_iter()]}))
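# Operational sketch of the dump hook above (the path is only an example):
# if the WORKFLOW_TASK_DUMP environment variable names an existing file,
# say /tmp/task-dump, execute() removes that file and writes the current
# tasks and edges as JSON to /tmp/task-dump.<timestamp>.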
class forkjoin(object):
"""
A simple wrapper for tasks. Used in conjunction with TaskSequence.
Defined to make the code easier to read (instead of passing a list);
see ``TaskSequence.add`` for more details.
"""
def __init__(self, *tasks):
self.tasks = tasks
class TaskSequence(object):
"""
Helper class to add tasks in a sequential manner to a task dependency
graph
:param graph: The TaskDependencyGraph instance
"""
def __init__(self, graph):
self.graph = graph
self.last_fork_join_tasks = None
def add(self, *tasks):
"""
Add tasks to the sequence.
:param tasks: Each task might be:
* A WorkflowTask instance, in which case, it will be
added to the graph with a dependency between it and
the task previously inserted into the sequence
* A forkjoin of tasks, in which case it will be treated
as a "fork-join" task in the sequence, i.e. all the
fork-join tasks will depend on the last task in the
sequence (could be fork join) and the next added task
will depend on all tasks in this fork-join task
"""
for fork_join_tasks in tasks:
if isinstance(fork_join_tasks, forkjoin):
fork_join_tasks = fork_join_tasks.tasks
else:
fork_join_tasks = [fork_join_tasks]
for task in fork_join_tasks:
self.graph.add_task(task)
if self.last_fork_join_tasks is not None:
for last_fork_join_task in self.last_fork_join_tasks:
self.graph.add_dependency(task, last_fork_join_task)
if fork_join_tasks:
self.last_fork_join_tasks = fork_join_tasks
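# Illustrative sketch of the sequencing rules described in add() above
# (t1..t4 are hypothetical WorkflowTask instances):
#
#     seq = graph.sequence()
#     seq.add(t1, forkjoin(t2, t3), t4)
#
# Here t2 and t3 both depend on t1, and t4 depends on both t2 and t3, so the
# fork-join pair runs after t1 and t4 runs only once both have terminated.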
class SubgraphTask(tasks.WorkflowTask):
def __init__(self,
name,
graph,
task_id=None,
info=None,
on_success=None,
on_failure=None,
total_retries=tasks.DEFAULT_SUBGRAPH_TOTAL_RETRIES,
retry_interval=tasks.DEFAULT_RETRY_INTERVAL,
send_task_events=tasks.DEFAULT_SEND_TASK_EVENTS):
super(SubgraphTask, self).__init__(
graph.ctx,
task_id,
info=info,
on_success=on_success,
on_failure=on_failure,
total_retries=total_retries,
retry_interval=retry_interval,
send_task_events=send_task_events)
self.graph = graph
self._name = name
self.tasks = {}
self.failed_task = None
if not self.on_failure:
self.on_failure = lambda tsk: tasks.HandlerResult.fail()
self.async_result = tasks.StubAsyncResult()
def _duplicate(self):
raise NotImplementedError('self.retried_task should be set explicitly'
' in self.on_failure handler')
@property
def cloudify_context(self):
return {}
def is_local(self):
return True
@property
def name(self):
return self._name
def sequence(self):
return TaskSequence(self)
def subgraph(self, name):
task = SubgraphTask(name, self.graph,
**self.graph._default_subgraph_task_config)
self.add_task(task)
return task
def add_task(self, task):
self.graph.add_task(task)
self.tasks[task.id] = task
if task.containing_subgraph and task.containing_subgraph is not self:
raise RuntimeError('task {0}[{1}] cannot be contained in more '
'than one subgraph. It is currently contained '
'in {2} and it is now being added to {3}'
.format(task,
task.id,
task.containing_subgraph.name,
self.name))
task.containing_subgraph = self
def add_dependency(self, src_task, dst_task):
self.graph.add_dependency(src_task, dst_task)
def apply_async(self):
if not self.tasks:
self.set_state(tasks.TASK_SUCCEEDED)
else:
self.set_state(tasks.TASK_STARTED)
def task_terminated(self, task, new_task=None):
del self.tasks[task.id]
if new_task:
self.tasks[new_task.id] = new_task
new_task.containing_subgraph = self
if not self.tasks and self.get_state() not in tasks.TERMINATED_STATES:
self.set_state(tasks.TASK_SUCCEEDED)
| isaac-s/cloudify-plugins-common | cloudify/workflows/tasks_graph.py | Python | apache-2.0 | 13,226 |
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for naming.py module."""
__author__ = 'watson@google.com (Tony Watson)'
import io
import unittest
from lib import nacaddr
from lib import naming
class NamingUnitTest(unittest.TestCase):
"""Unit Test for naming.py.
The Naming class allows us to specify if we want to use arrays of text
instead of files. Most of the tests below create an empty Naming class.
To populate the class with data, we simply pass our test data in arrays
to the ParseServiceList/ParseNetworkList methods, or in some cases, pass an
io.BytesIO stream.
"""
def setUp(self):
self.defs = naming.Naming(None)
servicedata = []
servicedata.append('SVC1 = 80/tcp 81/udp 82/tcp')
servicedata.append('SVC2 = 80/tcp 81/udp 82/tcp SVC2')
servicedata.append('SVC3 = 80/tcp 81/udp')
servicedata.append('SVC4 = 80/tcp # some service')
servicedata.append('TCP_90 = 90/tcp')
servicedata.append('SVC5 = TCP_90')
servicedata.append('SVC6 = SVC1 SVC5')
networkdata = []
networkdata.append('NET1 = 10.1.0.0/8 # network1')
networkdata.append('NET2 = 10.2.0.0/16 # network2.0')
networkdata.append(' NET1')
networkdata.append('9OCLOCK = 1.2.3.4/32 # 9 is the time')
networkdata.append('FOOBAR = 9OCLOCK')
networkdata.append('FOO_V6 = ::FFFF:FFFF:FFFF:FFFF')
networkdata.append('BAR_V6 = ::1/128')
networkdata.append('BAZ = FOO_V6')
networkdata.append(' BAR_V6')
networkdata.append('BING = NET1')
networkdata.append(' FOO_V6')
self.defs.ParseServiceList(servicedata)
self.defs.ParseNetworkList(networkdata)
def testCommentedServices(self):
self.assertEqual(self.defs.GetService('SVC4'), ['80/tcp'])
self.assertListEqual(self.defs.GetServiceByProto('SVC4', 'tcp'),
['80'])
def testBadGetRequest(self):
"""Test proper handling of a non-existant service request."""
self.assertRaises(naming.UndefinedServiceError, self.defs.GetService, 'FOO')
self.assertRaises(naming.UndefinedServiceError, self.defs.GetServiceByProto,
'FOO', 'tcp')
def testGetServiceRecursion(self):
"""Ensure we don't slip into recursion hell when object contains itself."""
self.assertListEqual(self.defs.GetService('SVC2'),
['80/tcp', '81/udp', '82/tcp'])
def testGetService(self):
"""Verify proper results from a service lookup request."""
self.assertListEqual(self.defs.GetService('SVC1'),
['80/tcp', '81/udp', '82/tcp'])
def testBadProtocol(self):
"""Test proper handling of a non-existant service request."""
self.assertListEqual(self.defs.GetServiceByProto('SVC1', 'fud'), [])
def testGetServiceByProto(self):
self.assertListEqual(self.defs.GetServiceByProto('SVC1', 'tcp'),
['80', '82'])
def testGetServiceByProtoWithoutProtocols(self):
"""Ensure services with protocol are not returned when type is specified."""
self.assertListEqual(self.defs.GetServiceByProto('SVC3', 'tcp'), ['80'])
def testNetworkComment(self):
self.assertEqual(self.defs.GetNetAddr('NET1')[0].text, 'network1')
def testNestedNetworkComment(self):
self.assertEqual(self.defs.GetNetAddr('NET2')[1].text, 'network1')
def testUndefinedAddress(self):
self.assertRaises(naming.UndefinedAddressError, self.defs.GetNetAddr, 'FOO')
def testNamespaceCollisionError(self):
badservicedata = []
badservicedata.append('SVC1 = 80/tcp')
badservicedata.append('SVC1 = 81/udp')
testdefs = naming.Naming(None)
self.assertRaises(naming.NamespaceCollisionError,
testdefs.ParseServiceList, badservicedata)
def testNetworkAddress(self):
self.assertListEqual(self.defs.GetNetAddr('NET1'),
[nacaddr.IPv4('10.0.0.0/8')])
def testInet6Address(self):
self.assertListEqual(self.defs.GetNetAddr('BAZ'),
[nacaddr.IPv6('::FFFF:FFFF:FFFF:FFFF'),
nacaddr.IPv6('::1/128')])
def testMixedAddresses(self):
self.assertListEqual(self.defs.GetNetAddr('BING'),
[nacaddr.IPv4('10.0.0.0/8'),
nacaddr.IPv6('::FFFF:FFFF:FFFF:FFFF')])
# same thing but letting nacaddr decide which v4 or v6.
self.assertListEqual(self.defs.GetNetAddr('BING'),
[nacaddr.IP('10.0.0.0/8'),
nacaddr.IP('::FFFF:FFFF:FFFF:FFFF')])
def testNestedServices(self):
self.assertListEqual(self.defs.GetServiceByProto('SVC6', 'tcp'),
['80', '82', '90'])
def testServiceParents(self):
"""SVC6 contains SVC5 which contains TCP_90 which contains 90/tcp."""
self.assertListEqual(self.defs.GetServiceParents('90/tcp'),
['TCP_90', 'SVC5', 'SVC6'])
def testNetParents(self):
"""BIN & NET2 contain NET1, BING & BAZ contain FOO_V6."""
self.assertItemsEqual(self.defs.GetNetParents('NET1'),
['BING', 'NET2'])
self.assertItemsEqual(self.defs.GetNetParents('FOO_V6'), ['BING', 'BAZ'])
def testGetIpParents(self):
"""Ensure GetIpParents returns proper results."""
self.assertListEqual(self.defs.GetIpParents('10.11.12.13/32'),
['BING', 'NET1', 'NET2'])
def testUndefinedTokenNesting(self):
bad_servicedata = ['FOO = 7/tcp BAR']
bad_networkdata = ['NETGROUP = 10.0.0.0/8 FOOBAR']
baddefs = naming.Naming(None)
baddefs.ParseServiceList(bad_servicedata)
baddefs.ParseNetworkList(bad_networkdata)
self.assertRaises(naming.UndefinedServiceError,
baddefs._CheckUnseen, 'services')
self.assertRaises(naming.UndefinedAddressError,
baddefs._CheckUnseen, 'networks')
def testParseNetFile(self):
filedefs = naming.Naming(None)
data = io.BytesIO('FOO = 127.0.0.1 # some network\n')
filedefs._ParseFile(data, 'networks')
self.assertEqual(filedefs.GetNetAddr('FOO'), [nacaddr.IPv4('127.0.0.1')])
def testParseServiceFile(self):
filedefs = naming.Naming(None)
data = io.BytesIO('HTTP = 80/tcp\n')
filedefs._ParseFile(data, 'services')
self.assertEqual(filedefs.GetService('HTTP'), ['80/tcp'])
if __name__ == '__main__':
unittest.main()
|
pettai/capirca
|
tests/naming_test.py
|
Python
|
apache-2.0
| 6,887
|
from __future__ import annotations
from io import StringIO
import toolz
import ibis.common.exceptions as com
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.util as util
from ibis.backends.base.sql.registry import quote_identifier
from ibis.config import options
from .base import DML, QueryAST, SetOp
from .select_builder import SelectBuilder
from .translator import ExprTranslator, QueryContext
class TableSetFormatter:
_join_names = {
ops.InnerJoin: 'INNER JOIN',
ops.LeftJoin: 'LEFT OUTER JOIN',
ops.RightJoin: 'RIGHT OUTER JOIN',
ops.OuterJoin: 'FULL OUTER JOIN',
ops.LeftAntiJoin: 'LEFT ANTI JOIN',
ops.LeftSemiJoin: 'LEFT SEMI JOIN',
ops.CrossJoin: 'CROSS JOIN',
}
def __init__(self, parent, expr, indent=2):
# `parent` is a `Select` instance, not a `TableSetFormatter`
self.parent = parent
self.context = parent.context
self.expr = expr
self.indent = indent
self.join_tables = []
self.join_types = []
self.join_predicates = []
def _translate(self, expr):
return self.parent._translate(expr)
def _walk_join_tree(self, op):
left = op.left.op()
right = op.right.op()
if util.all_of([left, right], ops.Join):
raise NotImplementedError(
'Joins between joins are not yet supported'
)
self._validate_join_predicates(op.predicates)
jname = self._get_join_type(op)
# Read off tables and join predicates left-to-right in
# depth-first order
if isinstance(left, ops.Join):
self._walk_join_tree(left)
self.join_tables.append(self._format_table(op.right))
self.join_types.append(jname)
self.join_predicates.append(op.predicates)
elif isinstance(right, ops.Join):
# When rewrites are possible at the expression IR stage, we should
# do them. Otherwise subqueries might be necessary in some cases
# here
raise NotImplementedError(
'Joins on the right side of a join are not yet supported'
)
else:
# Both tables
self.join_tables.append(self._format_table(op.left))
self.join_tables.append(self._format_table(op.right))
self.join_types.append(jname)
self.join_predicates.append(op.predicates)
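# Illustrative sketch (not from the original source): for a left-deep tree
# such as ``(a INNER JOIN b) LEFT OUTER JOIN c`` the walk above fills the
# three parallel lists that get_result() later zips back together:
#
#   join_tables     = [<a>, <b>, <c>]
#   join_types      = ['INNER JOIN', 'LEFT OUTER JOIN']
#   join_predicates = [<inner join predicates>, <outer join predicates>]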
# Placeholder; revisit when supporting other databases
_non_equijoin_supported = True
def _validate_join_predicates(self, predicates):
for pred in predicates:
op = pred.op()
if (
not isinstance(op, ops.Equals)
and not self._non_equijoin_supported
):
raise com.TranslationError(
'Non-equality join predicates, '
'i.e. non-equijoins, are not '
'supported'
)
def _get_join_type(self, op):
return self._join_names[type(op)]
def _quote_identifier(self, name):
return quote_identifier(name)
def _format_table(self, expr):
# TODO: This could probably go in a class and be significantly nicer
ctx = self.context
ref_expr = expr
op = ref_op = expr.op()
if isinstance(op, ops.SelfReference):
ref_expr = op.table
ref_op = ref_expr.op()
if isinstance(ref_op, ops.PhysicalTable):
name = ref_op.name
if name is None:
raise com.RelationError(f'Table did not have a name: {expr!r}')
result = self._quote_identifier(name)
is_subquery = False
else:
# A subquery
if ctx.is_extracted(ref_expr):
# Was put elsewhere, e.g. WITH block, we just need to grab its
# alias
alias = ctx.get_ref(expr)
# HACK: self-references have to be treated more carefully here
if isinstance(op, ops.SelfReference):
return f'{ctx.get_ref(ref_expr)} {alias}'
else:
return alias
subquery = ctx.get_compiled_expr(expr)
result = f'(\n{util.indent(subquery, self.indent)}\n)'
is_subquery = True
if is_subquery or ctx.need_aliases(expr):
result += f' {ctx.get_ref(expr)}'
return result
def get_result(self):
# Got to unravel the join stack; the nesting order could be
# arbitrary, so we do a depth first search and push the join tokens
# and predicates onto a flat list, then format them
op = self.expr.op()
if isinstance(op, ops.Join):
self._walk_join_tree(op)
else:
self.join_tables.append(self._format_table(self.expr))
# TODO: Now actually format the things
buf = StringIO()
buf.write(self.join_tables[0])
for jtype, table, preds in zip(
self.join_types, self.join_tables[1:], self.join_predicates
):
buf.write('\n')
buf.write(util.indent(f'{jtype} {table}', self.indent))
fmt_preds = []
npreds = len(preds)
for pred in preds:
new_pred = self._translate(pred)
if npreds > 1:
new_pred = f'({new_pred})'
fmt_preds.append(new_pred)
if len(fmt_preds):
buf.write('\n')
conj = ' AND\n{}'.format(' ' * 3)
fmt_preds = util.indent(
'ON ' + conj.join(fmt_preds), self.indent * 2
)
buf.write(fmt_preds)
return buf.getvalue()
class Select(DML):
"""
A SELECT statement which, after execution, might yield back to the user a
table, array/list, or scalar value, depending on the expression that
generated it
"""
def __init__(
self,
table_set,
select_set,
translator_class,
table_set_formatter_class,
context,
subqueries=None,
where=None,
group_by=None,
having=None,
order_by=None,
limit=None,
distinct=False,
indent=2,
result_handler=None,
parent_expr=None,
):
self.translator_class = translator_class
self.table_set_formatter_class = table_set_formatter_class
self.context = context
self.select_set = select_set
self.table_set = table_set
self.distinct = distinct
self.parent_expr = parent_expr
self.where = where or []
# Group keys and post-predicates for aggregations
self.group_by = group_by or []
self.having = having or []
self.order_by = order_by or []
self.limit = limit
self.subqueries = subqueries or []
self.indent = indent
self.result_handler = result_handler
def _translate(self, expr, named=False, permit_subquery=False):
translator = self.translator_class(
expr,
context=self.context,
named=named,
permit_subquery=permit_subquery,
)
return translator.get_result()
def equals(self, other, cache=None):
if cache is None:
cache = {}
key = self, other
try:
return cache[key]
except KeyError:
cache[key] = result = self is other or (
isinstance(other, Select)
and self.limit == other.limit
and ops.all_equal(self._all_exprs(), other._all_exprs())
)
return result
def _all_exprs(self):
# Gnarly, maybe we can improve this somehow
expr_attrs = (
'select_set',
'table_set',
'where',
'group_by',
'having',
'order_by',
'subqueries',
)
exprs = []
for attr in expr_attrs:
val = getattr(self, attr)
if isinstance(val, list):
exprs.extend(val)
else:
exprs.append(val)
return exprs
def compile(self):
"""
This method isn't yet idempotent; calling multiple times may yield
unexpected results
"""
# Can't tell if this is a hack or not. Revisit later
self.context.set_query(self)
# If any subqueries, translate them and add to beginning of query as
# part of the WITH section
with_frag = self.format_subqueries()
# SELECT
select_frag = self.format_select_set()
# FROM, JOIN, UNION
from_frag = self.format_table_set()
# WHERE
where_frag = self.format_where()
# GROUP BY and HAVING
groupby_frag = self.format_group_by()
# ORDER BY
order_frag = self.format_order_by()
# LIMIT
limit_frag = self.format_limit()
# Glue together the query fragments and return
query = '\n'.join(
filter(
None,
[
with_frag,
select_frag,
from_frag,
where_frag,
groupby_frag,
order_frag,
limit_frag,
],
)
)
return query
def format_subqueries(self):
if not self.subqueries:
return
context = self.context
buf = []
for i, expr in enumerate(self.subqueries):
formatted = util.indent(context.get_compiled_expr(expr), 2)
alias = context.get_ref(expr)
buf.append(f'{alias} AS (\n{formatted}\n)')
return 'WITH {}'.format(',\n'.join(buf))
def format_select_set(self):
# TODO:
context = self.context
formatted = []
for expr in self.select_set:
if isinstance(expr, ir.ValueExpr):
expr_str = self._translate(expr, named=True)
elif isinstance(expr, ir.TableExpr):
# A * selection, possibly prefixed
if context.need_aliases(expr):
alias = context.get_ref(expr)
expr_str = f'{alias}.*' if alias else '*'
else:
expr_str = '*'
formatted.append(expr_str)
buf = StringIO()
line_length = 0
max_length = 70
tokens = 0
for i, val in enumerate(formatted):
# always line-break for multi-line expressions
if val.count('\n'):
if i:
buf.write(',')
buf.write('\n')
indented = util.indent(val, self.indent)
buf.write(indented)
# set length of last line
line_length = len(indented.split('\n')[-1])
tokens = 1
elif (
tokens > 0
and line_length
and len(val) + line_length > max_length
):
# There is an expr, and adding this new one will make the line
# too long
buf.write(',\n ') if i else buf.write('\n')
buf.write(val)
line_length = len(val) + 7
tokens = 1
else:
if i:
buf.write(',')
buf.write(' ')
buf.write(val)
tokens += 1
line_length += len(val) + 2
if self.distinct:
select_key = 'SELECT DISTINCT'
else:
select_key = 'SELECT'
return f'{select_key}{buf.getvalue()}'
def format_table_set(self):
if self.table_set is None:
return None
fragment = 'FROM '
helper = self.table_set_formatter_class(self, self.table_set)
fragment += helper.get_result()
return fragment
def format_group_by(self):
if not len(self.group_by):
# There is no aggregation, nothing to see here
return None
lines = []
if len(self.group_by) > 0:
clause = 'GROUP BY {}'.format(
', '.join([str(x + 1) for x in self.group_by])
)
lines.append(clause)
if len(self.having) > 0:
trans_exprs = []
for expr in self.having:
translated = self._translate(expr)
trans_exprs.append(translated)
lines.append('HAVING {}'.format(' AND '.join(trans_exprs)))
return '\n'.join(lines)
def format_where(self):
if not self.where:
return None
buf = StringIO()
buf.write('WHERE ')
fmt_preds = []
npreds = len(self.where)
for pred in self.where:
new_pred = self._translate(pred, permit_subquery=True)
if npreds > 1:
new_pred = f'({new_pred})'
fmt_preds.append(new_pred)
conj = ' AND\n{}'.format(' ' * 6)
buf.write(conj.join(fmt_preds))
return buf.getvalue()
def format_order_by(self):
if not self.order_by:
return None
buf = StringIO()
buf.write('ORDER BY ')
formatted = []
for expr in self.order_by:
key = expr.op()
translated = self._translate(key.expr)
if not key.ascending:
translated += ' DESC'
formatted.append(translated)
buf.write(', '.join(formatted))
return buf.getvalue()
def format_limit(self):
if not self.limit:
return None
buf = StringIO()
n, offset = self.limit['n'], self.limit['offset']
buf.write(f'LIMIT {n}')
if offset is not None and offset != 0:
buf.write(f' OFFSET {offset}')
return buf.getvalue()
class Union(SetOp):
def __init__(self, tables, expr, context, distincts):
super().__init__(tables, expr, context)
self.distincts = distincts
@staticmethod
def keyword(distinct):
return 'UNION' if distinct else 'UNION ALL'
def _get_keyword_list(self):
return map(self.keyword, self.distincts)
class Intersection(SetOp):
def _get_keyword_list(self):
return ["INTERSECT"] * (len(self.tables) - 1)
class Difference(SetOp):
def _get_keyword_list(self):
return ["EXCEPT"] * (len(self.tables) - 1)
def flatten_union(table: ir.TableExpr):
"""Extract all union queries from `table`.
Parameters
----------
table : TableExpr
Returns
-------
Iterable[Union[TableExpr, bool]]
"""
op = table.op()
if isinstance(op, ops.Union):
# For some reason mypy considers `op.left` and `op.right`
# of `Argument` type, and fails the validation. While in
# `flatten` types are the same, and it works
return toolz.concatv(
flatten_union(op.left), # type: ignore
[op.distinct],
flatten_union(op.right), # type: ignore
)
return [table]
def flatten(table: ir.TableExpr):
"""Extract all intersection or difference queries from `table`.
Parameters
----------
table : TableExpr
Returns
-------
Iterable[Union[TableExpr]]
"""
op = table.op()
return list(toolz.concatv(flatten_union(op.left), flatten_union(op.right)))
class Compiler:
translator_class = ExprTranslator
context_class = QueryContext
select_builder_class = SelectBuilder
table_set_formatter_class = TableSetFormatter
select_class = Select
union_class = Union
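# Illustrative sketch (not part of the original source; the subclass and
# attribute values are hypothetical): a backend customizes code generation
# by subclassing and swapping these class attributes, e.g.
#
#   class MyBackendCompiler(Compiler):
#       translator_class = MyBackendExprTranslator
#       table_set_formatter_class = MyBackendTableSetFormatter
#
#   sql = MyBackendCompiler.to_sql(expr)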
@classmethod
def make_context(cls, params=None):
params = params or {}
params = {expr.op(): value for expr, value in params.items()}
return cls.context_class(compiler=cls, params=params)
@classmethod
def to_ast(cls, expr, context=None):
if context is None:
context = cls.make_context()
op = expr.op()
# collect setup and teardown queries
setup_queries = cls._generate_setup_queries(expr, context)
teardown_queries = cls._generate_teardown_queries(expr, context)
# TODO: any setup / teardown DDL statements will need to be done prior
# to building the result set-generating statements.
if isinstance(op, ops.Union):
query = cls._make_union(cls.union_class, expr, context)
elif isinstance(op, ops.Intersection):
query = Intersection(flatten(expr), expr, context=context)
elif isinstance(op, ops.Difference):
query = Difference(flatten(expr), expr, context=context)
else:
query = cls.select_builder_class().to_select(
select_class=cls.select_class,
table_set_formatter_class=cls.table_set_formatter_class,
expr=expr,
context=context,
translator_class=cls.translator_class,
)
return QueryAST(
context,
query,
setup_queries=setup_queries,
teardown_queries=teardown_queries,
)
@classmethod
def to_ast_ensure_limit(cls, expr, limit=None, params=None):
context = cls.make_context(params=params)
query_ast = cls.to_ast(expr, context)
# note: limit can still be None at this point, if the global
# default_limit is None
for query in reversed(query_ast.queries):
if (
isinstance(query, Select)
and not isinstance(expr, ir.ScalarExpr)
and query.table_set is not None
):
if query.limit is None:
if limit == 'default':
query_limit = options.sql.default_limit
else:
query_limit = limit
if query_limit:
query.limit = {'n': query_limit, 'offset': 0}
elif limit is not None and limit != 'default':
query.limit = {'n': limit, 'offset': query.limit['offset']}
return query_ast
@classmethod
def to_sql(cls, expr, context=None, params=None):
if context is None:
context = cls.make_context(params=params)
return cls.to_ast(expr, context).queries[0].compile()
@staticmethod
def _generate_setup_queries(expr, context):
return []
@staticmethod
def _generate_teardown_queries(expr, context):
return []
@staticmethod
def _make_union(union_class, expr, context):
# flatten unions so that we can codegen them all at once
union_info = list(flatten_union(expr))
# since op is a union, union_info has at least 3 elements (left, distinct,
# right); every additional union contributes two more elements (distinct,
# right), so the total is 3 + 2 * (number of unions - 1), which is always
# an odd number
npieces = len(union_info)
assert npieces >= 3 and npieces % 2 != 0, 'Invalid union expression'
# 1. every other object starting from 0 is a TableExpr instance
# 2. every other object starting from 1 is a bool indicating the type
# of union (distinct or not distinct)
table_exprs, distincts = union_info[::2], union_info[1::2]
return union_class(
table_exprs, expr, distincts=distincts, context=context
)
|
cpcloud/ibis
|
ibis/backends/base/sql/compiler/query_builder.py
|
Python
|
apache-2.0
| 19,766
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Interface to depends.exe on Windows.
We use depends.exe to discover the DLLs that Python extension module DLLs depend on.
"""
import os
import sys
from logging import info
from nuitka import Tracing
from nuitka.__past__ import raw_input, urlretrieve # pylint: disable=W0622
from nuitka.utils import Utils
def getDependsExePath():
""" Return the path of depends.exe (for Windows).
Will prompt the user to download it if it is not already cached in
Nuitka's AppData directory.
"""
if Utils.getArchitecture() == "x86":
depends_url = "http://dependencywalker.com/depends22_x86.zip"
else:
depends_url = "http://dependencywalker.com/depends22_x64.zip"
if "APPDATA" not in os.environ:
sys.exit("Error, standalone mode cannot find 'APPDATA' environment.")
nuitka_app_dir = Utils.joinpath(os.environ["APPDATA"], "nuitka")
if not Utils.isDir(nuitka_app_dir):
Utils.makePath(nuitka_app_dir)
nuitka_depends_zip = Utils.joinpath(
nuitka_app_dir,
Utils.basename(depends_url)
)
if not Utils.isFile(nuitka_depends_zip):
Tracing.printLine("""\
Nuitka will make use of Dependency Walker (http://dependencywalker.com) tool
to analyze the dependencies of Python extension modules. Is it OK to download
and put it in APPDATA (no installer needed, cached, one time question).""")
reply = raw_input("Proceed and download? [Yes]/No ")
if reply.lower() in ("no", 'n'):
sys.exit("Nuitka does not work in --standalone on Windows without.")
info("Downloading '%s'" % depends_url)
urlretrieve(
depends_url,
nuitka_depends_zip
)
nuitka_depends_dir = Utils.joinpath(
nuitka_app_dir,
Utils.getArchitecture()
)
if not Utils.isDir(nuitka_depends_dir):
os.makedirs(nuitka_depends_dir)
depends_exe = os.path.join(
nuitka_depends_dir,
"depends.exe"
)
if not Utils.isFile(depends_exe):
info("Extracting to '%s'" % depends_exe)
import zipfile
try:
depends_zip = zipfile.ZipFile(nuitka_depends_zip)
depends_zip.extractall(nuitka_depends_dir)
except Exception: # Catching anything zip throws, pylint:disable=W0703
info("Problem with the downloaded zip file, deleting it.")
Utils.deleteFile(depends_exe, must_exist = False)
Utils.deleteFile(nuitka_depends_zip, must_exist = True)
sys.exit(
"Error, need '%s' as extracted from '%s'." % (
depends_exe,
depends_url
)
)
assert Utils.isFile(depends_exe)
return depends_exe
|
wfxiang08/Nuitka
|
nuitka/freezer/DependsExe.py
|
Python
|
apache-2.0
| 3,504
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is a CoGroupByKey load test with a Synthetic Source. In addition to the
standard input options, the following options are supported:
* project (optional) - the GCP project used when saving metrics
to BigQuery (when running on the Dataflow runner the runner's
project must be specified),
* publish_to_big_query - whether metrics should be published to BigQuery,
* metrics_namespace (optional) - name of the BigQuery dataset where metrics
will be stored,
* metrics_table (optional) - name of the BigQuery table where metrics
will be stored,
* input_options - options for the main Synthetic Source,
* co_input_options - options for the co-input Synthetic Source,
* iterations - number of iterations over the per-key-grouped values to
perform (default: 1).
Example test run on DirectRunner:
python setup.py nosetests \
--test-pipeline-options="
--project=big-query-project
--publish_to_big_query=true
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--iterations=1
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
or:
./gradlew -PloadTest.args='
--publish_to_big_query=true
--project=...
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--iterations=1
--input_options=\'
{"num_records": 1,
"key_size": 1,
"value_size":1,
"bundle_size_distribution_type": "const",
"bundle_size_distribution_param": 1,
"force_initial_num_bundles": 1}\'
--co_input_options=\'{
"num_records": 1,
"key_size": 1,
"value_size": 1,
"bundle_size_distribution_type": "const",
"bundle_size_distribution_param": 1,
"force_initial_num_bundles":0}\'
--runner=DirectRunner' \
-PloadTest.mainClass=
apache_beam.testing.load_tests.co_group_by_key_test \
-Prunner=DirectRunner :sdks:python:apache_beam:testing:load-tests:run
To run the test on another runner (e.g. Dataflow):
python setup.py nosetests \
--test-pipeline-options="
--runner=TestDataflowRunner
--project=...
--staging_location=gs://...
--temp_location=gs://...
--sdk_location=./dist/apache-beam-x.x.x.dev0.tar.gz
--publish_to_big_query=true
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--iterations=1
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
or:
./gradlew -PloadTest.args='
--publish_to_big_query=true
--project=...
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--iterations=1
--temp_location=gs://...
--input_options=\'
{"num_records": 1,
"key_size": 1,
"value_size":1,
"bundle_size_distribution_type": "const",
"bundle_size_distribution_param": 1,
"force_initial_num_bundles": 1}\'
--co_input_options=\'{
"num_records": 1,
"key_size": 1,
"value_size": 1,
"bundle_size_distribution_type": "const",
"bundle_size_distribution_param": 1,
"force_initial_num_bundles":0}\'
--runner=TestDataflowRunner' \
-PloadTest.mainClass=
apache_beam.testing.load_tests.co_group_by_key_test \
-Prunner=TestDataflowRunner :sdks:python:apache_beam:testing:load-tests:run
"""
from __future__ import absolute_import
import json
import logging
import os
import unittest
import apache_beam as beam
from apache_beam.testing import synthetic_pipeline
from apache_beam.testing.load_tests.load_test import LoadTest
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
INPUT_TAG = 'pc1'
CO_INPUT_TAG = 'pc2'
load_test_enabled = False
if os.environ.get('LOAD_TEST_ENABLED') == 'true':
load_test_enabled = True
@unittest.skipIf(not load_test_enabled, 'Enabled only for phrase triggering.')
class CoGroupByKeyTest(LoadTest):
def setUp(self):
super(CoGroupByKeyTest, self).setUp()
self.co_input_options = json.loads(
self.pipeline.get_option('co_input_options'))
self.iterations = self.get_option_or_default('iterations', 1)
class _UngroupAndReiterate(beam.DoFn):
def process(self, element, iterations):
values = element[1]
inputs = values.get(INPUT_TAG)
co_inputs = values.get(CO_INPUT_TAG)
for i in range(iterations):
for value in inputs:
if i == iterations - 1:
yield value
for value in co_inputs:
if i == iterations - 1:
yield value
def testCoGroupByKey(self):
pc1 = (self.pipeline
| 'Read ' + INPUT_TAG >> beam.io.Read(
synthetic_pipeline.SyntheticSource(
self.parseTestPipelineOptions(self.input_options)))
| 'Make ' + INPUT_TAG + ' iterable' >> beam.Map(lambda x: (x, x))
| 'Measure time: Start pc1' >> beam.ParDo(
MeasureTime(self.metrics_namespace))
)
pc2 = (self.pipeline
| 'Read ' + CO_INPUT_TAG >> beam.io.Read(
synthetic_pipeline.SyntheticSource(
self.parseTestPipelineOptions(self.co_input_options)))
| 'Make ' + CO_INPUT_TAG + ' iterable' >> beam.Map(
lambda x: (x, x))
| 'Measure time: Start pc2' >> beam.ParDo(
MeasureTime(self.metrics_namespace))
)
# pylint: disable=expression-not-assigned
({INPUT_TAG: pc1, CO_INPUT_TAG: pc2}
| 'CoGroupByKey ' >> beam.CoGroupByKey()
| 'Consume Joined Collections' >> beam.ParDo(self._UngroupAndReiterate(),
self.iterations)
| 'Measure time: End' >> beam.ParDo(MeasureTime(self.metrics_namespace))
)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
RyanSkraba/beam
|
sdks/python/apache_beam/testing/load_tests/co_group_by_key_test.py
|
Python
|
apache-2.0
| 7,476
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import os
import six.moves.cPickle as pickle
from traits.api import Range, Instance, Bool, Button, Any
from pychron.core.helpers.isotope_utils import sort_isotopes
from pychron.core.ui.thread import Thread
from pychron.graph.graph import Graph
from pychron.managers.manager import Manager
from pychron.paths import paths
from pychron.pychron_constants import NULL_STR
from pychron.spectrometer.base_detector import BaseDetector
from pychron.spectrometer.base_spectrometer import NoIntensityChange
from pychron.spectrometer.ion_optics.coincidence_config import CoincidenceConfig
from pychron.spectrometer.ion_optics.peak_center_config import PeakCenterConfigurer
from pychron.spectrometer.jobs.coincidence import Coincidence
from pychron.spectrometer.jobs.peak_center import PeakCenter, AccelVoltagePeakCenter
class MFTableCTX(object):
def __init__(self, manager, mftable):
self._manager = manager
self._opath = manager.spectrometer.magnet.get_field_table_path()
self._mftable = mftable
def __enter__(self):
self._manager.set_mftable(self._mftable)
def __exit__(self, exc_type, exc_val, exc_tb):
self._manager.set_mftable(self._opath)
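# Illustrative sketch (not from the original source): MFTableCTX is meant to
# be used through IonOpticsManager.mftable_ctx() below to switch field tables
# temporarily and restore the original one on exit, roughly:
#
#   with ion_optics.mftable_ctx('ic_mftable'):
#       ...  # positioning done against the temporary table
#   # the previously active table is restored here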
class IonOpticsManager(Manager):
reference_detector = Instance(BaseDetector)
reference_isotope = Any
magnet_dac = Range(0.0, 6.0)
graph = Instance(Graph)
peak_center_button = Button("Peak Center")
stop_button = Button("Stop")
alive = Bool(False)
spectrometer = Any
peak_center = Instance(PeakCenter)
coincidence = Instance(Coincidence)
peak_center_config = Instance(PeakCenterConfigurer)
# coincidence_config = Instance(CoincidenceConfig)
canceled = False
peak_center_result = None
_centering_thread = None
def close(self):
self.cancel_peak_center()
def cancel_peak_center(self):
self.alive = False
self.canceled = True
self.peak_center.canceled = True
self.peak_center.stop()
self.info("peak center canceled")
def get_mass(self, isotope_key):
spec = self.spectrometer
molweights = spec.molecular_weights
return molweights[isotope_key]
def mftable_ctx(self, mftable):
return MFTableCTX(self, mftable)
def set_mftable(self, name=None):
"""
if name is None, set to the default mftable located at setupfiles/spectrometer/mftable.csv
:param name: name of the field table to use
:return:
"""
if name == "ic_mftable":
self.spectrometer.use_deflection_correction = False
else:
self.spectrometer.use_deflection_correction = True
self.spectrometer.magnet.set_mftable(name)
def get_position(self, *args, **kw):
kw["update_isotopes"] = False
return self._get_position(*args, **kw)
def av_position(self, pos, detector, *args, **kw):
av = self._get_av_position(pos, detector)
self.spectrometer.source.set_hv(av)
self.info("positioning {} ({}) on {}".format(pos, av, detector))
return av
def position(self, pos, detector, use_af_demag=True, *args, **kw):
dac = self._get_position(pos, detector, *args, **kw)
mag = self.spectrometer.magnet
self.info("positioning {} ({}) on {}".format(pos, dac, detector))
return mag.set_dac(dac, use_af_demag=use_af_demag)
def do_coincidence_scan(self, new_thread=True):
if new_thread:
t = Thread(name="ion_optics.coincidence", target=self._coincidence)
t.start()
self._centering_thread = t
def setup_coincidence(self):
pcc = self.coincidence_config
pcc.dac = self.spectrometer.magnet.dac
info = pcc.edit_traits()
if not info.result:
return
detector = pcc.detector
isotope = pcc.isotope
detectors = [d for d in pcc.additional_detectors]
# integration_time = pcc.integration_time
if pcc.use_nominal_dac:
center_dac = self.get_position(isotope, detector)
elif pcc.use_current_dac:
center_dac = self.spectrometer.magnet.dac
else:
center_dac = pcc.dac
# self.spectrometer.save_integration()
# self.spectrometer.set_integration(integration_time)
cs = Coincidence(
spectrometer=self.spectrometer,
center_dac=center_dac,
reference_detector=detector,
reference_isotope=isotope,
additional_detectors=detectors,
)
self.coincidence = cs
return cs
def get_center_dac(self, det, iso):
spec = self.spectrometer
det = spec.get_detector(det)
molweights = spec.molecular_weights
mass = molweights[iso]
dac = spec.magnet.map_mass_to_dac(mass, det.name)
# correct for deflection
return spec.correct_dac(det, dac)
def do_peak_center(
self,
save=True,
confirm_save=False,
warn=False,
new_thread=True,
message="",
on_end=None,
timeout=None,
):
self.debug("doing pc")
self.canceled = False
self.alive = True
self.peak_center_result = None
args = (save, confirm_save, warn, message, on_end, timeout)
if new_thread:
t = Thread(
name="ion_optics.peak_center", target=self._peak_center, args=args
)
t.start()
self._centering_thread = t
return t
else:
self._peak_center(*args)
def setup_peak_center(
self,
detector=None,
isotope=None,
integration_time=1.04,
directions="Increase",
center_dac=None,
name="",
show_label=False,
window=0.015,
step_width=0.0005,
min_peak_height=1.0,
percent=80,
deconvolve=None,
use_interpolation=False,
interpolation_kind="linear",
dac_offset=None,
calculate_all_peaks=False,
config_name=None,
use_configuration_dac=True,
new=False,
update_others=True,
plot_panel=None,
):
if deconvolve is None:
n_peaks, select_peak = 1, 1
use_dac_offset = False
if dac_offset is not None:
use_dac_offset = True
spec = self.spectrometer
pcconfig = self.peak_center_config
spec.save_integration()
self.debug(
"setup peak center. detector={}, isotope={}".format(detector, isotope)
)
pcc = None
dataspace = "dac"
use_accel_voltage = False
use_extend = False
self._setup_config()
if config_name:
pcconfig.load()
pcconfig.active_name = config_name
pcc = pcconfig.active_item
elif detector is None or isotope is None:
self.debug("ask user for peak center configuration")
pcconfig.load()
if config_name:
pcconfig.active_name = config_name
info = pcconfig.edit_traits()
if not info.result:
return
else:
pcc = pcconfig.active_item
if pcc:
if not detector:
detector = pcc.active_detectors
if not isotope:
isotope = pcc.isotope
directions = pcc.directions
integration_time = pcc.integration_time
dataspace = pcc.dataspace
use_accel_voltage = pcc.use_accel_voltage
use_extend = pcc.use_extend
window = pcc.window
min_peak_height = pcc.min_peak_height
step_width = pcc.step_width
percent = pcc.percent
use_interpolation = pcc.use_interpolation
interpolation_kind = pcc.interpolation_kind
n_peaks = pcc.n_peaks
select_peak = pcc.select_n_peak
use_dac_offset = pcc.use_dac_offset
dac_offset = pcc.dac_offset
calculate_all_peaks = pcc.calculate_all_peaks
update_others = pcc.update_others
if not pcc.use_mftable_dac and center_dac is None and use_configuration_dac:
center_dac = pcc.dac
spec.set_integration_time(integration_time)
period = int(integration_time * 1000 * 0.9)
if not isinstance(detector, (tuple, list)):
detector = (detector,)
ref = spec.get_detector(detector[0])
if center_dac is None:
center_dac = self.get_center_dac(ref, isotope)
# if mass:
# mag = spec.magnet
# center_dac = mag.map_mass_to_dac(mass, ref)
# low = mag.map_mass_to_dac(mass - window / 2., ref)
# high = mag.map_mass_to_dac(mass + window / 2., ref)
# window = high - low
# step_width = abs(mag.map_mass_to_dac(mass + step_width, ref) - center_dac)
if len(detector) > 1:
ad = detector[1:]
else:
ad = []
pc = self.peak_center
klass = AccelVoltagePeakCenter if use_accel_voltage else PeakCenter
if (
not pc
or new
or (use_accel_voltage and not isinstance(pc, AccelVoltagePeakCenter))
):
pc = klass()
pc.trait_set(
center_dac=center_dac,
dataspace=dataspace,
use_accel_voltage=use_accel_voltage,
use_extend=use_extend,
period=period,
window=window,
percent=percent,
min_peak_height=min_peak_height,
step_width=step_width,
directions=directions,
reference_detector=ref,
additional_detectors=ad,
reference_isotope=isotope,
spectrometer=spec,
show_label=show_label,
use_interpolation=use_interpolation,
interpolation_kind=interpolation_kind,
n_peaks=n_peaks,
select_peak=select_peak,
use_dac_offset=use_dac_offset,
dac_offset=dac_offset,
calculate_all_peaks=calculate_all_peaks,
update_others=update_others,
)
graph = pc.graph
graph.name = name
if plot_panel:
plot_panel.set_peak_center_graph(graph)
self.peak_center = pc
self.reference_detector = ref
self.reference_isotope = isotope
return self.peak_center
def backup_mftable(self):
self.spectrometer.magnet.field_table.backup()
# private
def _setup_config(self):
config = self.peak_center_config
config.detectors = self.spectrometer.detector_names
keys = list(self.spectrometer.molecular_weights.keys())
config.isotopes = sort_isotopes(keys)
config.integration_times = self.spectrometer.integration_times
# def _get_peak_center_config(self, config_name):
# if config_name is None:
# config_name = 'default'
#
# config = self.peak_center_config.get(config_name)
#
# config.detectors = self.spectrometer.detectors_names
# if config.detector_name:
# config.detector = next((di for di in config.detectors if di == config.detector_name), None)
#
# if not config.detector:
# config.detector = config.detectors[0]
#
# keys = self.spectrometer.molecular_weights.keys()
# config.isotopes = sort_isotopes(keys)
# config.integration_times = self.spectrometer.integration_times
# return config
# def _timeout_func(self, timeout, evt):
# st = time.time()
# while not evt.is_set():
# if not self.alive:
# break
#
# if time.time() - st > timeout:
# self.warning('Peak Centering timed out after {}s'.format(timeout))
# self.cancel_peak_center()
# break
#
# time.sleep(0.01)
def _peak_center(self, save, confirm_save, warn, message, on_end, timeout):
pc = self.peak_center
spec = self.spectrometer
ref = self.reference_detector
isotope = self.reference_isotope
try:
center_value = pc.get_peak_center()
except NoIntensityChange as e:
self.warning("Peak Centering failed. No Intensity change. {}".format(e))
center_value = None
self.peak_center_result = center_value
if center_value:
det = spec.get_detector(ref)
if pc.use_accel_voltage:
args = ref, isotope, center_value
else:
dac_a = spec.uncorrect_dac(det, center_value)
self.info("dac uncorrected for HV and deflection {}".format(dac_a))
args = ref, isotope, dac_a
self.adjusted_peak_center_result = dac_a
self.info("new center pos {} ({}) @ {}".format(*args))
if save:
if confirm_save:
msg = "Update Magnet Field Table with new peak center- {} ({}) @ RefDetUnits= {}".format(
*args
)
if pc.use_accel_voltage:
msg = "Update Accel Voltage Table with new peak center- {} ({}) @ RefDetUnits= {}".format(
*args
)
save = self.confirmation_dialog(msg)
if save:
if pc.use_accel_voltage:
spec.source.update_field_table(
det, isotope, center_value, message
)
else:
spec.magnet.update_field_table(
det, isotope, dac_a, message, update_others=pc.update_others
)
spec.magnet.set_dac(dac_a)
elif not self.canceled:
msg = "centering failed"
if warn:
self.warning_dialog(msg)
self.warning(msg)
# needs to be called on the main thread to properly update
# the menubar actions. alive=False enables IonOptics>Peak Center
# d = lambda:self.trait_set(alive=False)
# still necessary with qt? and tasks
if on_end:
on_end()
self.trait_set(alive=False)
self.spectrometer.restore_integration()
def _get_av_position(self, pos, detector, update_isotopes=True):
self.debug("AV POSITION {} {}".format(pos, detector))
spec = self.spectrometer
if not isinstance(detector, str):
detector = detector.name
if isinstance(pos, str):
try:
pos = float(pos)
except ValueError:
# pos is isotope
if update_isotopes:
# if the pos is an isotope then update the detectors
spec.update_isotopes(pos, detector)
pos = self.get_mass(pos)
# pos is mass i.e 39.962
av = spec.source.map_mass_to_hv(pos, detector)
return av
def _get_position(self, pos, detector, use_dac=False, update_isotopes=True):
"""
pos can be str or float
"Ar40", "39.962", 39.962
to set in DAC space set use_dac=True
"""
if pos == NULL_STR:
return
spec = self.spectrometer
mag = spec.magnet
if isinstance(detector, str):
det = spec.get_detector(detector)
else:
det = detector
self.debug("detector {}".format(det))
if use_dac:
dac = pos
else:
self.debug("POSITION {} {}".format(pos, detector))
if isinstance(pos, str):
try:
pos = float(pos)
except ValueError:
# pos is isotope
if update_isotopes:
# if the pos is an isotope then update the detectors
spec.update_isotopes(pos, detector)
pos = self.get_mass(pos)
mag.mass_change(pos)
# pos is mass i.e 39.962
print("det is", det)
dac = mag.map_mass_to_dac(pos, det.name)
dac = spec.correct_dac(det, dac)
return dac
def _coincidence(self):
self.coincidence.get_peak_center()
self.info("coincidence finished")
self.spectrometer.restore_integration()
# ===============================================================================
# handler
# ===============================================================================
def _coincidence_config_default(self):
config = None
p = os.path.join(paths.hidden_dir, "coincidence_config.p")
if os.path.isfile(p):
try:
with open(p) as rfile:
config = pickle.load(rfile)
config.detectors = dets = self.spectrometer.detectors
config.detector = next(
(di for di in dets if di.name == config.detector_name), None
)
except Exception as e:
print("coincidence config", e)
if config is None:
config = CoincidenceConfig()
config.detectors = self.spectrometer.detectors
config.detector = config.detectors[0]
keys = list(self.spectrometer.molecular_weights.keys())
config.isotopes = sort_isotopes(keys)
return config
def _peak_center_config_default(self):
config = PeakCenterConfigurer()
return config
# def _peak_center_config_default(self):
# config = None
# p = os.path.join(paths.hidden_dir, 'peak_center_config.p')
# if os.path.isfile(p):
# try:
# with open(p) as rfile:
# config = pickle.load(rfile)
# config.detectors = dets = self.spectrometer.detectors
# config.detector = next((di for di in dets if di.name == config.detector_name), None)
#
# except Exception, e:
# print 'peak center config', e
#
# if config is None:
# config = PeakCenterConfig()
# config.detectors = self.spectrometer.detectors
# config.detector = config.detectors[0]
#
# keys = self.spectrometer.molecular_weights.keys()
# config.isotopes = sort_isotopes(keys)
#
# return config
if __name__ == "__main__":
io = IonOpticsManager()
io.configure_traits()
# ============= EOF =============================================
# def _graph_factory(self):
# g = Graph(
# container_dict=dict(padding=5, bgcolor='gray'))
# g.new_plot()
# return g
#
# def _graph_default(self):
# return self._graph_factory()
# def _detector_default(self):
# return self.detectors[0]
# def peak_center_config_view(self):
# v = View(Item('detector', editor=EnumEditor(name='detectors')),
# Item('isotope'),
# Item('dac'),
# Item('directions'),
# buttons=['OK', 'Cancel'],
# kind='livemodal',
# title='Peak Center'
# )
# return v
# def graph_view(self):
# v = View(Item('graph', show_label=False, style='custom'),
# width=300,
# height=500
# )
# return v
# def peak_center_view(self):
# v = View(Item('graph', show_label=False, style='custom'),
# width=300,
# height=500,
# handler=self.handler_klass
# )
# return v
# def traits_view(self):
# v = View(Item('magnet_dac'),
# Item('peak_center_button',
# enabled_when='not alive',
# show_label=False),
# Item('stop_button', enabled_when='alive',
# show_label=False),
#
# Item('graph', show_label=False, style='custom'),
#
#
# resizable=True)
# return v
# def _correct_dac(self, det, dac):
# # dac is in axial units
#
# # convert to detector
# dac *= det.relative_position
#
# '''
# convert to axial detector
# dac_a= dac_d / relpos
#
# relpos==dac_detA/dac_axial
#
# '''
# #correct for deflection
# dev = det.get_deflection_correction()
#
# dac += dev
#
# # #correct for hv
# dac *= self.spectrometer.get_hv_correction(current=True)
# return dac
|
USGSDenverPychron/pychron
|
pychron/spectrometer/ion_optics/ion_optics_manager.py
|
Python
|
apache-2.0
| 21,795
|
# Copyright 2016 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import mock
class TestAppEngineHandler(unittest.TestCase):
PROJECT = "PROJECT"
def _get_target_class(self):
from google.cloud.logging.handlers.app_engine import AppEngineHandler
return AppEngineHandler
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor(self):
from google.cloud.logging.handlers.app_engine import _GAE_PROJECT_ENV_FLEX
from google.cloud.logging.handlers.app_engine import _GAE_PROJECT_ENV_STANDARD
from google.cloud.logging.handlers.app_engine import _GAE_SERVICE_ENV
from google.cloud.logging.handlers.app_engine import _GAE_VERSION_ENV
client = mock.Mock(project=self.PROJECT, spec=["project"])
# Verify that project/service/version are picked up from the
# environment.
with mock.patch(
"os.environ",
new={
_GAE_PROJECT_ENV_STANDARD: "test_project",
_GAE_SERVICE_ENV: "test_service",
_GAE_VERSION_ENV: "test_version",
},
):
handler = self._make_one(client, transport=_Transport)
self.assertIs(handler.client, client)
self.assertEqual(handler.resource.type, "gae_app")
self.assertEqual(handler.resource.labels["project_id"], "test_project")
self.assertEqual(handler.resource.labels["module_id"], "test_service")
self.assertEqual(handler.resource.labels["version_id"], "test_version")
# Verify that _GAE_PROJECT_ENV_FLEX environment variable takes
# precedence over _GAE_PROJECT_ENV_STANDARD.
with mock.patch(
"os.environ",
new={
_GAE_PROJECT_ENV_FLEX: "test_project_2",
_GAE_PROJECT_ENV_STANDARD: "test_project_should_be_overridden",
_GAE_SERVICE_ENV: "test_service_2",
_GAE_VERSION_ENV: "test_version_2",
},
):
handler = self._make_one(client, transport=_Transport)
self.assertIs(handler.client, client)
self.assertEqual(handler.resource.type, "gae_app")
self.assertEqual(handler.resource.labels["project_id"], "test_project_2")
self.assertEqual(handler.resource.labels["module_id"], "test_service_2")
self.assertEqual(handler.resource.labels["version_id"], "test_version_2")
def test_emit(self):
client = mock.Mock(project=self.PROJECT, spec=["project"])
handler = self._make_one(client, transport=_Transport)
gae_resource = handler.get_gae_resource()
gae_labels = handler.get_gae_labels()
trace = None
logname = "app"
message = "hello world"
record = logging.LogRecord(logname, logging, None, None, message, None, None)
handler.emit(record)
self.assertIs(handler.transport.client, client)
self.assertEqual(handler.transport.name, logname)
self.assertEqual(
handler.transport.send_called_with,
(record, message, gae_resource, gae_labels, trace),
)
def _get_gae_labels_helper(self, trace_id):
get_trace_patch = mock.patch(
"google.cloud.logging.handlers.app_engine.get_trace_id",
return_value=trace_id,
)
client = mock.Mock(project=self.PROJECT, spec=["project"])
# The handler actually calls ``get_gae_labels()``.
with get_trace_patch as mock_get_trace:
handler = self._make_one(client, transport=_Transport)
gae_labels = handler.get_gae_labels()
self.assertEqual(mock_get_trace.mock_calls, [mock.call()])
return gae_labels
def test_get_gae_labels_with_label(self):
from google.cloud.logging.handlers import app_engine
trace_id = "test-gae-trace-id"
gae_labels = self._get_gae_labels_helper(trace_id)
expected_labels = {app_engine._TRACE_ID_LABEL: trace_id}
self.assertEqual(gae_labels, expected_labels)
def test_get_gae_labels_without_label(self):
gae_labels = self._get_gae_labels_helper(None)
self.assertEqual(gae_labels, {})
class _Transport(object):
def __init__(self, client, name):
self.client = client
self.name = name
def send(self, record, message, resource, labels, trace):
self.send_called_with = (record, message, resource, labels, trace)
|
dhermes/google-cloud-python
|
logging/tests/unit/handlers/test_app_engine.py
|
Python
|
apache-2.0
| 5,042
|
# Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import recipe_test_api
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
class LegacyAnnotationTestApi(recipe_test_api.RecipeTestApi):
"""For legacy `allow_subannotations` feature, the step status was deciced by
the command's return code (i.e. is zero or not). However, since the
`legacy_annotation` module runs the command as a sub-build/sub-luciexe.
The step status is now the same as the status of the result build. This
test api provides properties which represent different step status and
help populate the sub-build placeholder.
"""
@property
def success_step(self):
"""Returns a StepTestData that indicating a succeeding step"""
return self.m.step.sub_build(build_pb2.Build(status=common_pb2.SUCCESS))
@property
def failure_step(self):
"""Returns a StepTestData that fails the step and raises `step.StepFailure`.
"""
ret = self.m.step.sub_build(build_pb2.Build(status=common_pb2.FAILURE))
ret.retcode = 1
return ret
@property
def infra_failure_step(self):
"""Returns a StepTestData that fails the step and raise `step.InfraFailure`.
"""
ret = self.m.step.sub_build(
build_pb2.Build(status=common_pb2.INFRA_FAILURE))
ret.retcode = 1
return ret
@recipe_test_api.mod_test_data
@staticmethod
def simulate_kitchen():
"""Simulate Kitchen behavior in test instead of bbagent/luciexe behavior.
"""
return True
|
luci/recipes-py
|
recipe_modules/legacy_annotation/test_api.py
|
Python
|
apache-2.0
| 1,706
|
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import itertools
import uuid
import mock
from mox3 import mox
from oslo_policy import policy as oslo_policy
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import fields
from nova import policy
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_virtual_interface
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': 'fake-uuid',
'network_info': '[]',
}
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
policy.reset()
def test_check_policy(self):
self.mox.StubOutWithMock(policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
api.check_policy(self.context, 'get_all')
def test_skip_policy(self):
policy.reset()
rules = {'network:get_all': '!'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
api = network.API()
self.assertRaises(exception.PolicyNotAuthorized,
api.get_all, self.context)
api = network.API(skip_policy_check=True)
api.get_all(self.context)
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.network_api = network.API()
self.context = context.RequestContext('fake-user',
'fake-project')
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all(self, mock_get_all):
mock_get_all.return_value = mock.sentinel.get_all
self.assertEqual(mock.sentinel.get_all,
self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only=True)
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all_liberal(self, mock_get_all):
self.flags(network_manager='nova.network.manager.FlatDHCPManager')
mock_get_all.return_value = mock.sentinel.get_all
self.assertEqual(mock.sentinel.get_all,
self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only="allow_none")
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all_no_networks(self, mock_get_all):
mock_get_all.side_effect = exception.NoNetworksFound
self.assertEqual([], self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only=True)
@mock.patch('nova.objects.Network.get_by_uuid')
def test_get(self, mock_get):
mock_get.return_value = mock.sentinel.get_by_uuid
self.assertEqual(mock.sentinel.get_by_uuid,
self.network_api.get(self.context, 'fake-uuid'))
@mock.patch('nova.objects.Network.get_by_id')
@mock.patch('nova.db.virtual_interface_get_by_instance')
def test_get_vifs_by_instance(self, mock_get_by_instance,
mock_get_by_id):
mock_get_by_instance.return_value = [
dict(test_virtual_interface.fake_vif,
network_id=123)]
mock_get_by_id.return_value = objects.Network()
mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
vifs = self.network_api.get_vifs_by_instance(self.context,
instance)
self.assertEqual(1, len(vifs))
self.assertEqual(123, vifs[0].network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
mock_get_by_instance.assert_called_once_with(
self.context, str(mock.sentinel.inst_uuid), use_slave=False)
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
@mock.patch('nova.objects.Network.get_by_id')
@mock.patch('nova.db.virtual_interface_get_by_address')
def test_get_vif_by_mac_address(self, mock_get_by_address,
mock_get_by_id):
mock_get_by_address.return_value = dict(
test_virtual_interface.fake_vif, network_id=123)
mock_get_by_id.return_value = objects.Network(
uuid=mock.sentinel.network_uuid)
vif = self.network_api.get_vif_by_mac_address(self.context,
mock.sentinel.mac)
self.assertEqual(123, vif.network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
mock_get_by_address.assert_called_once_with(self.context,
mock.sentinel.mac)
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
def test_allocate_for_instance_handles_macs_passed(self):
# If a macs argument is supplied to the 'nova-network' API, it is just
# ignored. This test checks that the call down to the rpcapi layer
# doesn't pass macs down: nova-network doesn't support hypervisor
# mac address limits (today anyhow).
macs = set(['ab:cd:ef:01:23:34'])
self.mox.StubOutWithMock(
self.network_api.network_rpcapi, "allocate_for_instance")
kwargs = dict(zip(['host', 'instance_id', 'project_id',
'requested_networks', 'rxtx_factor', 'vpn', 'macs',
'dhcp_options'],
itertools.repeat(mox.IgnoreArg())))
self.network_api.network_rpcapi.allocate_for_instance(
mox.IgnoreArg(), **kwargs).AndReturn([])
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 0
instance = objects.Instance(id=1, uuid='uuid', project_id='project_id',
host='host', system_metadata={},
flavor=flavor)
self.network_api.allocate_for_instance(
self.context, instance, 'vpn', 'requested_networks', macs=macs)
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
new_instance = objects.Instance(uuid=FAKE_UUID)
def fake_associate(*args, **kwargs):
return orig_instance_uuid
self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
fake_associate)
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None,
use_slave=None):
return fake_instance.fake_db_instance(uuid=instance_uuid)
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_get_nw_info(ctxt, instance):
class FakeNWInfo(object):
def json(self):
pass
return FakeNWInfo()
self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
expected_updated_instances = [new_instance.uuid,
orig_instance_uuid]
else:
expected_updated_instances = [new_instance.uuid]
def fake_instance_info_cache_update(context, instance_uuid, cache):
self.assertEqual(instance_uuid,
expected_updated_instances.pop())
return fake_info_cache
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
def fake_update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
update_cells=True):
return
self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
fake_update_instance_cache_with_nw_info)
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
'10.0.0.2')
def test_associate_preassociated_floating_ip(self):
self._do_test_associate_floating_ip('orig-uuid')
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
def test_get_floating_ip_invalid_id(self):
self.assertRaises(exception.InvalidID,
self.network_api.get_floating_ip,
self.context, '123zzz')
@mock.patch('nova.objects.FloatingIP.get_by_id')
def test_get_floating_ip(self, mock_get):
floating = mock.sentinel.floating
mock_get.return_value = floating
self.assertEqual(floating,
self.network_api.get_floating_ip(self.context, 123))
mock_get.assert_called_once_with(self.context, 123)
@mock.patch('nova.objects.FloatingIP.get_pool_names')
def test_get_floating_ip_pools(self, mock_get):
pools = ['foo', 'bar']
mock_get.return_value = pools
self.assertEqual(pools,
self.network_api.get_floating_ip_pools(
self.context))
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
floating = mock.sentinel.floating
mock_get.return_value = floating
self.assertEqual(floating,
self.network_api.get_floating_ip_by_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context,
mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
floatings = mock.sentinel.floating_ips
mock_get.return_value = floatings
self.assertEqual(floatings,
self.network_api.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context,
self.context.project_id)
def _stub_migrate_instance_calls(self, method, multi_host, info):
fake_flavor = flavors.get_default_flavor()
fake_flavor['rxtx_factor'] = 1.21
fake_instance = objects.Instance(
uuid=uuid.uuid4().hex,
project_id='fake_project_id',
instance_type_id=fake_flavor['id'],
flavor=fake_flavor,
system_metadata={})
fake_migration = {'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest'}
def fake_mig_inst_method(*args, **kwargs):
info['kwargs'] = kwargs
def fake_get_multi_addresses(*args, **kwargs):
return multi_host, ['fake_float1', 'fake_float2']
self.stubs.Set(network_rpcapi.NetworkAPI, method,
fake_mig_inst_method)
self.stubs.Set(self.network_api, '_get_multi_addresses',
fake_get_multi_addresses)
expected = {'instance_uuid': fake_instance.uuid,
'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest',
'rxtx_factor': 1.21,
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
def test_migrate_instance_start_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_start_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', False, info)
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', False, info)
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_is_multi_host_instance_has_no_fixed_ip(self):
def fake_fixed_ip_get_by_instance(ctxt, uuid):
raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = objects.Instance(uuid=FAKE_UUID)
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertFalse(result)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
fip_get):
network = objects.Network(
id=123, project_id=None,
multi_host=is_multi_host)
fip_get.return_value = [
objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
floating_ips=objects.FloatingIPList())]
instance = objects.Instance(uuid=FAKE_UUID)
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_no_project_id_multi(self):
self._test_is_multi_host_network_has_no_project_id(True)
def test_is_multi_host_network_has_no_project_id_non_multi(self):
self._test_is_multi_host_network_has_no_project_id(False)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def _test_is_multi_host_network_has_project_id(self, is_multi_host,
fip_get):
network = objects.Network(
id=123, project_id=self.context.project_id,
multi_host=is_multi_host)
fip_get.return_value = [
objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
floating_ips=objects.FloatingIPList())]
instance = objects.Instance(uuid=FAKE_UUID)
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_project_id_multi(self):
self._test_is_multi_host_network_has_project_id(True)
def test_is_multi_host_network_has_project_id_non_multi(self):
self._test_is_multi_host_network_has_project_id(False)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate_project(self, mock_disassociate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, project=None)
mock_disassociate.assert_called_once_with(self.context, net_obj.id,
host=False, project=True)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate_host(self, mock_disassociate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, host=None)
mock_disassociate.assert_called_once_with(self.context, net_obj.id,
host=True, project=False)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.associate')
def test_network_associate_project(self, mock_associate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
project = mock.sentinel.project
self.network_api.associate(self.context, FAKE_UUID, project=project)
mock_associate.assert_called_once_with(self.context, project,
network_id=net_obj.id,
force=True)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.save')
def test_network_associate_host(self, mock_save, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
host = str(mock.sentinel.host)
self.network_api.associate(self.context, FAKE_UUID, host=host)
mock_save.assert_called_once_with()
self.assertEqual(host, net_obj.host)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate(self, mock_disassociate, mock_get):
mock_get.return_value = objects.Network(context=self.context, id=123)
self.network_api.disassociate(self.context, FAKE_UUID)
mock_disassociate.assert_called_once_with(self.context, 123,
project=True, host=True)
def _test_refresh_cache(self, method, *args, **kwargs):
# This test verifies that no call to get_instance_nw_info() is made
# from the @refresh_cache decorator for the tested method.
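        # Roughly speaking (and as exercised by TestUpdateInstanceCache
        # below), @refresh_cache caches whatever NetworkInfo the wrapped call
        # returns and only falls back to _get_instance_nw_info() when that
        # call returns None; here the stubbed rpcapi method returns a
        # NetworkInfo, so no extra lookup should happen.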
with test.nested(
mock.patch.object(self.network_api.network_rpcapi, method),
mock.patch.object(self.network_api.network_rpcapi,
'get_instance_nw_info'),
mock.patch.object(network_model.NetworkInfo, 'hydrate'),
mock.patch.object(objects.InstanceInfoCache, 'save'),
) as (
method_mock, nwinfo_mock, hydrate_mock, save_mock
):
nw_info = network_model.NetworkInfo([])
method_mock.return_value = nw_info
hydrate_mock.return_value = nw_info
getattr(self.network_api, method)(*args, **kwargs)
hydrate_mock.assert_called_once_with(nw_info)
self.assertFalse(nwinfo_mock.called)
def test_allocate_for_instance_refresh_cache(self):
instance = fake_instance.fake_instance_obj(self.context)
vpn = 'fake-vpn'
requested_networks = 'fake-networks'
self._test_refresh_cache('allocate_for_instance', self.context,
instance, vpn, requested_networks)
def test_add_fixed_ip_to_instance_refresh_cache(self):
instance = fake_instance.fake_instance_obj(self.context)
network_id = 'fake-network-id'
self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
instance, network_id)
def test_remove_fixed_ip_from_instance_refresh_cache(self):
instance = fake_instance.fake_instance_obj(self.context)
address = 'fake-address'
self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
instance, address)
@mock.patch('nova.db.fixed_ip_get_by_address')
def test_get_fixed_ip_by_address(self, fip_get):
fip_get.return_value = test_fixed_ip.fake_fixed_ip
fip = self.network_api.get_fixed_ip_by_address(self.context,
'fake-addr')
self.assertIsInstance(fip, objects.FixedIP)
@mock.patch('nova.objects.FixedIP.get_by_id')
def test_get_fixed_ip(self, mock_get_by_id):
mock_get_by_id.return_value = mock.sentinel.fixed_ip
self.assertEqual(mock.sentinel.fixed_ip,
self.network_api.get_fixed_ip(self.context,
mock.sentinel.id))
mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
@mock.patch('nova.objects.FixedIP.get_by_floating_address')
def test_get_instance_by_floating_address(self, mock_get_by_floating):
mock_get_by_floating.return_value = objects.FixedIP(
            instance_uuid=mock.sentinel.instance_uuid)
self.assertEqual(str(mock.sentinel.instance_uuid),
self.network_api.get_instance_id_by_floating_address(
self.context, mock.sentinel.floating))
mock_get_by_floating.assert_called_once_with(self.context,
mock.sentinel.floating)
@mock.patch('nova.objects.FixedIP.get_by_floating_address')
def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
mock_get_by_floating.return_value = None
self.assertIsNone(
self.network_api.get_instance_id_by_floating_address(
self.context, mock.sentinel.floating))
mock_get_by_floating.assert_called_once_with(self.context,
mock.sentinel.floating)
@mock.patch('nova.network.api.API.migrate_instance_start')
def test_cleanup_instance_network_on_host(self, fake_migrate_start):
instance = fake_instance.fake_instance_obj(self.context)
self.network_api.cleanup_instance_network_on_host(
self.context, instance, 'fake_compute_source')
fake_migrate_start.assert_called_once_with(
self.context, instance,
{'source_compute': 'fake_compute_source', 'dest_compute': None})
@mock.patch('nova.network.api.API.migrate_instance_finish')
def test_setup_instance_network_on_host(self, fake_migrate_finish):
instance = fake_instance.fake_instance_obj(self.context)
self.network_api.setup_instance_network_on_host(
self.context, instance, 'fake_compute_source')
fake_migrate_finish.assert_called_once_with(
self.context, instance,
{'source_compute': None, 'dest_compute': 'fake_compute_source'})
@mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch.object(api.API, '_get_instance_nw_info')
@mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock):
fake_result = mock.sentinel.get_nw_info_result
mock_get.return_value = fake_result
instance = fake_instance.fake_instance_obj(self.context)
result = self.network_api.get_instance_nw_info(self.context, instance)
mock_get.assert_called_once_with(self.context, instance)
mock_update.assert_called_once_with(self.network_api, self.context,
instance, nw_info=fake_result,
update_cells=False)
self.assertEqual(fake_result, result)
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.instance_info_cache_update', return_value=fake_info_cache)
class TestUpdateInstanceCache(test.NoDBTestCase):
def setUp(self):
super(TestUpdateInstanceCache, self).setUp()
self.context = context.get_admin_context()
self.instance = objects.Instance(uuid=FAKE_UUID)
vifs = [network_model.VIF(id='super_vif')]
self.nw_info = network_model.NetworkInfo(vifs)
self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
self.nw_info)
def test_update_nw_info_none(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance, None)
api_mock._get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': self.nw_json})
def test_update_nw_info_one_network(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance, self.nw_info)
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': self.nw_json})
def test_update_nw_info_empty_list(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance,
network_model.NetworkInfo([]))
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': '[]'})
def test_decorator_return_object(self, db_mock, api_mock):
@base_api.refresh_cache
def func(self, context, instance):
return network_model.NetworkInfo([])
func(api_mock, self.context, self.instance)
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': '[]'})
def test_decorator_return_none(self, db_mock, api_mock):
@base_api.refresh_cache
def func(self, context, instance):
pass
api_mock._get_instance_nw_info.return_value = self.nw_info
func(api_mock, self.context, self.instance)
api_mock._get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
db_mock.assert_called_once_with(self.context, self.instance.uuid,
{'network_info': self.nw_json})
class NetworkHooksTestCase(test.BaseHookTestCase):
def test_instance_network_info_hook(self):
info_func = base_api.update_instance_cache_with_nw_info
self.assert_has_hook('instance_network_info', info_func)
|
apporc/nova
|
nova/tests/unit/network/test_api.py
|
Python
|
apache-2.0
| 28,985
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import re
import os
from tg import tmpl_context as c
from alluratest.tools import assert_equal, assert_not_in, assert_in
from ming.orm import ThreadLocalORMSession
import allura
from allura.tests import TestController
from allura.tests import decorators as td
from allura import model as M
class TestProjectHome(TestController):
@td.with_wiki
def test_project_nav(self):
response = self.app.get('/p/test/_nav.json')
root = self.app.get('/p/test/wiki/').follow()
assert re.search(r'<!-- Server: \S+ -->',
str(root.html)), 'Missing Server comment'
nav_links = root.html.find('div', dict(id='top_nav')).findAll('a')
nav_links = [nl for nl in nav_links if 'add-tool-toggle' not in nl['class']]
assert_equal(len(nav_links), len(response.json['menu']))
for nl, entry in zip(nav_links, response.json['menu']):
assert nl['href'] == entry['url']
@td.with_wiki
def test_project_nav_with_admin_options(self):
r = self.app.get('/p/test/_nav.json?admin_options=1')
assert_in({
"text": "Wiki",
"href": "/p/test/admin/install_tool?tool_name=wiki",
"tooltip":
"Documentation is key to your project and the wiki tool helps make it easy for anyone to contribute."
}, r.json['installable_tools'])
for m in r.json['menu']:
if m['mount_point'] == 'sub1':
assert_equal(m['admin_options'],
[{'className': None,
'text': 'Subproject Admin',
'href': '/p/test/sub1/admin',
}])
break
else:
raise AssertionError('Did not find sub1 subproject in menu results: {}'.format(r.json['menu']))
for m in r.json['menu']:
if m['mount_point'] == 'wiki':
assert_in({'className': 'admin_modal',
'text': 'Set Home',
'href': '/p/test/admin/wiki/home',
}, m['admin_options'])
assert_in({'className': None,
'text': 'Permissions',
'href': '/p/test/admin/wiki/permissions',
}, m['admin_options'])
assert_in({'className': 'admin_modal',
'text': 'Delete Everything',
'href': '/p/test/admin/wiki/delete',
}, m['admin_options'])
break
else:
raise AssertionError('Did not find wiki in menu results: {}'.format(r.json['menu']))
@td.with_wiki
def test_project_group_nav(self):
c.user = M.User.by_username('test-admin')
p = M.Project.query.get(shortname='test')
c.project = p
        if not p.app_instance('wiki2'):
c.app = p.install_app('wiki', 'wiki2', 'wiki2', 9)
response = self.app.get('/p/test/_nav.json')
menu = response.json['menu']
wiki_group = menu[-2]
wikis = wiki_group.pop('children')
assert_equal({'url': '/p/test/_list/wiki', 'name': 'Wiki \u25be', 'mount_point': None,
'icon': 'tool-wiki', 'tool_name': 'wiki', 'is_anchored': False}, wiki_group)
assert_equal(len(wikis), 2)
assert_in({'url': '/p/test/wiki/', 'name': 'Wiki', 'mount_point': 'wiki',
'icon': 'tool-wiki', 'tool_name': 'wiki', 'is_anchored': False}, wikis)
assert_in({'url': '/p/test/wiki2/', 'name': 'wiki2', 'mount_point': 'wiki2',
'icon': 'tool-wiki', 'tool_name': 'wiki', 'is_anchored': False}, wikis)
def test_sitemap_limit_per_tool(self):
"""Test that sitemap is limited to max of 10 items per tool type."""
c.user = M.User.by_username('test-admin')
p = M.Project.query.get(shortname='test')
c.project = p
for i in range(11):
mnt = 'wiki' + str(i)
p.install_app('wiki', mnt, mnt, 10 + i)
response = self.app.get('/p/test/_nav.json')
menu = response.json['menu']
wikis = menu[-2]['children']
assert_equal(len(wikis), 10)
@td.with_wiki
def test_project_group_nav_more_than_ten(self):
for i in range(1, 15):
tool_name = "wiki%s" % str(i)
c.user = M.User.by_username('test-admin')
p = M.Project.query.get(shortname='test')
c.project = p
if tool_name and not p.app_instance(tool_name):
c.app = p.install_app('wiki', tool_name, tool_name, i)
response = self.app.get('/p/test/_nav.json')
menu = response.json['menu']
wiki_menu = [m for m in menu if m['tool_name'] == 'wiki'][0]
assert_equal(len(wiki_menu['children']), 10)
assert_in({'url': '/p/test/_list/wiki', 'name': 'More...', 'mount_point': None,
'icon': 'tool-wiki', 'tool_name': 'wiki', 'is_anchored': False}, wiki_menu['children'])
@td.with_wiki
def test_neighborhood_home(self):
self.app.get('/p/test/wiki/', status=301)
self.app.get('/adobe/test/wiki/', status=404)
self.app.get('/adobe/no_such_project/wiki/', status=404)
@td.with_user_project('test-admin')
def test_user_subproject_home_not_profile(self):
u_proj = M.Project.query.get(shortname='u/test-admin')
u_proj.new_subproject('sub1')
ThreadLocalORMSession.flush_all()
r = self.app.get('/u/test-admin/sub1/')
assert r.location.endswith('admin/'), r.location
assert_not_in('Profile', r.follow().text)
def test_user_icon_missing(self):
r = self.app.get('/u/test-user/user_icon', status=302)
assert r.location.endswith('images/user.png')
def test_user_icon(self):
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(allura.__path__[0], 'nf', 'allura', 'images', file_name)
        with open(file_path, 'rb') as f:
            file_data = f.read()
upload = ('icon', file_name, file_data)
with td.audits('update project icon'):
self.app.post('/u/test-admin/admin/update', params=dict(
name='Test Project',
shortname='test',
short_description='A Test Project'),
upload_files=[upload])
r = self.app.get('/u/test-admin/user_icon')
assert_equal(r.content_type, 'image/png')
def test_user_search(self):
r = self.app.get('/p/test/user_search?term=test', status=200)
j = json.loads(r.text)
assert j['users'][0]['id'].startswith('test')
def test_user_search_for_disabled_user(self):
user = M.User.by_username('test-admin')
user.disabled = True
ThreadLocalORMSession.flush_all()
r = self.app.get('/p/test/user_search?term=test', status=200)
j = json.loads(r.text)
assert j == {'users': []}
def test_user_search_noparam(self):
self.app.get('/p/test/user_search', status=400)
def test_user_search_shortparam(self):
self.app.get('/p/test/user_search?term=ad', status=400)
def test_users(self):
r = self.app.get('/p/test/users', status=200)
j = json.loads(r.text)
expected = [{
'value': 'test-admin',
'label': 'Test Admin (test-admin)'
}]
assert_equal(j['options'], expected)
def test_members(self):
nbhd = M.Neighborhood.query.get(name='Projects')
self.app.post('/admin/groups/create', params={'name': 'B_role'})
test_project = M.Project.query.get(
shortname='test', neighborhood_id=nbhd._id)
test_project.add_user(M.User.by_username('test-user-1'), ['B_role'])
test_project.add_user(M.User.by_username('test-user'), ['Developer'])
test_project.add_user(M.User.by_username('test-user-0'), ['Member'])
test_project.add_user(M.User.by_username('test-user-2'), ['Member'])
test_project.add_user(M.User.by_username('test-user-3'), ['Member'])
test_project.add_user(M.User.by_username('test-user-3'), ['Developer'])
test_project.add_user(M.User.by_username('test-user-4'), ['Admin'])
ThreadLocalORMSession.flush_all()
r = self.app.get('/p/test/_members/')
assert '<td>Test Admin</td>' in r
assert '<td><a href="/u/test-admin/">test-admin</a></td>' in r
assert '<td>Admin</td>' in r
tr = r.html.findAll('tr')
assert "<td>Test Admin</td>" in str(tr[1])
assert "<td>Test User 4</td>" in str(tr[2])
assert "<td>Test User</td>" in str(tr[3])
assert "<td>Test User 3</td>" in str(tr[4])
assert "<td>Test User 0</td>" in str(tr[5])
assert "<td>Test User 1</td>" in str(tr[6])
assert "<td>Test User 2</td>" in str(tr[7])
def test_members_anonymous(self):
r = self.app.get('/p/test/_members/',
extra_environ=dict(username='*anonymous'))
assert '<td>Test Admin</td>' in r
assert '<td><a href="/u/test-admin/">test-admin</a></td>' in r
assert '<td>Admin</td>' in r
def test_toolaccess_before_subproject(self):
self.app.extra_environ = {'username': 'test-admin'}
# Add the subproject with a wiki.
self.app.post('/p/test/admin/update_mounts', params={
'new.install': 'install',
'new.ep_name': '',
'new.ordinal': '1',
'new.mount_point': 'test-mount',
'new.mount_label': 'Test Mount'})
r = self.app.get('/p/test/test-mount/')
assert r.location.endswith('admin/'), r.location
pr = M.Project.query.get(shortname='test/test-mount')
assert pr is not None
c.user = M.User.query.get(username='test-admin')
# Install and Verify a Tool in the subproject.
pr.install_app(ep_name='Wiki', mount_point='test-sub', mount_label='Test Sub', ordinal='1')
r = self.app.get('/p/test/test-mount/test-sub/').follow()
active_link = r.html.findAll('li', {'class': 'selected'})
assert_equal(len(active_link), 1)
assert active_link[0].contents[1]['href'] == '/p/test/test-mount/test-sub/'
assert 'Welcome to your wiki!' in r
# Delete the Subproject.
self.app.post('/p/test/admin/update_mounts', params={
'subproject-0.delete': 'on',
'subproject-0.shortname': 'test/test-mount',
'new.ep_name': '',
})
# Try to access the installed tool as anon.
r = self.app.get('/p/test/test-mount/test-sub/', extra_environ=dict(username='*anonymous'), status=404)
# Try to access the installed tool as Admin.
r = self.app.get('/p/test/test-mount/test-sub/').follow()
assert 'Wiki' in r
# Install a new tool with same mount point in parent project. Here a Wiki is installed.
p = M.Project.query.get(shortname='test')
p.install_app(ep_name='Wiki', mount_point='test-mount', mount_label='Test Sub', ordinal='1')
# Check if the tool is accessed and not the subproject.
r = self.app.get('/p/test/test-mount/').follow()
active_link = r.html.findAll('li', {'class': 'selected'})
assert_equal(len(active_link), 1)
assert active_link[0].contents[1]['href'] == '/p/test/test-mount/'
assert 'Welcome to your wiki!' in r
|
apache/allura
|
Allura/allura/tests/functional/test_home.py
|
Python
|
apache-2.0
| 12,364
|
"""
dsq_postgres.signals
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.dispatch import Signal
from django.db.backends.signals import connection_created
reconnect_attempt = Signal(providing_args=["connection"])
|
disqus/disqus-postgres
|
src/dsq_postgres/signals.py
|
Python
|
apache-2.0
| 298
|
#
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import oslo_messaging
from ceilometer.agent import plugin_base
from ceilometer import sample
cfg.CONF.import_opt('nova_control_exchange',
'ceilometer.compute.notifications')
cfg.CONF.import_opt('glance_control_exchange',
'ceilometer.image.notifications')
cfg.CONF.import_opt('neutron_control_exchange',
'ceilometer.network.notifications')
cfg.CONF.import_opt('cinder_control_exchange',
'ceilometer.volume.notifications')
OPTS = [
cfg.MultiStrOpt('http_control_exchanges',
default=[cfg.CONF.nova_control_exchange,
cfg.CONF.glance_control_exchange,
cfg.CONF.neutron_control_exchange,
cfg.CONF.cinder_control_exchange],
help="Exchanges name to listen for notifications."),
]
cfg.CONF.register_opts(OPTS)
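# Illustrative only (not part of the original module): http_control_exchanges
# is a MultiStrOpt, so additional exchanges can be listed in ceilometer.conf
# by repeating the option, e.g.
#
#   [DEFAULT]
#   http_control_exchanges = nova
#   http_control_exchanges = keystone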
class HTTPRequest(plugin_base.NotificationBase,
plugin_base.NonMetricNotificationBase):
event_types = ['http.request']
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo_messaging.Target
This sequence is defining the exchange and topics to be connected for
this plugin.
"""
return [oslo_messaging.Target(topic=topic, exchange=exchange)
for topic in conf.notification_topics
for exchange in conf.http_control_exchanges]
def process_notification(self, message):
yield sample.Sample.from_notification(
name=message['event_type'],
type=sample.TYPE_DELTA,
volume=1,
unit=message['event_type'].split('.')[1],
user_id=message['payload']['request'].get('HTTP_X_USER_ID'),
project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'),
resource_id=message['payload']['request'].get(
'HTTP_X_SERVICE_NAME'),
message=message)
class HTTPResponse(HTTPRequest):
event_types = ['http.response']
|
pkilambi/ceilometer
|
ceilometer/middleware.py
|
Python
|
apache-2.0
| 2,656
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCaseWFunctionTable, BasicSharedKeyspaceUnitTestCase, execute_until_pass
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cluster import Cluster, ResultSet
from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, ordered_dict_factory
from cassandra.util import OrderedDict
def setup_module():
use_singledc()
class NameTupleFactory(BasicSharedKeyspaceUnitTestCase):
def setUp(self):
super(NameTupleFactory, self).setUp()
self.session.row_factory = named_tuple_factory
ddl = '''
CREATE TABLE {0}.{1} (
k int PRIMARY KEY,
v1 text,
v2 text,
v3 text)'''.format(self.ks_name, self.function_table_name)
self.session.execute(ddl)
execute_until_pass(self.session, ddl)
def test_sanitizing(self):
"""
        Test to ensure that identically named results are surfaced correctly by the NamedTupleFactory.
        Creates a table with a few different text fields and inserts a few rows into it.
        It then fetches the values and confirms that, despite all being selected under
        the same alias, they are exposed in the result set under distinct attribute names.
@since 3.3
@jira_ticket PYTHON-467
@expected_result duplicate named results have unique row names.
@test_category queries
"""
for x in range(5):
insert1 = '''
INSERT INTO {0}.{1}
( k , v1, v2, v3 )
VALUES
( 1 , 'v1{2}', 'v2{2}','v3{2}' )
'''.format(self.keyspace_name, self.function_table_name, str(x))
self.session.execute(insert1)
query = "SELECT v1 AS duplicate, v2 AS duplicate, v3 AS duplicate from {0}.{1}".format(self.ks_name, self.function_table_name)
rs = self.session.execute(query)
row = rs[0]
self.assertTrue(hasattr(row, 'duplicate'))
self.assertTrue(hasattr(row, 'duplicate_'))
self.assertTrue(hasattr(row, 'duplicate__'))
class RowFactoryTests(BasicSharedKeyspaceUnitTestCaseWFunctionTable):
"""
Test different row_factories and access code
"""
def setUp(self):
super(RowFactoryTests, self).setUp()
self.insert1 = '''
INSERT INTO {0}.{1}
( k , v )
VALUES
( 1 , 1 )
'''.format(self.keyspace_name, self.function_table_name)
self.insert2 = '''
INSERT INTO {0}.{1}
( k , v )
VALUES
( 2 , 2 )
'''.format(self.keyspace_name, self.function_table_name)
self.select = '''
SELECT * FROM {0}.{1}
'''.format(self.keyspace_name, self.function_table_name)
def tearDown(self):
self.drop_function_table()
def test_tuple_factory(self):
session = self.session
session.row_factory = tuple_factory
session.execute(self.insert1)
session.execute(self.insert2)
result = session.execute(self.select)
self.assertIsInstance(result, ResultSet)
self.assertIsInstance(result[0], tuple)
for row in result:
self.assertEqual(row[0], row[1])
self.assertEqual(result[0][0], result[0][1])
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], result[1][1])
self.assertEqual(result[1][0], 2)
def test_named_tuple_factory(self):
session = self.session
session.row_factory = named_tuple_factory
session.execute(self.insert1)
session.execute(self.insert2)
result = session.execute(self.select)
self.assertIsInstance(result, ResultSet)
result = list(result)
for row in result:
self.assertEqual(row.k, row.v)
self.assertEqual(result[0].k, result[0].v)
self.assertEqual(result[0].k, 1)
self.assertEqual(result[1].k, result[1].v)
self.assertEqual(result[1].k, 2)
def test_dict_factory(self):
session = self.session
session.row_factory = dict_factory
session.execute(self.insert1)
session.execute(self.insert2)
result = session.execute(self.select)
self.assertIsInstance(result, ResultSet)
self.assertIsInstance(result[0], dict)
for row in result:
self.assertEqual(row['k'], row['v'])
self.assertEqual(result[0]['k'], result[0]['v'])
self.assertEqual(result[0]['k'], 1)
self.assertEqual(result[1]['k'], result[1]['v'])
self.assertEqual(result[1]['k'], 2)
def test_ordered_dict_factory(self):
session = self.session
session.row_factory = ordered_dict_factory
session.execute(self.insert1)
session.execute(self.insert2)
result = session.execute(self.select)
self.assertIsInstance(result, ResultSet)
self.assertIsInstance(result[0], OrderedDict)
for row in result:
self.assertEqual(row['k'], row['v'])
self.assertEqual(result[0]['k'], result[0]['v'])
self.assertEqual(result[0]['k'], 1)
self.assertEqual(result[1]['k'], result[1]['v'])
self.assertEqual(result[1]['k'], 2)
class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase):
"""
Test for PYTHON-122: Improve Error Handling/Reporting for named_tuple_factory and Numeric Column Names
"""
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
cls._cass_version, cls._cql_version = get_server_versions()
ddl = '''
CREATE TABLE test1rf.table_num_col ( key blob PRIMARY KEY, "626972746864617465" blob )
WITH COMPACT STORAGE'''
cls.session.execute(ddl)
@classmethod
def teardown_class(cls):
cls.session.execute("DROP TABLE test1rf.table_num_col")
cls.cluster.shutdown()
def test_no_exception_on_select(self):
"""
no exception on SELECT for numeric column name
"""
try:
self.session.execute('SELECT * FROM test1rf.table_num_col')
except ValueError as e:
self.fail("Unexpected ValueError exception: %s" % e.message)
def test_can_select_using_alias(self):
"""
can SELECT "<numeric col name>" AS aliases
"""
if self._cass_version < (2, 0, 0):
raise unittest.SkipTest("Alias in SELECT not supported before 2.0")
try:
self.session.execute('SELECT key, "626972746864617465" AS my_col from test1rf.table_num_col')
except ValueError as e:
self.fail("Unexpected ValueError exception: %s" % e.message)
def test_can_select_with_dict_factory(self):
"""
can SELECT numeric column using dict_factory
"""
self.session.row_factory = dict_factory
try:
self.session.execute('SELECT * FROM test1rf.table_num_col')
except ValueError as e:
self.fail("Unexpected ValueError exception: %s" % e.message)
|
thelastpickle/python-driver
|
tests/integration/standard/test_row_factories.py
|
Python
|
apache-2.0
| 7,874
|
# Copyright 2018 SUSE Linux GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration for full schema (Git revision 00597b5c8325664c2c534625525f59232d243d66).
Revision ID: 00597b5c8325
Revises: N/A
Create Date: 2018-04-12 09:09:48.212206
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '00597b5c8325'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# Enum tables (will be prepopulated with values through bulk_insert)
alarm_states = op.create_table('alarm_state',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci'
)
op.bulk_insert(alarm_states,
[{'name': 'UNDETERMINED'},
{'name': 'OK'},
{'name': 'ALARM'}])
ad_severities = op.create_table(
'alarm_definition_severity',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.bulk_insert(ad_severities,
[{'name': 'LOW'},
{'name': 'MEDIUM'},
{'name': 'HIGH'},
{'name': 'CRITICAL'}])
nm_types = op.create_table(
'notification_method_type',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.bulk_insert(nm_types,
[{'name': 'EMAIL'},
{'name': 'WEBHOOK'},
{'name': 'PAGERDUTY'}])
stream_action_types = op.create_table(
'stream_actions_action_type',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.bulk_insert(stream_action_types,
[{'name': 'FIRE'},
{'name': 'EXPIRE'}])
op.create_table(
'alarm_definition',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('name',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Column('description',
sa.String(length=255),
nullable=True,
server_default=None),
sa.Column('expression',
sa.dialects.mysql.LONGTEXT(),
nullable=False),
sa.Column('severity',
sa.String(length=20),
nullable=False),
sa.Column('match_by',
sa.String(length=255),
nullable=True,
server_default=''),
sa.Column('actions_enabled',
sa.Boolean(),
nullable=False,
server_default='1'),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.Column('deleted_at',
sa.DateTime(),
nullable=True,
server_default=None),
sa.PrimaryKeyConstraint('id'),
sa.Index('tenant_id', 'tenant_id'),
sa.Index('deleted_at', 'deleted_at'),
sa.Index('fk_alarm_definition_severity', 'severity'),
sa.ForeignKeyConstraint(['severity'],
['alarm_definition_severity.name']),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'alarm',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('alarm_definition_id',
sa.String(length=36),
nullable=False,
server_default=''),
sa.Column('state',
sa.String(length=20),
nullable=False),
sa.Column('lifecycle_state',
sa.String(length=50, collation=False),
nullable=True,
server_default=None),
sa.Column('link',
sa.String(length=512, collation=False),
nullable=True,
server_default=None),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('state_updated_at',
sa.DateTime(),
nullable=True),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.Index('alarm_definition_id', 'alarm_definition_id'),
sa.Index('fk_alarm_alarm_state', 'state'),
sa.ForeignKeyConstraint(['alarm_definition_id'],
['alarm_definition.id'],
name='fk_alarm_definition_id',
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['state'],
['alarm_state.name'],
name='fk_alarm_alarm_state'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'notification_method',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('name',
sa.String(length=250),
nullable=True,
server_default=None),
sa.Column('type',
sa.String(length=20),
# Note: the typo below is deliberate since we need to match
# the constraint name from the SQL script where it is
# misspelled as well.
sa.ForeignKey('notification_method_type.name',
name='fk_alarm_noticication_method_type'),
nullable=False),
sa.Column('address',
sa.String(length=512),
nullable=True,
server_default=None),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'alarm_action',
sa.Column('alarm_definition_id',
sa.String(length=36),
nullable=False,),
sa.Column('alarm_state',
sa.String(length=20),
nullable=False),
sa.Column('action_id',
sa.String(length=36),
nullable=False),
sa.PrimaryKeyConstraint('alarm_definition_id', 'alarm_state',
'action_id'),
sa.ForeignKeyConstraint(['action_id'],
['notification_method.id'],
name='fk_alarm_action_notification_method_id',
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['alarm_state'],
['alarm_state.name']),
sa.ForeignKeyConstraint(['alarm_definition_id'],
['alarm_definition.id'],
ondelete='CASCADE'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'alarm_metric',
sa.Column('alarm_id',
sa.String(length=36),
nullable=False),
sa.Column('metric_definition_dimensions_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.PrimaryKeyConstraint('alarm_id', 'metric_definition_dimensions_id'),
sa.Index('alarm_id', 'alarm_id'),
sa.Index('metric_definition_dimensions_id', 'metric_definition_dimensions_id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
# For some mysterious alembic/sqlalchemy reason this foreign key constraint
# ends up missing when specified upon table creation. Hence we need to add
# it through an ALTER TABLE operation:
op.create_foreign_key('fk_alarm_id',
'alarm_metric',
'alarm',
['alarm_id'],
['id'], ondelete='CASCADE')
op.create_table(
'metric_definition',
sa.Column('id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('name',
sa.String(length=255),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('region',
sa.String(length=255),
nullable=False,
server_default=''),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'metric_definition_dimensions',
sa.Column('id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('metric_definition_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('metric_dimension_set_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.PrimaryKeyConstraint('id'),
sa.Index('metric_definition_id', 'metric_definition_id'),
sa.Index('metric_dimension_set_id', 'metric_dimension_set_id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
    # mysql limits the size of a unique key to 767 bytes. The utf8mb4 charset
    # requires 4 bytes to be allocated for each character while the utf8
    # charset requires only 3 bytes. The utf8 charset should be sufficient for
    # any reasonable characters; see the definition of supplementary
    # characters for what it doesn't support. Even with utf8, the unique key
    # length would be 785 bytes, so only a prefix of the name is indexed.
    # Potentially the size of the name should be limited to 250 characters,
    # which would resolve this issue.
    #
    # The unique key is required to allow high-performance inserts without
    # doing a select, by using the "insert into metric_dimension ... on
    # duplicate key update dimension_set_id=dimension_set_id" syntax.
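    # Worked numbers behind the comment above (illustrative): name is a
    # VARCHAR(255), so utf8 needs 255 * 3 = 765 bytes for it; together with
    # the 20-byte dimension_set_id that gives the 785 bytes mentioned, which
    # is why the index below only covers a 252-character
    # (252 * 3 = 756 byte) prefix of name via mysql_length.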
op.create_table(
'metric_dimension',
sa.Column('dimension_set_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('name',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Column('value',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Index('metric_dimension_key',
'dimension_set_id', 'name',
unique=True,
mysql_length={'name': 252}),
sa.Index('dimension_set_id', 'dimension_set_id'),
mysql_charset='utf8',
mysql_collate='utf8_unicode_ci',
mysql_comment='PRIMARY KEY (`id`)')
op.create_table(
'sub_alarm_definition',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('alarm_definition_id',
sa.String(length=36),
sa.ForeignKey('alarm_definition.id', ondelete='CASCADE',
name='fk_sub_alarm_definition'),
nullable=False,
server_default=''),
sa.Column('function',
sa.String(length=10),
nullable=False),
sa.Column('metric_name',
sa.String(length=100),
nullable=True,
server_default=None),
sa.Column('operator',
sa.String(length=5),
nullable=False),
sa.Column('threshold',
sa.dialects.mysql.DOUBLE(),
nullable=False),
sa.Column('period',
sa.Integer(),
nullable=False),
sa.Column('periods',
sa.Integer(),
nullable=False),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'sub_alarm_definition_dimension',
sa.Column('sub_alarm_definition_id',
sa.String(length=36),
sa.ForeignKey('sub_alarm_definition.id', ondelete='CASCADE',
name='fk_sub_alarm_definition_dimension'),
nullable=False,
server_default=''),
sa.Column('dimension_name',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Column('value',
sa.String(length=255),
nullable=True,
server_default=None),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'sub_alarm',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('alarm_id',
sa.String(length=36),
sa.ForeignKey('alarm.id', ondelete='CASCADE',
name='fk_sub_alarm'),
nullable=False,
server_default=''),
sa.Column('sub_expression_id',
sa.String(length=36),
sa.ForeignKey('sub_alarm_definition.id',
name='fk_sub_alarm_expr'),
nullable=False,
server_default=''),
sa.Column('expression',
sa.dialects.mysql.LONGTEXT(),
nullable=False),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'schema_migrations',
sa.Column('version',
sa.String(length=255),
nullable=False),
sa.UniqueConstraint('version', name='unique_schema_migrations'),
mysql_charset='latin1')
op.create_table(
'stream_definition',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('name',
sa.String(length=190),
nullable=False,
server_default=''),
sa.Column('description',
sa.String(length=255),
nullable=True,
server_default=None),
sa.Column('select_by',
sa.dialects.mysql.LONGTEXT(),
nullable=True,
server_default=None),
sa.Column('group_by',
sa.dialects.mysql.LONGTEXT(length=20),
nullable=True,
server_default=None),
sa.Column('fire_criteria',
sa.dialects.mysql.LONGTEXT(length=20),
nullable=True,
server_default=None),
sa.Column('expiration',
sa.dialects.mysql.INTEGER(display_width=10,
unsigned=True),
nullable=True,
server_default='0'),
sa.Column('actions_enabled',
sa.Boolean(),
nullable=False,
server_default='1'),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.Column('deleted_at',
sa.DateTime(),
nullable=True,
server_default=None),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('tenant_id', 'name', name='tenant_name'),
sa.Index('name', 'name'),
sa.Index('tenant_id', 'tenant_id'),
sa.Index('deleted_at', 'deleted_at'),
sa.Index('created_at', 'created_at'),
sa.Index('updated_at', 'updated_at'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'stream_actions',
sa.Column('stream_definition_id',
sa.String(length=36),
sa.ForeignKey
('stream_definition.id',
name='fk_stream_action_stream_definition_id',
ondelete='CASCADE'),
nullable=False),
sa.Column('action_id',
sa.String(length=36),
sa.ForeignKey('notification_method.id',
name='fk_stream_action_notification_method_id',
ondelete='CASCADE'),
nullable=False),
sa.Column('action_type',
sa.String(length=20),
sa.ForeignKey('stream_actions_action_type.name'),
nullable=False),
sa.PrimaryKeyConstraint('stream_definition_id', 'action_id',
'action_type'),
sa.Index('stream_definition_id', 'stream_definition_id'),
sa.Index('action_type', 'action_type'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'event_transform',
sa.Column('id',
sa.dialects.mysql.VARCHAR(length=36, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('tenant_id',
sa.dialects.mysql.VARCHAR(length=36, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('name',
sa.dialects.mysql.VARCHAR(length=64, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('description',
sa.dialects.mysql.VARCHAR(length=250, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('specification',
sa.dialects.mysql.LONGTEXT(charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('enabled',
sa.Boolean(),
nullable=True,
server_default=None),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.Column('deleted_at',
sa.DateTime(),
nullable=True,
server_default=None),
sa.PrimaryKeyConstraint('id'),
sa.Index('name', 'name'),
sa.Index('tenant_id', 'tenant_id'),
sa.Index('deleted_at', 'deleted_at'),
sa.Index('created_at', 'created_at'),
sa.Index('updated_at', 'updated_at'),
sa.UniqueConstraint('tenant_id', 'name', name='tenant_name'),
mysql_charset='utf8mb4')
def downgrade():
op.drop_table('alarm_state')
op.drop_table('alarm_definition_severity')
op.drop_table('notification_method_type')
op.drop_table('stream_actions_action_type')
op.drop_table('alarm_definition')
op.drop_table('alarm')
op.drop_table('notification_method')
op.drop_table('alarm_action')
op.drop_table('alarm_metric')
op.drop_table('metric_definition')
op.drop_table('metric_definition_dimensions')
op.drop_table('metric_dimension')
op.drop_table('sub_alarm_definition')
op.drop_table('sub_alarm_definition_dimension')
op.drop_table('sub_alarm')
op.drop_table('schema_migrations')
op.drop_table('stream_definition')
op.drop_table('stream_actions')
op.drop_table('event_transform')
|
openstack/monasca-api
|
monasca_api/db/alembic/versions/00597b5c8325_initial.py
|
Python
|
apache-2.0
| 22,497
|
import functools
from collections import deque
import pytest
from plenum.common.constants import PreVCStrategies
from plenum.common.messages.node_messages import ViewChangeDone, InstanceChange, NewView, ViewChange
from plenum.test.helper import sdk_send_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import TestNode, ensureElectionsDone
from plenum.test.view_change.helper import ensure_view_change
from stp_core.loop.eventually import eventually
REQ_COUNT = 10
stashed_vc_done_msgs = deque()
stashed_ic_msgs = deque()
got_start_vc_msg = False
@pytest.fixture(scope="module")
def tconf(tconf):
tconf.PRE_VC_STRATEGY = PreVCStrategies.VC_START_MSG_STRATEGY
yield tconf
del tconf.PRE_VC_STRATEGY
def not_processing_view_change_done(node):
async def processNodeInBoxWithoutVCDone(self):
"""
Process the messages in the node inbox asynchronously.
"""
self.nodeIbStasher.process()
for i in range(len(self.nodeInBox)):
m = self.nodeInBox.popleft()
if isinstance(m, tuple) and len(
m) == 2 and not hasattr(m, '_field_types') and \
isinstance(m[0], (NewView, InstanceChange, ViewChange)) and \
m[0].viewNo > self.viewNo:
if isinstance(m[0], NewView):
stashed_vc_done_msgs.append(m)
else:
stashed_ic_msgs.append(m)
continue
await self.process_one_node_message(m)
node.processNodeInBox = functools.partial(processNodeInBoxWithoutVCDone, node)
def test_complete_with_delayed_view_change(looper,
txnPoolNodeSet,
sdk_wallet_steward,
sdk_pool_handle):
def chk_len_stashed_msgs():
        # We are waiting for one message from the selected primary.
assert len(stashed_vc_done_msgs) == 1
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, REQ_COUNT)
slow_node = txnPoolNodeSet[-1]
not_processing_view_change_done(slow_node)
ensure_view_change(looper, txnPoolNodeSet[:-1])
looper.run(eventually(chk_len_stashed_msgs))
while stashed_ic_msgs:
slow_node.nodeInBox.append(stashed_ic_msgs.popleft())
while stashed_vc_done_msgs:
slow_node.nodeInBox.append(stashed_vc_done_msgs.popleft())
slow_node.processNodeInBox = functools.partial(TestNode.processNodeInBox, slow_node)
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
|
evernym/plenum
|
plenum/test/view_change/test_complete_with_delayed_view_change.py
|
Python
|
apache-2.0
| 2,720
|
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import typing as T
logfile = 'meson-logs/install-log.txt'
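# The install log read below is expected to contain one installed path per
# line, with '#' comment lines skipped, e.g. (illustrative):
#
#   # List of files installed by Meson
#   /usr/local/bin/myprog
#   /usr/local/share/man/man1/myprog.1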
def do_uninstall(log: str) -> None:
failures = 0
successes = 0
for line in open(log):
if line.startswith('#'):
continue
fname = line.strip()
try:
if os.path.isdir(fname) and not os.path.islink(fname):
os.rmdir(fname)
else:
os.unlink(fname)
print('Deleted:', fname)
successes += 1
except Exception as e:
print(f'Could not delete {fname}: {e}.')
failures += 1
print('\nUninstall finished.\n')
print('Deleted:', successes)
print('Failed:', failures)
print('\nRemember that files created by custom scripts have not been removed.')
def run(args: T.List[str]) -> int:
if args:
print('Weird error.')
return 1
if not os.path.exists(logfile):
print('Log file does not exist, no installation has been done.')
return 0
do_uninstall(logfile)
return 0
|
QuLogic/meson
|
mesonbuild/scripts/uninstall.py
|
Python
|
apache-2.0
| 1,622
|
# Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class FlavorsAccessNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""
    Tests the Flavor Access API extension.
    Adding and removing flavor access requires admin privileges.
"""
_interface = 'json'
@classmethod
def setUpClass(cls):
super(FlavorsAccessNegativeTestJSON, cls).setUpClass()
if not test.is_extension_enabled('FlavorExtraData', 'compute'):
msg = "FlavorExtraData extension not enabled."
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
admin_client = cls._get_identity_admin_client()
cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
tenant_name)
cls.tenant_id = cls.tenant['id']
cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
flavors_client.
tenant_name)
cls.adm_tenant_id = cls.adm_tenant['id']
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
cls.disk = 10
@test.attr(type=['negative', 'gate'])
def test_flavor_access_list_with_public_flavor(self):
        # Test that listing flavor access for a public flavor raises an
        # exception.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='True')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
self.assertEqual(resp.status, 200)
self.assertRaises(exceptions.NotFound,
self.client.list_flavor_access,
new_flavor_id)
@test.attr(type=['negative', 'gate'])
def test_flavor_non_admin_add(self):
# Test to add flavor access as a user without admin privileges.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.add_flavor_access,
new_flavor['id'],
self.tenant_id)
@test.attr(type=['negative', 'gate'])
def test_flavor_non_admin_remove(self):
# Test to remove flavor access as a user without admin privileges.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.remove_flavor_access,
new_flavor['id'],
self.tenant_id)
@test.attr(type=['negative', 'gate'])
def test_add_flavor_access_duplicate(self):
# Create a new flavor.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
# An exception should be raised when adding flavor access to the same
# tenant
self.assertRaises(exceptions.Conflict,
self.client.add_flavor_access,
new_flavor['id'],
self.tenant_id)
@test.attr(type=['negative', 'gate'])
def test_remove_flavor_access_not_found(self):
# Create a new flavor.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# An exception should be raised when flavor access is not found
self.assertRaises(exceptions.NotFound,
self.client.remove_flavor_access,
new_flavor['id'],
str(uuid.uuid4()))
class FlavorsAdminNegativeTestXML(FlavorsAccessNegativeTestJSON):
_interface = 'xml'
|
BeenzSyed/tempest
|
tempest/api/compute/admin/test_flavors_access_negative.py
|
Python
|
apache-2.0
| 7,083
|
class FormPanel:
def getTextContents(self, iframe):
JS("""
try {
if (!@{{iframe}}.contentWindow.document)
return null;
return @{{iframe}}.contentWindow.document.body.innerText;
} catch (e) {
return null;
}
""")
def hookEvents(self, iframe, form, listener):
JS("""
if (@{{iframe}}) {
@{{iframe}}.onreadystatechange = function() {
if (!@{{iframe}}.__formAction)
return;
if (@{{iframe}}.readyState == 'complete') {
@{{listener}}.onFrameLoad();
}
};
}
@{{form}}.onsubmit = function() {
if (@{{iframe}})
@{{iframe}}.__formAction = @{{form}}.action;
return @{{listener}}.onFormSubmit();
};
""")
def unhookEvents(self, iframe, form):
JS("""
if (@{{iframe}})
@{{iframe}}.onreadystatechange = null;
@{{form}}.onsubmit = null;
""")
|
minghuascode/pyj
|
library/pyjamas/ui/FormPanel.ie6.py
|
Python
|
apache-2.0
| 1,092
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import logging
from build_pack_utils import FileUtil
_log = logging.getLogger('helpers')
class FakeBuilder(object):
def __init__(self, ctx):
self._ctx = ctx
class FakeInstaller(object):
def __init__(self, builder, installer):
self._installer = installer
self.builder = builder
def setup_htdocs_if_it_doesnt_exist(ctx):
if is_web_app(ctx):
htdocsPath = os.path.join(ctx['BUILD_DIR'], 'htdocs')
if not os.path.exists(htdocsPath):
fu = FileUtil(FakeBuilder(ctx), move=True)
fu.under('BUILD_DIR')
fu.into('htdocs')
fu.where_name_does_not_match(
'^%s.*$' % os.path.join(ctx['BUILD_DIR'], '.bp'))
fu.where_name_does_not_match(
'^%s.*$' % os.path.join(ctx['BUILD_DIR'], '.extensions'))
fu.where_name_does_not_match(
'^%s.*$' % os.path.join(ctx['BUILD_DIR'], '.bp-config'))
fu.where_name_does_not_match(
'^%s.*$' % os.path.join(ctx['BUILD_DIR'], 'manifest.yml'))
fu.where_name_does_not_match(
'^%s.*$' % os.path.join(ctx['BUILD_DIR'], 'lib'))
fu.done()
def convert_php_extensions(ctx):
_log.debug('Converting PHP extensions')
SKIP = ('cli', 'pear', 'cgi')
ctx['PHP_EXTENSIONS'] = \
"\n".join(["extension=%s.so" % ex
for ex in ctx['PHP_EXTENSIONS'] if ex not in SKIP])
path = '@{HOME}/php/lib/php/extensions/no-debug-non-zts-20100525'
ctx['ZEND_EXTENSIONS'] = \
"\n".join(['zend_extension="%s/%s.so"' % (path, ze)
for ze in ctx['ZEND_EXTENSIONS']])
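# For example (illustrative values): with ctx['PHP_EXTENSIONS'] = ['bz2', 'curl', 'cli']
# and ctx['ZEND_EXTENSIONS'] = ['opcache'], convert_php_extensions() rewrites them as
# php.ini lines:
#   PHP_EXTENSIONS  -> "extension=bz2.so\nextension=curl.so"      ('cli' is skipped)
#   ZEND_EXTENSIONS -> 'zend_extension="@{HOME}/php/lib/php/extensions/
#                       no-debug-non-zts-20100525/opcache.so"'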
def build_php_environment(ctx):
_log.debug('Building PHP environment variables')
ctx["PHP_ENV"] = \
"\n".join(["env[%s] = $%s" % (k, k) for k in os.environ.keys()])
def is_web_app(ctx):
return ctx.get('WEB_SERVER', '') != 'none'
def find_stand_alone_app_to_run(ctx):
app = ctx.get('APP_START_CMD', None)
if not app:
possible_files = ('app.php', 'main.php', 'run.php', 'start.php')
for pf in possible_files:
if os.path.exists(os.path.join(ctx['BUILD_DIR'], pf)):
app = pf
break
if not app:
print 'Build pack could not find a PHP file to execute!'
_log.info('Build pack could not find a file to execute. Either '
'set "APP_START_CMD" or include one of these files [%s]',
", ".join(possible_files))
app = 'app.php'
return app
|
NobleNoob/buildpack
|
lib/compile_helpers.py
|
Python
|
apache-2.0
| 3,380
|
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Service requests (parsing, handling, etc).
"""
from __future__ import with_statement
import codecs
from mapproxy.request.wms import exception
from mapproxy.exception import RequestError
from mapproxy.srs import SRS, make_lin_transf
from mapproxy.request.base import RequestParams, BaseRequest, split_mime_type
from mapproxy.compat import string_type, iteritems
import logging
log = logging.getLogger(__name__)
class WMSMapRequestParams(RequestParams):
"""
This class represents key-value parameters for WMS map requests.
All values can be accessed as a property.
Some properties return processed values. ``size`` returns a tuple of the width
    and height, ``layers`` returns a list of all layer names, etc.
"""
def _get_layers(self):
"""
List with all layer names.
"""
return sum((layers.split(',') for layers in self.params.get_all('layers')), [])
def _set_layers(self, layers):
if isinstance(layers, (list, tuple)):
layers = ','.join(layers)
self.params['layers'] = layers
layers = property(_get_layers, _set_layers)
del _get_layers
del _set_layers
def _get_bbox(self):
"""
``bbox`` as a tuple (minx, miny, maxx, maxy).
"""
if 'bbox' not in self.params or self.params['bbox'] is None:
return None
points = map(float, self.params['bbox'].split(','))
return tuple(points)
def _set_bbox(self, value):
if value is not None and not isinstance(value, string_type):
value = ','.join(str(x) for x in value)
self['bbox'] = value
bbox = property(_get_bbox, _set_bbox)
del _get_bbox
del _set_bbox
def _get_size(self):
"""
        Size of the request in pixels as a tuple (width, height),
or None if one is missing.
"""
if 'height' not in self or 'width' not in self:
return None
width = int(self.params['width'])
height = int(self.params['height'])
return (width, height)
def _set_size(self, value):
self['width'] = str(value[0])
self['height'] = str(value[1])
size = property(_get_size, _set_size)
del _get_size
del _set_size
def _get_srs(self):
return self.params.get('srs', None)
def _set_srs(self, srs):
if hasattr(srs, 'srs_code'):
self.params['srs'] = srs.srs_code
else:
self.params['srs'] = srs
srs = property(_get_srs, _set_srs)
del _get_srs
del _set_srs
def _get_transparent(self):
"""
``True`` if transparent is set to true, otherwise ``False``.
"""
if self.get('transparent', 'false').lower() == 'true':
return True
return False
def _set_transparent(self, transparent):
self.params['transparent'] = str(transparent).lower()
transparent = property(_get_transparent, _set_transparent)
del _get_transparent
del _set_transparent
@property
def bgcolor(self):
"""
The background color in PIL format (#rrggbb). Defaults to '#ffffff'.
"""
color = self.get('bgcolor', '0xffffff')
return '#'+color[2:]
def _get_format(self):
"""
The requested format as string (w/o any 'image/', 'text/', etc prefixes)
"""
_mime_class, format, options = split_mime_type(self.get('format', default=''))
return format
def _set_format(self, format):
if '/' not in format:
format = 'image/' + format
self['format'] = format
format = property(_get_format, _set_format)
del _get_format
del _set_format
@property
def format_mime_type(self):
return self.get('format')
def __repr__(self):
return '%s(param=%r)' % (self.__class__.__name__, self.params)
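# Illustrative use (hypothetical values): for raw KVP parameters
# {'width': '256', 'height': '128', 'bbox': '0,0,10,10', 'layers': 'a,b'}
# the typed properties yield size == (256, 128), bbox == (0.0, 0.0, 10.0, 10.0)
# and layers == ['a', 'b'].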
class WMSRequest(BaseRequest):
request_params = RequestParams
request_handler_name = None
fixed_params = {}
expected_param = []
non_strict_params = set()
#pylint: disable-msg=E1102
xml_exception_handler = None
def __init__(self, param=None, url='', validate=False, non_strict=False, **kw):
self.non_strict = non_strict
BaseRequest.__init__(self, param=param, url=url, validate=validate, **kw)
self.adapt_to_111()
def adapt_to_111(self):
pass
def adapt_params_to_version(self):
params = self.params.copy()
for key, value in iteritems(self.fixed_params):
params[key] = value
if 'styles' not in params:
params['styles'] = ''
return params
@property
def query_string(self):
return self.adapt_params_to_version().query_string
class WMSMapRequest(WMSRequest):
"""
Base class for all WMS GetMap requests.
:ivar requests: the ``RequestParams`` class for this request
:ivar request_handler_name: the name of the server handler
:ivar fixed_params: parameters that are fixed for a request
:ivar expected_param: required parameters, used for validating
"""
request_params = WMSMapRequestParams
request_handler_name = 'map'
fixed_params = {'request': 'GetMap', 'service': 'WMS'}
expected_param = ['version', 'request', 'layers', 'styles', 'srs', 'bbox',
'width', 'height', 'format']
#pylint: disable-msg=E1102
xml_exception_handler = None
prevent_image_exception = False
def __init__(self, param=None, url='', validate=False, non_strict=False, **kw):
WMSRequest.__init__(self, param=param, url=url, validate=validate,
non_strict=non_strict, **kw)
def validate(self):
self.validate_param()
self.validate_bbox()
self.validate_styles()
def validate_param(self):
missing_param = []
for param in self.expected_param:
if self.non_strict and param in self.non_strict_params:
continue
if param not in self.params:
missing_param.append(param)
if missing_param:
if 'format' in missing_param:
self.params['format'] = 'image/png'
raise RequestError('missing parameters ' + str(missing_param),
request=self)
def validate_bbox(self):
x0, y0, x1, y1 = self.params.bbox
if x0 >= x1 or y0 >= y1:
raise RequestError('invalid bbox ' + self.params.get('bbox', None),
request=self)
def validate_format(self, image_formats):
format = self.params['format']
if format not in image_formats:
format = self.params['format']
self.params['format'] = 'image/png'
raise RequestError('unsupported image format: ' + format,
code='InvalidFormat', request=self)
def validate_srs(self, srs):
if self.params['srs'].upper() not in srs:
raise RequestError('unsupported srs: ' + self.params['srs'],
code='InvalidSRS', request=self)
def validate_styles(self):
if 'styles' in self.params:
styles = self.params['styles']
if not set(styles.split(',')).issubset(set(['default', '', 'inspire_common:DEFAULT'])):
raise RequestError('unsupported styles: ' + self.params['styles'],
code='StyleNotDefined', request=self)
@property
def exception_handler(self):
if self.prevent_image_exception:
return self.xml_exception_handler()
if 'exceptions' in self.params:
if 'image' in self.params['exceptions'].lower():
return exception.WMSImageExceptionHandler()
elif 'blank' in self.params['exceptions'].lower():
return exception.WMSBlankExceptionHandler()
return self.xml_exception_handler()
def copy(self):
return self.__class__(param=self.params.copy(), url=self.url)
class Version(object):
_versions = {}
def __new__(cls, version):
if version in cls._versions:
return cls._versions[version]
version_obj = object.__new__(cls)
version_obj.__init__(version)
cls._versions[version] = version_obj
return version_obj
def __init__(self, version):
self.parts = tuple(int(x) for x in version.split('.'))
def __lt__(self, other):
if not isinstance(other, Version):
return NotImplemented
return self.parts < other.parts
def __ge__(self, other):
if not isinstance(other, Version):
return NotImplemented
return self.parts >= other.parts
def __repr__(self):
return "Version('%s')" % ('.'.join(str(part) for part in self.parts),)
class WMS100MapRequest(WMSMapRequest):
version = Version('1.0.0')
xml_exception_handler = exception.WMS100ExceptionHandler
fixed_params = {'request': 'map', 'wmtver': '1.0.0'}
expected_param = ['wmtver', 'request', 'layers', 'styles', 'srs', 'bbox',
'width', 'height', 'format']
def adapt_to_111(self):
del self.params['wmtver']
self.params['version'] = '1.0.0'
self.params['request'] = 'GetMap'
def adapt_params_to_version(self):
params = WMSMapRequest.adapt_params_to_version(self)
del params['version']
del params['service']
image_format = params['format']
if '/' in image_format:
params['format'] = image_format.split('/', 1)[1].upper()
return params
def validate_format(self, image_formats):
format = self.params['format']
image_formats100 = [f.split('/', 1)[1].upper() for f in image_formats]
if format in image_formats100:
format = 'image/' + format.lower()
self.params['format'] = format
if format not in image_formats:
format = self.params['format']
self.params['format'] = 'image/png'
raise RequestError('unsupported image format: ' + format,
code='InvalidFormat', request=self)
class WMS110MapRequest(WMSMapRequest):
version = Version('1.1.0')
fixed_params = {'request': 'GetMap', 'version': '1.1.0', 'service': 'WMS'}
xml_exception_handler = exception.WMS110ExceptionHandler
def adapt_to_111(self):
del self.params['wmtver']
class WMS111MapRequest(WMSMapRequest):
version = Version('1.1.1')
fixed_params = {'request': 'GetMap', 'version': '1.1.1', 'service': 'WMS'}
xml_exception_handler = exception.WMS111ExceptionHandler
def adapt_to_111(self):
del self.params['wmtver']
def switch_bbox_epsg_axis_order(bbox, srs):
if bbox is not None and srs is not None:
try:
if SRS(srs).is_axis_order_ne:
return bbox[1], bbox[0], bbox[3], bbox[2]
except RuntimeError:
log.warn('unknown SRS %s' % srs)
return bbox
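# For example, for an SRS with north/east axis order (e.g. EPSG:4326 in WMS 1.3.0),
# switch_bbox_epsg_axis_order((8.0, 53.0, 9.0, 54.0), 'EPSG:4326') returns
# (53.0, 8.0, 54.0, 9.0); unknown SRS codes leave the bbox unchanged.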
def _switch_bbox(self):
self.bbox = switch_bbox_epsg_axis_order(self.bbox, self.srs)
class WMS130MapRequestParams(WMSMapRequestParams):
"""
RequestParams for WMS 1.3.0 GetMap requests. Handles bbox axis-order.
"""
switch_bbox = _switch_bbox
class WMS130MapRequest(WMSMapRequest):
version = Version('1.3.0')
request_params = WMS130MapRequestParams
xml_exception_handler = exception.WMS130ExceptionHandler
fixed_params = {'request': 'GetMap', 'version': '1.3.0', 'service': 'WMS'}
expected_param = ['version', 'request', 'layers', 'styles', 'crs', 'bbox',
'width', 'height', 'format']
def adapt_to_111(self):
del self.params['wmtver']
if 'crs' in self.params:
self.params['srs'] = self.params['crs']
del self.params['crs']
self.params.switch_bbox()
def adapt_params_to_version(self):
params = WMSMapRequest.adapt_params_to_version(self)
params.switch_bbox()
if 'srs' in params:
params['crs'] = params['srs']
del params['srs']
return params
def validate_srs(self, srs):
        # it's called 'crs' in 1.3.0 and we validate before adapt_to_111
if self.params['srs'].upper() not in srs:
raise RequestError('unsupported crs: ' + self.params['srs'],
code='InvalidCRS', request=self)
def copy_with_request_params(self, req):
new_req = WMSMapRequest.copy_with_request_params(self, req)
new_req.params.switch_bbox()
return new_req
class WMSLegendGraphicRequestParams(WMSMapRequestParams):
"""
RequestParams for WMS GetLegendGraphic requests.
"""
def _set_layer(self, value):
self.params['layer'] = value
def _get_layer(self):
"""
Layer for which to produce legend graphic.
"""
return self.params.get('layer')
layer = property(_get_layer, _set_layer)
del _set_layer
del _get_layer
@property
def sld_version(self):
"""
        The SLD specification version.
"""
return self.params.get('sld_version')
def _set_scale(self, value):
self.params['scale'] = value
def _get_scale(self):
if self.params.get('scale') is not None:
return float(self['scale'])
return None
scale = property(_get_scale,_set_scale)
del _set_scale
del _get_scale
class WMSFeatureInfoRequestParams(WMSMapRequestParams):
"""
RequestParams for WMS GetFeatureInfo requests.
"""
@property
def query_layers(self):
"""
List with all query_layers.
"""
return sum((layers.split(',') for layers in self.params.get_all('query_layers')), [])
def _get_pos(self):
"""x, y query image coordinates (in pixel)"""
return int(self['x']), int(self['y'])
def _set_pos(self, value):
self['x'] = str(int(round(value[0])))
self['y'] = str(int(round(value[1])))
pos = property(_get_pos, _set_pos)
del _get_pos
del _set_pos
@property
def pos_coords(self):
"""x, y query coordinates (in request SRS)"""
width, height = self.size
bbox = self.bbox
return make_lin_transf((0, 0, width, height), bbox)(self.pos)
class WMS130FeatureInfoRequestParams(WMSFeatureInfoRequestParams):
switch_bbox = _switch_bbox
class WMSLegendGraphicRequest(WMSMapRequest):
request_params = WMSLegendGraphicRequestParams
request_handler_name = 'legendgraphic'
non_strict_params = set(['sld_version', 'scale'])
fixed_params = {'request': 'GetLegendGraphic', 'service': 'WMS', 'sld_version': '1.1.0'}
expected_param = ['version', 'request', 'layer', 'format', 'sld_version']
def validate(self):
self.validate_param()
self.validate_sld_version()
def validate_sld_version(self):
if self.params.get('sld_version', '1.1.0') != '1.1.0':
raise RequestError('invalid sld_version ' + self.params.get('sld_version'),
request=self)
class WMS111LegendGraphicRequest(WMSLegendGraphicRequest):
version = Version('1.1.1')
fixed_params = WMSLegendGraphicRequest.fixed_params.copy()
fixed_params['version'] = '1.1.1'
xml_exception_handler = exception.WMS111ExceptionHandler
class WMS130LegendGraphicRequest(WMSLegendGraphicRequest):
version = Version('1.3.0')
fixed_params = WMSLegendGraphicRequest.fixed_params.copy()
fixed_params['version'] = '1.3.0'
xml_exception_handler = exception.WMS130ExceptionHandler
class WMSFeatureInfoRequest(WMSMapRequest):
non_strict_params = set(['format', 'styles'])
def validate_format(self, image_formats):
if self.non_strict: return
WMSMapRequest.validate_format(self, image_formats)
class WMS111FeatureInfoRequest(WMSFeatureInfoRequest):
version = Version('1.1.1')
request_params = WMSFeatureInfoRequestParams
xml_exception_handler = exception.WMS111ExceptionHandler
request_handler_name = 'featureinfo'
fixed_params = WMS111MapRequest.fixed_params.copy()
fixed_params['request'] = 'GetFeatureInfo'
expected_param = WMSMapRequest.expected_param[:] + ['query_layers', 'x', 'y']
class WMS110FeatureInfoRequest(WMSFeatureInfoRequest):
version = Version('1.1.0')
request_params = WMSFeatureInfoRequestParams
xml_exception_handler = exception.WMS110ExceptionHandler
request_handler_name = 'featureinfo'
fixed_params = WMS110MapRequest.fixed_params.copy()
fixed_params['request'] = 'GetFeatureInfo'
expected_param = WMSMapRequest.expected_param[:] + ['query_layers', 'x', 'y']
class WMS100FeatureInfoRequest(WMSFeatureInfoRequest):
version = Version('1.0.0')
request_params = WMSFeatureInfoRequestParams
xml_exception_handler = exception.WMS100ExceptionHandler
request_handler_name = 'featureinfo'
fixed_params = WMS100MapRequest.fixed_params.copy()
fixed_params['request'] = 'feature_info'
expected_param = WMS100MapRequest.expected_param[:] + ['query_layers', 'x', 'y']
def adapt_to_111(self):
del self.params['wmtver']
def adapt_params_to_version(self):
params = WMSMapRequest.adapt_params_to_version(self)
del params['version']
return params
class WMS130FeatureInfoRequest(WMS130MapRequest):
# XXX: this class inherits from WMS130MapRequest to reuse
# the axis order stuff
version = Version('1.3.0')
request_params = WMS130FeatureInfoRequestParams
xml_exception_handler = exception.WMS130ExceptionHandler
request_handler_name = 'featureinfo'
fixed_params = WMS130MapRequest.fixed_params.copy()
fixed_params['request'] = 'GetFeatureInfo'
expected_param = WMS130MapRequest.expected_param[:] + ['query_layers', 'i', 'j']
non_strict_params = set(['format', 'styles'])
def adapt_to_111(self):
WMS130MapRequest.adapt_to_111(self)
# only set x,y when present,
# avoids empty values for request templates
if 'i' in self.params:
self.params['x'] = self.params['i']
if 'j' in self.params:
self.params['y'] = self.params['j']
del self.params['i']
del self.params['j']
def adapt_params_to_version(self):
params = WMS130MapRequest.adapt_params_to_version(self)
params['i'] = self.params['x']
params['j'] = self.params['y']
del params['x']
del params['y']
return params
def validate_format(self, image_formats):
if self.non_strict: return
WMSMapRequest.validate_format(self, image_formats)
class WMSCapabilitiesRequest(WMSRequest):
request_handler_name = 'capabilities'
exception_handler = None
mime_type = 'text/xml'
fixed_params = {}
def __init__(self, param=None, url='', validate=False, non_strict=False, **kw):
WMSRequest.__init__(self, param=param, url=url, validate=validate, **kw)
def adapt_to_111(self):
pass
def validate(self):
pass
class WMS100CapabilitiesRequest(WMSCapabilitiesRequest):
version = Version('1.0.0')
capabilities_template = 'wms100capabilities.xml'
fixed_params = {'request': 'capabilities', 'wmtver': '1.0.0'}
@property
def exception_handler(self):
return exception.WMS100ExceptionHandler()
class WMS110CapabilitiesRequest(WMSCapabilitiesRequest):
version = Version('1.1.0')
capabilities_template = 'wms110capabilities.xml'
mime_type = 'application/vnd.ogc.wms_xml'
fixed_params = {'request': 'GetCapabilities', 'version': '1.1.0', 'service': 'WMS'}
@property
def exception_handler(self):
return exception.WMS110ExceptionHandler()
class WMS111CapabilitiesRequest(WMSCapabilitiesRequest):
version = Version('1.1.1')
capabilities_template = 'wms111capabilities.xml'
mime_type = 'application/vnd.ogc.wms_xml'
fixed_params = {'request': 'GetCapabilities', 'version': '1.1.1', 'service': 'WMS'}
@property
def exception_handler(self):
return exception.WMS111ExceptionHandler()
class WMS130CapabilitiesRequest(WMSCapabilitiesRequest):
version = Version('1.3.0')
capabilities_template = 'wms130capabilities.xml'
fixed_params = {'request': 'GetCapabilities', 'version': '1.3.0', 'service': 'WMS'}
@property
def exception_handler(self):
return exception.WMS130ExceptionHandler()
request_mapping = {Version('1.0.0'): {'featureinfo': WMS100FeatureInfoRequest,
'map': WMS100MapRequest,
'capabilities': WMS100CapabilitiesRequest},
Version('1.1.0'): {'featureinfo': WMS110FeatureInfoRequest,
'map': WMS110MapRequest,
'capabilities': WMS110CapabilitiesRequest},
Version('1.1.1'): {'featureinfo': WMS111FeatureInfoRequest,
'map': WMS111MapRequest,
'capabilities': WMS111CapabilitiesRequest,
'legendgraphic': WMS111LegendGraphicRequest},
Version('1.3.0'): {'featureinfo': WMS130FeatureInfoRequest,
'map': WMS130MapRequest,
'capabilities': WMS130CapabilitiesRequest,
'legendgraphic': WMS130LegendGraphicRequest},
}
def _parse_version(req):
if 'version' in req.args:
return Version(req.args['version'])
if 'wmtver' in req.args:
return Version(req.args['wmtver'])
return Version('1.1.1') # default
def _parse_request_type(req):
if 'request' in req.args:
request_type = req.args['request'].lower()
if request_type in ('getmap', 'map'):
return 'map'
elif request_type in ('getfeatureinfo', 'feature_info'):
return 'featureinfo'
elif request_type in ('getcapabilities', 'capabilities'):
return 'capabilities'
elif request_type in ('getlegendgraphic',):
return 'legendgraphic'
else:
return request_type
else:
return None
def negotiate_version(version, supported_versions=None):
"""
>>> negotiate_version(Version('0.9.0'))
Version('1.0.0')
>>> negotiate_version(Version('2.0.0'))
Version('1.3.0')
>>> negotiate_version(Version('1.1.1'))
Version('1.1.1')
>>> negotiate_version(Version('1.1.0'))
Version('1.1.0')
>>> negotiate_version(Version('1.1.0'), [Version('1.0.0')])
Version('1.0.0')
>>> negotiate_version(Version('1.3.0'), sorted([Version('1.1.0'), Version('1.1.1')]))
Version('1.1.1')
"""
if not supported_versions:
supported_versions = list(request_mapping.keys())
supported_versions.sort()
if version < supported_versions[0]:
return supported_versions[0] # smallest version we support
if version > supported_versions[-1]:
return supported_versions[-1] # highest version we support
while True:
next_highest_version = supported_versions.pop()
if version >= next_highest_version:
return next_highest_version
def wms_request(req, validate=True, strict=True, versions=None):
version = _parse_version(req)
req_type = _parse_request_type(req)
if versions and version not in versions:
version_requests = None
else:
version_requests = request_mapping.get(version, None)
if version_requests is None:
negotiated_version = negotiate_version(version, supported_versions=versions)
version_requests = request_mapping[negotiated_version]
req_class = version_requests.get(req_type, None)
if req_class is None:
# use map request to get an exception handler for the requested version
dummy_req = version_requests['map'](param=req.args, url=req.base_url,
validate=False)
raise RequestError("unknown WMS request type '%s'" % req_type, request=dummy_req)
return req_class(param=req.args, url=req.base_url, validate=True,
non_strict=not strict, http=req)
def create_request(req_data, param, req_type='map', version='1.1.1', abspath=None):
url = req_data['url']
req_data = req_data.copy()
del req_data['url']
if 'request_format' in param:
req_data['format'] = param['request_format']
elif 'format' in param:
req_data['format'] = param['format']
if 'info_format' in param:
req_data['info_format'] = param['info_format']
if 'transparent' in req_data:
# we don't want a boolean
req_data['transparent'] = str(req_data['transparent'])
if req_data.get('sld', '').startswith('file://'):
sld_path = req_data['sld'][len('file://'):]
if abspath:
sld_path = abspath(sld_path)
with codecs.open(sld_path, 'r', 'utf-8') as f:
req_data['sld_body'] = f.read()
del req_data['sld']
return request_mapping[Version(version)][req_type](url=url, param=req_data)
info_formats = {
Version('1.3.0'): (('text', 'text/plain'),
('html', 'text/html'),
('xml', 'text/xml'),
),
None: (('text', 'text/plain'),
('html', 'text/html'),
('xml', 'application/vnd.ogc.gml'),
)
}
def infotype_from_mimetype(version, mime_type):
if version in info_formats:
formats = info_formats[version]
else:
formats = info_formats[None] # default
for t, m in formats:
if m == mime_type: return t
def mimetype_from_infotype(version, info_type):
if version in info_formats:
formats = info_formats[version]
else:
formats = info_formats[None] # default
for t, m in formats:
if t == info_type: return m
return 'text/plain'
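# For example, infotype_from_mimetype(Version('1.3.0'), 'text/xml') returns 'xml',
# while for other versions 'xml' corresponds to 'application/vnd.ogc.gml';
# mimetype_from_infotype() falls back to 'text/plain' for unknown info types.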
|
faegi/mapproxy
|
mapproxy/request/wms/__init__.py
|
Python
|
apache-2.0
| 26,728
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import cStringIO
import json
import os
from PIL import Image
import pandas as pd
import six
import shutil
import tensorflow as tf
import tempfile
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib import lookup
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_arg_scope
from tensorflow.python.lib.io import file_io
# ------------------------------------------------------------------------------
# public constants. Changing these could break user's code
# ------------------------------------------------------------------------------
# Individual transforms
IDENTITY_TRANSFORM = 'identity'
SCALE_TRANSFORM = 'scale'
ONE_HOT_TRANSFORM = 'one_hot'
MULTI_HOT_TRANSFORM = 'multi_hot'
TARGET_TRANSFORM = 'target'
IMAGE_TRANSFORM = 'image_to_vec'
# ------------------------------------------------------------------------------
# internal constants.
# ------------------------------------------------------------------------------
# Files
SCHEMA_FILE = 'schema.json'
FEATURES_FILE = 'features.json'
STATS_FILE = 'stats.json'
VOCAB_ANALYSIS_FILE = 'vocab_%s.csv'
# Transform collections
NUMERIC_TRANSFORMS = [IDENTITY_TRANSFORM, SCALE_TRANSFORM]
CATEGORICAL_TRANSFORMS = [ONE_HOT_TRANSFORM]
TEXT_TRANSFORMS = [MULTI_HOT_TRANSFORM]
# If the features file is missing transforms, apply these.
DEFAULT_NUMERIC_TRANSFORM = IDENTITY_TRANSFORM
DEFAULT_CATEGORICAL_TRANSFORM = ONE_HOT_TRANSFORM
# BigQuery Schema values supported
INTEGER_SCHEMA = 'integer'
FLOAT_SCHEMA = 'float'
STRING_SCHEMA = 'string'
NUMERIC_SCHEMA = [INTEGER_SCHEMA, FLOAT_SCHEMA]
# Inception Checkpoint
INCEPTION_V3_CHECKPOINT = 'gs://cloud-ml-data/img/flower_photos/inception_v3_2016_08_28.ckpt'
INCEPTION_EXCLUDED_VARIABLES = ['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step']
_img_buf = cStringIO.StringIO()
Image.new('RGB', (16, 16)).save(_img_buf, 'jpeg')
IMAGE_DEFAULT_STRING = base64.urlsafe_b64encode(_img_buf.getvalue())
IMAGE_BOTTLENECK_TENSOR_SIZE = 2048
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# start of transform functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def _scale(x, min_x_value, max_x_value, output_min, output_max):
"""Scale a column to [output_min, output_max].
  Assumes the column's range is [min_x_value, max_x_value]. If this is not
true at training or prediction time, the output value of this scale could be
outside the range [output_min, output_max].
Raises:
    ValueError: if min_x_value == max_x_value, as the column is constant.
"""
if round(min_x_value - max_x_value, 7) == 0:
# There is something wrong with the data.
# Why round to 7 places? It's the same as unittest's assertAlmostEqual.
    raise ValueError('In _scale, min_x_value == max_x_value')
def _scale(x):
min_x_valuef = tf.to_float(min_x_value)
max_x_valuef = tf.to_float(max_x_value)
output_minf = tf.to_float(output_min)
output_maxf = tf.to_float(output_max)
return ((((tf.to_float(x) - min_x_valuef) * (output_maxf - output_minf)) /
(max_x_valuef - min_x_valuef)) + output_minf)
return _scale(x)
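# For example, _scale(x, min_x_value=0, max_x_value=10, output_min=-1, output_max=1)
# maps 0 -> -1.0, 5 -> 0.0 and 10 -> 1.0; values outside [0, 10] are extrapolated
# beyond [-1, 1] rather than clipped.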
def _string_to_int(x, vocab):
"""Given a vocabulary and a string tensor `x`, maps `x` into an int tensor.
Args:
x: A `Column` representing a string value.
vocab: list of strings.
Returns:
A `Column` where each string value is mapped to an integer representing
its index in the vocab. Out of vocab values are mapped to len(vocab).
"""
def _map_to_int(x):
"""Maps string tensor into indexes using vocab.
Args:
x : a Tensor/SparseTensor of string.
Returns:
a Tensor/SparseTensor of indexes (int) of the same shape as x.
"""
table = lookup.index_table_from_tensor(
vocab,
default_value=len(vocab))
return table.lookup(x)
return _map_to_int(x)
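# For example, with vocab = ['red', 'green'], _string_to_int maps 'green' -> 1 and
# any out-of-vocab string (e.g. 'blue') -> 2, i.e. len(vocab).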
def _make_image_to_vec_tito(feature_name, tmp_dir=None, checkpoint=None):
"""Creates a tensor-in-tensor-out function that produces embeddings from image bytes.
Image to embedding is implemented with Tensorflow's inception v3 model and a pretrained
checkpoint. It returns 1x2048 'PreLogits' embeddings for each image.
Args:
feature_name: The name of the feature. Used only to identify the image tensors so
we can get gradients for probe in image prediction explaining.
tmp_dir: a local directory that is used for downloading the checkpoint. If
      None, a temp folder will be made and deleted.
checkpoint: the inception v3 checkpoint gs or local path. If None, default checkpoint
is used.
Returns: a tensor-in-tensor-out function that takes image string tensor and returns embeddings.
"""
def _image_to_vec(image_str_tensor):
def _decode_and_resize(image_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image_tensor = tf.where(tf.equal(image_tensor, ''), IMAGE_DEFAULT_STRING, image_tensor)
# Fork by whether image_tensor value is a file path, or a base64 encoded string.
slash_positions = tf.equal(tf.string_split([image_tensor], delimiter="").values, '/')
is_file_path = tf.cast(tf.count_nonzero(slash_positions), tf.bool)
# The following two functions are required for tf.cond. Note that we can not replace them
# with lambda. According to TF docs, if using inline lambda, both branches of condition
# will be executed. The workaround is to use a function call.
def _read_file():
return tf.read_file(image_tensor)
def _decode_base64():
return tf.decode_base64(image_tensor)
image = tf.cond(is_file_path, lambda: _read_file(), lambda: _decode_base64())
image = tf.image.decode_jpeg(image, channels=channels)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image
# The CloudML Prediction API always "feeds" the Tensorflow graph with
# dynamic batch sizes e.g. (?,). decode_jpeg only processes scalar
# strings because it cannot guarantee a batch of images would have
# the same output size. We use tf.map_fn to give decode_jpeg a scalar
# string from dynamic batches.
image = tf.map_fn(_decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# "gradients_[feature_name]" will be used for computing integrated gradients.
image = tf.identity(image, name='gradients_' + feature_name)
image = tf.subtract(image, 0.5)
inception_input = tf.multiply(image, 2.0)
# Build Inception layers, which expect a tensor of type float from [-1, 1)
# and shape [batch_size, height, width, channels].
with tf.contrib.slim.arg_scope(inception_v3_arg_scope()):
_, end_points = inception_v3(inception_input, is_training=False)
embeddings = end_points['PreLogits']
inception_embeddings = tf.squeeze(embeddings, [1, 2], name='SpatialSqueeze')
return inception_embeddings
def _tito_from_checkpoint(tito_in, checkpoint, exclude):
""" Create an all-constants tito function from an original tito function.
Given a tensor-in-tensor-out function which contains variables and a checkpoint path,
create a new tensor-in-tensor-out function which includes only constants, and can be
used in tft.map.
"""
def _tito_out(tensor_in):
checkpoint_dir = tmp_dir
if tmp_dir is None:
checkpoint_dir = tempfile.mkdtemp()
g = tf.Graph()
with g.as_default():
si = tf.placeholder(dtype=tensor_in.dtype, shape=tensor_in.shape, name=tensor_in.op.name)
so = tito_in(si)
all_vars = tf.contrib.slim.get_variables_to_restore(exclude=exclude)
saver = tf.train.Saver(all_vars)
# Downloading the checkpoint from GCS to local speeds up saver.restore() a lot.
checkpoint_tmp = os.path.join(checkpoint_dir, 'checkpoint')
with file_io.FileIO(checkpoint, 'r') as f_in, file_io.FileIO(checkpoint_tmp, 'w') as f_out:
f_out.write(f_in.read())
with tf.Session() as sess:
saver.restore(sess, checkpoint_tmp)
output_graph_def = tf.graph_util.convert_variables_to_constants(sess,
g.as_graph_def(),
[so.op.name])
file_io.delete_file(checkpoint_tmp)
if tmp_dir is None:
shutil.rmtree(checkpoint_dir)
tensors_out = tf.import_graph_def(output_graph_def,
input_map={si.name: tensor_in},
return_elements=[so.name])
return tensors_out[0]
return _tito_out
if not checkpoint:
checkpoint = INCEPTION_V3_CHECKPOINT
return _tito_from_checkpoint(_image_to_vec, checkpoint, INCEPTION_EXCLUDED_VARIABLES)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# end of transform functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def make_preprocessing_fn(output_dir, features, keep_target):
"""Makes a preprocessing function.
Args:
output_dir: folder path that contains the vocab and stats files.
features: the features dict
Returns:
a function that takes a dict of input tensors
"""
def preprocessing_fn(inputs):
"""Preprocessing function.
Args:
inputs: dictionary of raw input tensors
Returns:
A dictionary of transformed tensors
"""
stats = json.loads(
file_io.read_file_to_string(
os.path.join(output_dir, STATS_FILE)).decode())
result = {}
for name, transform in six.iteritems(features):
transform_name = transform['transform']
source_column = transform['source_column']
if transform_name == TARGET_TRANSFORM:
if not keep_target:
continue
if file_io.file_exists(os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column)):
transform_name = 'one_hot'
else:
transform_name = 'identity'
if transform_name == 'identity':
result[name] = inputs[source_column]
elif transform_name == 'scale':
result[name] = _scale(
inputs[name],
min_x_value=stats['column_stats'][source_column]['min'],
max_x_value=stats['column_stats'][source_column]['max'],
output_min=transform.get('value', 1) * (-1),
output_max=transform.get('value', 1))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, ex_count = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
if transform_name == MULTI_HOT_TRANSFORM:
separator = transform.get('separator', ' ')
tokens = tf.string_split(inputs[source_column], separator)
result[name] = _string_to_int(tokens, vocab)
else:
result[name] = _string_to_int(inputs[source_column], vocab)
elif transform_name == IMAGE_TRANSFORM:
make_image_to_vec_fn = _make_image_to_vec_tito(
name, checkpoint=transform.get('checkpoint', None))
result[name] = make_image_to_vec_fn(inputs[source_column])
else:
raise ValueError('unknown transform %s' % transform_name)
return result
return preprocessing_fn
def csv_header_and_defaults(features, schema, stats, keep_target):
"""Gets csv header and default lists."""
target_name = get_target_name(features)
if keep_target and not target_name:
raise ValueError('Cannot find target transform')
csv_header = []
record_defaults = []
for col in schema:
if not keep_target and col['name'] == target_name:
continue
# Note that numerical key columns do not have a stats entry, hence the use
# of get(col['name'], {})
csv_header.append(col['name'])
if col['type'].lower() == INTEGER_SCHEMA:
dtype = tf.int64
default = int(stats['column_stats'].get(col['name'], {}).get('mean', 0))
elif col['type'].lower() == FLOAT_SCHEMA:
dtype = tf.float32
default = float(stats['column_stats'].get(col['name'], {}).get('mean', 0.0))
else:
dtype = tf.string
default = ''
record_defaults.append(tf.constant([default], dtype=dtype))
return csv_header, record_defaults
def build_csv_serving_tensors_for_transform_step(analysis_path,
features,
schema,
stats,
keep_target):
"""Builds a serving function starting from raw csv.
  This should only be used by transform.py (the transform step).
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model.
"""
csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target)
placeholder = tf.placeholder(dtype=tf.string, shape=(None,),
name='csv_input_placeholder')
tensors = tf.decode_csv(placeholder, record_defaults)
raw_features = dict(zip(csv_header, tensors))
transform_fn = make_preprocessing_fn(analysis_path, features, keep_target)
transformed_tensors = transform_fn(raw_features)
transformed_features = {}
# Expand the dims of non-sparse tensors
for k, v in six.iteritems(transformed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
transformed_features[k] = v
return input_fn_utils.InputFnOps(
transformed_features, None, {"csv_example": placeholder})
def get_target_name(features):
for name, transform in six.iteritems(features):
if transform['transform'] == TARGET_TRANSFORM:
return name
return None
def read_vocab_file(file_path):
"""Reads a vocab file to memeory.
Args:
file_path: Each line of the vocab is in the form "token,example_count"
Returns:
Two lists, one for the vocab, and one for just the example counts.
"""
with file_io.FileIO(file_path, 'r') as f:
vocab_pd = pd.read_csv(
f,
header=None,
names=['vocab', 'count'],
dtype=str, # Prevent pd from converting numerical categories.
na_filter=False) # Prevent pd from converting 'NA' to a NaN.
vocab = vocab_pd['vocab'].tolist()
ex_count = vocab_pd['count'].astype(int).tolist()
return vocab, ex_count
def get_transformed_feature_indices(features, stats):
"""Returns information about the transformed features.
Returns:
    List in the form
[(transformed_feature_name, {size: int, index_start: int})]
"""
feature_indices = []
index_start = 1
for name, transform in sorted(six.iteritems(features)):
transform_name = transform['transform']
source_column = transform['source_column']
info = {}
if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
info['size'] = 1
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
info['size'] = stats['column_stats'][source_column]['vocab_size']
elif transform_name == IMAGE_TRANSFORM:
info['size'] = IMAGE_BOTTLENECK_TENSOR_SIZE
elif transform_name == TARGET_TRANSFORM:
info['size'] = 0
else:
      raise ValueError('xgboost does not support transform "%s"' % transform_name)
info['index_start'] = index_start
index_start += info['size']
feature_indices.append((name, info))
return feature_indices
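# For example (hypothetical features/stats): with features
#   {'num1': {'transform': 'identity', 'source_column': 'num1'},
#    'cat1': {'transform': 'one_hot', 'source_column': 'cat1'}}
# and stats reporting a vocab_size of 3 for 'cat1', the sorted iteration yields
#   [('cat1', {'size': 3, 'index_start': 1}), ('num1', {'size': 1, 'index_start': 4})],
# which create_feature_map() below expands into per-index descriptions.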
def create_feature_map(features, feature_indices, output_dir):
"""Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
    List in the form
[(index, feature_description)]
"""
feature_map = []
for name, info in feature_indices:
transform_name = features[name]['transform']
source_column = features[name]['source_column']
if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
feature_map.append((info['index_start'], name))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, _ = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
for i, word in enumerate(vocab):
if transform_name == ONE_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s=%s' % (source_column, word)))
elif transform_name == MULTI_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s has "%s"' % (source_column, word)))
elif transform_name == IMAGE_TRANSFORM:
for i in range(info['size']):
feature_map.append((info['index_start'] + i, '%s image feature %d' % (source_column, i)))
return feature_map
|
yebrahim/pydatalab
|
solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py
|
Python
|
apache-2.0
| 17,794
|
"""
Auto-generated class for Cluster
"""
from .EnumClusterDriveType import EnumClusterDriveType
from .EnumClusterStatus import EnumClusterStatus
from .StorageServer import StorageServer
from . import client_support
class Cluster(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(clusterType, driveType, label, nodes, status, storageServers):
"""
:type clusterType: str
:type driveType: EnumClusterDriveType
:type label: str
:type nodes: list[str]
:type status: EnumClusterStatus
:type storageServers: list[StorageServer]
:rtype: Cluster
"""
return Cluster(
clusterType=clusterType,
driveType=driveType,
label=label,
nodes=nodes,
status=status,
storageServers=storageServers,
)
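    # Illustrative call (hypothetical values): Cluster.create(clusterType='storage',
    # driveType=<EnumClusterDriveType member>, label='cluster1', nodes=['node1'],
    # status=<EnumClusterStatus member>, storageServers=[]) builds a Cluster whose
    # __init__ below validates every property against the declared datatypes.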
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Cluster'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'clusterType'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.clusterType = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'driveType'
val = data.get(property_name)
if val is not None:
datatypes = [EnumClusterDriveType]
try:
self.driveType = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'label'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.label = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nodes'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.nodes = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'status'
val = data.get(property_name)
if val is not None:
datatypes = [EnumClusterStatus]
try:
self.status = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'storageServers'
val = data.get(property_name)
if val is not None:
datatypes = [StorageServer]
try:
self.storageServers = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
|
zero-os/0-orchestrator
|
pyclient/zeroos/orchestrator/client/Cluster.py
|
Python
|
apache-2.0
| 4,340
|
from .profile import ProfileView
|
andrewsosa/hackfsu_com
|
api/api/views/mentor/get/__init__.py
|
Python
|
apache-2.0
| 33
|
# hgweb/hgweb_mod.py - Web interface for a repository.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
from mercurial import ui, hg, hook, error, encoding, templater
from common import get_mtime, ErrorResponse, permhooks
from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
from request import wsgirequest
import webcommands, protocol, webutil
perms = {
'changegroup': 'pull',
'changegroupsubset': 'pull',
'unbundle': 'push',
'stream_out': 'pull',
}
class hgweb(object):
def __init__(self, repo, name=None):
if isinstance(repo, str):
u = ui.ui()
u.setconfig('ui', 'report_untrusted', 'off')
u.setconfig('ui', 'interactive', 'off')
self.repo = hg.repository(u, repo)
else:
self.repo = repo
hook.redirect(True)
self.mtime = -1
self.reponame = name
self.archives = 'zip', 'gz', 'bz2'
self.stripecount = 1
# a repo owner may set web.templates in .hg/hgrc to get any file
# readable by the user running the CGI script
self.templatepath = self.config('web', 'templates')
# The CGI scripts are often run by a user different from the repo owner.
# Trust the settings from the .hg/hgrc files by default.
def config(self, section, name, default=None, untrusted=True):
return self.repo.ui.config(section, name, default,
untrusted=untrusted)
def configbool(self, section, name, default=False, untrusted=True):
return self.repo.ui.configbool(section, name, default,
untrusted=untrusted)
def configlist(self, section, name, default=None, untrusted=True):
return self.repo.ui.configlist(section, name, default,
untrusted=untrusted)
def refresh(self, request=None):
if request:
self.repo.ui.environ = request.env
mtime = get_mtime(self.repo.spath)
if mtime != self.mtime:
self.mtime = mtime
self.repo = hg.repository(self.repo.ui, self.repo.root)
self.maxchanges = int(self.config("web", "maxchanges", 10))
self.stripecount = int(self.config("web", "stripes", 1))
self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
self.maxfiles = int(self.config("web", "maxfiles", 10))
self.allowpull = self.configbool("web", "allowpull", True)
encoding.encoding = self.config("web", "encoding",
encoding.encoding)
def run(self):
if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
raise RuntimeError("This function is only intended to be "
"called while running as a CGI script.")
import mercurial.hgweb.wsgicgi as wsgicgi
wsgicgi.launch(self)
def __call__(self, env, respond):
req = wsgirequest(env, respond)
return self.run_wsgi(req)
def run_wsgi(self, req):
self.refresh(req)
# work with CGI variables to create coherent structure
# use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
req.url = req.env['SCRIPT_NAME']
if not req.url.endswith('/'):
req.url += '/'
if 'REPO_NAME' in req.env:
req.url += req.env['REPO_NAME'] + '/'
if 'PATH_INFO' in req.env:
parts = req.env['PATH_INFO'].strip('/').split('/')
repo_parts = req.env.get('REPO_NAME', '').split('/')
if parts[:len(repo_parts)] == repo_parts:
parts = parts[len(repo_parts):]
query = '/'.join(parts)
else:
query = req.env['QUERY_STRING'].split('&', 1)[0]
query = query.split(';', 1)[0]
# process this if it's a protocol request
# protocol bits don't need to create any URLs
# and the clients always use the old URL structure
cmd = req.form.get('cmd', [''])[0]
if cmd and cmd in protocol.__all__:
if query:
raise ErrorResponse(HTTP_NOT_FOUND)
try:
if cmd in perms:
try:
self.check_perm(req, perms[cmd])
except ErrorResponse, inst:
if cmd == 'unbundle':
req.drain()
raise
method = getattr(protocol, cmd)
return method(self.repo, req)
except ErrorResponse, inst:
req.respond(inst, protocol.HGTYPE)
if not inst.message:
return []
return '0\n%s\n' % inst.message,
# translate user-visible url structure to internal structure
args = query.split('/', 2)
if 'cmd' not in req.form and args and args[0]:
cmd = args.pop(0)
style = cmd.rfind('-')
if style != -1:
req.form['style'] = [cmd[:style]]
cmd = cmd[style + 1:]
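                # e.g. cmd 'raw-file' splits into style 'raw' and cmd 'file'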
# avoid accepting e.g. style parameter as command
if hasattr(webcommands, cmd):
req.form['cmd'] = [cmd]
else:
cmd = ''
if cmd == 'static':
req.form['file'] = ['/'.join(args)]
else:
if args and args[0]:
node = args.pop(0)
req.form['node'] = [node]
if args:
req.form['file'] = args
ua = req.env.get('HTTP_USER_AGENT', '')
if cmd == 'rev' and 'mercurial' in ua:
req.form['style'] = ['raw']
if cmd == 'archive':
fn = req.form['node'][0]
for type_, spec in self.archive_specs.iteritems():
ext = spec[2]
if fn.endswith(ext):
req.form['node'] = [fn[:-len(ext)]]
req.form['type'] = [type_]
# process the web interface request
try:
tmpl = self.templater(req)
ctype = tmpl('mimetype', encoding=encoding.encoding)
ctype = templater.stringify(ctype)
# check read permissions non-static content
if cmd != 'static':
self.check_perm(req, None)
if cmd == '':
req.form['cmd'] = [tmpl.cache['default']]
cmd = req.form['cmd'][0]
if cmd not in webcommands.__all__:
msg = 'no such method: %s' % cmd
raise ErrorResponse(HTTP_BAD_REQUEST, msg)
elif cmd == 'file' and 'raw' in req.form.get('style', []):
self.ctype = ctype
content = webcommands.rawfile(self, req, tmpl)
else:
content = getattr(webcommands, cmd)(self, req, tmpl)
req.respond(HTTP_OK, ctype)
return content
except error.LookupError, err:
req.respond(HTTP_NOT_FOUND, ctype)
msg = str(err)
if 'manifest' not in msg:
msg = 'revision not found: %s' % err.name
return tmpl('error', error=msg)
except (error.RepoError, error.RevlogError), inst:
req.respond(HTTP_SERVER_ERROR, ctype)
return tmpl('error', error=str(inst))
except ErrorResponse, inst:
req.respond(inst, ctype)
return tmpl('error', error=inst.message)
def templater(self, req):
# determine scheme, port and server name
# this is needed to create absolute urls
proto = req.env.get('wsgi.url_scheme')
if proto == 'https':
proto = 'https'
default_port = "443"
else:
proto = 'http'
default_port = "80"
port = req.env["SERVER_PORT"]
port = port != default_port and (":" + port) or ""
urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
staticurl = self.config("web", "staticurl") or req.url + 'static/'
if not staticurl.endswith('/'):
staticurl += '/'
# some functions for the templater
def header(**map):
yield tmpl('header', encoding=encoding.encoding, **map)
def footer(**map):
yield tmpl("footer", **map)
def motd(**map):
yield self.config("web", "motd", "")
# figure out which style to use
vars = {}
styles = (
req.form.get('style', [None])[0],
self.config('web', 'style'),
'paper',
)
style, mapfile = templater.stylemap(styles, self.templatepath)
if style == styles[0]:
vars['style'] = style
start = req.url[-1] == '?' and '&' or '?'
sessionvars = webutil.sessionvars(vars, start)
if not self.reponame:
self.reponame = (self.config("web", "name")
or req.env.get('REPO_NAME')
or req.url.strip('/') or self.repo.root)
# create the templater
tmpl = templater.templater(mapfile,
defaults={"url": req.url,
"staticurl": staticurl,
"urlbase": urlbase,
"repo": self.reponame,
"header": header,
"footer": footer,
"motd": motd,
"sessionvars": sessionvars
})
return tmpl
def archivelist(self, nodeid):
allowed = self.configlist("web", "allow_archive")
for i, spec in self.archive_specs.iteritems():
if i in allowed or self.configbool("web", "allow" + i):
yield {"type" : i, "extension" : spec[2], "node" : nodeid}
archive_specs = {
'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
'zip': ('application/zip', 'zip', '.zip', None),
}
def check_perm(self, req, op):
for hook in permhooks:
hook(self, req, op)
|
joewalnes/idea-community
|
plugins/hg4idea/testData/bin/mercurial/hgweb/hgweb_mod.py
|
Python
|
apache-2.0
| 10,731
|
# -*- coding: utf-8 -*-
import datetime
import json
import signal
import docker
import pytest
import six
from . import fake_api
from ..helpers import requires_api_version
from .api_test import (
DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
fake_inspect_container
)
try:
from unittest import mock
except ImportError:
import mock
def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
class StartContainerTest(DockerClientTest):
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_none(self):
with pytest.raises(ValueError) as excinfo:
self.client.start(container=None)
self.assertEqual(
str(excinfo.value),
'image or container param is undefined',
)
with pytest.raises(ValueError) as excinfo:
self.client.start(None)
self.assertEqual(
str(excinfo.value),
'image or container param is undefined',
)
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_start_container_with_lxc_conf(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
pytest.deprecated_call(call_start)
def test_start_container_with_lxc_conf_compat(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
pytest.deprecated_call(call_start)
def test_start_container_with_binds_ro(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
"bind": '/mnt',
"ro": True
}
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_binds_rw(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_port_binds(self):
self.maxDiff = None
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
pytest.deprecated_call(call_start)
def test_start_container_with_links(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
pytest.deprecated_call(call_start)
def test_start_container_with_multiple_links(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
'path1': 'alias1',
'path2': 'alias2'
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_links_as_list_of_tuples(self):
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
pytest.deprecated_call(call_start)
def test_start_container_privileged(self):
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
pytest.deprecated_call(call_start)
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
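# The tests below drive Client.create_container and compare the exact JSON
# payload, headers and timeout that end up in the mocked POST to
# containers/create.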
class CreateContainerTest(DockerClientTest):
def test_create_container(self):
self.client.create_container('busybox', 'true')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
self.client.create_container('busybox', 'hello',
entrypoint='cowsay entry')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": ["cowsay", "entry"]}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
with pytest.deprecated_call():
self.client.create_container('busybox', 'ls', cpu_shares=5)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.18')
def test_create_container_with_host_config_cpu_shares(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cpu_shares=512
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"CpuShares": 512,
"NetworkMode": "default"
}}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
with pytest.deprecated_call():
self.client.create_container('busybox', 'ls', cpuset='0,1')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"CpusetCpus": "0,1"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.18')
def test_create_container_with_host_config_cpuset(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cpuset_cpus='0,1'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"CpuSetCpus": "0,1",
"NetworkMode": "default"
}}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cgroup_parent='test'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
self.assertIn('HostConfig', data)
self.assertIn('CgroupParent', data['HostConfig'])
self.assertEqual(data['HostConfig']['CgroupParent'], 'test')
def test_create_container_with_working_dir(self):
self.client.create_container('busybox', 'ls',
working_dir='/root')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
self.client.create_container('busybox', 'true', stdin_open=True)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
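# Depending on the API version, the client may reject volumes_from as a
# create-time option; the next test accepts either outcome and only checks
# the payload when the call succeeds.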
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
self.client.create_container('busybox', 'true', volumes_from=[])
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertTrue('VolumesFrom' not in data)
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
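# mem_limit may be given as a number of bytes or as a string with a
# k/m/g suffix; the following tests check the value parsed into
# HostConfig.Memory for each form.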
def test_create_container_with_mem_limit_as_int(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit=128
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128k'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128m'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128g'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(
data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='128p'
)
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='1f28'
)
def test_create_container_with_lxc_conf(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode_and_ro_error(self):
with pytest.raises(ValueError):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
"ro": True,
}}
)
)
def test_create_container_with_binds_list(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds=[
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
],
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
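# port_bindings accepts several shorthand forms (bare container port, host
# port, (ip,) tuple, (ip, port) tuple, or a list of bindings); each form is
# normalised into HostConfig.PortBindings entries keyed by "port/protocol".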
def test_create_container_with_port_binds(self):
self.maxDiff = None
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_mac_address(self):
expected = "02:42:ac:11:00:0a"
self.client.create_container(
'busybox',
['sleep', '60'],
mac_address=expected
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
assert data['MacAddress'] == expected
def test_create_container_with_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={link_path: alias}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links=[(link_path, alias)]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(privileged=True)
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_add=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_drop=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_named_volume(self):
mount_dest = '/mnt'
volume_name = 'name'
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
volume_driver='foodriver',
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_stop_signal(self):
self.client.create_container('busybox', 'ls',
stop_signal='SIGINT')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"StopSignal": "SIGINT"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.22')
def test_create_container_with_aliases(self):
self.client.create_container(
'busybox', 'ls',
host_config=self.client.create_host_config(
network_mode='some-network',
),
networking_config=self.client.create_networking_config({
'some-network': self.client.create_endpoint_config(
aliases=['foo', 'bar'],
),
}),
)
args = fake_request.call_args
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"NetworkMode": "some-network"
},
"NetworkingConfig": {
"EndpointsConfig": {
"some-network": {"Aliases": ["foo", "bar"]}
}
}}'''))
@requires_api_version('1.22')
def test_create_container_with_tmpfs_list(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
tmpfs=[
"/tmp",
"/mnt:size=3G,uid=100"
]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
@requires_api_version('1.22')
def test_create_container_with_tmpfs_dict(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
tmpfs={
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
@requires_api_version('1.24')
def test_create_container_with_sysctl(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
sysctls={
'net.core.somaxconn': 1024,
'net.ipv4.tcp_syncookies': '0',
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Sysctls'] = {
'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_unicode_envvars(self):
envvars_dict = {
'foo': u'☃',
}
expected = [
u'foo=☃'
]
self.client.create_container(
'busybox', 'true',
environment=envvars_dict,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
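# The remaining container operations (list, resize, logs, stop, kill,
# remove, export, inspect, stats, top, update) are checked by asserting the
# HTTP method, URL, params and timeout of each mocked request.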
class ContainerTest(DockerClientTest):
def test_list_containers(self):
self.client.containers(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 0,
'before': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
self.client.wait(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_logs(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=False)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_following(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_following_backwards(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_streaming_and_following(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, tail=10)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_since(self):
ts = 809222400
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=ts)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all', 'since': ts},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_since_with_datetime(self):
ts = 809222400
time = datetime.datetime.utcfromtimestamp(ts)
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=time)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all', 'since': ts},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_tty(self):
m = mock.Mock()
with mock.patch('docker.Client.inspect_container',
fake_inspect_container_tty):
with mock.patch('docker.Client._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
follow=True, stream=True)
self.assertTrue(m.called)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_diff(self):
self.client.diff(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_pause_container(self):
self.client.pause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/pause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_unpause_container(self):
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/unpause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_kill_container(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
self.client.export(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_container(arg)
self.assertEqual(
excinfo.value.args[0], 'image or container param is undefined'
)
def test_container_stats(self):
self.client.stats(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
def test_container_top(self):
self.client.top(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_container_top_with_psargs(self):
self.client.top(fake_api.FAKE_CONTAINER_ID, 'waux')
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={'ps_args': 'waux'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@requires_api_version('1.22')
def test_container_update(self):
self.client.update_container(
fake_api.FAKE_CONTAINER_ID, mem_limit='2k', cpu_shares=124,
blkio_weight=345
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/3cc2351ab11b/update'
)
self.assertEqual(
json.loads(args[1]['data']),
{'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345}
)
self.assertEqual(
args[1]['headers']['Content-Type'], 'application/json'
)
| shakamunyi/docker-py | tests/unit/container_test.py | Python | apache-2.0 | 59,433 |
# -*- coding: utf-8 -*-
'''
Test database compression functions
'''
# Import sorbic libs
import sorbic.db
# Import python libs
import os
import random
import shutil
import unittest
import tempfile
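# Each test creates a throwaway database under a temporary directory,
# inserts entries, optionally removes some, runs compress(), and then
# verifies what get() returns before cleaning the directory up.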
class TestCompress(unittest.TestCase):
'''
Cover compression possibilities
'''
def test_compress_no_changes(self):
'''
Insert entries, run compress with nothing removed, and verify every key still returns its data
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
def test_compress_no_changes_depth(self):
'''
Insert repeatedly under a nested key, compress the 'foo' subdirectory, and verify reads still succeed
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
key = 'foo/bar'
for num in xrange(entries):
db.insert(key, data)
db.compress('foo', 0)
for num in xrange(entries):
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
def test_compress_changes(self):
'''
Compress a db with removed keys
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
rands = set()
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
for _ in xrange(entries):
rands.add(random.randint(0, entries - 1))
for key in rands:
db.rm(str(key))
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
if num in rands:
self.assertIsNone(pull_data)
else:
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
def test_compress_changes_depth(self):
'''
Remove a random subset of ids under a nested key, compress, and verify the key still reads back
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
key = 'foo/bar'
ids = []
rm_ids = set()
for num in xrange(entries):
ids.append(db.insert(key, data)['id'])
for _ in xrange(entries):
rm_ids.add(ids[random.randint(0, entries - 1)])
for rm_id in rm_ids:
db.rm(key, rm_id)
db.compress('foo', 0)
for num in xrange(entries):
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
| s0undt3ch/sorbic | tests/unit/test_compress.py | Python | apache-2.0 | 2,999 |
import json
import datetime
import sqlalchemy as sa
from neutron.db import models_v2
from neutron.db import model_base
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.mysql.base import VARCHAR
from sqlalchemy.orm import relationship, backref
from sqlalchemy.types import Enum, TIMESTAMP, TypeDecorator
from sqlalchemy import Table, Column, ForeignKey, func, Integer
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string.
Usage::
JSONEncodedDict(255)
"""
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
class ReachabilityTest(model_base.BASEV2):
'''
A table to store user configured reachability tests.
'''
__tablename__ = 'reachabilitytest'
id = Column(Integer, primary_key=True)
tenant_id = sa.Column(sa.String(64), nullable=False)
test_id = sa.Column(sa.String(64), nullable=False)
src_tenant_id = sa.Column(sa.String(64), nullable=False)
src_segment_id = sa.Column(sa.String(64), nullable=False)
src_ip = sa.Column(sa.String(16), nullable=False)
dst_ip = sa.Column(sa.String(16), nullable=False)
expected_result = sa.Column(Enum("dropped by route",
"dropped by policy",
"dropped by security group",
"dropped due to private segment",
"dropped due to loop",
"packet in", "forwarded", "dropped",
"unspecified sources",
"unsupported", "invalid input",
"inconsistent status",
"no traffic detected",
name="expected_result"), nullable=False)
def get_connection_source(self):
source = {}
source['tenant'] = self.src_tenant_id
source['segment'] = self.src_segment_id
source['ip'] = self.src_ip
return source
def get_connection_destination(self):
destination = {}
destination['ip'] = self.dst_ip
return destination
class ReachabilityTestResult(model_base.BASEV2):
'''
A table to store the results of user configured reachability tests.
'''
__tablename__ = 'reachabilitytestresult'
id = Column(Integer, primary_key=True)
test_primary_key = sa.Column(Integer, ForeignKey('reachabilitytest.id'),
nullable=False)
tenant_id = sa.Column(sa.String(64), nullable=False)
test_id = sa.Column(sa.String(64), nullable=False)
test_time = sa.Column(TIMESTAMP(timezone=True),
nullable=False, default=func.now())
test_result = sa.Column(Enum("pass", "fail", "pending"), nullable=False)
detail = sa.Column(JSONEncodedDict(8192), nullable=True)
reachabilitytest = relationship("ReachabilityTest",
backref=backref('reachabilitytestresult',
order_by=id, uselist=True,
cascade='delete,all'))
class ReachabilityQuickTest(model_base.BASEV2):
'''
A table to store user configured quick tests.
'''
__tablename__ = 'reachabilityquicktest'
id = Column(Integer, primary_key=True)
tenant_id = sa.Column(sa.String(64), nullable=False)
src_tenant_id = sa.Column(sa.String(64), nullable=False)
src_segment_id = sa.Column(sa.String(64), nullable=False)
src_ip = sa.Column(sa.String(16), nullable=False)
dst_ip = sa.Column(sa.String(16), nullable=False)
expected_result = sa.Column(Enum("dropped by route",
"dropped by policy",
"dropped by security group",
"dropped due to private segment",
"dropped due to loop",
"packet in", "forwarded", "dropped",
"unspecified sources",
"unsupported", "invalid input",
"inconsistent status",
"no traffic detected",
name="expected_result"),
nullable=False)
def get_connection_source(self):
source = {}
source['tenant'] = self.src_tenant_id
source['segment'] = self.src_segment_id
source['ip'] = self.src_ip
return source
def get_connection_destination(self):
destination = {}
destination['ip'] = self.dst_ip
return destination
class ReachabilityQuickTestResult(model_base.BASEV2):
'''
A table to store the results of user configured quick tests.
'''
__tablename__ = 'reachabilityquicktestresult'
id = Column(Integer, primary_key=True)
test_primary_key = sa.Column(Integer,
ForeignKey('reachabilityquicktest.id'), nullable=False)
tenant_id = sa.Column(sa.String(64), nullable=False)
test_time = sa.Column(TIMESTAMP(timezone=True),
nullable=False, default=func.now())
test_result = sa.Column(Enum("pass", "fail", "pending"), nullable=False)
detail = sa.Column(JSONEncodedDict(8192), nullable=True)
reachabilitytest = relationship("ReachabilityQuickTest",
backref=backref('reachabilityquicktestresult',
order_by=id, uselist=True,
cascade='delete,all'))
| bigswitch/horizon | openstack_dashboard/dashboards/project/connections/reachability_tests/reachability_test_db.py | Python | apache-2.0 | 6,018 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Funnel checkpoint."""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
# Initialise PyTorch model
config = FunnelConfig.from_json_file(config_file)
print(f"Building PyTorch model from configuration: {config}")
model = FunnelBaseModel(config) if base_model else FunnelModel(config)
# Load weights from tf checkpoint
load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}")
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
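# Example invocation (paths below are illustrative, not part of the script):
# python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin
# Pass --base_model to build FunnelBaseModel (no decoder) instead of FunnelModel.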
| huggingface/transformers | src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py | Python | apache-2.0 | 2,346 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show building --all`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_location_type import CommandShowLocationType
class CommandShowBuildingAll(CommandShowLocationType):
required_parameters = []
def render(self, session, **arguments):
return CommandShowLocationType.render(self, session=session,
type='building', name=None,
**arguments)
| quattor/aquilon | lib/aquilon/worker/commands/show_building_all.py | Python | apache-2.0 | 1,251 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
""" This provides a more convenient harness for running this
benchmark and collecting separate timings for each component.
"""
import sys, os
sys.path.append(os.path.join([os.environ[x] for x in os.environ.keys() if x.lower() == "dlr_root"][0], "External.LCA_RESTRICTED", "Languages", "IronPython", "27", "Lib", "test"))
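# The line above locates the DLR_ROOT environment variable (matched
# case-insensitively) and adds the bundled IronPython 2.7 test directory to
# sys.path so the stock pystone module can be imported below.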
def test_main(type="short"):
import pystone
loops = { "full": 50000, "short" : 50000, "medium" : 250000, "long" : 1000000 }[type]
pystone.main(loops)
if __name__=="__main__":
kind = "short"
if len(sys.argv) > 1: kind = sys.argv[1]
test_main(kind)
| slozier/ironpython2 | Src/Scripts/test_pystone.py | Python | apache-2.0 | 809 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
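# Every test loads javascriptPage.html, interacts with an element on the
# page, and then inspects the page's "result" element to confirm the
# expected DOM event fired.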
class TestEventFiring(object):
def testShouldFireClickEventWhenClicking(self, driver, pages):
pages.load("javascriptPage.html")
self._clickOnElementWhichRecordsEvents(driver)
self._assertEventFired(driver, "click")
def testShouldFireMouseDownEventWhenClicking(self, driver, pages):
pages.load("javascriptPage.html")
self._clickOnElementWhichRecordsEvents(driver)
self._assertEventFired(driver, "mousedown")
def testShouldFireMouseUpEventWhenClicking(self, driver, pages):
pages.load("javascriptPage.html")
self._clickOnElementWhichRecordsEvents(driver)
self._assertEventFired(driver, "mouseup")
def testShouldIssueMouseDownEvents(self, driver, pages):
pages.load("javascriptPage.html")
driver.find_element_by_id("mousedown").click()
result = driver.find_element_by_id("result").text
assert result == "mouse down"
def testShouldIssueClickEvents(self, driver, pages):
pages.load("javascriptPage.html")
driver.find_element_by_id("mouseclick").click()
result = driver.find_element_by_id("result").text
assert result == "mouse click"
def testShouldIssueMouseUpEvents(self, driver, pages):
pages.load("javascriptPage.html")
driver.find_element_by_id("mouseup").click()
result = driver.find_element_by_id("result").text
assert result == "mouse up"
def testMouseEventsShouldBubbleUpToContainingElements(self, driver, pages):
pages.load("javascriptPage.html")
driver.find_element_by_id("child").click()
result = driver.find_element_by_id("result").text
assert result == "mouse down"
def testShouldEmitOnChangeEventsWhenSelectingElements(self, driver, pages):
pages.load("javascriptPage.html")
select = driver.find_element_by_id('selector')
options = select.find_elements_by_tag_name('option')
initialTextValue = driver.find_element_by_id("result").text
select.click()
assert driver.find_element_by_id("result").text == initialTextValue
options[1].click()
assert driver.find_element_by_id("result").text == "bar"
def testShouldEmitOnChangeEventsWhenChangingTheStateOfACheckbox(self, driver, pages):
pages.load("javascriptPage.html")
checkbox = driver.find_element_by_id("checkbox")
checkbox.click()
assert driver.find_element_by_id("result").text == "checkbox thing"
def testShouldEmitClickEventWhenClickingOnATextInputElement(self, driver, pages):
pages.load("javascriptPage.html")
clicker = driver.find_element_by_id("clickField")
clicker.click()
assert clicker.get_attribute("value") == "Clicked"
def testClearingAnElementShouldCauseTheOnChangeHandlerToFire(self, driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element_by_id("clearMe")
element.clear()
result = driver.find_element_by_id("result")
assert result.text == "Cleared"
# TODO Currently Failing and needs fixing
# def testSendingKeysToAnotherElementShouldCauseTheBlurEventToFire(self, driver, pages):
# pages.load("javascriptPage.html")
# element = driver.find_element_by_id("theworks")
# element.send_keys("foo")
# element2 = driver.find_element_by_id("changeable")
# element2.send_keys("bar")
# self._assertEventFired(driver, "blur")
# TODO Currently Failing and needs fixing
# def testSendingKeysToAnElementShouldCauseTheFocusEventToFire(self, driver, pages):
# pages.load("javascriptPage.html")
# element = driver.find_element_by_id("theworks")
# element.send_keys("foo")
# self._assertEventFired(driver, "focus")
def _clickOnElementWhichRecordsEvents(self, driver):
driver.find_element_by_id("plainButton").click()
def _assertEventFired(self, driver, eventName):
result = driver.find_element_by_id("result")
text = result.text
assert eventName in text, "No " + eventName + " fired: " + text
| TikhomirovSergey/selenium | py/test/selenium/webdriver/common/correct_event_firing_tests.py | Python | apache-2.0 | 4,932 |
"""The test for sensor device automation."""
import pytest
from homeassistant.components.sensor import DOMAIN
from homeassistant.components.sensor.device_condition import ENTITY_CONDITIONS
from homeassistant.const import STATE_UNKNOWN, CONF_PLATFORM
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.helpers import device_registry
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
async_get_device_automations,
async_get_device_automation_capabilities,
)
from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for condition in ENTITY_CONDITIONS[device_class]
if device_class != "none"
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a sensor condition."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["battery"].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_capabilities = {
"extra_fields": [
{
"description": {"suffix": "%"},
"name": "above",
"optional": True,
"type": "float",
},
{
"description": {"suffix": "%"},
"name": "below",
"optional": True,
"type": "float",
},
]
}
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert len(conditions) == 1
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_get_condition_capabilities_none(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a sensor condition."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
conditions = [
{
"condition": "device",
"device_id": "8770c43885354d5fa27604db6817f63f",
"domain": "sensor",
"entity_id": "sensor.beer",
"type": "is_battery_level",
},
{
"condition": "device",
"device_id": "8770c43885354d5fa27604db6817f63f",
"domain": "sensor",
"entity_id": platform.ENTITIES["none"].entity_id,
"type": "is_battery_level",
},
]
expected_capabilities = {}
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_if_state_not_above_below(hass, calls, caplog):
"""Test for bad value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
}
],
"action": {"service": "test.automation"},
}
]
},
)
assert "must contain at least one of below, above" in caplog.text
async def test_if_state_above(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"above": 10,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
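# The data_template above expands to "{{ trigger.platform}} - {{ trigger.event.event_type }}",
# which is why the captured call's "some" field is asserted as "event - test_event1" below.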
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
async def test_if_state_below(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"below": 10,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
async def test_if_state_between(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"above": 10,
"below": 20,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
hass.states.async_set(sensor1.entity_id, 21)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(sensor1.entity_id, 19)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "event - test_event1"
|
joopert/home-assistant
|
tests/components/sensor/test_device_condition.py
|
Python
|
apache-2.0
| 12,296
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
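# These FUNCTION_* values are bit flags; the parser below combines them with
# bitwise OR (e.g. modifiers |= FUNCTION_CONST) to describe a single function.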
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords), e.g. const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter():
if default:
del default[0] # Remove flag.
end = type_modifiers[-1].end
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter()
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter()
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo(); it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
# TODO(nnorwitz): if there is only one name like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if exists so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
# value. This is required because GetName has to have some
# terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special-case the handling of typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
self._AddBackToken(class_token)
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, None, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
else:
assert token.name == '{', token
tokens = list(self.GetScope())
del tokens[-1] # Remove trailing '}'.
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP, None, None)
self._AddBackToken(token)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
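# A minimal usage sketch (the snippet and filename below are illustrative only):
#   builder = BuilderFromSource('class Foo { int Bar(); };', '<string>')
#   for node in builder.Generate():
#       print(node.name)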
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
|
spthaolt/breakpad-qt
|
third-party/google-breakpad/src/testing/scripts/generator/cpp/ast.py
|
Python
|
bsd-2-clause
| 61,778
|
import matplotlib.pyplot
import scipy
import colorsys
import json
import sys
import os
import copy
matplotlib.pyplot.figure(1, figsize=(14,14))
fname = os.path.splitext(os.path.basename(sys.argv[1]))[0]
fin = open(sys.argv[1], "r")
jin = json.load(fin)
fin.close()
# 4 timer values are: total, min, max, average
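# Assumed input shape (a sketch; the timer names below are just examples):
# {"timers": {"exciting::iteration": [total, min, max, average],
#             "sirius::Potential::xc": [total, min, max, average], ...}}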
timer_groups = [
[
"sirius::Global::generate_radial_functions",
"sirius::Global::generate_radial_integrals",
"sirius::K_set::find_eigen_states",
"sirius::Density::generate",
"sirius::Potential::generate_effective_potential",
"exciting::sym_rho_mag",
"exciting::mixer"
],
[
"sirius::Band::set_fv_h_o",
"sirius::Band::solve_fv_evp",
"sirius::K_point::generate_fv_states",
"sirius::Band::solve_sv",
"sirius::K_point::generate_spinor_wave_functions"
],
[
"sirius::Potential::poisson",
"sirius::Potential::xc"
],
[
"sirius::Reciprocal_lattice::init",
"sirius::Step_function::init",
"sirius::Unit_cell::get_symmetry",
"sirius::Unit_cell::find_nearest_neighbours",
"sirius::K_point::initialize",
"sirius::Potential::Potential",
"sirius::Atom_type::solve_free_atom"
]
]
for itg in range(len(timer_groups)):
timer_names = []
timer_values = []
total_time = 0.0
for timer_name in timer_groups[itg]:
if timer_name in jin["timers"]:
tname = timer_name
# effective potential is generated once before the scf loop
# the first timer group is reported as a percentage of the iteration time
if itg == 0:
if timer_name == "sirius::Potential::generate_effective_potential":
# (total - average) of effective potential / total of iterations
t = (jin["timers"][timer_name][0] - jin["timers"][timer_name][3]) / jin["timers"]["exciting::iteration"][0]
else:
t = jin["timers"][timer_name][0] / jin["timers"]["exciting::iteration"][0]
t = t * 100
# show average time in legend
timer_names.append(tname + " (%6.2f%%, %6.2f sec./call)"%(t, jin["timers"][timer_name][3]))
# show total time for initialization routines
elif itg == 3:
t = jin["timers"][timer_name][0]
timer_names.append(tname + " (%6.2f sec.)"%t)
# show average time
else:
t = jin["timers"][timer_name][3]
timer_names.append(tname + " (%6.2f sec./call)"%t)
timer_values.append(t)
total_time += t
print "total time for timer group ", itg, " ", total_time
plot = matplotlib.pyplot.subplot("41%i"%(itg+1))
box = plot.get_position()
plot.set_position([box.x0, box.y0, box.width * 0.1, box.height])
box = plot.get_position()
ytics = [0]
for i in range(len(timer_values)):
ytics.append(ytics[i] + timer_values[i])
plots = []
for i in range(len(timer_values)):
rgb = colorsys.hsv_to_rgb(i / float(len(timer_values)), 0.75, 0.95)
c = "#%X%X%X"%(rgb[0]*255, rgb[1]*255, rgb[2]*255)
plots.append(matplotlib.pyplot.bar(0, timer_values[i], 2, bottom=ytics[i], color=c))
matplotlib.pyplot.xticks([], ())
matplotlib.pyplot.yticks(ytics)
matplotlib.pyplot.ylim([0, ytics[len(ytics)-1]])
matplotlib.pyplot.legend(plots[::-1], timer_names[::-1], bbox_to_anchor=(1.2, 1), loc=2)
matplotlib.pyplot.savefig(fname+".pdf", format="pdf")
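# Usage sketch (assuming a timer JSON produced by a SIRIUS/exciting run):
#
#   python timers2.py run.json    # writes run.pdf into the current directory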
|
toxa81/sirius
|
apps/timers/timers2.py
|
Python
|
bsd-2-clause
| 3,539
|
"""
Test handling of the private typecodes: _C_UNICHAR
This typecode doesn't actually exist in the ObjC runtime but is
private to PyObjC. We use it to simplify the bridge code while at
the same time getting a higher-fidelity bridge.
TODO:
- Add tests for calling methods from ObjC
"""
from __future__ import unicode_literals
import weakref
from PyObjCTools.TestSupport import *
from PyObjCTest.fnd import NSObject
from PyObjCTest.specialtypecodes import *
import array
import sys
if sys.version_info[0] == 3:
unichr = chr
unicode = str
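# setupMetaData() below tells the bridge, per selector, how UniChar data is
# passed: plain _C_UNICHAR scalars, fixed-length arrays, NUL-delimited
# strings, and arrays whose length travels in another argument, each marked
# as in, out or inout where applicable.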
def setupMetaData():
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharValue",
dict(
retval=dict(type=objc._C_UNICHAR),
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArray",
dict(
retval=dict(type=objc._C_PTR+objc._C_UNICHAR, c_array_of_fixed_length=4),
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharString",
dict(
retval=dict(type=objc._C_PTR + objc._C_UNICHAR, c_array_delimited_by_null=True),
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharStringArg:",
dict(
arguments={
2: dict(type=objc._C_PTR + objc._C_UNICHAR, c_array_delimited_by_null=True, type_modifier=objc._C_IN),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArg:andUniCharArg:",
dict(
arguments={
2: dict(type=objc._C_UNICHAR),
3: dict(type=objc._C_UNICHAR),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArrayOf4In:",
dict(
arguments={
2: dict(type=objc._C_PTR+objc._C_UNICHAR, type_modifier=objc._C_IN, c_array_of_fixed_length=4),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArrayOf4Out:",
dict(
arguments={
2: dict(type=objc._C_PTR+objc._C_UNICHAR, type_modifier=objc._C_OUT, c_array_of_fixed_length=4),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArrayOf4InOut:",
dict(
arguments={
2: dict(type=objc._C_PTR+objc._C_UNICHAR, type_modifier=objc._C_INOUT, c_array_of_fixed_length=4),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArrayOfCount:In:",
dict(
arguments={
3: dict(type=objc._C_PTR+objc._C_UNICHAR, type_modifier=objc._C_IN, c_array_of_lenght_in_arg=2),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArrayOfCount:Out:",
dict(
arguments={
3: dict(type=objc._C_PTR+objc._C_UNICHAR, type_modifier=objc._C_OUT, c_array_of_lenght_in_arg=2),
}
))
objc.registerMetaDataForSelector(b"OC_TestSpecialTypeCode", b"UniCharArrayOfCount:InOut:",
dict(
arguments={
3: dict(type=objc._C_PTR+objc._C_UNICHAR, type_modifier=objc._C_INOUT, c_array_of_lenght_in_arg=2),
}
))
setupMetaData()
class TestTypeCode_UniChar (TestCase):
def testReturnValue(self):
o = OC_TestSpecialTypeCode.alloc().init()
self.assertEqual(o.UniCharValue(), 'a')
self.assertEqual(o.UniCharValue(), unichr(55))
self.assertEqual(o.UniCharValue(), unichr(9243))
def testReturnValueArray(self):
o = OC_TestSpecialTypeCode.alloc().init()
v = o.UniCharArray()
self.assertEqual(len(v), 4)
self.assertEqual(v[0], unichr(100))
self.assertEqual(v[1], unichr(400))
self.assertEqual(v[2], unichr(955))
self.assertEqual(v[3], unichr(40000))
def testReturnValueString(self):
o = OC_TestSpecialTypeCode.alloc().init()
v = o.UniCharString()
self.assertIsInstance(v, unicode)
self.assertEqual(v, "help");
def testSimpleArg(self):
o = OC_TestSpecialTypeCode.alloc().init()
v = o.UniCharArg_andUniCharArg_(unichr(44), unichr(450))
self.assertEqual(v, (unichr(44), unichr(450)))
v = o.UniCharArg_andUniCharArg_('a', 'b')
self.assertEqual(v, ('a', 'b'))
v = o.UniCharArg_andUniCharArg_('a', 'b')
self.assertEqual(v, ('a', 'b'))
self.assertRaises(ValueError, o.UniCharArg_andUniCharArg_, 400, 401)
def testStringArgument(self):
o = OC_TestSpecialTypeCode.alloc().init()
v = o.UniCharStringArg_("hello world")
self.assertEqual(v, "hello world")
self.assertIsInstance(v, unicode)
v = o.UniCharStringArg_("hello world")
self.assertEqual(v, "hello world")
self.assertIsInstance(v, unicode)
v = o.UniCharStringArg_(['a', 'b'])
self.assertEqual(v, "ab")
self.assertIsInstance(v, unicode)
self.assertRaises(ValueError, o.UniCharStringArg_, [99, 100, 100, 0])
def testFixedArrayIn(self):
o = OC_TestSpecialTypeCode.alloc().init()
v = o.UniCharArrayOf4In_("work")
self.assertEqual(v, "work")
v = o.UniCharArrayOf4In_(['a', 'b', 'c', 'd'])
self.assertEqual(v, 'abcd')
if sys.version_info[0] == 2:
a = array.array(b'h', [200, 300, 400, 500])
else:
a = array.array('h', [200, 300, 400, 500])
v = o.UniCharArrayOf4In_(a)
self.assertEqual(v, ''.join([
unichr(200), unichr(300), unichr(400), unichr(500)]))
def testFixedArrayOut(self):
o = OC_TestSpecialTypeCode.alloc().init()
v = o.UniCharArrayOf4Out_(None)
self.assertEqual(v, "boat")
o = OC_TestSpecialTypeCode.alloc().init()
if sys.version_info[0] == 2:
a = array.array(b'h', [0] * 4)
else:
a = array.array('h', [0] * 4)
v = o.UniCharArrayOf4Out_(a)
self.assertIs(v, a)
self.assertEqual(v[0], ord('b'))
self.assertEqual(v[1], ord('o'))
self.assertEqual(v[2], ord('a'))
self.assertEqual(v[3], ord('t'))
def testFixedArrayInOut_(self):
o = OC_TestSpecialTypeCode.alloc().init()
v, w = o.UniCharArrayOf4InOut_("foot")
self.assertEqual(v, "foot")
self.assertEqual(w, "hand")
if __name__ == "__main__":
main()
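# Running this file directly drives TestTypeCode_UniChar through the test
# runner main() pulled in by the TestSupport star import above; it assumes the
# OC_TestSpecialTypeCode Objective-C helper from PyObjCTest.specialtypecodes
# has been built and is importable.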
|
albertz/music-player
|
mac/pyobjc-core/PyObjCTest/test_specialtypecodes_unichar.py
|
Python
|
bsd-2-clause
| 6,465
|
# -*- coding: UTF-8 -*-
logger.info("Loading 353 objects to table cal_event...")
# fields: id, modified, created, project, start_date, start_time, end_date, end_time, build_time, build_method, owner_type, owner_id, user, assigned_to, summary, description, access_class, sequence, auto_type, priority, event_type, transparent, room, state, amount
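# Each create_cal_event(...) call below passes its values positionally in the
# exact field order listed above; dt(), date(), loader, create_cal_event and
# the owner types (cal_RecurrentEvent, courses_Course) are assumed to be
# provided by the surrounding fixture loader before this dump is executed.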
loader.save(create_cal_event(1,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,1,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(2,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,1,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(3,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2015,1,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,3,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(4,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,1,1),None,None,None,None,None,cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(5,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,1,1),None,None,None,None,None,cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(6,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,1,1),None,None,None,None,None,cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(7,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,1,1),None,None,None,None,None,cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(8,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,1,1),None,None,None,None,None,cal_RecurrentEvent,1,None,None,u"New Year's Day",u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(9,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,5,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(10,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,5,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,2,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(11,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2015,5,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,3,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(12,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,5,1),None,None,None,None,None,cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(13,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,5,1),None,None,None,None,None,cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(14,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,5,1),None,None,None,None,None,cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(15,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,5,1),None,None,None,None,None,cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(16,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,5,1),None,None,None,None,None,cal_RecurrentEvent,2,None,None,u"International Workers' Day",u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(17,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,7,21),None,None,None,None,u'appypdf',cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(18,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,7,21),None,None,None,None,u'appypdf',cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(19,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,7,21),None,None,None,None,None,cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(20,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,7,21),None,None,None,None,None,cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(21,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,7,21),None,None,None,None,None,cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(22,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,7,21),None,None,None,None,None,cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(23,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,7,21),None,None,None,None,None,cal_RecurrentEvent,3,None,None,u'National Day',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(24,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,8,15),None,None,None,None,u'appypdf',cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(25,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,8,15),None,None,None,None,u'appypdf',cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,2,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(26,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,8,15),None,None,None,None,None,cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(27,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,8,15),None,None,None,None,None,cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(28,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,8,15),None,None,None,None,None,cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(29,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,8,15),None,None,None,None,None,cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(30,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,8,15),None,None,None,None,None,cal_RecurrentEvent,4,None,None,u'Assumption of Mary',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(31,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,10,31),None,None,None,None,u'appypdf',cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(32,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,10,31),None,None,None,None,u'appypdf',cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(33,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,10,31),None,None,None,None,None,cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(34,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,10,31),None,None,None,None,None,cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(35,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,10,31),None,None,None,None,None,cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(36,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,10,31),None,None,None,None,None,cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(37,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,10,31),None,None,None,None,None,cal_RecurrentEvent,5,None,None,u"All Souls' Day",u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(38,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,11,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(39,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,11,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(40,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,11,1),None,None,None,None,None,cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(41,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,11,1),None,None,None,None,None,cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(42,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,11,1),None,None,None,None,None,cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(43,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,11,1),None,None,None,None,None,cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(44,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,11,1),None,None,None,None,None,cal_RecurrentEvent,6,None,None,u"All Saints' Day",u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(45,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,11,11),None,None,None,None,u'appypdf',cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,1,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(46,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,11,11),None,None,None,None,u'appypdf',cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(47,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,11,11),None,None,None,None,None,cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(48,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,11,11),None,None,None,None,None,cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(49,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,11,11),None,None,None,None,None,cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(50,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,11,11),None,None,None,None,None,cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(51,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,11,11),None,None,None,None,None,cal_RecurrentEvent,7,None,None,u'Armistice with Germany',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(52,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,12,25),None,None,None,None,u'appypdf',cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(53,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,12,25),None,None,None,None,u'appypdf',cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(54,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,12,25),None,None,None,None,None,cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(55,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,12,25),None,None,None,None,None,cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(56,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,12,25),None,None,None,None,None,cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(57,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,12,25),None,None,None,None,None,cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(58,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,12,25),None,None,None,None,None,cal_RecurrentEvent,8,None,None,u'Christmas',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(59,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,3,31),None,None,None,None,u'appypdf',cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(60,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,4,20),None,None,None,None,u'appypdf',cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,2,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(61,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2015,4,5),None,None,None,None,u'appypdf',cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,3,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(62,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,3,27),None,None,None,None,None,cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(63,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,4,16),None,None,None,None,None,cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(64,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,4,1),None,None,None,None,None,cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(65,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,4,21),None,None,None,None,None,cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(66,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,4,12),None,None,None,None,None,cal_RecurrentEvent,9,None,None,u'Easter sunday',u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(67,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,4,1),None,None,None,None,u'appypdf',cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(68,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,4,21),None,None,None,None,u'appypdf',cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(69,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2015,4,6),None,None,None,None,u'appypdf',cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,3,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(70,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,3,28),None,None,None,None,None,cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(71,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,4,17),None,None,None,None,None,cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(72,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,4,2),None,None,None,None,None,cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(73,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,4,22),None,None,None,None,None,cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(74,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,4,13),None,None,None,None,None,cal_RecurrentEvent,10,None,None,u'Easter monday',u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(75,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,5,9),None,None,None,None,u'appypdf',cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,1,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(76,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,5,29),None,None,None,None,u'appypdf',cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(77,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,5,14),None,None,None,None,None,cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(78,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,5,5),None,None,None,None,None,cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(79,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,5,25),None,None,None,None,None,cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(80,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,5,10),None,None,None,None,None,cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(81,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,5,30),None,None,None,None,None,cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(82,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,5,21),None,None,None,None,None,cal_RecurrentEvent,11,None,None,u'Ascension of Jesus',u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(83,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,5,20),None,None,None,None,u'appypdf',cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(84,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,6,9),None,None,None,None,u'appypdf',cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(85,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2015,5,25),None,None,None,None,None,cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,3,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(86,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,5,16),None,None,None,None,None,cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(87,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,6,5),None,None,None,None,None,cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(88,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,5,21),None,None,None,None,None,cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(89,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,6,10),None,None,None,None,None,cal_RecurrentEvent,12,None,None,u'Pentecost',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(90,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2013,3,29),None,None,None,None,u'appypdf',cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,1,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(91,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2014,4,18),None,None,None,None,u'appypdf',cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(92,dt(2018,12,22,12,25,8),dt(2018,12,22,12,24,52),None,date(2015,4,3),None,None,None,None,u'appypdf',cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,3,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(93,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,3,25),None,None,None,None,None,cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(94,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,4,14),None,None,None,None,None,cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(95,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,3,30),None,None,None,None,None,cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(96,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,4,19),None,None,None,None,None,cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(97,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,4,10),None,None,None,None,None,cal_RecurrentEvent,13,None,None,u'Good Friday',u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(98,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,52),None,date(2013,2,13),None,None,None,None,u'appypdf',cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(99,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,52),None,date(2014,3,5),None,None,None,None,u'appypdf',cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(100,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,52),None,date(2015,2,18),None,None,None,None,u'appypdf',cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,3,u'30',1,False,None,u'60',None))
loader.save(create_cal_event(101,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,2,10),None,None,None,None,None,cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(102,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,3,1),None,None,None,None,None,cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(103,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,2,14),None,None,None,None,None,cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(104,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,3,6),None,None,None,None,None,cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(105,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,2,26),None,None,None,None,None,cal_RecurrentEvent,14,None,None,u'Ash Wednesday',u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(106,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,52),None,date(2013,2,11),None,None,None,None,u'appypdf',cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,1,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(107,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,52),None,date(2014,3,3),None,None,None,None,u'appypdf',cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,2,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(108,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,52),None,date(2015,2,16),None,None,None,None,u'appypdf',cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,3,u'30',1,False,None,u'50',None))
loader.save(create_cal_event(109,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2016,2,8),None,None,None,None,None,cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,4,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(110,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2017,2,27),None,None,None,None,None,cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,5,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(111,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2018,2,12),None,None,None,None,None,cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,6,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(112,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2019,3,4),None,None,None,None,None,cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,7,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(113,dt(2018,12,22,12,24,52),dt(2018,12,22,12,24,52),None,date(2020,2,24),None,None,None,None,None,cal_RecurrentEvent,15,None,None,u'Rosenmontag',u'',None,0,8,u'30',1,False,None,u'10',None))
loader.save(create_cal_event(114,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2014,11,4),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(115,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2014,11,18),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(116,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2014,12,2),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(117,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2014,12,16),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(118,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2014,12,30),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(119,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2015,1,13),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(120,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2015,1,27),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(121,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2015,2,10),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(122,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2015,2,24),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(123,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),1,date(2015,3,10),None,None,None,None,u'appypdf',courses_Course,1,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(124,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2014,11,5),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(125,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2014,11,19),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(126,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2014,12,3),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(127,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2014,12,17),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(128,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2014,12,31),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(129,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2015,1,14),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(130,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2015,1,28),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(131,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2015,2,11),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(132,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2015,2,25),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(133,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,58),2,date(2015,3,11),None,None,None,None,u'appypdf',courses_Course,2,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(134,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2014,11,10),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(135,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2014,11,24),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60','20.00'))
loader.save(create_cal_event(136,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2014,12,8),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(137,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2014,12,22),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(138,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2015,1,5),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(139,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2015,1,19),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(140,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2015,2,2),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(141,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2015,2,17),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(142,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2015,3,3),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(143,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),7,date(2015,3,17),None,None,None,None,u'appypdf',courses_Course,7,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(144,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),8,date(2014,11,12),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(145,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),8,date(2014,11,26),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(146,dt(2018,12,22,12,25,9),dt(2018,12,22,12,24,59),8,date(2014,12,10),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(147,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2014,12,24),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(148,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2015,1,7),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(149,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2015,1,21),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(150,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2015,2,4),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(151,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2015,2,19),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(152,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2015,3,5),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(153,dt(2018,12,22,12,25,10),dt(2018,12,22,12,24,59),8,date(2015,3,19),None,None,None,None,u'appypdf',courses_Course,8,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(154,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2014,11,16),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(155,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2014,11,30),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60','5.00'))
loader.save(create_cal_event(156,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2014,12,14),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(157,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2014,12,28),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(158,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2015,1,11),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(159,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2015,1,25),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(160,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2015,2,8),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(161,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2015,2,22),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(162,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2015,3,8),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(163,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),13,date(2015,3,22),None,None,None,None,u'appypdf',courses_Course,13,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(164,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2014,11,17),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(165,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2014,12,1),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(166,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2014,12,15),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(167,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2014,12,29),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(168,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2015,1,12),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(169,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2015,1,26),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(170,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2015,2,9),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(171,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2015,2,23),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(172,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2015,3,9),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(173,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),14,date(2015,3,23),None,None,None,None,u'appypdf',courses_Course,14,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(174,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2014,11,22),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(175,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2014,12,6),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60','15.00'))
loader.save(create_cal_event(176,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2014,12,20),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(177,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,1,3),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(178,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,1,17),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(179,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,1,31),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(180,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,2,14),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(181,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,2,28),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(182,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,3,14),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(183,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,0),19,date(2015,3,28),None,None,None,None,u'appypdf',courses_Course,19,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(184,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2014,11,23),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(185,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2014,12,7),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(186,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2014,12,21),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(187,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2015,1,4),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(188,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2015,1,18),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(189,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2015,2,1),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(190,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2015,2,15),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(191,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2015,3,1),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(192,dt(2018,12,22,12,25,10),dt(2018,12,22,12,25,1),20,date(2015,3,15),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(193,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),20,date(2015,3,29),None,None,None,None,u'appypdf',courses_Course,20,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(194,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2014,11,28),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(195,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2014,12,12),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(196,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2014,12,26),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(197,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,1,9),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(198,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,1,23),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(199,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,2,6),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(200,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,2,20),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60','20.00'))
loader.save(create_cal_event(201,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,3,6),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(202,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,3,20),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(203,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),25,date(2015,4,4),None,None,None,None,u'appypdf',courses_Course,25,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(204,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),26,date(2014,11,29),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(205,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),26,date(2014,12,13),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(206,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),26,date(2014,12,27),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(207,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),26,date(2015,1,10),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(208,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),26,date(2015,1,24),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(209,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,1),26,date(2015,2,7),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(210,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),26,date(2015,2,21),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(211,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),26,date(2015,3,7),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(212,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),26,date(2015,3,21),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(213,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),26,date(2015,4,4),None,None,None,None,u'appypdf',courses_Course,26,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(214,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2014,12,4),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(215,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2014,12,18),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(216,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,1,2),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(217,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,1,16),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(218,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,1,30),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(219,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,2,13),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(220,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,2,27),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60','5.00'))
loader.save(create_cal_event(221,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,3,13),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(222,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,3,27),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(223,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,2),31,date(2015,4,10),None,None,None,None,u'appypdf',courses_Course,31,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(224,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2014,12,5),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(225,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2014,12,19),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(226,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,1,2),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(227,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,1,16),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(228,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,1,30),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(229,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,2,13),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(230,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,2,27),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(231,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,3,13),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(232,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,3,27),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(233,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),32,date(2015,4,10),None,None,None,None,u'appypdf',courses_Course,32,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(234,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),37,date(2014,12,10),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(235,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),37,date(2014,12,24),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(236,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),37,date(2015,1,7),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(237,dt(2018,12,22,12,25,11),dt(2018,12,22,12,25,3),37,date(2015,1,21),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(238,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,3),37,date(2015,2,4),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(239,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,3),37,date(2015,2,19),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(240,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),37,date(2015,3,5),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60','15.00'))
loader.save(create_cal_event(241,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),37,date(2015,3,19),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(242,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),37,date(2015,4,2),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(243,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),37,date(2015,4,16),None,None,None,None,u'appypdf',courses_Course,37,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(244,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2014,12,11),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(245,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2014,12,26),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(246,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,1,9),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(247,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,1,23),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(248,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,2,6),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(249,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,2,20),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(250,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,3,6),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(251,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,3,20),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(252,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,4,4),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(253,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,4),38,date(2015,4,18),None,None,None,None,u'appypdf',courses_Course,38,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(254,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2014,12,16),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(255,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2014,12,30),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(256,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,1,13),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(257,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,1,27),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(258,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,2,10),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(259,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,2,24),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(260,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,3,10),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(261,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,3,24),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(262,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,4,7),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(263,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),43,date(2015,4,21),None,None,None,None,u'appypdf',courses_Course,43,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(264,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2014,12,17),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(265,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2014,12,31),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(266,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,1,14),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(267,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,1,28),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(268,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,2,11),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(269,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,2,25),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(270,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,3,11),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(271,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,3,25),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(272,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,4,8),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(273,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,5),44,date(2015,4,22),None,None,None,None,u'appypdf',courses_Course,44,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(274,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,11,4),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 1',u'',u'30',0,1,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(275,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,11,12),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 2',u'',u'30',0,2,u'30',4,False,None,u'60','20.00'))
loader.save(create_cal_event(276,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,11,19),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 3',u'',u'30',0,3,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(277,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,11,26),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 4',u'',u'30',0,4,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(278,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,12,3),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 5',u'',u'30',0,5,u'30',4,False,None,u'50','5.00'))
loader.save(create_cal_event(279,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,12,10),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 6',u'',u'30',0,6,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(280,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,12,17),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 7',u'',u'30',0,7,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(281,dt(2018,12,22,12,25,12),dt(2018,12,22,12,25,6),49,date(2014,12,24),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 8',u'',u'30',0,8,u'30',4,False,None,u'50','15.00'))
loader.save(create_cal_event(282,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,6),49,date(2014,12,31),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 9',u'',u'30',0,9,u'30',4,False,None,u'50','20.00'))
loader.save(create_cal_event(283,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,6),49,date(2015,1,7),None,None,None,None,u'appypdf',courses_Course,49,4,None,u' 10',u'',u'30',0,10,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(284,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,11,5),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 1',u'',u'30',0,1,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(285,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,11,12),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 2',u'',u'30',0,2,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(286,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,11,19),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 3',u'',u'30',0,3,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(287,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,11,26),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 4',u'',u'30',0,4,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(288,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,12,3),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 5',u'',u'30',0,5,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(289,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,12,10),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 6',u'',u'30',0,6,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(290,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,12,17),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 7',u'',u'30',0,7,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(291,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,12,24),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 8',u'',u'30',0,8,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(292,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2014,12,31),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 9',u'',u'30',0,9,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(293,dt(2018,12,22,12,25,13),dt(2018,12,22,12,25,7),50,date(2015,1,7),None,None,None,None,u'appypdf',courses_Course,50,5,None,u' 10',u'',u'30',0,10,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(294,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,3),time(8,30,0),None,time(9,30,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',2,False,None,u'10',None))
loader.save(create_cal_event(295,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,4),time(9,40,0),None,time(10,55,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',3,False,None,u'20',None))
loader.save(create_cal_event(296,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,5),time(10,20,0),None,time(11,50,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(297,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,5),time(11,10,0),None,time(12,55,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',5,False,None,u'70',None))
loader.save(create_cal_event(298,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,6),time(13,30,0),None,time(15,30,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',2,False,None,u'60',None))
loader.save(create_cal_event(299,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,7),time(8,30,0),None,time(11,0,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',3,False,None,u'10',None))
loader.save(create_cal_event(300,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,7),time(9,40,0),None,time(12,40,0),None,u'appypdf',None,None,3,None,u'Evaluation',u'',u'10',0,None,u'30',4,False,None,u'20',None))
loader.save(create_cal_event(301,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,8),time(10,20,0),None,time(11,20,0),None,u'appypdf',None,None,2,None,u'Erstgespr\xe4ch',u'',u'20',0,None,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(302,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,9),time(11,10,0),None,time(12,25,0),None,u'appypdf',None,None,1,None,u'Interview',u'',u'30',0,None,u'30',2,False,None,u'70',None))
loader.save(create_cal_event(303,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,9),time(13,30,0),None,time(15,0,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',3,False,None,u'60',None))
loader.save(create_cal_event(304,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,10),time(8,30,0),None,time(10,15,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',4,False,None,u'10',None))
loader.save(create_cal_event(305,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,11),time(9,40,0),None,time(11,40,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',5,False,None,u'20',None))
loader.save(create_cal_event(306,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,11),time(10,20,0),None,time(12,50,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',2,False,None,u'50',None))
loader.save(create_cal_event(307,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,12),time(11,10,0),None,time(14,10,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',3,False,None,u'70',None))
loader.save(create_cal_event(308,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,13),time(13,30,0),None,time(14,30,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(309,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,13),time(8,30,0),None,time(9,45,0),None,u'appypdf',None,None,3,None,u'Evaluation',u'',u'10',0,None,u'30',5,False,None,u'10',None))
loader.save(create_cal_event(310,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,14),time(9,40,0),None,time(11,10,0),None,u'appypdf',None,None,2,None,u'Erstgespr\xe4ch',u'',u'20',0,None,u'30',2,False,None,u'20',None))
loader.save(create_cal_event(311,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,15),time(10,20,0),None,time(12,5,0),None,u'appypdf',None,None,1,None,u'Interview',u'',u'30',0,None,u'30',3,False,None,u'50',None))
loader.save(create_cal_event(312,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,15),time(11,10,0),None,time(13,10,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',4,False,None,u'70',None))
loader.save(create_cal_event(313,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,16),time(13,30,0),None,time(16,0,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(314,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,17),time(8,30,0),None,time(11,30,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',2,False,None,u'10',None))
loader.save(create_cal_event(315,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,17),time(9,40,0),None,time(10,40,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',3,False,None,u'20',None))
loader.save(create_cal_event(316,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,18),time(10,20,0),None,time(11,35,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(317,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,19),time(11,10,0),None,time(12,40,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',5,False,None,u'70',None))
loader.save(create_cal_event(318,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,19),time(13,30,0),None,time(15,15,0),None,u'appypdf',None,None,3,None,u'Evaluation',u'',u'10',0,None,u'30',2,False,None,u'60',None))
loader.save(create_cal_event(319,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,20),time(8,30,0),None,time(10,30,0),None,u'appypdf',None,None,2,None,u'Erstgespr\xe4ch',u'',u'20',0,None,u'30',3,False,None,u'10',None))
loader.save(create_cal_event(320,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,21),time(9,40,0),None,time(12,10,0),None,u'appypdf',None,None,1,None,u'Interview',u'',u'30',0,None,u'30',4,False,None,u'20',None))
loader.save(create_cal_event(321,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,21),time(10,20,0),None,time(13,20,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(322,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,22),time(11,10,0),None,time(12,10,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',2,False,None,u'70',None))
loader.save(create_cal_event(323,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,23),time(13,30,0),None,time(14,45,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',3,False,None,u'60',None))
loader.save(create_cal_event(324,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,23),time(8,30,0),None,time(10,0,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',4,False,None,u'10',None))
loader.save(create_cal_event(325,dt(2018,12,22,12,25,41),dt(2018,12,22,12,25,41),None,date(2015,5,24),time(9,40,0),None,time(11,25,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',5,False,None,u'20',None))
loader.save(create_cal_event(326,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,25),time(10,20,0),None,time(12,20,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',2,False,None,u'50',None))
loader.save(create_cal_event(327,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,25),time(11,10,0),None,time(13,40,0),None,u'appypdf',None,None,3,None,u'Evaluation',u'',u'10',0,None,u'30',3,False,None,u'70',None))
loader.save(create_cal_event(328,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,26),time(13,30,0),None,time(16,30,0),None,u'appypdf',None,None,2,None,u'Erstgespr\xe4ch',u'',u'20',0,None,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(329,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,27),time(8,30,0),None,time(9,30,0),None,u'appypdf',None,None,1,None,u'Interview',u'',u'30',0,None,u'30',5,False,None,u'10',None))
loader.save(create_cal_event(330,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,27),time(9,40,0),None,time(10,55,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',2,False,None,u'20',None))
loader.save(create_cal_event(331,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,28),time(10,20,0),None,time(11,50,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',3,False,None,u'50',None))
loader.save(create_cal_event(332,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,29),time(11,10,0),None,time(12,55,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',4,False,None,u'70',None))
loader.save(create_cal_event(333,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,29),time(13,30,0),None,time(15,30,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',5,False,None,u'60',None))
loader.save(create_cal_event(334,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,30),time(8,30,0),None,time(11,0,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',2,False,None,u'10',None))
loader.save(create_cal_event(335,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,31),time(9,40,0),None,time(12,40,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',3,False,None,u'20',None))
loader.save(create_cal_event(336,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,5,31),time(10,20,0),None,time(11,20,0),None,u'appypdf',None,None,3,None,u'Evaluation',u'',u'10',0,None,u'30',4,False,None,u'50',None))
loader.save(create_cal_event(337,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,1),time(11,10,0),None,time(12,25,0),None,u'appypdf',None,None,2,None,u'Erstgespr\xe4ch',u'',u'20',0,None,u'30',5,False,None,u'70',None))
loader.save(create_cal_event(338,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,2),time(13,30,0),None,time(15,0,0),None,u'appypdf',None,None,1,None,u'Interview',u'',u'30',0,None,u'30',2,False,None,u'60',None))
loader.save(create_cal_event(339,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,2),time(8,30,0),None,time(10,15,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',3,False,None,u'10',None))
loader.save(create_cal_event(340,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,3),time(9,40,0),None,time(11,40,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',4,False,None,u'20',None))
loader.save(create_cal_event(341,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,4),time(10,20,0),None,time(12,50,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',5,False,None,u'50',None))
loader.save(create_cal_event(342,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,4),time(11,10,0),None,time(14,10,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',2,False,None,u'70',None))
loader.save(create_cal_event(343,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,5),time(13,30,0),None,time(14,30,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',3,False,None,u'60',None))
loader.save(create_cal_event(344,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,6),time(8,30,0),None,time(9,45,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',4,False,None,u'10',None))
loader.save(create_cal_event(345,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,6),time(9,40,0),None,time(11,10,0),None,u'appypdf',None,None,3,None,u'Evaluation',u'',u'10',0,None,u'30',5,False,None,u'20',None))
loader.save(create_cal_event(346,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,7),time(10,20,0),None,time(12,5,0),None,u'appypdf',None,None,2,None,u'Erstgespr\xe4ch',u'',u'20',0,None,u'30',2,False,None,u'50',None))
loader.save(create_cal_event(347,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,8),time(11,10,0),None,time(13,10,0),None,u'appypdf',None,None,1,None,u'Interview',u'',u'30',0,None,u'30',3,False,None,u'70',None))
loader.save(create_cal_event(348,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,8),time(13,30,0),None,time(16,0,0),None,u'appypdf',None,None,3,None,u'Diner',u'',u'10',0,None,u'30',4,False,None,u'60',None))
loader.save(create_cal_event(349,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,9),time(8,30,0),None,time(11,30,0),None,u'appypdf',None,None,2,None,u'Abendessen',u'',u'20',0,None,u'30',5,False,None,u'10',None))
loader.save(create_cal_event(350,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,10),time(9,40,0),None,time(10,40,0),None,u'appypdf',None,None,1,None,u'Breakfast',u'',u'30',0,None,u'30',2,False,None,u'20',None))
loader.save(create_cal_event(351,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,10),time(10,20,0),None,time(11,35,0),None,u'appypdf',None,None,3,None,u'Rencontre',u'',u'10',0,None,u'30',3,False,None,u'50',None))
loader.save(create_cal_event(352,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,11),time(11,10,0),None,time(12,40,0),None,u'appypdf',None,None,2,None,u'Beratung',u'',u'20',0,None,u'30',4,False,None,u'70',None))
loader.save(create_cal_event(353,dt(2018,12,22,12,25,42),dt(2018,12,22,12,25,42),None,date(2015,6,12),time(13,30,0),None,time(15,15,0),None,u'appypdf',None,None,1,None,u'Seminar',u'',u'30',0,None,u'30',5,False,None,u'60',None))
loader.flush_deferred_objects()
|
lino-framework/book
|
lino_book/projects/lydia/tests/dumps/18.12.0/cal_event.py
|
Python
|
bsd-2-clause
| 76,614
|
"""
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of function is the composite, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import print_function, division
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic
from .cache import cacheit
from .compatibility import iterable, is_sequence, as_int, ordered
from .decorators import _sympifyit
from .expr import Expr, AtomicExpr
from .numbers import Rational, Float
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
from sympy.core.containers import Tuple, Dict
from sympy.core.logic import fuzzy_and
from sympy.core.compatibility import string_types, with_metaclass, range
from sympy.utilities import default_sort_key
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import uniq
from sympy.core.evaluate import global_evaluate
import sys
import mpmath
import mpmath.libmp as mlib
import inspect
import collections
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
"""
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_negative
class PoleError(Exception):
pass
class ArgumentIndexError(ValueError):
def __str__(self):
return ("Invalid operation with argument number %s for Function %s" %
(self.args[1], self.args[0]))
def _getnargs(cls):
if hasattr(cls, 'eval'):
if sys.version_info < (3, ):
return _getnargs_old(cls.eval)
else:
return _getnargs_new(cls.eval)
else:
return None
def _getnargs_old(eval_):
evalargspec = inspect.getargspec(eval_)
if evalargspec.varargs:
return None
else:
evalargs = len(evalargspec.args) - 1 # subtract 1 for cls
if evalargspec.defaults:
# if there are default args then they are optional; the
# fewest args will occur when all defaults are used and
# the most when none are used (i.e. all args are given)
return tuple(range(
evalargs - len(evalargspec.defaults), evalargs + 1))
return evalargs
def _getnargs_new(eval_):
parameters = inspect.signature(eval_).parameters.items()
if [p for n,p in parameters if p.kind == p.VAR_POSITIONAL]:
return None
else:
p_or_k = [p for n,p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD]
num_no_default = len(list(filter(lambda p: p.default == p.empty, p_or_k)))
num_with_default = len(list(filter(lambda p: p.default != p.empty, p_or_k)))
if not num_with_default:
return num_no_default
return tuple(range(num_no_default, num_no_default+num_with_default+1))
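# Illustrative sketch (not part of the original source): for a class whose
# eval is defined as ``def eval(cls, a, b=None)``, the helpers above count one
# required and one optional positional argument, so _getnargs reports the
# tuple (1, 2), i.e. the function may be called with one or two arguments.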
class FunctionClass(ManagedProperties):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
_new = type.__new__
def __init__(cls, *args, **kwargs):
# honor kwarg value or class-defined value before using
# the number of arguments in the eval function (if present)
nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', _getnargs(cls)))
super(FunctionClass, cls).__init__(args, kwargs)
# Canonicalize nargs here; change to set in nargs.
if is_sequence(nargs):
if not nargs:
raise ValueError(filldedent('''
Incorrectly specified nargs as %s:
if there are no arguments, it should be
`nargs = 0`;
if there are any number of arguments,
it should be
`nargs = None`''' % str(nargs)))
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
cls._nargs = nargs
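# Hedged example of the canonicalization above (illustrative, not from the
# original file): Function('f', nargs=(2, 1)) ends up with cls._nargs == (1, 2),
# Function('f', nargs=2) with (2,), and a plain Function('f') with None, which
# the ``nargs`` property below maps to Naturals0 (any number of arguments).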
@property
def __signature__(self):
"""
Allow Python 3's inspect.signature to give a useful signature for
Function subclasses.
"""
# Python 3 only, but backports (like the one in IPython) still might
# call this.
try:
from inspect import signature
except ImportError:
return None
# TODO: Look at nargs
return signature(self.eval)
@property
def nargs(self):
"""Return a set of the allowed number of arguments for the function.
Examples
========
>>> from sympy.core.function import Function
>>> from sympy.abc import x, y
>>> f = Function('f')
If the function can take any number of arguments, the set of whole
numbers is returned:
>>> Function('f').nargs
Naturals0()
If the function was initialized to accept one or more arguments, a
corresponding set will be returned:
>>> Function('f', nargs=1).nargs
{1}
>>> Function('f', nargs=(2, 1)).nargs
{1, 2}
The undefined function, after application, also has the nargs
attribute; the actual number of arguments is always available by
checking the ``args`` attribute:
>>> f = Function('f')
>>> f(1).nargs
Naturals0()
>>> len(f(1).args)
1
"""
from sympy.sets.sets import FiniteSet
# XXX it would be nice to handle this in __init__ but there are import
# problems with trying to import FiniteSet there
return FiniteSet(*self._nargs) if self._nargs else S.Naturals0
def __repr__(cls):
return cls.__name__
class Application(with_metaclass(FunctionClass, Basic)):
"""
Base class for applied functions.
Instances of Application represent the result of applying a function of
any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from sympy.sets.fancysets import Naturals0
from sympy.sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_evaluate[0])
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError("Unknown options: %s" % options)
if evaluate:
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super(Application, cls).__new__(cls, *args, **options)
# make nargs uniform here
try:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(obj.nargs):
nargs = tuple(ordered(set(obj.nargs)))
elif obj.nargs is not None:
nargs = (as_int(obj.nargs),)
else:
nargs = None
except AttributeError:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
# convert to FiniteSet
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possibly of some other class), or if the class cls should be
unmodified, return None.
Examples of eval() for the function "sign"
---------------------------------------------
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg is S.Zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, Mul):
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One:
return cls(coeff) * cls(terms)
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and
callable(old) and callable(new) and
old == self.func and len(self.args) in new.nargs):
return new(*self.args)
class Function(Application, Expr):
"""Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> g = Function('g')(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
In the following example Function is used as a base class for
``my_func`` that represents a mathematical function *my_func*. Suppose
that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> from sympy import Function, S, oo, I, sin
>>> class my_func(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x is S.Zero:
... return S.One
... elif x is S.Infinity:
... return S.Zero
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> x = S('x')
>>> my_func(0) + sin(0)
1
>>> my_func(oo)
0
>>> my_func(3.54).n() # Not yet implemented for my_func.
my_func(3.54)
>>> my_func(I).is_real
False
In order for ``my_func`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``my_func`` can take one or two arguments
then,
>>> class my_func(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
"""Allow derivatives wrt functions.
Examples
========
>>> from sympy import Function, Symbol
>>> f = Function('f')
>>> x = Symbol('x')
>>> f(x)._diff_wrt
True
"""
return True
@cacheit
def __new__(cls, *args, **options):
# Handle calls like Function('f')
if cls is Function:
return UndefinedFunction(*args, **options)
n = len(args)
if n not in cls.nargs:
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
temp = ('%(name)s takes %(qual)s %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': cls,
'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
'args': min(cls.nargs),
'plural': 's'*(min(cls.nargs) != 1),
'given': n})
evaluate = options.get('evaluate', global_evaluate[0])
result = super(Function, cls).__new__(cls, *args, **options)
if not evaluate or not isinstance(result, cls):
return result
pr = max(cls._should_evalf(a) for a in result.args)
pr2 = min(cls._should_evalf(a) for a in result.args)
if pr2 > 0:
return result.evalf(mlib.libmpf.prec_to_dps(pr))
return result
@classmethod
def _should_evalf(cls, arg):
"""
Decide if the function should automatically evalf().
By default (in this implementation), this happens if (and only if) the
ARG is a floating point number.
This function is used by __new__.
Returns the precision to evalf to, or -1 if it shouldn't evalf.
"""
from sympy.core.symbol import Wild
if arg.is_Float:
return arg._prec
if not arg.is_Add:
return -1
# Don't use as_real_imag() here, that's too much work
a, b = Wild('a'), Wild('b')
m = arg.match(a + b*S.ImaginaryUnit)
if not m or not (m[a].is_Float or m[b].is_Float):
return -1
l = [m[i]._prec for i in m if m[i].is_Float]
l.append(-1)
return max(l)
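# Rough illustration of _should_evalf (assumed example, not in the original
# source): an exact argument such as Integer(1) yields -1 (no automatic
# evalf), a Float argument yields its binary precision, and an Add matching
# a + b*I with a Float real or imaginary part also yields a precision so the
# applied function is evaluated numerically in __new__.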
@classmethod
def class_key(cls):
from sympy.sets.fancysets import Naturals0
funcs = {
'exp': 10,
'log': 11,
'sin': 20,
'cos': 21,
'tan': 22,
'cot': 23,
'sinh': 30,
'cosh': 31,
'tanh': 32,
'coth': 33,
'conjugate': 40,
're': 41,
'im': 42,
'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
i = 0 if isinstance(cls.nargs, Naturals0) else 10000
return 4, i, name
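# Sketch of the effect (illustrative assumption): the returned key (4, i, name)
# makes exp/log (indices 10, 11) sort ahead of the trigonometric and hyperbolic
# functions (20..33); functions not listed get 0 when they accept any number of
# arguments and 10000 otherwise.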
@property
def is_commutative(self):
"""
Returns whether the function is commutative.
"""
if all(getattr(t, 'is_commutative') for t in self.args):
return True
else:
return False
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
fname = self.func.__name__
try:
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS[fname]
func = getattr(mpmath, fname)
except (AttributeError, KeyError):
try:
return Float(self._imp_(*self.args), prec)
except (AttributeError, TypeError, ValueError):
return
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in self.args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element;
# if that is 1 (and m[1] is not 1, which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
return m[1] != 1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
return m[1] != 1 and m[-1] == 1 and \
n[1] != 1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
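# Flow sketch (assumed illustration): for something like sin(1)._eval_evalf(53)
# the mpmath function is looked up by the class name (falling back to
# MPMATH_TRANSLATIONS), the arguments are converted at prec + 5 bits, arguments
# that failed to reach a finite precision abort the evaluation, and the final
# call runs under mpmath.workprec(prec) before converting back to a Float.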
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da is S.Zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
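# Chain-rule sketch (illustrative, assumed): with f = Function('f'),
# f(x**2).diff(x) multiplies the inner derivative 2*x by fdiff(1) evaluated at
# x**2, giving roughly 2*x*Subs(Derivative(f(_xi_1), _xi_1), _xi_1, x**2).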
def _eval_is_commutative(self):
return fuzzy_and(a.is_commutative for a in self.args)
def _eval_is_complex(self):
return fuzzy_and(a.is_complex for a in self.args)
def as_base_exp(self):
"""
Returns the function as the 2-tuple (base, exponent).
"""
return self, S.One
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
from sympy.utilities.misc import filldedent
raise PoleError(filldedent('''
Asymptotic expansion of %s around %s is
not implemented.''' % (type(self), args0)))
def _eval_nseries(self, x, n, logx):
"""
This function does compute series for multivariate functions,
but the expansion is always in terms of *one* variable.
Examples
========
>>> from sympy import atan2
>>> from sympy.abc import x, y
>>> atan2(x, y).series(x, n=2)
atan2(0, y) + x/y + O(x**2)
>>> atan2(x, y).series(y, n=2)
-y/x + atan2(x, 0) + O(y**2)
This function also computes asymptotic expansions, if necessary
and possible:
>>> from sympy import loggamma
>>> loggamma(1/x)._eval_nseries(x,0,None)
-1/x - log(x)/x + log(x)/2 + O(1)
"""
from sympy import Order
from sympy.sets.sets import FiniteSet
args = self.args
args0 = [t.limit(x, 0) for t in args]
if any(t.is_finite is False for t in args0):
from sympy import oo, zoo, nan
# XXX could use t.as_leading_term(x) here but it's a little
# slower
a = [t.compute_leading_term(x, logx=logx) for t in args]
a0 = [t.limit(x, 0) for t in a]
if any([t.has(oo, -oo, zoo, nan) for t in a0]):
return self._eval_aseries(n, args0, x, logx)
# Careful: the argument goes to oo, but only logarithmically so. We
# are supposed to do a power series expansion "around the
# logarithmic term". e.g.
# f(1+x+log(x))
# -> f(1+logx) + x*f'(1+logx) + O(x**2)
# where 'logx' is given in the argument
a = [t._eval_nseries(x, n, logx) for t in args]
z = [r - r0 for (r, r0) in zip(a, a0)]
p = [Dummy() for t in z]
q = []
v = None
for ai, zi, pi in zip(a0, z, p):
if zi.has(x):
if v is not None:
raise NotImplementedError
q.append(ai + pi)
v = pi
else:
q.append(ai)
e1 = self.func(*q)
if v is None:
return e1
s = e1._eval_nseries(v, n, logx)
o = s.getO()
s = s.removeO()
s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
return s
if (self.func.nargs is S.Naturals0
or (self.func.nargs == FiniteSet(1) and args0[0])
or any(c > 1 for c in self.func.nargs)):
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
term = e.subs(x, S.Zero)
if term.is_finite is False or term is S.NaN:
raise PoleError("Cannot expand %s around 0" % (self))
series = term
fact = S.One
_x = Dummy('x')
e = e.subs(x, _x)
for i in range(n - 1):
i += 1
fact *= Rational(i)
e = e.diff(_x)
subs = e.subs(_x, S.Zero)
if subs is S.NaN:
# try to evaluate a limit if we have to
subs = e.limit(_x, S.Zero)
if subs.is_finite is False:
raise PoleError("Cannot expand %s around 0" % (self))
term = subs*(x**i)/fact
term = term.expand()
series += term
return series + Order(x**n, x)
return e1.nseries(x, n=n, logx=logx)
arg = self.args[0]
l = []
g = None
# try to predict a number of terms needed
nterms = n + 2
cf = Order(arg.as_leading_term(x), x).getn()
if cf != 0:
nterms = int(nterms / cf)
for i in range(nterms):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return Add(*l) + Order(x**n, x)
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if not (1 <= argindex <= len(self.args)):
raise ArgumentIndexError(self, argindex)
if self.args[argindex - 1].is_Symbol:
for i in range(len(self.args)):
if i == argindex - 1:
continue
# See issue 8510
if self.args[argindex - 1] in self.args[i].free_symbols:
break
else:
return Derivative(self, self.args[argindex - 1], evaluate=False)
# See issue 4624 and issue 4719 and issue 5600
arg_dummy = Dummy('xi_%i' % argindex)
arg_dummy.dummy_index = hash(self.args[argindex - 1])
new_args = [arg for arg in self.args]
new_args[argindex-1] = arg_dummy
return Subs(Derivative(self.func(*new_args), arg_dummy),
arg_dummy, self.args[argindex - 1])
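# Behaviour sketch (assumed example): for f = Function('f'), f(x).fdiff(1)
# returns an unevaluated Derivative(f(x), x) because the argument is a plain
# Symbol, while f(sin(x)).fdiff(1) goes through the dummy-variable branch and
# returns a Subs of Derivative(f(_xi_1), _xi_1) evaluated at sin(x).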
def _eval_as_leading_term(self, x):
"""Stub that should be overridden by new Functions to return
the first non-zero term in a series if ever an x-dependent
argument whose leading term vanishes as x -> 0 might be encountered.
See, for example, cos._eval_as_leading_term.
"""
from sympy import Order
args = [a.as_leading_term(x) for a in self.args]
o = Order(1, x)
if any(x in a.free_symbols and o.contains(a) for a in args):
# Whereas x and any finite number are contained in O(1, x),
# expressions like 1/x are not. If any arg simplified to a
# vanishing expression as x -> 0 (like x or x**2, but not
# 3, 1/x, etc...) then the _eval_as_leading_term is needed
# to supply the first non-zero term of the series,
#
# e.g. expression leading term
# ---------- ------------
# cos(1/x) cos(1/x)
# cos(cos(x)) cos(1)
# cos(x) 1 <- _eval_as_leading_term needed
# sin(x) x <- _eval_as_leading_term needed
#
raise NotImplementedError(
'%s has no _eval_as_leading_term routine' % self.func)
else:
return self.func(*args)
def _sage_(self):
import sage.all as sage
fname = self.func.__name__
func = getattr(sage, fname)
args = [arg._sage_() for arg in self.args]
return func(*args)
class AppliedUndef(Function):
"""
Base class for expressions resulting from the application of an undefined
function.
"""
def __new__(cls, *args, **options):
args = list(map(sympify, args))
obj = super(AppliedUndef, cls).__new__(cls, *args, **options)
return obj
def _eval_as_leading_term(self, x):
return self
def _sage_(self):
import sage.all as sage
fname = str(self.func)
args = [arg._sage_() for arg in self.args]
func = sage.function(fname)(*args)
return func
class UndefinedFunction(FunctionClass):
"""
The (meta)class of undefined functions.
"""
def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
__dict__ = __dict__ or {}
__dict__.update(kwargs)
__dict__['__module__'] = None # For pickling
ret = super(UndefinedFunction, mcl).__new__(mcl, name, bases, __dict__)
return ret
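# Illustrative note (assumption, not from the original source): Function('f')
# routes through this metaclass and produces a brand-new class named 'f' whose
# only base is AppliedUndef, so f(x) builds an AppliedUndef instance instead of
# evaluating to anything.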
def __instancecheck__(cls, instance):
return cls in type(instance).__mro__
UndefinedFunction.__eq__ = lambda s, o: (isinstance(o, s.__class__) and
(s.class_key() == o.class_key()))
class WildFunction(Function, AtomicExpr):
"""
A WildFunction function matches any function (with its arguments).
Examples
========
>>> from sympy import WildFunction, Function, cos
>>> from sympy.abc import x, y
>>> F = WildFunction('F')
>>> f = Function('f')
>>> F.nargs
Naturals0()
>>> x.match(F)
>>> F.match(F)
{F_: F_}
>>> f(x).match(F)
{F_: f(x)}
>>> cos(x).match(F)
{F_: cos(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a given number of arguments, set ``nargs`` to the
desired value at instantiation:
>>> F = WildFunction('F', nargs=2)
>>> F.nargs
{2}
>>> f(x).match(F)
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a range of arguments, set ``nargs`` to a tuple
containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
then functions with 1 or 2 arguments will be matched.
>>> F = WildFunction('F', nargs=(1, 2))
>>> F.nargs
{1, 2}
>>> f(x).match(F)
{F_: f(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
>>> f(x, y, 1).match(F)
"""
include = set()
def __init__(cls, name, **assumptions):
from sympy.sets.sets import Set, FiniteSet
cls.name = name
nargs = assumptions.pop('nargs', S.Naturals0)
if not isinstance(nargs, Set):
# Canonicalize nargs here. See also FunctionClass.
if is_sequence(nargs):
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
nargs = FiniteSet(*nargs)
cls.nargs = nargs
def matches(self, expr, repl_dict={}, old=False):
if not isinstance(expr, (AppliedUndef, Function)):
return None
if len(expr.args) not in self.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
expr must define a ._eval_derivative(symbol) method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Simplification of high-order derivatives:
Because there can be a significant amount of simplification that can be
done when multiple differentiations are performed, results will be
automatically simplified in a fairly conservative fashion unless the
keyword ``simplify`` is set to False.
>>> from sympy import sqrt, diff
>>> from sympy.abc import x
>>> e = sqrt((x + 1)**2 + x)
>>> diff(e, x, 5, simplify=False).count_ops()
136
>>> diff(e, x, 5).count_ops()
30
Ordering of variables:
If evaluate is set to True and the expression can not be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked. This sorting
assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
commute, but Symbol and non-Symbol derivatives don't commute with each
other.
Derivative wrt non-Symbols:
This class also allows derivatives wrt non-Symbols that have _diff_wrt
set to True, such as Function and Derivative. When a derivative wrt a non-
Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
while the differentiation is performed.
Note that this may seem strange, that Derivative allows things like
f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
allowing this syntax is to make it easier to work with variational calculus
(i.e., the Euler-Lagrange method). The best way to understand this is that
the action of derivative with respect to a non-Symbol is defined by the
above description: the object is substituted for a Symbol and the
derivative is taken with respect to that. This action is only allowed for
objects for which this can be done unambiguously, for example Function and
Derivative objects. Note that this leads to what may appear to be
mathematically inconsistent results. For example::
>>> from sympy import cos, sin, sqrt
>>> from sympy.abc import x
>>> (2*cos(x)).diff(cos(x))
2
>>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
0
This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
identically equal. However this is the wrong way to think of this. Think
of it instead as if we have something like this::
>>> from sympy.abc import c, s
>>> def F(u):
... return 2*u
...
>>> def G(u):
... return 2*sqrt(1 - u**2)
...
>>> F(cos(x))
2*cos(x)
>>> G(sin(x))
2*sqrt(-sin(x)**2 + 1)
>>> F(c).diff(c)
2
>>> F(c).diff(c)
2
>>> G(s).diff(c)
0
>>> G(sin(x)).diff(cos(x))
0
Here, the Symbols c and s act just like the functions cos(x) and sin(x),
respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at*
c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s) *at*
s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
define the function first and evaluate it at the function, but we can
actually unambiguously do this in reverse in SymPy, because
expr.subs(Function, Symbol) is well-defined: just structurally replace the
function everywhere it appears in the expression.
This is the same notational convenience used in the Euler-Lagrange method
when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant is
that the expression in question is represented by some F(t, u, v) at u =
f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means F(t, u,
v).diff(u) at u = f(t).
We do not allow derivatives to be taken with respect to expressions where this
is not so well defined. For example, we do not allow expr.diff(x*y)
because there are multiple ways of structurally defining where x*y appears
in an expression, some of which may surprise the reader (for example, a
very strict definition would have that (x*y*z).diff(x*y) == 0).
>>> from sympy.abc import x, y, z
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't differentiate wrt the variable: x*y, 1
Note that this definition also fits in nicely with the definition of the
chain rule. Note how the chain rule in SymPy is defined using unevaluated
Subs objects::
>>> from sympy import symbols, Function
>>> f, g = symbols('f g', cls=Function)
>>> f(2*g(x)).diff(x)
2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1),
(_xi_1,), (2*g(x),))
>>> f(g(x)).diff(x)
Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1),
(_xi_1,), (g(x),))
Finally, note that, to be consistent with variational calculus, and to
ensure that the definition of substituting a Function for a Symbol in an
expression is well-defined, derivatives of functions are assumed to not be
related to the function. In other words, we have::
>>> from sympy import diff
>>> diff(f(x), x).diff(f(x))
0
The same is true for derivatives of different orders::
>>> diff(f(x), x, 2).diff(diff(f(x), x, 1))
0
>>> diff(f(x), x, 1).diff(diff(f(x), x, 2))
0
Note, any class can allow derivatives to be taken with respect to itself.
See the docstring of Expr._diff_wrt.
Examples
========
Some basic examples:
>>> from sympy import Derivative, Symbol, Function
>>> f = Function('f')
>>> g = Function('g')
>>> x = Symbol('x')
>>> y = Symbol('y')
>>> Derivative(x**2, x, evaluate=True)
2*x
>>> Derivative(Derivative(f(x,y), x), y)
Derivative(f(x, y), x, y)
>>> Derivative(f(x), x, 3)
Derivative(f(x), x, x, x)
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Now some derivatives wrt functions:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
>>> Derivative(f(g(x)), x, evaluate=True)
Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1),
(_xi_1,), (g(x),))
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""Allow derivatives wrt Derivatives if it contains a function.
Examples
========
>>> from sympy import Function, Symbol, Derivative
>>> f = Function('f')
>>> x = Symbol('x')
>>> Derivative(f(x),x)._diff_wrt
True
>>> Derivative(x**2,x)._diff_wrt
False
"""
if self.expr.is_Function:
return True
else:
return False
def __new__(cls, expr, *variables, **assumptions):
expr = sympify(expr)
# There are no variables, we differentiate wrt all of the free symbols
# in expr.
if not variables:
variables = expr.free_symbols
if len(variables) != 1:
if expr.is_number:
return S.Zero
from sympy.utilities.misc import filldedent
if len(variables) == 0:
raise ValueError(filldedent('''
Since there are no variables in the expression,
the variable(s) of differentiation must be supplied
to differentiate %s''' % expr))
else:
raise ValueError(filldedent('''
Since there is more than one variable in the
expression, the variable(s) of differentiation
must be supplied to differentiate %s''' % expr))
# Standardize the variables by sympifying them and appending a
# count of 1 if there is only one variable: diff(e,x)->diff(e,x,1).
variables = list(sympify(variables))
if not variables[-1].is_Integer or len(variables) == 1:
variables.append(S.One)
# Split the list of variables into a list of the variables we are diff
# wrt, where each element of the list has the form (s, count) where
# s is the entity to diff wrt and count is the order of the
# derivative.
variable_count = []
all_zero = True
i = 0
while i < len(variables) - 1: # process up to final Integer
v, count = variables[i: i + 2]
iwas = i
if v._diff_wrt:
# We need to test the more specific case of count being an
# Integer first.
if count.is_Integer:
count = int(count)
i += 2
elif count._diff_wrt:
count = 1
i += 1
if i == iwas: # didn't get an update because of bad input
from sympy.utilities.misc import filldedent
last_digit = int(str(count)[-1])
ordinal = 'st' if last_digit == 1 else 'nd' if last_digit == 2 else 'rd' if last_digit == 3 else 'th'
raise ValueError(filldedent('''
Can\'t calculate %s%s derivative wrt %s.''' % (count, ordinal, v)))
if all_zero and not count == 0:
all_zero = False
if count:
variable_count.append((v, count))
# We make a special case for 0th derivative, because there is no
# good way to unambiguously print this.
if all_zero:
return expr
# Pop evaluate because it is not really an assumption and we will need
# to track it carefully below.
evaluate = assumptions.pop('evaluate', False)
# Look for a quick exit if there are symbols that don't appear in
# expression at all. Note, this cannot check non-symbols like
# functions and Derivatives as those can be created by intermediate
# derivatives.
if evaluate and all(isinstance(sc[0], Symbol) for sc in variable_count):
symbol_set = set(sc[0] for sc in variable_count)
if symbol_set.difference(expr.free_symbols):
return S.Zero
# We make a generator so as to only generate a variable when necessary.
# If a high order of derivative is requested and the expr becomes 0
# after a few differentiations, then we won't need the other variables.
variablegen = (v for v, count in variable_count for i in range(count))
# If we can't compute the derivative of expr (but we wanted to) and
# expr is itself not a Derivative, finish building an unevaluated
# derivative class by calling Expr.__new__.
if (not (hasattr(expr, '_eval_derivative') and evaluate) and
(not isinstance(expr, Derivative))):
variables = list(variablegen)
# If we wanted to evaluate, we sort the variables into standard
# order for later comparisons. This is too aggressive if evaluate
# is False, so we don't do it in that case.
if evaluate:
#TODO: check if assumption of discontinuous derivatives exist
variables = cls._sort_variables(variables)
# Here we *don't* need to reinject evaluate into assumptions
# because we are done with it and it is not an assumption that
# Expr knows about.
obj = Expr.__new__(cls, expr, *variables, **assumptions)
return obj
# Compute the derivative now by repeatedly calling the
# _eval_derivative method of expr for each variable. When this method
# returns None, the derivative couldn't be computed wrt that variable
# and we save the variable for later.
unhandled_variables = []
# Once we encounter a non_symbol that is unhandled, we stop taking
# derivatives entirely. This is because derivatives wrt functions
# don't commute with derivatives wrt symbols and we can't safely
# continue.
unhandled_non_symbol = False
nderivs = 0 # how many derivatives were performed
for v in variablegen:
is_symbol = v.is_symbol
if unhandled_non_symbol:
obj = None
else:
if not is_symbol:
new_v = Dummy('xi_%i' % i)
new_v.dummy_index = hash(v)
expr = expr.xreplace({v: new_v})
old_v = v
v = new_v
obj = expr._eval_derivative(v)
nderivs += 1
if not is_symbol:
if obj is not None:
if not old_v.is_symbol and obj.is_Derivative:
# Derivative evaluated at a point that is not a
# symbol
obj = Subs(obj, v, old_v)
else:
obj = obj.xreplace({v: old_v})
v = old_v
if obj is None:
unhandled_variables.append(v)
if not is_symbol:
unhandled_non_symbol = True
elif obj is S.Zero:
return S.Zero
else:
expr = obj
if unhandled_variables:
unhandled_variables = cls._sort_variables(unhandled_variables)
expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
else:
# We got a Derivative at the end of it all, and we rebuild it by
# sorting its variables.
if isinstance(expr, Derivative):
expr = cls(
expr.args[0], *cls._sort_variables(expr.args[1:])
)
if nderivs > 1 and assumptions.get('simplify', True):
from sympy.core.exprtools import factor_terms
from sympy.simplify.simplify import signsimp
expr = factor_terms(signsimp(expr))
return expr
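# Illustrative sketch, not part of the original module: after the
# normalization above, a call such as Derivative(f(x, y), x, 2, y) is reduced
# to variable_count == [(x, 2), (y, 1)] before any differentiation is
# attempted, so (assuming the usual sympy names) these should be equivalent:
# >>> from sympy import Function, symbols, Derivative
# >>> x, y = symbols('x y')
# >>> f = Function('f')
# >>> Derivative(f(x, y), x, 2, y) == Derivative(f(x, y), x, x, y)
# True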
@classmethod
def _sort_variables(cls, vars):
"""Sort variables, but disallow sorting of non-symbols.
When taking derivatives, the following rules usually hold:
* Derivative wrt different symbols commute.
* Derivative wrt different non-symbols commute.
* Derivatives wrt symbols and non-symbols don't commute.
Examples
========
>>> from sympy import Derivative, Function, symbols
>>> vsort = Derivative._sort_variables
>>> x, y, z = symbols('x y z')
>>> f, g, h = symbols('f g h', cls=Function)
>>> vsort((x,y,z))
[x, y, z]
>>> vsort((h(x),g(x),f(x)))
[f(x), g(x), h(x)]
>>> vsort((z,y,x,h(x),g(x),f(x)))
[x, y, z, f(x), g(x), h(x)]
>>> vsort((x,f(x),y,f(y)))
[x, f(x), y, f(y)]
>>> vsort((y,x,g(x),f(x),z,h(x),y,x))
[x, y, f(x), g(x), z, h(x), x, y]
>>> vsort((z,y,f(x),x,f(x),g(x)))
[y, z, f(x), x, f(x), g(x)]
>>> vsort((z,y,f(x),x,f(x),g(x),z,z,y,x))
[y, z, f(x), x, f(x), g(x), x, y, z, z]
"""
sorted_vars = []
symbol_part = []
non_symbol_part = []
for v in vars:
if not v.is_symbol:
if len(symbol_part) > 0:
sorted_vars.extend(sorted(symbol_part,
key=default_sort_key))
symbol_part = []
non_symbol_part.append(v)
else:
if len(non_symbol_part) > 0:
sorted_vars.extend(sorted(non_symbol_part,
key=default_sort_key))
non_symbol_part = []
symbol_part.append(v)
if len(non_symbol_part) > 0:
sorted_vars.extend(sorted(non_symbol_part,
key=default_sort_key))
if len(symbol_part) > 0:
sorted_vars.extend(sorted(symbol_part,
key=default_sort_key))
return sorted_vars
def _eval_is_commutative(self):
return self.expr.is_commutative
def _eval_derivative(self, v):
# If the variable v we are diff wrt is not in self.variables, we
# assume that we might be able to take the derivative.
if v not in self.variables:
obj = self.expr.diff(v)
if obj is S.Zero:
return S.Zero
if isinstance(obj, Derivative):
return obj.func(obj.expr, *(self.variables + obj.variables))
# The derivative wrt s could have simplified things such that the
# derivative wrt things in self.variables can now be done. Thus,
# we set evaluate=True to see if there are any other derivatives
# that can be done. The most common case is when obj is a simple
# number so that the derivative wrt anything else will vanish.
return self.func(obj, *self.variables, evaluate=True)
# In this case v was in self.variables so the derivative wrt v has
# already been attempted and was not computed, either because it
# couldn't be or evaluate=False originally.
return self.func(self.expr, *(self.variables + (v, )), evaluate=False)
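# Hedged illustration of the branch above: differentiating an unevaluated
# Derivative wrt a symbol that is already in .variables simply appends it
# without evaluating anything, e.g. (assuming the usual sympy names):
# >>> from sympy import Function, Symbol, Derivative
# >>> x = Symbol('x')
# >>> f = Function('f')
# >>> Derivative(f(x), x)._eval_derivative(x)
# Derivative(f(x), x, x)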
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
return self.func(expr, *self.variables, **hints)
@_sympifyit('z0', NotImplementedError)
def doit_numerically(self, z0):
"""
Evaluate the derivative at z numerically.
When we can represent derivatives at a point, this should be folded
into the normal evalf. For now, we need a special method.
"""
import mpmath
from sympy.core.expr import Expr
if len(self.free_symbols) != 1 or len(self.variables) != 1:
raise NotImplementedError('partials and higher order derivatives')
z = list(self.free_symbols)[0]
def eval(x):
f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
return f0._to_mpmath(mpmath.mp.prec)
return Expr._from_mpmath(mpmath.diff(eval,
z0._to_mpmath(mpmath.mp.prec)),
mpmath.mp.prec)
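# A minimal usage sketch for the method above (hypothetical session; the
# printed value is only approximate and depends on mpmath's precision):
# >>> from sympy import Symbol, exp, Derivative, Float
# >>> x = Symbol('x')
# >>> Derivative(exp(x), x).doit_numerically(Float(0))
# 1.00000000000000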
@property
def expr(self):
return self._args[0]
@property
def variables(self):
return self._args[1:]
@property
def free_symbols(self):
return self.expr.free_symbols
def _eval_subs(self, old, new):
if old in self.variables and not new._diff_wrt:
# issue 4719
return Subs(self, old, new)
# If both are Derivatives with the same expr, check if old is
# equivalent to self or if old is a subderivative of self.
if old.is_Derivative and old.expr == self.expr:
# Check if the canonical order of variables is equal.
old_vars = collections.Counter(old.variables)
self_vars = collections.Counter(self.variables)
if old_vars == self_vars:
return new
# collections.Counter doesn't have __le__
def _subset(a, b):
return all(a[i] <= b[i] for i in a)
if _subset(old_vars, self_vars):
return Derivative(new, *(self_vars - old_vars).elements())
return Derivative(*(x._subs(old, new) for x in self.args))
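# Hedged illustration of the subderivative logic above (assuming the usual
# sympy names): substituting a lower-order derivative inside a higher-order
# one should leave the remaining differentiations applied to the replacement:
# >>> from sympy import Function, Symbol, Derivative
# >>> x = Symbol('x')
# >>> f, g = Function('f'), Function('g')
# >>> Derivative(f(x), x, x).subs(Derivative(f(x), x), g(x))
# Derivative(g(x), x)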
def _eval_lseries(self, x, logx):
dx = self.variables
for term in self.expr.lseries(x, logx=logx):
yield self.func(term, *dx)
def _eval_nseries(self, x, n, logx):
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
dx = self.variables
rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
if o:
rv.append(o/x)
return Add(*rv)
def _eval_as_leading_term(self, x):
series_gen = self.expr.lseries(x)
d = S.Zero
for leading_term in series_gen:
d = diff(leading_term, *self.variables)
if d != 0:
break
return d
def _sage_(self):
import sage.all as sage
args = [arg._sage_() for arg in self.args]
return sage.derivative(*args)
def as_finite_difference(self, points=1, x0=None, wrt=None):
""" Expresses a Derivative instance as a finite difference.
Parameters
==========
points : sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. Default: 1 (step-size 1)
x0 : number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt : Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated for. If not provided it
is required that the derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> f(x).diff(x).as_finite_difference()
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and
``order + 1`` respectively. We can change the step size by
passing a symbol as a parameter:
>>> f(x).diff(x).as_finite_difference(h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a
sequence:
>>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> d2fdxdy.as_finite_difference(wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
We can apply ``as_finite_difference`` to ``Derivative`` instances in
compound expressions using ``replace``:
>>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
... lambda arg: arg.as_finite_difference())
42**(-f(x - 1/2) + f(x + 1/2)) + 1
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.differentiate_finite
sympy.calculus.finite_diff.finite_diff_weights
"""
from ..calculus.finite_diff import _as_finite_diff
return _as_finite_diff(self, points, x0, wrt)
class Lambda(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, variables, expr):
from sympy.sets.sets import FiniteSet
v = list(variables) if iterable(variables) else [variables]
for i in v:
if not getattr(i, 'is_Symbol', False):
raise TypeError('variable is not a symbol: %s' % i)
if len(v) == 1 and v[0] == expr:
return S.IdentityFunction
obj = Expr.__new__(cls, Tuple(*v), sympify(expr))
obj.nargs = FiniteSet(len(v))
return obj
@property
def variables(self):
"""The variables used in the internal representation of the function"""
return self._args[0]
@property
def expr(self):
"""The return value of the function"""
return self._args[1]
@property
def free_symbols(self):
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
## XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
return self.expr.xreplace(dict(list(zip(self.variables, args))))
def __eq__(self, other):
if not isinstance(other, Lambda):
return False
if self.nargs != other.nargs:
return False
selfexpr = self.args[1]
otherexpr = other.args[1]
otherexpr = otherexpr.xreplace(dict(list(zip(other.args[0], self.args[0]))))
return selfexpr == otherexpr
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return super(Lambda, self).__hash__()
def _hashable_content(self):
return (self.expr.xreplace(self.canonical_variables),)
@property
def is_identity(self):
"""Return ``True`` if this ``Lambda`` is an identity function. """
if len(self.args) == 2:
return self.args[0] == self.args[1]
else:
return None
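# Brief doctest-style sketch (an illustration, not from the original file):
# because __eq__ above renames the other Lambda's variables before comparing,
# two Lambdas that differ only in the name of the bound variable should
# compare equal:
# >>> from sympy import Lambda, symbols
# >>> x, y = symbols('x y')
# >>> Lambda(x, x**2) == Lambda(y, y**2)
# True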
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
list of distinct variables and a point or list of evaluation points
corresponding to those variables.
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
A simple example:
>>> from sympy import Subs, Function, sin
>>> from sympy.abc import x, y, z
>>> f = Function('f')
>>> e = Subs(f(x).diff(x), x, y)
>>> e.subs(y, 0)
Subs(Derivative(f(x), x), (x,), (0,))
>>> e.subs(f, sin).doit()
cos(y)
An example with several variables:
>>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
Subs(z + f(x)*sin(y), (x, y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
"""
def __new__(cls, expr, variables, point, **assumptions):
from sympy import Symbol
if not is_sequence(variables, Tuple):
variables = [variables]
variables = list(sympify(variables))
if list(uniq(variables)) != variables:
repeated = [ v for v in set(variables) if variables.count(v) > 1 ]
raise ValueError('cannot substitute expressions %s more than '
'once.' % repeated)
point = Tuple(*(point if is_sequence(point, Tuple) else [point]))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
expr = sympify(expr)
# use symbols with names equal to the point value (with prepended _)
# to give a variable-independent expression
pre = "_"
pts = sorted(set(point), key=default_sort_key)
from sympy.printing import StrPrinter
class CustomStrPrinter(StrPrinter):
def _print_Dummy(self, expr):
return str(expr) + str(expr.dummy_index)
def mystr(expr, **settings):
p = CustomStrPrinter(settings)
return p.doprint(expr)
while 1:
s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
# if any underscore-prepended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + mystr(point[variables.index(r)])) != r
for _, r in reps):
pre += "_"
continue
break
obj = Expr.__new__(cls, expr, Tuple(*variables), point)
obj._expr = expr.subs(reps)
return obj
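# Illustrative note (inferred from the canonicalization above, not from the
# original docs): since ``obj._expr`` renames each variable to an
# underscore-prefixed symbol derived from its point value, Subs objects that
# differ only in the dummy variable name should compare equal:
# >>> from sympy import Subs, Function, symbols
# >>> x, y = symbols('x y')
# >>> f = Function('f')
# >>> Subs(f(x).diff(x), x, 0) == Subs(f(y).diff(y), y, 0)
# True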
def _eval_is_commutative(self):
return self.expr.is_commutative
def doit(self):
return self.expr.doit().subs(list(zip(self.variables, self.point)))
def evalf(self, prec=None, **options):
return self.doit().evalf(prec, **options)
n = evalf
@property
def variables(self):
"""The variables to be evaluated"""
return self._args[1]
@property
def expr(self):
"""The expression on which the substitution operates"""
return self._args[0]
@property
def point(self):
"""The values for which the variables are to be substituted"""
return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
def _has(self, pattern):
if pattern in self.variables and pattern not in self.point:
return False
return super(Subs, self)._has(pattern)
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._expr == other._expr
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return super(Subs, self).__hash__()
def _hashable_content(self):
return (self._expr.xreplace(self.canonical_variables),)
def _eval_subs(self, old, new):
if old in self.variables:
if old in self.point:
newpoint = tuple(new if i == old else i for i in self.point)
return self.func(self.expr, self.variables, newpoint)
return self
def _eval_derivative(self, s):
if s not in self.free_symbols:
return S.Zero
return self.func(self.expr.diff(s), self.variables, self.point).doit() \
+ Add(*[ Subs(point.diff(s) * self.expr.diff(arg),
self.variables, self.point).doit() for arg,
point in zip(self.variables, self.point) ])
def _eval_nseries(self, x, n, logx):
if x in self.point:
# x is the variable being substituted into
apos = self.point.index(x)
other = self.variables[apos]
arg = self.expr.nseries(other, n=n, logx=logx)
o = arg.getO()
subs_args = [self.func(a, *self.args[1:]) for a in arg.removeO().args]
return Add(*subs_args) + o.subs(other, x)
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
subs_args = [self.func(a, *self.args[1:]) for a in arg.removeO().args]
return Add(*subs_args) + o
def _eval_as_leading_term(self, x):
if x in self.point:
ipos = self.point.index(x)
xvar = self.variables[ipos]
return self.expr.as_leading_term(xvar)
if x in self.variables:
# if `x` is a dummy variable, it means it won't exist after the
# substitution has been performed:
return self
# The variable is independent of the substitution:
return self.expr.as_leading_term(x)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
that if there are 0 symbols (such as diff(f(x), x, 0)), then the result will
be the function (the zeroth derivative), even if evaluate=False.
Examples
========
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
Derivative(f(x), x, x, x)
>>> diff(f(x), x, 3)
Derivative(f(x), x, x, x)
>>> diff(sin(x)*cos(y), x, 2, y, 2)
sin(x)*cos(y)
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
>>> diff(sin(x))
cos(x)
>>> diff(sin(x*y))
Traceback (most recent call last):
...
ValueError: specify differentiation variables to differentiate sin(x*y)
Note that ``diff(sin(x))`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
References
==========
http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
See Also
========
Derivative
sympy.geometry.util.idiff: computes the derivative implicitly
"""
kwargs.setdefault('evaluate', True)
try:
return f._eval_diff(*symbols, **kwargs)
except AttributeError:
pass
return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
``multinomial``, ``mul``, ``power_base``, and ``power_exp``. The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
Pull out power of an argument as a coefficient and split logs products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... '''
... Doubles the args of MyClass.
...
...         If there are more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, hyperexpand
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_issue_8247_8354 in test_arit)
if expr is None:
return
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
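# Rough sketch of what the helper above does (names as defined in this
# module): one multinomial pass followed by a mul pass, which for simple
# polynomial products should agree with a full expand():
# >>> from sympy.abc import x, y
# >>> e = (x + y)*((x + y)**2 + 1)
# >>> _mexpand(e) == e.expand()
# True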
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_multinomial, exp
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(log(x) + 2*log(y))*exp(x + y)
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_trig, sin
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_complex, exp, sqrt, I
>>> from sympy.abc import z
>>> expand_complex(exp(z))
I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
See the expand docstring for more information.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
deep=False (default is True) will only apply to the top-level expression.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> from sympy.abc import x, y, z
>>> from sympy import expand_power_base, sin, cos, exp
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*exp(y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y*y**z
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
See the expand docstring for more information.
Examples
========
>>> from sympy import expand_power_exp
>>> from sympy.abc import x, y
>>> expand_power_exp(x**(y + 2))
x**2*x**y
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
If ``visual`` is ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``visual`` is ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
If expr is an iterable, the sum of the op counts of the
items will be returned.
Examples
========
>>> from sympy.abc import a, b, x, y
>>> from sympy import sin, count_ops
Although there isn't a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq=x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from sympy import Integral, Symbol
from sympy.simplify.radsimp import fraction
from sympy.logic.boolalg import BooleanFunction
expr = sympify(expr)
if isinstance(expr, Expr):
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
while args:
a = args.pop()
# XXX: This is a hack to support non-Basic args
if isinstance(a, string_types):
continue
if a.is_Rational:
#-1/3 = NEG + DIV
if a is not S.One:
if a.p < 0:
ops.append(NEG)
if a.q != 1:
ops.append(DIV)
continue
elif a.is_Mul:
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
if n < 0:
ops.append(NEG)
args.append(d)
continue # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add:
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
continue
if a.is_Pow and a.exp is S.NegativeOne:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if (a.is_Mul or
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral)):
o = Symbol(a.func.__name__.upper())
# count the args
if (a.is_Mul or isinstance(a, LatticeOp)):
ops.append(o*(len(a.args) - 1))
else:
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif type(expr) is dict:
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, BooleanFunction):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(expr.func.__name__.upper())
ops.append(o)
elif not isinstance(expr, Basic):
ops = []
else: # it's Basic not isinstance(expr, Expr):
if not isinstance(expr, Basic):
raise TypeError("Invalid type of expr")
else:
ops = []
args = [expr]
while args:
a = args.pop()
# XXX: This is a hack to support non-Basic args
if isinstance(a, string_types):
continue
if a.args:
o = Symbol(a.func.__name__.upper())
if a.is_Boolean:
ops.append(o*(len(a.args)-1))
else:
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return S.Zero
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
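# ops is a sum of terms like 2*ADD + SIN; each term's leading numeric
# coefficient (1 for a bare op Symbol, whose args are empty) is the number
# of occurrences of that operation, so summing the coefficients gives the
# total count.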
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False):
"""Make all Rationals in expr Floats except those in exponents
(unless the exponents flag is set to True).
Examples
========
>>> from sympy.core.function import nfloat
>>> from sympy.abc import x, y
>>> from sympy import cos, pi, sqrt
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
"""
from sympy.core.power import Pow
from sympy.polys.rootoftools import RootOf
if iterable(expr, exclude=string_types):
if isinstance(expr, (dict, Dict)):
return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
list(expr.items())])
return type(expr)([nfloat(a, n, exponent) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.n(n)
if rv.is_Number:
rv = Float(rv.n(n), n)
else:
pass # pure_complex(rv) is likely True
return rv
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue 6393)
rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})
if not exponent:
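# temporarily hide every exponent behind a fresh Dummy so that the
# evalf call below only floats the bases; the original exponents are
# swapped back in via the xreplace on d.exp afterwards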
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.n(n)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
# Pow._eval_evalf special cases Integer exponents so if
# exponent is supposed to be handled we have to do so here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function)))
from sympy.core.symbol import Dummy, Symbol
|
yashsharan/sympy
|
sympy/core/function.py
|
Python
|
bsd-3-clause
| 90,464
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import copy
import inspect
from itertools import product
import imp
import logging
import os
from pprint import pformat
import sys
import time
import traceback
import unittest
# project
from checks import AgentCheck
from config import get_checksd_path
from util import get_hostname, get_os
from utils.debug import get_check # noqa - FIXME 5.5.0 AgentCheck tests should not use this
log = logging.getLogger('tests')
def _is_sdk():
return "SDK_TESTING" in os.environ
def get_check_class(name):
checksd_path = get_checksd_path(get_os())
if checksd_path not in sys.path:
sys.path.append(checksd_path)
check_module = __import__(name)
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
for _, clsmember in classes:
if clsmember == AgentCheck:
continue
if issubclass(clsmember, AgentCheck):
check_class = clsmember
if AgentCheck in clsmember.__bases__:
continue
else:
break
return check_class
def load_class(check_name, class_name):
"""
Retrieve a class with the given name within the given check module.
"""
checksd_path = get_checksd_path(get_os())
if checksd_path not in sys.path:
sys.path.append(checksd_path)
check_module = __import__(check_name)
classes = inspect.getmembers(check_module, inspect.isclass)
for name, clsmember in classes:
if name == class_name:
return clsmember
raise Exception(u"Unable to import class {0} from the check module.".format(class_name))
def load_check(name, config, agentConfig):
if not _is_sdk():
checksd_path = get_checksd_path(get_os())
# find (in checksd_path) and load the check module
fd, filename, desc = imp.find_module(name, [checksd_path])
check_module = imp.load_module(name, fd, filename, desc)
else:
check_module = __import__("check")
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
for _, clsmember in classes:
if clsmember == AgentCheck:
continue
if issubclass(clsmember, AgentCheck):
check_class = clsmember
if AgentCheck in clsmember.__bases__:
continue
else:
break
if check_class is None:
raise Exception("Unable to import check %s. Missing a class that inherits AgentCheck" % name)
init_config = config.get('init_config', {})
instances = config.get('instances')
agentConfig['checksd_hostname'] = get_hostname(agentConfig)
# init the check class
try:
return check_class(name, init_config=init_config, agentConfig=agentConfig, instances=instances)
except TypeError as e:
raise Exception("Check is using old API, {0}".format(e))
except Exception:
raise
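# Hypothetical usage sketch (the check name and config below are made up for
# illustration and are not taken from this repository):
#   config = {'init_config': {}, 'instances': [{'host': 'localhost'}]}
#   agent_config = {'version': '0.1', 'api_key': 'dummy'}
#   check = load_check('ntp', config, agent_config)
#   check.check(config['instances'][0])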
class Fixtures(object):
@staticmethod
def integration_name():
for stack in inspect.stack():
# stack[1] is the file path
file_name = os.path.basename(stack[1])
if 'test_' in file_name:
# file is named test_<integration>.py, so strip the 'test_'
# prefix (5 chars) and the '.py' suffix (3 chars)
return file_name[5:-3]
raise Exception('No integration test file in stack')
@staticmethod
def directory():
return os.path.join(os.path.dirname(__file__), 'fixtures',
Fixtures.integration_name())
@staticmethod
def file(file_name):
return os.path.join(Fixtures.directory(), file_name)
@staticmethod
def read_file(file_name, string_escape=True):
with open(Fixtures.file(file_name)) as f:
contents = f.read()
if string_escape:
contents = contents.decode('string-escape')
return contents.decode("utf-8")
class AgentCheckTest(unittest.TestCase):
DEFAULT_AGENT_CONFIG = {
'version': '0.1',
'api_key': 'toto'
}
def __init__(self, *args, **kwargs):
super(AgentCheckTest, self).__init__(*args, **kwargs)
if not hasattr(self, 'CHECK_NAME'):
raise Exception("You must define CHECK_NAME")
self.check = None
def is_travis(self):
return "TRAVIS" in os.environ
def load_check(self, config, agent_config=None):
agent_config = agent_config or self.DEFAULT_AGENT_CONFIG
self.check = load_check(self.CHECK_NAME, config, agent_config)
def load_class(self, name):
"""
Retrieve a class with the given name among the check module.
"""
return load_class(self.CHECK_NAME, name)
# Helper function when testing rates
def run_check_twice(self, config, agent_config=None, mocks=None,
force_reload=False):
self.run_check(config, agent_config, mocks, force_reload)
time.sleep(1)
self.run_check(config, agent_config, mocks)
def run_check_n(self, config, agent_config=None, mocks=None,
force_reload=False, repeat=1, sleep=1):
for i in xrange(repeat):
if not i:
self.run_check(config, agent_config, mocks, force_reload)
else:
self.run_check(config, agent_config, mocks)
time.sleep(sleep)
def run_check(self, config, agent_config=None, mocks=None, force_reload=False):
# If not loaded already, do it!
if self.check is None or force_reload:
self.load_check(config, agent_config=agent_config)
if mocks is not None:
for func_name, mock in mocks.iteritems():
if not hasattr(self.check, func_name):
continue
else:
setattr(self.check, func_name, mock)
error = None
for instance in self.check.instances:
try:
# Deepcopy needed to avoid weird duplicate tagging situations
# ie the check edits the tags of the instance, problematic if
# run twice
self.check.check(copy.deepcopy(instance))
# FIXME: This should be called within the `run` method only
self.check._roll_up_instance_metadata()
except Exception as e:
# Catch error before re-raising it to be able to get service_checks
print "Exception {0} during check".format(e)
print traceback.format_exc()
error = e
self.metrics = self.check.get_metrics()
self.events = self.check.get_events()
self.service_checks = self.check.get_service_checks()
self.service_metadata = []
self.warnings = self.check.get_warnings()
# clean {} service_metadata (otherwise COVERAGE fails for nothing)
for metadata in self.check.get_service_metadata():
if metadata:
self.service_metadata.append(metadata)
if error is not None:
raise error # pylint: disable=E0702
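# Rough sketch of how a concrete test typically drives this harness (check
# name and metric are illustrative only, not real integrations):
#   class TestMyCheck(AgentCheckTest):
#       CHECK_NAME = 'my_check'
#       def test_metrics(self):
#           self.run_check({'instances': [{}]})
#           self.assertMetric('my_check.some.metric', at_least=1)
#           self.coverage_report()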
def print_current_state(self):
log.debug("""++++++++ CURRENT STATE ++++++++
METRICS
{metrics}
EVENTS
{events}
SERVICE CHECKS
{sc}
SERVICE METADATA
{sm}
WARNINGS
{warnings}
++++++++++++++++++++++++++++""".format(
metrics=pformat(self.metrics),
events=pformat(self.events),
sc=pformat(self.service_checks),
sm=pformat(self.service_metadata),
warnings=pformat(self.warnings)
))
def _generate_coverage_metrics(self, data, indice=None):
total = len(data)
tested = 0
untested = []
for d in data:
if (indice and d[indice] or d).get('tested'):
tested += 1
else:
untested.append(d)
if total == 0:
coverage = 100.0
else:
coverage = 100.0 * tested / total
return tested, total, coverage, untested
def coverage_report(self):
tested_metrics, total_metrics, coverage_metrics, untested_metrics = \
self._generate_coverage_metrics(self.metrics, indice=3)
tested_sc, total_sc, coverage_sc, untested_sc = \
self._generate_coverage_metrics(self.service_checks)
tested_sm, total_sm, coverage_sm, untested_sm = \
self._generate_coverage_metrics(self.service_metadata)
tested_events, total_events, coverage_events, untested_events = \
self._generate_coverage_metrics(self.events)
coverage = """Coverage
========================================
METRICS
Tested {tested_metrics}/{total_metrics} ({coverage_metrics}%)
UNTESTED: {untested_metrics}
EVENTS
Tested {tested_events}/{total_events} ({coverage_events}%)
UNTESTED: {untested_events}
SERVICE CHECKS
Tested {tested_sc}/{total_sc} ({coverage_sc}%)
UNTESTED: {untested_sc}
SERVICE METADATA
Tested {tested_sm}/{total_sm} ({coverage_sm}%)
UNTESTED: {untested_sm}
========================================"""
log.info(coverage.format(
tested_metrics=tested_metrics,
total_metrics=total_metrics,
coverage_metrics=coverage_metrics,
untested_metrics=pformat(untested_metrics),
tested_sc=tested_sc,
total_sc=total_sc,
coverage_sc=coverage_sc,
untested_sc=pformat(untested_sc),
tested_sm=tested_sm,
total_sm=total_sm,
coverage_sm=coverage_sm,
untested_sm=pformat(untested_sm),
tested_events=tested_events,
total_events=total_events,
coverage_events=coverage_events,
untested_events=pformat(untested_events),
))
if not os.getenv('NO_COVERAGE'):
self.assertEquals(coverage_metrics, 100.0)
self.assertEquals(coverage_events, 100.0)
self.assertEquals(coverage_sc, 100.0)
self.assertEquals(coverage_sm, 100.0)
def _candidates_size_assert(self, candidates, count=None, at_least=1):
try:
if count is not None:
self.assertEquals(
len(candidates), count,
"Needed exactly %d candidates, got %d" % (count, len(candidates))
)
else:
self.assertTrue(
len(candidates) >= at_least,
"Needed at least %d candidates, got %d" % (at_least, len(candidates))
)
except AssertionError:
self.print_current_state()
raise
def assertMetric(self, metric_name, value=None, tags=None, count=None,
at_least=1, hostname=None, device_name=None, metric_type=None):
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
if value is not None and val != value:
continue
if tags is not None and sorted(tags) != sorted(mdata.get("tags", [])):
continue
if hostname is not None and mdata['hostname'] != hostname:
continue
if device_name is not None and mdata['device_name'] != device_name:
continue
if metric_type is not None and mdata['type'] != metric_type:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (value: {1}, tags: {2}, "
"count: {3}, at_least: {4}, hostname: {5}) failed"
.format(metric_name, value, tags, count, at_least, hostname))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertMetricTagPrefix(self, metric_name, tag_prefix, count=None, at_least=1):
log.debug("Looking for a tag starting with `{0}:` on metric {1}"
.format(tag_prefix, metric_name))
if count is not None:
log.debug(" * should have exactly {0} data points".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} data points".format(at_least))
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
gtags = [t for t in mdata['tags'] if t.startswith(tag_prefix)]
if not gtags:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count)
except AssertionError:
log.error("Candidates size assertion for {0} (tag_prefix: {1}, "
"count: {2}, at_least: {3}) failed".format(metric_name,
tag_prefix,
count,
at_least))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertMetricTag(self, metric_name, tag, count=None, at_least=1):
log.debug("Looking for tag {0} on metric {1}".format(tag, metric_name))
if count is not None:
log.debug(" * should have exactly {0} data points".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} data points".format(at_least))
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
gtags = [t for t in mdata['tags'] if t == tag]
if not gtags:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count)
except AssertionError:
log.error("Candidates size assertion for {0} (tag: {1}, count={2},"
" at_least={3}) failed".format(metric_name, tag, count, at_least))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertServiceMetadata(self, meta_keys, count=None, at_least=1):
log.debug("Looking for service metadata with keys {0}".format(meta_keys))
if count is not None:
log.debug(" * should be defined for exactly {0} instances".format(count))
elif at_least is not None:
log.debug(" * should be defined for at least {0} instances".format(at_least))
candidates = []
for sm in self.service_metadata:
if sorted(sm.keys()) != sorted(meta_keys):
continue
candidates.append(sm)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for service metadata with keys {0}"
" (count: {1}, at_least: {2}) failed".format(meta_keys, count, at_least))
raise
for sm in self.service_metadata:
for csm in candidates:
if sm == csm:
sm['tested'] = True
log.debug("Service metadata FOUND !")
def assertServiceCheck(self, service_check_name, status=None, tags=None,
count=None, at_least=1):
log.debug("Looking for service check {0}".format(service_check_name))
if status is not None:
log.debug(" * with status {0}".format(status))
if tags is not None:
log.debug(" * tagged with {0}".format(tags))
if count is not None:
log.debug(" * should have exactly {0} statuses".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} statuses".format(at_least))
candidates = []
for sc in self.service_checks:
if sc['check'] == service_check_name:
if status is not None and sc['status'] != status:
continue
if tags is not None and sorted(tags) != sorted(sc.get("tags")):
continue
candidates.append(sc)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (status: {1}, "
"tags: {2}, count: {3}, at_least: {4}) failed".format(service_check_name,
status,
tags,
count,
at_least))
raise
for sc in self.service_checks:
for csc in candidates:
if sc == csc:
sc['tested'] = True
log.debug("{0} FOUND !".format(service_check_name))
def assertServiceCheckOK(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.OK,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckWarning(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.WARNING,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckCritical(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.CRITICAL,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckUnknown(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.UNKNOWN,
tags=tags,
count=count,
at_least=at_least)
def assertIn(self, first, second):
self.assertTrue(first in second, "{0} not in {1}".format(first, second))
def assertNotIn(self, first, second):
self.assertTrue(first not in second, "{0} in {1}".format(first, second))
def assertWarning(self, warning, count=None, at_least=1, exact_match=True):
log.debug("Looking for warning {0}".format(warning))
if count is not None:
log.debug(" * should have exactly {0} statuses".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} statuses".format(count))
if exact_match:
candidates = [w for w in self.warnings if w == warning]
else:
candidates = [w for w in self.warnings if warning in w]
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0}, count: {1}, "
"at_least: {2}) failed".format(warning, count, at_least))
raise
log.debug("{0} FOUND !".format(warning))
# Potential kwargs: aggregation_key, alert_type, event_type,
# msg_title, source_type_name
def assertEvent(self, msg_text, count=None, at_least=1, exact_match=True,
tags=None, **kwargs):
log.debug("Looking for event {0}".format(msg_text))
if tags is not None:
log.debug(" * tagged with {0}".format(tags))
for name, value in kwargs.iteritems():
if value is not None:
log.debug(" * with {0} {1}".format(name, value))
if count is not None:
log.debug(" * should have exactly {0} events".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} events".format(count))
candidates = []
for e in self.events:
if exact_match and msg_text != e['msg_text'] or \
not exact_match and msg_text not in e['msg_text']:
continue
if tags and set(tags) != set(e['tags']):
continue
for name, value in kwargs.iteritems():
if e[name] != value:
break
else:
candidates.append(e)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0}, count: {1}, "
"at_least: {2}) failed".format(msg_text, count, at_least))
raise
for ev, ec in product(self.events, candidates):
if ec == ev:
ev['tested'] = True
log.debug("{0} FOUND !".format(msg_text))
|
indeedops/dd-agent
|
tests/checks/common.py
|
Python
|
bsd-3-clause
| 21,959
|
"""IO with fif files containing events."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Teon Brooks <teon.brooks@gmail.com>
# Clement Moutard <clement.moutard@polytechnique.org>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from .utils import (check_fname, logger, verbose, _get_stim_channel, warn,
_validate_type, _check_option, fill_doc)
from .io.constants import FIFF
from .io.tree import dir_tree_find
from .io.tag import read_tag
from .io.open import fiff_open
from .io.write import write_int, start_block, start_file, end_block, end_file
from .io.pick import pick_channels
@fill_doc
def pick_events(events, include=None, exclude=None, step=False):
"""Select some :term:`events`.
Parameters
----------
%(events)s
include : int | list | None
        An event id to include, or a list of them.
        If None, all events are included.
exclude : int | list | None
        An event id to exclude, or a list of them.
        If None, no event is excluded. If include is not None,
        the exclude parameter is ignored.
step : bool
If True (default is False), events have a step format according
to the argument output='step' in the function find_events().
        In this case, the last two columns are considered in the inclusion/
        exclusion criteria.
Returns
-------
events : array, shape (n_events, 3)
The list of events.
"""
if include is not None:
if not isinstance(include, list):
include = [include]
mask = np.zeros(len(events), dtype=bool)
for e in include:
mask = np.logical_or(mask, events[:, 2] == e)
if step:
mask = np.logical_or(mask, events[:, 1] == e)
events = events[mask]
elif exclude is not None:
if not isinstance(exclude, list):
exclude = [exclude]
mask = np.ones(len(events), dtype=bool)
for e in exclude:
mask = np.logical_and(mask, events[:, 2] != e)
if step:
mask = np.logical_and(mask, events[:, 1] != e)
events = events[mask]
else:
events = np.copy(events)
if len(events) == 0:
raise RuntimeError("No events found")
return events
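# Illustrative usage sketch (not part of the original module; ``events`` is a
# hypothetical (n_events, 3) array of the kind returned by find_events):
#
#     >>> import numpy as np
#     >>> events = np.array([[100, 0, 1], [250, 0, 2], [400, 0, 3]])
#     >>> pick_events(events, include=[1, 2])   # keep only event ids 1 and 2
#     >>> pick_events(events, exclude=3)        # keep everything except id 3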
def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
new_id=None, fill_na=None):
"""Define new events by co-occurrence of existing events.
This function can be used to evaluate events depending on the
temporal lag to another event. For example, this can be used to
analyze evoked responses which were followed by a button press within
a defined time window.
Parameters
----------
events : ndarray
Array as returned by mne.find_events.
reference_id : int
The reference event. The event defining the epoch of interest.
target_id : int
        The target event. The event co-occurring within a certain time
        window around the reference event.
sfreq : float
The sampling frequency of the data.
    tmin : float
        The lower limit of the time window, in seconds, relative to the
        reference event.
    tmax : float
        The upper limit of the time window, in seconds, relative to the
        reference event.
new_id : int
New ID for the new event.
fill_na : int | None
        Event id to insert when the target event is not found within the
        specified time window. If None, such events are dropped.
Returns
-------
new_events : ndarray
The new defined events.
lag : ndarray
Time lag between reference and target in milliseconds.
"""
if new_id is None:
new_id = reference_id
tsample = 1e3 / sfreq
imin = int(tmin * sfreq)
imax = int(tmax * sfreq)
new_events = []
lag = []
for event in events.copy().astype(int):
if event[2] == reference_id:
lower = event[0] + imin
upper = event[0] + imax
res = events[(events[:, 0] > lower) &
(events[:, 0] < upper) & (events[:, 2] == target_id)]
if res.any():
lag += [event[0] - res[0][0]]
event[2] = new_id
new_events += [event]
elif fill_na is not None:
event[2] = fill_na
new_events += [event]
lag.append(np.nan)
new_events = np.array(new_events)
with np.errstate(invalid='ignore'): # casting nans
lag = np.abs(lag, dtype='f8')
if lag.any():
lag *= tsample
else:
lag = np.array([])
return new_events if new_events.any() else np.array([]), lag
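# Illustrative usage sketch (hypothetical event ids and timing; assumes
# ``events`` came from find_events on data sampled at 1000 Hz):
#
#     >>> new_events, lag = define_target_events(
#     ...     events, reference_id=1, target_id=32, sfreq=1000.,
#     ...     tmin=0., tmax=0.5, new_id=42, fill_na=99)
#     # reference events followed by id 32 within 0-500 ms become id 42;
#     # the remaining reference events become id 99 with a NaN lag.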
def _read_events_fif(fid, tree):
"""Aux function."""
# Find the desired block
events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
if len(events) == 0:
fid.close()
raise ValueError('Could not find event data')
events = events[0]
event_list = None
event_id = None
for d in events['directory']:
kind = d.kind
pos = d.pos
if kind == FIFF.FIFF_MNE_EVENT_LIST:
tag = read_tag(fid, pos)
event_list = tag.data
event_list.shape = (-1, 3)
break
if event_list is None:
raise ValueError('Could not find any events')
for d in events['directory']:
kind = d.kind
pos = d.pos
if kind == FIFF.FIFF_DESCRIPTION:
tag = read_tag(fid, pos)
event_id = tag.data
m_ = [[s[::-1] for s in m[::-1].split(':', 1)]
for m in event_id.split(';')]
event_id = {k: int(v) for v, k in m_}
break
elif kind == FIFF.FIFF_MNE_EVENT_COMMENTS:
tag = read_tag(fid, pos)
event_id = tag.data
event_id = event_id.tobytes().decode('latin-1').split('\x00')[:-1]
assert len(event_id) == len(event_list)
event_id = {k: v[2] for k, v in zip(event_id, event_list)}
break
return event_list, event_id
@verbose
def read_events(filename, include=None, exclude=None, mask=None,
mask_type='and', return_event_id=False, verbose=None):
"""Read :term:`events` from fif or text file.
See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays`
for more information about events.
Parameters
----------
filename : str
Name of the input file.
If the extension is .fif, events are read assuming
the file is in FIF format, otherwise (e.g., .eve,
.lst, .txt) events are read as coming from text.
Note that new format event files do not contain
the "time" column (used to be the second column).
include : int | list | None
        An event id to include, or a list of them.
        If None, all events are included.
    exclude : int | list | None
        An event id to exclude, or a list of them.
        If None, no event is excluded. If include is not None,
        the exclude parameter is ignored.
mask : int | None
The value of the digital mask to apply to the stim channel values.
If None (default), no masking is performed.
mask_type : 'and' | 'not_and'
The type of operation between the mask and the trigger.
Choose 'and' (default) for MNE-C masking behavior.
.. versionadded:: 0.13
return_event_id : bool
If True, ``event_id`` will be returned. This is only possible for
``-annot.fif`` files produced with MNE-C ``mne_browse_raw``.
.. versionadded:: 0.20
%(verbose)s
Returns
-------
%(events)s
event_id : dict
Dictionary of ``{str: int}`` mappings of event IDs.
See Also
--------
find_events, write_events
Notes
-----
This function will discard the offset line (i.e., first line with zero
event number) if it is present in a text file.
For more information on ``mask`` and ``mask_type``, see
:func:`mne.find_events`.
"""
check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
'-eve.lst', '-eve.txt', '_eve.fif',
'_eve.fif.gz', '_eve.lst', '_eve.txt',
'-annot.fif', # MNE-C annot
))
ext = op.splitext(filename)[1].lower()
if ext == '.fif' or ext == '.gz':
fid, tree, _ = fiff_open(filename)
with fid as f:
event_list, event_id = _read_events_fif(f, tree)
# hack fix for windows to avoid bincount problems
event_list = event_list.astype(int)
else:
# Have to read this in as float64 then convert because old style
# eve/lst files had a second float column that will raise errors
lines = np.loadtxt(filename, dtype=np.float64).astype(int)
if len(lines) == 0:
raise ValueError('No text lines found')
if lines.ndim == 1: # Special case for only one event
lines = lines[np.newaxis, :]
if len(lines[0]) == 4: # Old format eve/lst
goods = [0, 2, 3] # Omit "time" variable
elif len(lines[0]) == 3:
goods = [0, 1, 2]
else:
raise ValueError('Unknown number of columns in event text file')
event_list = lines[:, goods]
if (mask is not None and event_list.shape[0] > 0 and
event_list[0, 2] == 0):
event_list = event_list[1:]
warn('first row of event file discarded (zero-valued)')
event_id = None
event_list = pick_events(event_list, include, exclude)
unmasked_len = event_list.shape[0]
if mask is not None:
event_list = _mask_trigs(event_list, mask, mask_type)
masked_len = event_list.shape[0]
if masked_len < unmasked_len:
warn('{} of {} events masked'.format(unmasked_len - masked_len,
unmasked_len))
out = event_list
if return_event_id:
if event_id is None:
raise RuntimeError('No event_id found in the file')
out = (out, event_id)
return out
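# Illustrative usage sketch (hypothetical file names):
#
#     >>> events = read_events('run01-eve.fif')
#     >>> events, event_id = read_events('run01-annot.fif', return_event_id=True)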
@verbose
def write_events(filename, events, *, event_list=None, verbose=None):
"""Write :term:`events` to file.
Parameters
----------
filename : str
Name of the output file.
If the extension is .fif, events are written in
binary FIF format, otherwise (e.g., .eve, .lst,
.txt) events are written as plain text.
Note that new format event files do not contain
the "time" column (used to be the second column).
%(events)s
event_list : array, shape (n_events, 3)
Deprecated, use argument events instead.
%(verbose)s
See Also
--------
read_events
"""
if event_list is not None:
warn('Argument "event_list" is deprecated, use "events" instead.',
DeprecationWarning)
events = event_list
del event_list
check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
'-eve.lst', '-eve.txt', '_eve.fif',
'_eve.fif.gz', '_eve.lst', '_eve.txt'))
ext = op.splitext(filename)[1].lower()
if ext == '.fif' or ext == '.gz':
# Start writing...
fid = start_file(filename)
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, events.T)
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
end_file(fid)
else:
f = open(filename, 'w')
for e in events:
f.write('%6d %6d %3d\n' % tuple(e))
f.close()
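# Illustrative round trip (hypothetical file name; ``events`` is an
# (n_events, 3) integer array):
#
#     >>> write_events('run01-eve.fif', events)
#     >>> events_again = read_events('run01-eve.fif')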
def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0):
changed = np.diff(data, axis=1) != 0
idx = np.where(np.all(changed, axis=0))[0]
if len(idx) == 0:
return np.empty((0, 3), dtype='int32')
pre_step = data[0, idx]
idx += 1
post_step = data[0, idx]
idx += first_samp
steps = np.c_[idx, pre_step, post_step]
if pad_start is not None:
v = steps[0, 1]
if v != pad_start:
steps = np.insert(steps, 0, [0, pad_start, v], axis=0)
if pad_stop is not None:
v = steps[-1, 2]
if v != pad_stop:
last_idx = len(data[0]) + first_samp
steps = np.append(steps, [[last_idx, v, pad_stop]], axis=0)
if merge != 0:
diff = np.diff(steps[:, 0])
idx = (diff <= abs(merge))
if np.any(idx):
where = np.where(idx)[0]
keep = np.logical_not(idx)
if merge > 0:
# drop the earlier event
steps[where + 1, 1] = steps[where, 1]
keep = np.append(keep, True)
else:
# drop the later event
steps[where, 2] = steps[where + 1, 2]
keep = np.insert(keep, 0, True)
is_step = (steps[:, 1] != steps[:, 2])
keep = np.logical_and(keep, is_step)
steps = steps[keep]
return steps
def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
stim_channel=None):
"""Find all steps in data from a stim channel.
Parameters
----------
raw : Raw object
The raw data.
pad_start : None | int
Values to assume outside of the stim channel (e.g., if pad_start=0 and
the stim channel starts with value 5, an event of [0, 0, 5] will be
inserted at the beginning). With None, no steps will be inserted.
pad_stop : None | int
Values to assume outside of the stim channel, see ``pad_start``.
merge : int
Merge steps occurring in neighboring samples. The integer value
indicates over how many samples events should be merged, and the sign
indicates in which direction they should be merged (negative means
towards the earlier event, positive towards the later event).
stim_channel : None | str | list of str
Name of the stim channel or all the stim channels
affected by the trigger. If None, the config variables
'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
etc. are read. If these are not found, it will default to
'STI101' or 'STI 014', whichever is present.
Returns
-------
steps : array, shape = (n_samples, 3)
For each step in the stim channel the values [sample, v_from, v_to].
The first column contains the event time in samples (the first sample
with the new value). The second column contains the stim channel value
before the step, and the third column contains value after the step.
See Also
--------
find_events : More sophisticated options for finding events in a Raw file.
"""
# pull stim channel from config if necessary
stim_channel = _get_stim_channel(stim_channel, raw.info)
picks = pick_channels(raw.info['ch_names'], include=stim_channel)
if len(picks) == 0:
raise ValueError('No stim channel found to extract event triggers.')
data, _ = raw[picks, :]
if np.any(data < 0):
warn('Trigger channel contains negative values, using absolute value.')
data = np.abs(data) # make sure trig channel is positive
data = data.astype(np.int64)
return _find_stim_steps(data, raw.first_samp, pad_start=pad_start,
pad_stop=pad_stop, merge=merge)
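# Illustrative usage sketch (assumes ``raw`` is an instance of mne.io.Raw with
# a stim channel):
#
#     >>> steps = find_stim_steps(raw, pad_start=0, pad_stop=0)
#     # each row is [sample, value_before_step, value_after_step]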
@verbose
def _find_events(data, first_samp, verbose=None, output='onset',
consecutive='increasing', min_samples=0, mask=None,
uint_cast=False, mask_type='and', initial_event=False):
"""Help find events."""
assert data.shape[0] == 1 # data should be only a row vector
if min_samples > 0:
merge = int(min_samples // 1)
if merge == min_samples:
merge -= 1
else:
merge = 0
data = data.astype(np.int64)
if uint_cast:
data = data.astype(np.uint16).astype(np.int64)
if data.min() < 0:
warn('Trigger channel contains negative values, using absolute '
'value. If data were acquired on a Neuromag system with '
'STI016 active, consider using uint_cast=True to work around '
'an acquisition bug')
data = np.abs(data) # make sure trig channel is positive
events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge)
initial_value = data[0, 0]
if initial_value != 0:
if initial_event:
events = np.insert(events, 0, [0, 0, initial_value], axis=0)
else:
logger.info('Trigger channel has a non-zero initial value of {} '
'(consider using initial_event=True to detect this '
'event)'.format(initial_value))
events = _mask_trigs(events, mask, mask_type)
# Determine event onsets and offsets
if consecutive == 'increasing':
onsets = (events[:, 2] > events[:, 1])
offsets = np.logical_and(np.logical_or(onsets, (events[:, 2] == 0)),
(events[:, 1] > 0))
elif consecutive:
onsets = (events[:, 2] > 0)
offsets = (events[:, 1] > 0)
else:
onsets = (events[:, 1] == 0)
offsets = (events[:, 2] == 0)
onset_idx = np.where(onsets)[0]
offset_idx = np.where(offsets)[0]
if len(onset_idx) == 0 or len(offset_idx) == 0:
return np.empty((0, 3), dtype='int32')
# delete orphaned onsets/offsets
if onset_idx[0] > offset_idx[0]:
logger.info("Removing orphaned offset at the beginning of the file.")
offset_idx = np.delete(offset_idx, 0)
if onset_idx[-1] > offset_idx[-1]:
logger.info("Removing orphaned onset at the end of the file.")
onset_idx = np.delete(onset_idx, -1)
if output == 'onset':
events = events[onset_idx]
elif output == 'step':
idx = np.union1d(onset_idx, offset_idx)
events = events[idx]
elif output == 'offset':
event_id = events[onset_idx, 2]
events = events[offset_idx]
events[:, 1] = events[:, 2]
events[:, 2] = event_id
events[:, 0] -= 1
else:
raise ValueError("Invalid output parameter %r" % output)
logger.info("%s events found" % len(events))
logger.info("Event IDs: %s" % np.unique(events[:, 2]))
return events
def _find_unique_events(events):
"""Uniquify events (ie remove duplicated rows."""
e = np.ascontiguousarray(events).view(
np.dtype((np.void, events.dtype.itemsize * events.shape[1])))
_, idx = np.unique(e, return_index=True)
n_dupes = len(events) - len(idx)
if n_dupes > 0:
warn("Some events are duplicated in your different stim channels."
" %d events were ignored during deduplication." % n_dupes)
return events[idx]
@verbose
def find_events(raw, stim_channel=None, output='onset',
consecutive='increasing', min_duration=0,
shortest_event=2, mask=None, uint_cast=False,
mask_type='and', initial_event=False, verbose=None):
"""Find :term:`events` from raw file.
See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays`
for more information about events.
Parameters
----------
raw : Raw object
The raw data.
stim_channel : None | str | list of str
Name of the stim channel or all the stim channels
affected by triggers. If None, the config variables
'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
etc. are read. If these are not found, it will fall back to
'STI 014' if present, then fall back to the first channel of type
'stim', if present. If multiple channels are provided
then the returned events are the union of all the events
extracted from individual stim channels.
output : 'onset' | 'offset' | 'step'
Whether to report when events start, when events end, or both.
consecutive : bool | 'increasing'
If True, consider instances where the value of the events
channel changes without first returning to zero as multiple
events. If False, report only instances where the value of the
events channel changes from/to zero. If 'increasing', report
adjacent events only when the second event code is greater than
the first.
min_duration : float
The minimum duration of a change in the events channel required
to consider it as an event (in seconds).
shortest_event : int
Minimum number of samples an event must last (default is 2). If the
duration is less than this an exception will be raised.
mask : int | None
The value of the digital mask to apply to the stim channel values.
If None (default), no masking is performed.
uint_cast : bool
If True (default False), do a cast to ``uint16`` on the channel
data. This can be used to fix a bug with STI101 and STI014 in
Neuromag acquisition setups that use channel STI016 (channel 16
turns data into e.g. -32768), similar to ``mne_fix_stim14 --32``
in MNE-C.
.. versionadded:: 0.12
mask_type : 'and' | 'not_and'
The type of operation between the mask and the trigger.
Choose 'and' (default) for MNE-C masking behavior.
.. versionadded:: 0.13
initial_event : bool
If True (default False), an event is created if the stim channel has a
value different from 0 as its first sample. This is useful if an event
at t=0s is present.
.. versionadded:: 0.16
%(verbose)s
Returns
-------
%(events)s
See Also
--------
find_stim_steps : Find all the steps in the stim channel.
read_events : Read events from disk.
write_events : Write events to disk.
Notes
-----
.. warning:: If you are working with downsampled data, events computed
before decimation are no longer valid. Please recompute
your events after decimation, but note this reduces the
precision of event timing.
Examples
--------
Consider data with a stim channel that looks like::
[0, 32, 32, 33, 32, 0]
By default, find_events returns all samples at which the value of the
stim channel increases::
>>> print(find_events(raw)) # doctest: +SKIP
[[ 1 0 32]
[ 3 32 33]]
If consecutive is False, find_events only returns the samples at which
the stim channel changes from zero to a non-zero value::
>>> print(find_events(raw, consecutive=False)) # doctest: +SKIP
[[ 1 0 32]]
If consecutive is True, find_events returns samples at which the
event changes, regardless of whether it first returns to zero::
>>> print(find_events(raw, consecutive=True)) # doctest: +SKIP
[[ 1 0 32]
[ 3 32 33]
[ 4 33 32]]
If output is 'offset', find_events returns the last sample of each event
instead of the first one::
>>> print(find_events(raw, consecutive=True, # doctest: +SKIP
... output='offset'))
[[ 2 33 32]
[ 3 32 33]
[ 4 0 32]]
If output is 'step', find_events returns the samples at which an event
starts or ends::
>>> print(find_events(raw, consecutive=True, # doctest: +SKIP
... output='step'))
[[ 1 0 32]
[ 3 32 33]
[ 4 33 32]
[ 5 32 0]]
To ignore spurious events, it is also possible to specify a minimum
event duration. Assuming our events channel has a sample rate of
1000 Hz::
>>> print(find_events(raw, consecutive=True, # doctest: +SKIP
... min_duration=0.002))
[[ 1 0 32]]
For the digital mask, if mask_type is set to 'and' it will take the
binary representation of the digital mask, e.g. 5 -> '00000101', and will
allow the values to pass where mask is one, e.g.::
7 '0000111' <- trigger value
37 '0100101' <- mask
----------------
5 '0000101'
For the digital mask, if mask_type is set to 'not_and' it will take the
binary representation of the digital mask, e.g. 5 -> '00000101', and will
block the values where mask is one, e.g.::
7 '0000111' <- trigger value
37 '0100101' <- mask
----------------
2 '0000010'
"""
min_samples = min_duration * raw.info['sfreq']
# pull stim channel from config if necessary
try:
stim_channel = _get_stim_channel(stim_channel, raw.info)
except ValueError:
if len(raw.annotations) > 0:
raise ValueError("No stim channels found, but the raw object has "
"annotations. Consider using "
"mne.events_from_annotations to convert these to "
"events.")
else:
raise
picks = pick_channels(raw.info['ch_names'], include=stim_channel)
if len(picks) == 0:
raise ValueError('No stim channel found to extract event triggers.')
data, _ = raw[picks, :]
events_list = []
for d in data:
events = _find_events(d[np.newaxis, :], raw.first_samp,
verbose=verbose, output=output,
consecutive=consecutive, min_samples=min_samples,
mask=mask, uint_cast=uint_cast,
mask_type=mask_type, initial_event=initial_event)
# add safety check for spurious events (for ex. from neuromag syst.) by
# checking the number of low sample events
n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event)
if n_short_events > 0:
raise ValueError("You have %i events shorter than the "
"shortest_event. These are very unusual and you "
"may want to set min_duration to a larger value "
"e.g. x / raw.info['sfreq']. Where x = 1 sample "
"shorter than the shortest event "
"length." % (n_short_events))
events_list.append(events)
events = np.concatenate(events_list, axis=0)
events = _find_unique_events(events)
events = events[np.argsort(events[:, 0])]
return events
def _mask_trigs(events, mask, mask_type):
"""Mask digital trigger values."""
_check_option('mask_type', mask_type, ['not_and', 'and'])
if mask is not None:
_validate_type(mask, "int", "mask", "int or None")
n_events = len(events)
if n_events == 0:
return events.copy()
if mask is not None:
if mask_type == 'not_and':
mask = np.bitwise_not(mask)
elif mask_type != 'and':
raise ValueError("'mask_type' should be either 'and'"
" or 'not_and', instead of '%s'" % mask_type)
events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
events = events[events[:, 1] != events[:, 2]]
return events
def merge_events(events, ids, new_id, replace_events=True):
"""Merge a set of :term:`events`.
Parameters
----------
events : array, shape (n_events_in, 3)
Events.
ids : array of int
The ids of events to merge.
new_id : int
The new id.
replace_events : bool
If True (default), old event ids are replaced. Otherwise,
new events will be added to the old event list.
Returns
-------
new_events : array, shape (n_events_out, 3)
The new events.
Notes
-----
Rather than merging events you can use hierarchical event_id
in Epochs. For example, here::
>>> event_id = {'auditory/left': 1, 'auditory/right': 2}
And the condition 'auditory' would correspond to either 1 or 2.
Examples
--------
Here is quick example of the behavior::
>>> events = [[134, 0, 1], [341, 0, 2], [502, 0, 3]]
>>> merge_events(events, [1, 2], 12, replace_events=True)
array([[134, 0, 12],
[341, 0, 12],
[502, 0, 3]])
>>> merge_events(events, [1, 2], 12, replace_events=False)
array([[134, 0, 1],
[134, 0, 12],
[341, 0, 2],
[341, 0, 12],
[502, 0, 3]])
"""
events = np.asarray(events)
events_out = events.copy()
idx_touched = [] # to keep track of the original events we can keep
for col in [1, 2]:
for i in ids:
mask = events[:, col] == i
events_out[mask, col] = new_id
idx_touched.append(np.where(mask)[0])
if not replace_events:
idx_touched = np.unique(np.concatenate(idx_touched))
events_out = np.concatenate((events_out, events[idx_touched]), axis=0)
# Now sort in lexical order
events_out = events_out[np.lexsort(events_out.T[::-1])]
return events_out
@fill_doc
def shift_time_events(events, ids, tshift, sfreq):
"""Shift a set of :term:`events`.
Parameters
----------
%(events)s
ids : ndarray of int | None
The ids of events to shift.
tshift : float
Time-shift event. Use positive value tshift for forward shifting
the event and negative value for backward shift.
sfreq : float
The sampling frequency of the data.
Returns
-------
new_events : array of int, shape (n_new_events, 3)
The new events.
"""
events = events.copy()
if ids is None:
mask = slice(None)
else:
mask = np.in1d(events[:, 2], ids)
events[mask, 0] += int(tshift * sfreq)
return events
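# Illustrative usage sketch (hypothetical shift: delay events with id 1 by
# 20 ms for data sampled at 1000 Hz):
#
#     >>> shifted = shift_time_events(events, ids=[1], tshift=0.02, sfreq=1000.)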
@fill_doc
def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1.,
first_samp=True, overlap=0.):
"""Make a set of :term:`events` separated by a fixed duration.
Parameters
----------
raw : instance of Raw
A raw object to use the data from.
id : int
The id to use (default 1).
start : float
Time of first event (in seconds).
stop : float | None
Maximum time of last event (in seconds). If None, events extend to the
end of the recording.
duration : float
The duration to separate events by (in seconds).
first_samp : bool
If True (default), times will have :term:`first_samp` added to them, as
in :func:`mne.find_events`. This behavior is not desirable if the
returned events will be combined with event times that already
have :term:`first_samp` added to them, e.g. event times that come
from :func:`mne.find_events`.
overlap : float
The overlap between events (in seconds).
Must be ``0 <= overlap < duration``.
.. versionadded:: 0.18
Returns
-------
%(events)s
"""
from .io.base import BaseRaw
_validate_type(raw, BaseRaw, "raw")
_validate_type(id, int, "id")
_validate_type(duration, "numeric", "duration")
_validate_type(overlap, "numeric", "overlap")
duration, overlap = float(duration), float(overlap)
if not 0 <= overlap < duration:
raise ValueError('overlap must be >=0 but < duration (%s), got %s'
% (duration, overlap))
start = raw.time_as_index(start, use_rounding=True)[0]
if stop is not None:
stop = raw.time_as_index(stop, use_rounding=True)[0]
else:
stop = raw.last_samp + 1
if first_samp:
start = start + raw.first_samp
stop = min([stop + raw.first_samp, raw.last_samp + 1])
else:
stop = min([stop, len(raw.times)])
# Make sure we don't go out the end of the file:
stop -= int(np.round(raw.info['sfreq'] * duration))
# This should be inclusive due to how we generally use start and stop...
ts = np.arange(start, stop + 1,
raw.info['sfreq'] * (duration - overlap)).astype(int)
n_events = len(ts)
if n_events == 0:
raise ValueError('No events produced, check the values of start, '
'stop, and duration')
events = np.c_[ts, np.zeros(n_events, dtype=int),
id * np.ones(n_events, dtype=int)]
return events
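# Illustrative usage sketch (assumes ``raw`` is an instance of mne.io.Raw):
#
#     >>> events = make_fixed_length_events(raw, id=1, duration=2., overlap=0.5)
#     # one event every 1.5 s, suitable for cutting 2 s epochs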
def concatenate_events(events, first_samps, last_samps):
"""Concatenate event lists to be compatible with concatenate_raws.
This is useful, for example, if you processed and/or changed
events in raw files separately before combining them using
:func:`mne.concatenate_raws`.
Parameters
----------
events : list of array
List of :term:`events` arrays, typically each extracted from a
corresponding raw file that is being concatenated.
first_samps : list or array of int
First sample numbers of the raw files concatenated.
last_samps : list or array of int
Last sample numbers of the raw files concatenated.
Returns
-------
events : array
The concatenated events.
See Also
--------
mne.concatenate_raws
"""
_validate_type(events, list, "events")
if not (len(events) == len(last_samps) and
len(events) == len(first_samps)):
raise ValueError('events, first_samps, and last_samps must all have '
'the same lengths')
first_samps = np.array(first_samps)
last_samps = np.array(last_samps)
n_samps = np.cumsum(last_samps - first_samps + 1)
events_out = events[0]
for e, f, n in zip(events[1:], first_samps[1:], n_samps[:-1]):
# remove any skip since it doesn't exist in concatenated files
e2 = e.copy()
e2[:, 0] -= f
# add offset due to previous files, plus original file offset
e2[:, 0] += n + first_samps[0]
events_out = np.concatenate((events_out, e2), axis=0)
return events_out
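# Illustrative usage sketch (assumes raw1/raw2 are the Raw objects whose events
# were found separately before the recordings were concatenated):
#
#     >>> events = concatenate_events([events1, events2],
#     ...                             [raw1.first_samp, raw2.first_samp],
#     ...                             [raw1.last_samp, raw2.last_samp])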
@fill_doc
class AcqParserFIF(object):
"""Parser for Elekta data acquisition settings.
This class parses parameters (e.g. events and averaging categories) that
are defined in the Elekta TRIUX/VectorView data acquisition software (DACQ)
and stored in ``info['acq_pars']``. It can be used to reaverage raw data
according to DACQ settings and modify original averaging settings if
necessary.
Parameters
----------
%(info_not_none)s This is where the DACQ parameters will be taken from.
Attributes
----------
categories : list
List of averaging categories marked active in DACQ.
events : list
List of events that are in use (referenced by some averaging category).
reject : dict
Rejection criteria from DACQ that can be used with mne.Epochs.
Note that mne does not support all DACQ rejection criteria
(e.g. spike, slope).
flat : dict
Flatness rejection criteria from DACQ that can be used with mne.Epochs.
acq_dict : dict
All DACQ parameters.
See Also
--------
mne.io.Raw.acqparser : Access the parser through a Raw attribute.
Notes
-----
Any averaging category (also non-active ones) can be accessed by indexing
as ``acqparserfif['category_name']``.
"""
# DACQ variables always start with one of these
_acq_var_magic = ['ERF', 'DEF', 'ACQ', 'TCP']
# averager related DACQ variable names (without preceding 'ERF')
# old versions (DACQ < 3.4)
_dacq_vars_compat = ('megMax', 'megMin', 'megNoise', 'megSlope',
'megSpike', 'eegMax', 'eegMin', 'eegNoise',
'eegSlope', 'eegSpike', 'eogMax', 'ecgMax', 'ncateg',
'nevent', 'stimSource', 'triggerMap', 'update',
'artefIgnore', 'averUpdate')
_event_vars_compat = ('Comment', 'Delay')
_cat_vars = ('Comment', 'Display', 'Start', 'State', 'End', 'Event',
'Nave', 'ReqEvent', 'ReqWhen', 'ReqWithin', 'SubAve')
# new versions only (DACQ >= 3.4)
_dacq_vars = _dacq_vars_compat + ('magMax', 'magMin', 'magNoise',
'magSlope', 'magSpike', 'version')
_event_vars = _event_vars_compat + ('Name', 'Channel', 'NewBits',
'OldBits', 'NewMask', 'OldMask')
def __init__(self, info): # noqa: D102
acq_pars = info['acq_pars']
if not acq_pars:
raise ValueError('No acquisition parameters')
self.acq_dict = dict(self._acqpars_gen(acq_pars))
if 'ERFversion' in self.acq_dict:
self.compat = False # DACQ ver >= 3.4
elif 'ERFncateg' in self.acq_dict: # probably DACQ < 3.4
self.compat = True
else:
raise ValueError('Cannot parse acquisition parameters')
dacq_vars = self._dacq_vars_compat if self.compat else self._dacq_vars
# set instance variables
for var in dacq_vars:
val = self.acq_dict['ERF' + var]
if var[:3] in ['mag', 'meg', 'eeg', 'eog', 'ecg']:
val = float(val)
elif var in ['ncateg', 'nevent']:
val = int(val)
setattr(self, var.lower(), val)
self.stimsource = (
'Internal' if self.stimsource == '1' else 'External')
# collect all events and categories
self._events = self._events_from_acq_pars()
self._categories = self._categories_from_acq_pars()
# mark events that are used by a category
for cat in self._categories.values():
if cat['event']:
self._events[cat['event']]['in_use'] = True
if cat['reqevent']:
self._events[cat['reqevent']]['in_use'] = True
# make mne rejection dicts based on the averager parameters
self.reject = {'grad': self.megmax, 'eeg': self.eegmax,
'eog': self.eogmax, 'ecg': self.ecgmax}
if not self.compat:
self.reject['mag'] = self.magmax
self.reject = {k: float(v) for k, v in self.reject.items()
if float(v) > 0}
self.flat = {'grad': self.megmin, 'eeg': self.eegmin}
if not self.compat:
self.flat['mag'] = self.magmin
self.flat = {k: float(v) for k, v in self.flat.items()
if float(v) > 0}
def __repr__(self): # noqa: D105
s = '<AcqParserFIF | '
s += 'categories: %d ' % self.ncateg
cats_in_use = len(self._categories_in_use)
s += '(%d in use), ' % cats_in_use
s += 'events: %d ' % self.nevent
evs_in_use = len(self._events_in_use)
s += '(%d in use)' % evs_in_use
if self.categories:
s += '\nAveraging categories:'
for cat in self.categories:
s += '\n%d: "%s"' % (cat['index'], cat['comment'])
s += '>'
return s
def __getitem__(self, item):
"""Return an averaging category, or list of categories.
Parameters
----------
item : str | list of str
Name of the category (comment field in DACQ).
Returns
-------
conds : dict | list of dict
Each dict should have the following keys:
comment: str
The comment field in DACQ.
state : bool
Whether the category was marked enabled in DACQ.
index : int
The index of the category in DACQ. Indices start from 1.
event : int
DACQ index of the reference event (trigger event, zero time for
the corresponding epochs). Note that the event indices start
from 1.
start : float
Start time of epoch relative to the reference event.
end : float
End time of epoch relative to the reference event.
reqevent : int
Index of the required (conditional) event.
reqwhen : int
Whether the required event is required before (1) or after (2)
the reference event.
reqwithin : float
The time range within which the required event must occur,
before or after the reference event.
display : bool
Whether the category was displayed online in DACQ.
nave : int
Desired number of averages. DACQ stops collecting averages once
this number is reached.
subave : int
Whether to compute normal and alternating subaverages, and
how many epochs to include. See the Elekta data acquisition
manual for details. Currently the class does not offer any
facility for computing subaverages, but it can be done manually
by the user after collecting the epochs.
"""
if isinstance(item, str):
item = [item]
else:
_validate_type(item, list, "Keys", "category names")
cats = list()
for it in item:
if it in self._categories:
cats.append(self._categories[it])
else:
raise KeyError('No such category')
return cats[0] if len(cats) == 1 else cats
def __len__(self):
"""Return number of averaging categories marked active in DACQ.
Returns
-------
n_cat : int
The number of categories.
"""
return len(self.categories)
def _events_from_acq_pars(self):
"""Collect DACQ events into a dict.
Events are keyed by number starting from 1 (DACQ index of event).
Each event is itself represented by a dict containing the event
parameters.
"""
# lookup table for event number -> bits for old DACQ versions
_compat_event_lookup = {1: 1, 2: 2, 3: 4, 4: 8, 5: 16, 6: 32, 7: 3,
8: 5, 9: 6, 10: 7, 11: 9, 12: 10, 13: 11,
14: 12, 15: 13, 16: 14, 17: 15}
events = dict()
for evnum in range(1, self.nevent + 1):
evnum_s = str(evnum).zfill(2) # '01', '02' etc.
evdi = dict()
event_vars = (self._event_vars_compat if self.compat
else self._event_vars)
for var in event_vars:
# name of DACQ variable, e.g. 'ERFeventNewBits01'
acq_key = 'ERFevent' + var + evnum_s
# corresponding dict key, e.g. 'newbits'
dict_key = var.lower()
val = self.acq_dict[acq_key]
# type convert numeric values
if dict_key in ['newbits', 'oldbits', 'newmask', 'oldmask']:
val = int(val)
elif dict_key in ['delay']:
val = float(val)
evdi[dict_key] = val
evdi['in_use'] = False # __init__() will set this
evdi['index'] = evnum
if self.compat:
evdi['name'] = str(evnum)
evdi['oldmask'] = 63
evdi['newmask'] = 63
evdi['oldbits'] = 0
evdi['newbits'] = _compat_event_lookup[evnum]
events[evnum] = evdi
return events
def _acqpars_gen(self, acq_pars):
"""Yield key/value pairs from ``info['acq_pars'])``."""
key, val = '', ''
for line in acq_pars.split():
if any([line.startswith(x) for x in self._acq_var_magic]):
key = line
val = ''
else:
if not key:
raise ValueError('Cannot parse acquisition parameters')
# DACQ splits items with spaces into multiple lines
val += ' ' + line if val else line
yield key, val
def _categories_from_acq_pars(self):
"""Collect DACQ averaging categories into a dict.
Categories are keyed by the comment field in DACQ. Each category is
        itself represented by a dict containing the category parameters.
"""
cats = dict()
for catnum in [str(x).zfill(2) for x in range(1, self.nevent + 1)]:
catdi = dict()
# read all category variables
for var in self._cat_vars:
acq_key = 'ERFcat' + var + catnum
class_key = var.lower()
val = self.acq_dict[acq_key]
catdi[class_key] = val
# some type conversions
catdi['display'] = (catdi['display'] == '1')
catdi['state'] = (catdi['state'] == '1')
for key in ['start', 'end', 'reqwithin']:
catdi[key] = float(catdi[key])
for key in ['nave', 'event', 'reqevent', 'reqwhen', 'subave']:
catdi[key] = int(catdi[key])
# some convenient extra (non-DACQ) vars
catdi['index'] = int(catnum) # index of category in DACQ list
cats[catdi['comment']] = catdi
return cats
def _events_mne_to_dacq(self, mne_events):
"""Create list of DACQ events based on mne trigger transitions list.
mne_events is typically given by mne.find_events (use consecutive=True
to get all transitions). Output consists of rows in the form
[t, 0, event_codes] where t is time in samples and event_codes is all
DACQ events compatible with the transition, bitwise ORed together:
e.g. [t1, 0, 5] means that events 1 and 3 occurred at time t1,
as 2**(1 - 1) + 2**(3 - 1) = 5.
"""
events_ = mne_events.copy()
events_[:, 1:3] = 0
for n, ev in self._events.items():
if ev['in_use']:
pre_ok = (
np.bitwise_and(ev['oldmask'],
mne_events[:, 1]) == ev['oldbits'])
post_ok = (
np.bitwise_and(ev['newmask'],
mne_events[:, 2]) == ev['newbits'])
ok_ind = np.where(pre_ok & post_ok)
events_[ok_ind, 2] |= 1 << (n - 1)
return events_
def _mne_events_to_category_t0(self, cat, mne_events, sfreq):
"""Translate mne_events to epoch zero times (t0).
First mne events (trigger transitions) are converted into DACQ events.
Then the zero times for the epochs are obtained by considering the
reference and conditional (required) events and the delay to stimulus.
"""
cat_ev = cat['event']
cat_reqev = cat['reqevent']
# first convert mne events to dacq event list
events = self._events_mne_to_dacq(mne_events)
# next, take req. events and delays into account
times = events[:, 0]
# indices of times where ref. event occurs
refEvents_inds = np.where(events[:, 2] & (1 << cat_ev - 1))[0]
refEvents_t = times[refEvents_inds]
if cat_reqev:
# indices of times where req. event occurs
reqEvents_inds = np.where(events[:, 2] & (
1 << cat_reqev - 1))[0]
reqEvents_t = times[reqEvents_inds]
# relative (to refevent) time window where req. event
# must occur (e.g. [0 .2])
twin = [0, (-1)**(cat['reqwhen']) * cat['reqwithin']]
win = np.round(np.array(sorted(twin)) * sfreq) # to samples
refEvents_wins = refEvents_t[:, None] + win
req_acc = np.zeros(refEvents_inds.shape, dtype=bool)
for t in reqEvents_t:
# mark time windows where req. condition is satisfied
reqEvent_in_win = np.logical_and(
t >= refEvents_wins[:, 0], t <= refEvents_wins[:, 1])
req_acc |= reqEvent_in_win
# drop ref. events where req. event condition is not satisfied
refEvents_inds = refEvents_inds[np.where(req_acc)]
refEvents_t = times[refEvents_inds]
# adjust for trigger-stimulus delay by delaying the ref. event
refEvents_t += int(np.round(self._events[cat_ev]['delay'] * sfreq))
return refEvents_t
@property
def categories(self):
"""Return list of averaging categories ordered by DACQ index.
Only returns categories marked active in DACQ.
"""
cats = sorted(self._categories_in_use.values(),
key=lambda cat: cat['index'])
return cats
@property
def events(self):
"""Return events ordered by DACQ index.
Only returns events that are in use (referred to by a category).
"""
evs = sorted(self._events_in_use.values(), key=lambda ev: ev['index'])
return evs
@property
def _categories_in_use(self):
return {k: v for k, v in self._categories.items() if v['state']}
@property
def _events_in_use(self):
return {k: v for k, v in self._events.items() if v['in_use']}
def get_condition(self, raw, condition=None, stim_channel=None, mask=None,
uint_cast=None, mask_type='and', delayed_lookup=True):
"""Get averaging parameters for a condition (averaging category).
Output is designed to be used with the Epochs class to extract the
corresponding epochs.
Parameters
----------
raw : Raw object
An instance of Raw.
condition : None | str | dict | list of dict
Condition or a list of conditions. Conditions can be strings
(DACQ comment field, e.g. 'Auditory left') or category dicts
(e.g. acqp['Auditory left'], where acqp is an instance of
AcqParserFIF). If None, get all conditions marked active in
DACQ.
stim_channel : None | str | list of str
Name of the stim channel or all the stim channels
affected by the trigger. If None, the config variables
'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
etc. are read. If these are not found, it will fall back to
'STI101' or 'STI 014' if present, then fall back to the first
channel of type 'stim', if present.
mask : int | None
The value of the digital mask to apply to the stim channel values.
If None (default), no masking is performed.
uint_cast : bool
If True (default False), do a cast to ``uint16`` on the channel
data. This can be used to fix a bug with STI101 and STI014 in
Neuromag acquisition setups that use channel STI016 (channel 16
turns data into e.g. -32768), similar to ``mne_fix_stim14 --32``
in MNE-C.
mask_type : 'and' | 'not_and'
The type of operation between the mask and the trigger.
Choose 'and' for MNE-C masking behavior.
delayed_lookup : bool
If True, use the 'delayed lookup' procedure implemented in Elekta
software. When a trigger transition occurs, the lookup of
the new trigger value will not happen immediately at the following
sample, but with a 1-sample delay. This allows a slight
asynchrony between trigger onsets, when they are intended to be
synchronous. If you have accurate hardware and want to detect
transitions with a resolution of one sample, use
delayed_lookup=False.
Returns
-------
conds_data : dict or list of dict
Each dict has the following keys:
events : array, shape (n_epochs_out, 3)
List of zero time points (t0) for the epochs matching the
condition. Use as the ``events`` parameter to Epochs. Note
that these are not (necessarily) actual events.
event_id : dict
Name of condition and index compatible with ``events``.
Should be passed as the ``event_id`` parameter to Epochs.
tmin : float
Epoch starting time relative to t0. Use as the ``tmin``
parameter to Epochs.
tmax : float
Epoch ending time relative to t0. Use as the ``tmax``
parameter to Epochs.
"""
if condition is None:
condition = self.categories # get all
if not isinstance(condition, list):
condition = [condition] # single cond -> listify
conds_data = list()
for cat in condition:
if isinstance(cat, str):
cat = self[cat]
mne_events = find_events(raw, stim_channel=stim_channel, mask=mask,
mask_type=mask_type, output='step',
uint_cast=uint_cast, consecutive=True,
verbose=False, shortest_event=1)
if delayed_lookup:
ind = np.where(np.diff(mne_events[:, 0]) == 1)[0]
if 1 in np.diff(ind):
raise ValueError('There are several subsequent '
'transitions on the trigger channel. '
'This will not work well with '
'delayed_lookup=True. You may want to '
'check your trigger data and '
'set delayed_lookup=False.')
mne_events[ind, 2] = mne_events[ind + 1, 2]
mne_events = np.delete(mne_events, ind + 1, axis=0)
sfreq = raw.info['sfreq']
cat_t0_ = self._mne_events_to_category_t0(cat, mne_events, sfreq)
# make it compatible with the usual events array
cat_t0 = np.c_[cat_t0_, np.zeros(cat_t0_.shape),
cat['index'] * np.ones(cat_t0_.shape)
].astype(np.uint32)
cat_id = {cat['comment']: cat['index']}
tmin, tmax = cat['start'], cat['end']
conds_data.append(dict(events=cat_t0, event_id=cat_id,
tmin=tmin, tmax=tmax))
return conds_data[0] if len(conds_data) == 1 else conds_data
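# Illustrative usage sketch (hypothetical condition name; assumes ``raw`` is an
# Elekta recording whose info['acq_pars'] carries DACQ settings, and that
# ``mne`` has been imported):
#
#     >>> acqp = raw.acqparser
#     >>> cond = acqp.get_condition(raw, 'Auditory left')
#     >>> epochs = mne.Epochs(raw, events=cond['events'],
#     ...                     event_id=cond['event_id'],
#     ...                     tmin=cond['tmin'], tmax=cond['tmax'])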
|
drammock/mne-python
|
mne/event.py
|
Python
|
bsd-3-clause
| 54,525
|
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
class LeonardoAdminSite(AdminSite):
site_header = getattr(settings, "SITE_HEADER", "Leonardo administration")
site_title = getattr(settings, "SITE_TITLE", "Leonardo site admin")
def get_urls(self):
# connect all admin members
self._registry.update(admin.site._registry)
return super(LeonardoAdminSite, self).get_urls()
leonardo_admin = LeonardoAdminSite(name="admin")
|
django-leonardo/django-leonardo
|
leonardo/site.py
|
Python
|
bsd-3-clause
| 529
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('councils', '0002_auto_20160121_1522'),
]
operations = [
migrations.CreateModel(
name='LoggedPostcode',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('created', django_extensions.db.fields.CreationDateTimeField(verbose_name='created', auto_now_add=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('postcode', models.CharField(max_length=100)),
('had_data', models.BooleanField(db_index=True, default=False)),
('council', models.ForeignKey(null=True, to='councils.Council')),
('location', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),
('brand', models.CharField(db_index=True, blank=True, max_length=100)),
('utm_campaign', models.CharField(db_index=True, blank=True, max_length=100)),
('utm_medium', models.CharField(db_index=True, blank=True, max_length=100)),
('utm_source', models.CharField(db_index=True, blank=True, max_length=100)),
],
options={
'abstract': False,
'ordering': ('-modified', '-created'),
'get_latest_by': 'modified',
},
),
]
|
chris48s/UK-Polling-Stations
|
polling_stations/apps/data_finder/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 1,685
|
import logging
import emission.net.usercache.formatters.ios.location as fil
def format(entry):
return fil.format(entry)
|
joshzarrabi/e-mission-server
|
emission/net/usercache/formatters/ios/filtered_location.py
|
Python
|
bsd-3-clause
| 125
|
import collections
import functools
import contextlib
import astropy.units as u
_WITH_MEMOIZATION = False
_CACHE_SIZE = 20
@contextlib.contextmanager
def use_astromodels_memoization(switch, cache_size=_CACHE_SIZE):
"""
Activate/deactivate memoization temporarily
:param switch: True (memoization on) or False (memoization off)
    :param cache_size: number of previous evaluations of functions to keep in memory. Default: 20 (the module-level _CACHE_SIZE)
:return:
"""
global _WITH_MEMOIZATION
global _CACHE_SIZE
old_status = bool(_WITH_MEMOIZATION)
old_cache_size = int(_CACHE_SIZE)
_WITH_MEMOIZATION = bool(switch)
_CACHE_SIZE = int(cache_size)
    try:
        yield
    finally:
        # restore the previous settings even if the wrapped block raises
        _WITH_MEMOIZATION = old_status
        _CACHE_SIZE = old_cache_size
def memoize(method):
"""
    A decorator for functions of sources which memoizes the results of the last _CACHE_SIZE calls.
:param method: method to be memoized
:return: the decorated method
"""
cache = method.cache = collections.OrderedDict()
# Put these two methods in the local space (faster)
_get = cache.get
_popitem = cache.popitem
@functools.wraps(method)
def memoizer(instance, x, *args, **kwargs):
if not _WITH_MEMOIZATION or isinstance(x, u.Quantity):
# Memoization is not active or using units, do not use memoization
return method(instance, x, *args, **kwargs)
# Create a tuple because a tuple is hashable
unique_id = tuple(float(yy.value) for yy in instance.parameters.values()) + (x.size, x.min(), x.max())
# Create a unique identifier for this combination of inputs
key = hash(unique_id)
# Let's do it this way so we only look into the dictionary once
result = _get(key)
if result is not None:
return result
else:
result = method(instance, x, *args, **kwargs)
cache[key] = result
if len(cache) > _CACHE_SIZE:
                # Remove half of the elements (but at least 1, even if _CACHE_SIZE=1, which would be pretty idiotic ;-) )
[_popitem(False) for i in range(max(_CACHE_SIZE // 2, 1))]
return result
# Add the function as a "attribute" so we can access it
memoizer.input_object = method
return memoizer
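# Illustrative usage sketch (``powerlaw`` is a hypothetical astromodels function
# instance whose __call__ has been wrapped by @memoize; memoization is keyed on
# the instance's parameter values plus the size/min/max of the input array):
#
#     >>> import numpy as np
#     >>> x = np.linspace(1., 10., 100)
#     >>> with use_astromodels_memoization(True, cache_size=50):
#     ...     y1 = powerlaw(x)   # evaluated and stored in the cache
#     ...     y2 = powerlaw(x)   # identical inputs, served from the cache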
|
giacomov/astromodels
|
astromodels/core/memoization.py
|
Python
|
bsd-3-clause
| 2,316
|
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import platform
import inspect
import re
import numpy as np
from . import __version__
from ._config import get_config
from .utils import _IS_32BIT
from .utils._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .utils.validation import check_X_y
from .utils.validation import check_array
from .utils.validation import _num_features
from .utils._estimator_html_repr import estimator_html_repr
from .utils.validation import _deprecate_positional_args
@_deprecate_positional_args
def clone(estimator, *, safe=True):
"""Constructs a new unfitted estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fitted on any data.
If the estimator's `random_state` parameter is an integer (or if the
estimator doesn't have a `random_state` parameter), an *exact clone* is
returned: the clone and the original estimator will give the exact same
    results. Otherwise, a *statistical clone* is returned: the clone might
yield different results from the original estimator. More details can be
found in :ref:`randomness`.
Parameters
----------
estimator : {list, tuple, set} of estimator instance or a single \
estimator instance
The estimator or group of estimators to be cloned.
safe : bool, default=True
If safe is False, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params') or isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
if isinstance(estimator, type):
raise TypeError("Cannot clone object. " +
"You should provide an instance of " +
"scikit-learn estimator instead of a class.")
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn "
"estimator as it does not implement a "
"'get_params' method."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError('Cannot clone object %s, as the constructor '
'either does not set or modifies parameter %s' %
(estimator, name))
return new_object
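# Illustrative usage sketch (estimator choice is arbitrary):
#
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> est = LogisticRegression(C=0.5, random_state=0)
#     >>> est_copy = clone(est)          # same parameters, not fitted
#     >>> est_copy.get_params()['C']
#     0.5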
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
The offset in characters to add at the begin of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(params.items())):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
class BaseEstimator:
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""
Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self))
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
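    # Illustrative usage sketch (hypothetical pipeline; shows the nested
    # ``<component>__<parameter>`` syntax handled above):
    #
    #     >>> from sklearn.pipeline import Pipeline
    #     >>> from sklearn.preprocessing import StandardScaler
    #     >>> from sklearn.svm import SVC
    #     >>> pipe = Pipeline([('scale', StandardScaler()), ('clf', SVC())])
    #     >>> pipe.set_params(clf__C=10.0)   # reaches the nested SVC via '__'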
def __repr__(self, N_CHAR_MAX=700):
# N_CHAR_MAX is the (approximate) maximum number of non-blank
# characters to render. We pass it as an optional parameter to ease
# the tests.
from .utils._pprint import _EstimatorPrettyPrinter
N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences
# use ellipsis for sequences with a lot of elements
pp = _EstimatorPrettyPrinter(
compact=True, indent=1, indent_at_name=True,
n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW)
repr_ = pp.pformat(self)
# Use bruteforce ellipsis when there are a lot of non-blank characters
n_nonblank = len(''.join(repr_.split()))
if n_nonblank > N_CHAR_MAX:
lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends
regex = r'^(\s*\S){%d}' % lim
# The regex '^(\s*\S){%d}' % n
# matches from the start of the string until the nth non-blank
# character:
# - ^ matches the start of string
# - (pattern){n} matches n repetitions of pattern
# - \s*\S matches a non-blank char following zero or more blanks
left_lim = re.match(regex, repr_).end()
right_lim = re.match(regex, repr_[::-1]).end()
if '\n' in repr_[left_lim:-right_lim]:
# The left side and right side aren't on the same line.
# To avoid weird cuts, e.g.:
# categoric...ore',
# we need to start the right side with an appropriate newline
# character so that it renders properly as:
# categoric...
# handle_unknown='ignore',
# so we add [^\n]*\n which matches until the next \n
regex += r'[^\n]*\n'
right_lim = re.match(regex, repr_[::-1]).end()
ellipsis = '...'
if left_lim + len(ellipsis) < len(repr_) - right_lim:
# Only add ellipsis if it results in a shorter repr
repr_ = repr_[:left_lim] + '...' + repr_[-right_lim:]
return repr_
def __getstate__(self):
try:
state = super().__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith('sklearn.'):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith('sklearn.'):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk.".format(
self.__class__.__name__, pickle_version, __version__),
UserWarning)
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
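    # A minimal sketch of how the tag collection above composes along the
    # MRO (the subclass here is hypothetical): reversed(inspect.getmro(...))
    # walks from ``object`` down to the concrete class, so a class's own
    # ``_more_tags`` always wins over its mixins:
    #
    #     class MyClf(ClassifierMixin, BaseEstimator):
    #         def _more_tags(self):
    #             return {'requires_y': False}
    #
    # ClassifierMixin contributes ``{'requires_y': True}``, but
    # ``MyClf()._get_tags()['requires_y']`` ends up ``False``.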
def _check_n_features(self, X, reset):
"""Set the `n_features_in_` attribute, or check against it.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
reset : bool
If True, the `n_features_in_` attribute is set to `X.shape[1]`.
If False and the attribute exists, then check that it is equal to
`X.shape[1]`. If False and the attribute does *not* exist, then
the check is skipped.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
try:
n_features = _num_features(X)
except TypeError as e:
if not reset and hasattr(self, "n_features_in_"):
raise ValueError(
"X does not contain any features, but "
f"{self.__class__.__name__} is expecting "
f"{self.n_features_in_} features"
) from e
# If the number of features is not defined and reset=True,
# then we skip this check
return
if reset:
self.n_features_in_ = n_features
return
if not hasattr(self, "n_features_in_"):
            # Skip this check if the expected number of input features
# was not recorded by calling fit first. This is typically the case
# for stateless transformers.
return
if n_features != self.n_features_in_:
raise ValueError(
f"X has {n_features} features, but {self.__class__.__name__} "
f"is expecting {self.n_features_in_} features as input.")
def _validate_data(self, X, y='no_validation', reset=True,
validate_separately=False, **check_params):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,), default='no_validation'
The targets.
- If `None`, `check_array` is called on `X`. If the estimator's
requires_y tag is True, then an error will be raised.
- If `'no_validation'`, `check_array` is called on `X` and the
estimator's requires_y tag is ignored. This is a default
placeholder and is never meant to be explicitly set.
- Otherwise, both `X` and `y` are checked with either `check_array`
or `check_X_y` depending on `validate_separately`.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
            If False, call check_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if `y` is not None.
"""
if y is None:
if self._get_tags()['requires_y']:
raise ValueError(
f"This {self.__class__.__name__} estimator "
f"requires y to be passed, but the target y is None."
)
X = check_array(X, **check_params)
out = X
elif isinstance(y, str) and y == 'no_validation':
X = check_array(X, **check_params)
out = X
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
X = check_array(X, **check_X_params)
y = check_array(y, **check_y_params)
else:
X, y = check_X_y(X, y, **check_params)
out = X, y
if check_params.get('ensure_2d', True):
self._check_n_features(X, reset=reset)
return out
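    # A sketch of the typical call pattern for the helper above inside an
    # estimator (the method bodies are illustrative, not actual scikit-learn
    # code): ``fit`` validates both arrays and records n_features_in_, while
    # later methods only check consistency:
    #
    #     def fit(self, X, y):
    #         X, y = self._validate_data(X, y, accept_sparse=False)
    #         ...
    #
    #     def predict(self, X):
    #         X = self._validate_data(X, reset=False)
    #         ...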
@property
def _repr_html_(self):
"""HTML representation of estimator.
        This is redundant with the logic of `_repr_mimebundle_`. The latter
        should be favored in the long term; `_repr_html_` is only
        implemented for consumers who do not interpret `_repr_mimebundle_`.
"""
if get_config()["display"] != 'diagram':
raise AttributeError("_repr_html_ is only defined when the "
"'display' configuration option is set to "
"'diagram'")
return self._repr_html_inner
def _repr_html_inner(self):
"""This function is returned by the @property `_repr_html_` to make
        `hasattr(estimator, "_repr_html_")` return `True` or `False` depending
on `get_config()["display"]`.
"""
return estimator_html_repr(self)
def _repr_mimebundle_(self, **kwargs):
"""Mime bundle used by jupyter kernels to display estimator"""
output = {"text/plain": repr(self)}
if get_config()["display"] == 'diagram':
output["text/html"] = estimator_html_repr(self)
return output
class ClassifierMixin:
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` wrt. `y`.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
def _more_tags(self):
return {'requires_y': True}
class RegressorMixin:
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Return the coefficient of determination :math:`R^2` of the
prediction.
The coefficient :math:`R^2` is defined as :math:`(1 - \\frac{u}{v})`,
where :math:`u` is the residual sum of squares ``((y_true - y_pred)
** 2).sum()`` and :math:`v` is the total sum of squares ``((y_true -
y_true.mean()) ** 2).sum()``. The best possible score is 1.0 and it
can be negative (because the model can be arbitrarily worse). A
constant model that always predicts the expected value of `y`,
disregarding the input features, would get a :math:`R^2` score of
0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` wrt. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
"""
from .metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred, sample_weight=sample_weight)
def _more_tags(self):
return {'requires_y': True}
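# A small numeric sketch of the R^2 definition quoted above (values chosen
# purely for illustration): with y_true = [1, 2, 3] and y_pred = [1, 2, 2],
# u = ((y_true - y_pred) ** 2).sum() = 1 and
# v = ((y_true - y_true.mean()) ** 2).sum() = 2, so R^2 = 1 - u/v = 0.5.
#
#     >>> from sklearn.metrics import r2_score
#     >>> r2_score([1, 2, 3], [1, 2, 2])
#     0.5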
class ClusterMixin:
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""
        Perform clustering on `X` and return cluster labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,), dtype=np.int64
Cluster labels.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
def _more_tags(self):
return {"preserves_dtype": []}
class BiclusterMixin:
"""Mixin class for all bicluster estimators in scikit-learn."""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the `i`'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : ndarray, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : ndarray, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the `i`'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
n_rows : int
Number of rows in the bicluster.
n_cols : int
Number of columns in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Return the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array-like of shape (n_samples, n_features)
The data.
Returns
-------
submatrix : ndarray of shape (n_rows, n_cols)
The submatrix corresponding to bicluster `i`.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
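# A minimal sketch of the indicator convention assumed by BiclusterMixin:
# ``rows_`` and ``columns_`` are boolean arrays of shape
# (n_biclusters, n_rows) and (n_biclusters, n_columns), so with
#
#     rows_    = np.array([[True, False, True]])
#     columns_ = np.array([[False, True]])
#
# ``get_indices(0)`` returns (array([0, 2]), array([1])),
# ``get_shape(0)`` returns (2, 1), and ``get_submatrix(0, data)`` picks the
# corresponding 2x1 block out of ``data``.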
class TransformerMixin:
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""
Fit to data, then transform it.
Fits transformer to `X` and `y` with optional parameters `fit_params`
and returns a transformed version of `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
        X_new : ndarray of shape (n_samples, n_features_new)
Transformed array.
"""
        # non-optimized default implementation; override when a better
        # method is possible for a given transformer
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
class DensityMixin:
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Return the score of the model on the data `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
score : float
"""
pass
class OutlierMixin:
"""Mixin class for all outlier detection estimators in scikit-learn."""
_estimator_type = "outlier_detector"
def fit_predict(self, X, y=None):
"""Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
"""
        # override for transductive outlier detectors like LocalOutlierFactor
return self.fit(X).predict(X)
class MetaEstimatorMixin:
    """Mixin class for all meta estimators in scikit-learn."""
    _required_parameters = ["estimator"]
class MultiOutputMixin:
"""Mixin to mark estimators that support multioutput."""
def _more_tags(self):
return {'multioutput': True}
class _UnstableArchMixin:
"""Mark estimators that are non-determinstic on 32bit or PowerPC"""
def _more_tags(self):
return {'non_deterministic': (
_IS_32BIT or platform.machine().startswith(('ppc', 'powerpc')))}
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_outlier_detector(estimator):
"""Return True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "outlier_detector"
def _is_pairwise(estimator):
"""Returns True if estimator is pairwise.
- If the `_pairwise` attribute and the tag are present and consistent,
then use the value and not issue a warning.
- If the `_pairwise` attribute and the tag are present and not
consistent, use the `_pairwise` value and issue a deprecation
warning.
- If only the `_pairwise` attribute is present and it is not False,
issue a deprecation warning and use the `_pairwise` value.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if the estimator is pairwise and False otherwise.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FutureWarning)
has_pairwise_attribute = hasattr(estimator, '_pairwise')
pairwise_attribute = getattr(estimator, '_pairwise', False)
pairwise_tag = _safe_tags(estimator, key="pairwise")
if has_pairwise_attribute:
if pairwise_attribute != pairwise_tag:
warnings.warn(
"_pairwise was deprecated in 0.24 and will be removed in 1.1 "
"(renaming of 0.26). Set the estimator tags of your estimator "
"instead",
FutureWarning
)
return pairwise_attribute
# use pairwise tag when the attribute is not present
return pairwise_tag
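# A minimal, self-contained sketch of the estimator API defined in this
# module; ``HypotheticalClassifier`` below is illustrative only and is not
# part of scikit-learn.
if __name__ == "__main__":
    class HypotheticalClassifier(ClassifierMixin, BaseEstimator):
        def __init__(self, alpha=1.0):
            self.alpha = alpha

    clf = HypotheticalClassifier(alpha=0.5)
    # get_params/set_params round-trip over the constructor parameters
    assert clf.get_params() == {'alpha': 0.5}
    clf.set_params(alpha=2.0)
    assert clf.alpha == 2.0
    # the _estimator_type tag comes from ClassifierMixin
    assert is_classifier(clf) and not is_regressor(clf)
    # tags merge _DEFAULT_TAGS with ClassifierMixin._more_tags()
    assert clf._get_tags()['requires_y'] is True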
|
glemaitre/scikit-learn
|
sklearn/base.py
|
Python
|
bsd-3-clause
| 30,835
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""Tests for ParsInsert v1.03 application controller."""
from shutil import rmtree
from os.path import splitext
from os import getcwd, remove, rmdir, mkdir
from unittest import TestCase, main
from cogent.core.alignment import Alignment
from cogent.parse.tree import DndParser
from cogent.core.moltype import DNA
from skbio.parse.sequences import parse_fasta
from burrito.util import get_tmp_filename
from bfillings.parsinsert import ParsInsert, insert_sequences_into_tree
class ParsInsertTests(TestCase):
def setUp(self):
# create a list of files to cleanup
self._paths_to_clean_up = []
self._dirs_to_clean_up = []
# load query seqs
self.seqs = Alignment(parse_fasta(QUERY_SEQS.split()))
# generate temp filename
tmp_dir='/tmp'
self.outfile = get_tmp_filename(tmp_dir)
# create and write out reference sequence file
self.outfasta=splitext(self.outfile)[0]+'.fasta'
fastaout=open(self.outfasta,'w')
fastaout.write(REF_SEQS)
fastaout.close()
self._paths_to_clean_up.append(self.outfasta)
# create and write out starting tree file
self.outtree=splitext(self.outfile)[0]+'.tree'
treeout=open(self.outtree,'w')
treeout.write(REF_TREE)
treeout.close()
self._paths_to_clean_up.append(self.outtree)
def tearDown(self):
"""cleans up all files initially created"""
# remove the tempdir and contents
map(remove,self._paths_to_clean_up)
map(rmdir,self._dirs_to_clean_up)
def test_base_command(self):
"""Base command-calls"""
app = ParsInsert()
self.assertEqual(app.BaseCommand, \
''.join(['cd "',getcwd(),'/"; ','ParsInsert']))
def test_change_working_dir(self):
"""Change working dir"""
app = ParsInsert(WorkingDir='/tmp/ParsInsertTest')
self.assertEqual(app.BaseCommand, \
''.join(['cd "','/tmp/ParsInsertTest',\
'/"; ','ParsInsert']))
rmtree('/tmp/ParsInsertTest')
def test_insert_sequences_into_tree(self):
"""Inserts sequences into Tree"""
# define log fp
log_fp='/tmp/parsinsert.log'
self._paths_to_clean_up.append(log_fp)
# define tax assignment values fp
tax_assign_fp='/tmp/tax_assignments.log'
self._paths_to_clean_up.append(tax_assign_fp)
# set the reference alignment and starting tree
param={
'-t':self.outtree,
'-s':self.outfasta,
'-l':log_fp,
'-o':tax_assign_fp
}
seqs, align_map = self.seqs.toPhylip()
# insert sequences into tree
tree = insert_sequences_into_tree(seqs, DNA, params=param)
# rename tips back to query names
for node in tree.tips():
if node.Name in align_map:
node.Name = align_map[node.Name]
self.assertEqual(tree.getNewick(with_distances=True),exp_tree)
QUERY_SEQS= """\
>6
TGCATGTCAGTATAGCTTTGGTGAAACTGCGAATGGCTCATTAAATCAGT
>7
TGCATGTCAGTATAACTTTGGTGAAACTGCGAATGGCTCATTAAATCAGT
"""
REF_SEQS= """\
>seq0000011
TGCATGTCAGTATAGCTTTAGTGAAACTGCGAATGGCTCATTAAATCAGT
>seq0000012
TGCATGTCAGTATAGCTTTAGTGAAACTGCGAATGGCTNNTTAAATCAGT
>seq0000013
TGCATGTCAGTATAGCATTAGTGAAACTGCGAATGGCTCATTAAATCAGT
>seq0000014
TCCATGTCAGTATAACTTTGGTGAAACTGCGAATGGCTCATTAAATCAGG
>seq0000015
NNNNNNNNNNTATATCTTATGTGAAACTTCGAATGCCTCATTAAATCAGT
"""
REF_TREE="""((seq0000014:0.08408,seq0000015:0.13713)0.609:0.00215,seq0000013:0.02032,(seq0000011:0.00014,seq0000012:0.00014)0.766:0.00015);
"""
exp_tree = """((seq0000014:0.08408,seq0000015:0.13713,7:0.02027):0.00215,seq0000013:0.02032,(seq0000011:0.00014,seq0000012:0.00014,6:0.02027):0.00015):0.0;"""
if __name__ == '__main__':
main()
|
ekopylova/burrito-fillings
|
bfillings/tests/test_parsinsert.py
|
Python
|
bsd-3-clause
| 4,260
|
"""
This module contains functions to:
- solve a single equation for a single variable, in any domain either real or complex.
- solve a system of linear equations with N variables and M equations.
"""
from __future__ import print_function, division
from sympy.core.sympify import sympify
from sympy.core import S, Pow, Dummy, pi, Expr, Wild, Mul, Equality
from sympy.core.numbers import I, Number, Rational, oo
from sympy.core.function import (Lambda, expand, expand_complex)
from sympy.core.relational import Eq
from sympy.simplify.simplify import simplify, fraction, trigsimp
from sympy.core.symbol import Symbol
from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp,
acos, asin, atan, acsc, asec, arg,
Piecewise, piecewise_fold)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import real_root
from sympy.sets import (FiniteSet, EmptySet, imageset, Interval, Intersection,
Union, ConditionSet)
from sympy.matrices import Matrix
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf)
from sympy.solvers.solvers import checksol, denoms
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
import warnings
def invert_real(f_x, y, x):
""" Inverts a real valued function
Reduces the real valued equation ``f(x) = y`` to a set of equations ``{g(x)
= h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is a simpler
function than ``f(x)``. The return value is a tuple ``(g(x), set_h)``,
where ``g(x)`` is a function of ``x`` and ``set_h`` is the set of
functions ``{h_1(y), h_2(y), ..., h_n(y)}``.
Here, ``y`` is not necessarily a symbol.
    The ``set_h`` contains the functions, along with the information about the
    domain in which they are valid, expressed through set operations. For
    instance, if ``y = Abs(x) - n`` is inverted, then ``set_h`` doesn't simply
    return ``{-n, n}``, as that would not capture the nature of `n`; rather it
    will return:
    `Intersection([0, oo), {n}) U Intersection((-oo, 0], {-n})`
Examples
========
>>> from sympy.solvers.solveset import invert_real
>>> from sympy import tan, Abs, exp
>>> from sympy.abc import x, y, n
>>> invert_real(exp(x), 1, x)
(x, {0})
>>> invert_real(tan(x), y, x)
(x, ImageSet(Lambda(_n, _n*pi + atan(y)), Integers()))
* ``set_h`` containing information about the domain
>>> invert_real(Abs(x**31 + x), y, x)
(x**31 + x, Intersection([0, oo), {y}) U Intersection((-oo, 0], {-y}))
>>> invert_real(exp(Abs(x)), y, x)
(x, Intersection([0, oo), {log(y)}) U Intersection((-oo, 0], {-log(y)}))
See Also
========
invert_complex
"""
y = sympify(y)
if not y.has(x):
return _invert_real(f_x, FiniteSet(y), x)
else:
raise ValueError(" y should be independent of x ")
def _invert_real(f, g_ys, symbol):
""" Helper function for invert_real """
if not f.has(symbol):
raise ValueError("Inverse of constant function doesn't exist")
if f is symbol:
return (f, g_ys)
n = Dummy('n')
if hasattr(f, 'inverse') and not isinstance(f, TrigonometricFunction) and \
not isinstance(f, HyperbolicFunction):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, Abs):
return _invert_real(f.args[0],
Union(imageset(Lambda(n, n), g_ys).intersect(Interval(0, oo)),
imageset(Lambda(n, -n), g_ys).intersect(Interval(-oo, 0))),
symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g != S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g != S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
res = imageset(Lambda(n, real_root(n, expo)), g_ys)
if expo.is_rational:
numer, denom = expo.as_numer_denom()
if numer == S.One or numer == - S.One:
return _invert_real(base, res, symbol)
else:
if numer % 2 == 0:
n = Dummy('n')
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
else:
return _invert_real(base, res, symbol)
else:
if not base.is_positive:
raise ValueError("x**w where w is irrational is not "
"defined for negative x")
return _invert_real(base, res, symbol)
if not base_has_sym:
return _invert_real(expo, imageset(Lambda(n, log(n)/log(base)),
g_ys), symbol)
if isinstance(f, sin):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
sin_invs = Union(*[imageset(Lambda(n, n*pi + (-1)**n*asin(g_y)), \
S.Integers) for g_y in g_ys])
return _invert_real(f.args[0], sin_invs, symbol)
if isinstance(f, csc):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
csc_invs = Union(*[imageset(Lambda(n, n*pi + (-1)**n*acsc(g_y)), \
S.Integers) for g_y in g_ys])
return _invert_real(f.args[0], csc_invs, symbol)
if isinstance(f, cos):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
cos_invs_f1 = Union(*[imageset(Lambda(n, 2*n*pi + acos(g_y)), \
S.Integers) for g_y in g_ys])
cos_invs_f2 = Union(*[imageset(Lambda(n, 2*n*pi - acos(g_y)), \
S.Integers) for g_y in g_ys])
cos_invs = Union(cos_invs_f1, cos_invs_f2)
return _invert_real(f.args[0], cos_invs, symbol)
if isinstance(f, sec):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
sec_invs_f1 = Union(*[imageset(Lambda(n, 2*n*pi + asec(g_y)), \
S.Integers) for g_y in g_ys])
sec_invs_f2 = Union(*[imageset(Lambda(n, 2*n*pi - asec(g_y)), \
S.Integers) for g_y in g_ys])
sec_invs = Union(sec_invs_f1, sec_invs_f2)
return _invert_real(f.args[0], sec_invs, symbol)
if isinstance(f, tan) or isinstance(f, cot):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
tan_cot_invs = Union(*[imageset(Lambda(n, n*pi + f.inverse()(g_y)), \
S.Integers) for g_y in g_ys])
return _invert_real(f.args[0], tan_cot_invs, symbol)
return (f, g_ys)
def invert_complex(f_x, y, x):
""" Inverts a complex valued function.
Reduces the complex valued equation ``f(x) = y`` to a set of equations
``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is
a simpler function than ``f(x)``. The return value is a tuple ``(g(x),
set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is
    the set of functions ``{h_1(y), h_2(y), ..., h_n(y)}``.
Here, ``y`` is not necessarily a symbol.
    Note that `invert\_complex` and `invert\_real` don't always produce the
    same result even for a seemingly simple function like ``exp(x)``, because
    the complex extension of the real valued ``log`` is multivalued over the
    complex numbers and has infinitely many branches. If you are working with
    real values only, or you are not sure which function to use, you should
    use `invert\_real`.
Examples
========
>>> from sympy.solvers.solveset import invert_complex
>>> from sympy.abc import x, y
>>> from sympy import exp, log
>>> invert_complex(log(x), y, x)
(x, {exp(y)})
>>> invert_complex(log(x), 0, x) # Second parameter is not a symbol
(x, {1})
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers()))
See Also
========
invert_real
"""
y = sympify(y)
if not y.has(x):
return _invert_complex(f_x, FiniteSet(y), x)
else:
raise ValueError(" y should be independent of x ")
def _invert_complex(f, g_ys, symbol):
""" Helper function for invert_complex """
if not f.has(symbol):
raise ValueError("Inverse of constant function doesn't exist")
if f is symbol:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g != S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g != S.One:
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if hasattr(f, 'inverse') and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp):
if isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.args[0], exp_invs, symbol)
return (f, g_ys)
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
else:
return all([_domain_check(g, symbol, p)
for g in f.args])
def _is_finite_with_finite_vars(f):
"""
Return True if the given expression is finite when all free symbols
(that are not already specified as finite) are made finite.
"""
reps = dict([(s, Dummy(s.name, finite=True, **s.assumptions0))
for s in f.free_symbols if s.is_finite is None])
return f.xreplace(reps).is_finite
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
... HyperbolicFunction)
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def solveset_real(f, symbol):
""" Solves a real valued equation.
Parameters
==========
f : Expr
The target equation
symbol : Symbol
The variable for which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is equal to
zero. An `EmptySet` is returned if no solution is found.
        A `ConditionSet` is returned as an unsolved object if algorithms
        to evaluate complete solutions are not yet implemented.
    `solveset_real` claims to be complete in the solution set that it
    returns.
Raises
======
NotImplementedError
Algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
See Also
    ========
solveset_complex : solver for complex domain
Examples
========
>>> from sympy import Symbol, exp, sin, sqrt, I
>>> from sympy.solvers.solveset import solveset_real
>>> x = Symbol('x', real=True)
>>> a = Symbol('a', real=True, finite=True, positive=True)
>>> solveset_real(x**2 - 1, x)
{-1, 1}
>>> solveset_real(sqrt(5*x + 6) - 2 - x, x)
{-1, 2}
>>> solveset_real(x - I, x)
EmptySet()
>>> solveset_real(x - a, x)
{a}
>>> solveset_real(exp(x) - a, x)
{log(a)}
    * In case the equation has infinitely many solutions, an infinitely indexed
`ImageSet` is returned.
>>> solveset_real(sin(x) - 1, x)
ImageSet(Lambda(_n, 2*_n*pi + pi/2), Integers())
    * If the equation is true for any arbitrary value of the symbol, a `S.Reals`
set is returned.
>>> solveset_real(x - x, x)
(-oo, oo)
"""
if not getattr(symbol, 'is_Symbol', False):
raise ValueError('A Symbol must be given, not type %s: %s' %
(type(symbol), symbol))
f = sympify(f)
if not isinstance(f, (Expr, Number)):
raise ValueError("%s is not a valid SymPy expression" % (f))
original_eq = f
f = together(f)
# In this, unlike in solveset_complex, expression should only
# be expanded when fraction(f)[1] does not contain the symbol
# for which we are solving
if not symbol in fraction(f)[1].free_symbols and f.is_rational_function():
f = expand(f)
f = piecewise_fold(f)
result = EmptySet()
if f.expand().is_zero:
return S.Reals
elif not f.has(symbol):
return EmptySet()
elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]):
        # In general, the solution of f(x)*g(x) == 0 is not simply
        # Union(f(x) == 0, g(x) == 0), because g(x) can grow infinitely
        # large at the values where f(x) == 0. The decomposition is valid
        # when both factors stay finite. To be sure that we are not silently
        # allowing any wrong solutions, we use this technique only if both
        # f and g are finite for a finite input.
result = Union(*[solveset_real(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_real_trig(f, symbol)
elif f.is_Piecewise:
result = EmptySet()
expr_set_pairs = f.as_expr_set_pairs()
for (expr, in_set) in expr_set_pairs:
solns = solveset_real(expr, symbol).intersect(in_set)
result = result + solns
else:
lhs, rhs_s = invert_real(f, 0, symbol)
if lhs == symbol:
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
equations = [lhs - rhs for rhs in rhs_s]
for equation in equations:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args):
result += _solve_radical(equation,
symbol,
solveset_real)
elif equation.has(Abs):
result += _solve_abs(f, symbol)
else:
result += _solve_as_rational(equation, symbol,
solveset_solver=solveset_real,
as_poly_solver=_solve_as_poly_real)
else:
result += solveset_real(equation, symbol)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Reals)
if isinstance(result, FiniteSet):
result = [s for s in result
if isinstance(s, RootOf)
or domain_check(original_eq, symbol, s)]
return FiniteSet(*result).intersect(S.Reals)
else:
return result.intersect(S.Reals)
def _solve_as_rational(f, symbol, solveset_solver, as_poly_solver):
""" solve rational functions"""
f = together(f, deep=True)
g, h = fraction(f)
if not h.has(symbol):
return as_poly_solver(g, symbol)
else:
valid_solns = solveset_solver(g, symbol)
invalid_solns = solveset_solver(h, symbol)
return valid_solns - invalid_solns
def _solve_real_trig(f, symbol):
""" Helper to solve trigonometric equations """
f = trigsimp(f)
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(exp(I*symbol), y), h.subs(exp(I*symbol), y)
if g.has(symbol) or h.has(symbol):
return ConditionSet(symbol, Eq(f, 0), S.Reals)
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, FiniteSet):
return Union(*[invert_complex(exp(I*symbol), s, symbol)[1]
for s in solns])
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f, 0), S.Reals)
def _solve_as_poly(f, symbol, solveset_solver, invert_func):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
if gen != symbol:
y = Dummy('y')
lhs, rhs_s = invert_func(gen, y, symbol)
if lhs is symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
if result is not None:
if isinstance(result, FiniteSet):
# this is to simplify solutions like -sqrt(-I) to sqrt(2)/2
# - sqrt(2)*I/2. We are not expanding for solution with free
# variables because that makes the solution more complicated. For
# example expand_complex(a) returns re(a) + I*im(a)
if all([s.free_symbols == set() and not isinstance(s, RootOf)
for s in result]):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
return result
else:
return ConditionSet(symbol, Eq(f, 0), S.Complexes)
def _solve_as_poly_real(f, symbol):
"""
Solve real valued equation with methods to solve polynomial
equations.
"""
return _solve_as_poly(f, symbol,
solveset_solver=solveset_real,
invert_func=invert_real)
def _solve_as_poly_complex(f, symbol):
"""
Solve complex valued equation with methods to solve polynomial
equations.
"""
return _solve_as_poly(f, symbol,
solveset_solver=solveset_complex,
invert_func=invert_complex)
def _has_rational_power(expr, symbol):
"""
Returns (bool, den) where bool is True if the term has a
non-integer rational power and den is the denominator of the
expression's exponent.
Examples
========
>>> from sympy.solvers.solveset import _has_rational_power
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> _has_rational_power(sqrt(x), x)
(True, 2)
>>> _has_rational_power(x**2, x)
(False, 1)
"""
a, p, q = Wild('a'), Wild('p'), Wild('q')
pattern_match = expr.match(a*p**q) or {}
if pattern_match.get(a, S.Zero) is S.Zero:
return (False, S.One)
elif p not in pattern_match.keys():
return (False, S.One)
elif isinstance(pattern_match[q], Rational) \
and pattern_match[p].has(symbol):
if not pattern_match[q].q == S.One:
return (True, pattern_match[q].q)
if not isinstance(pattern_match[a], Pow) \
or isinstance(pattern_match[a], Mul):
return (False, S.One)
else:
return _has_rational_power(pattern_match[a], symbol)
def _solve_radical(f, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
from sympy.solvers.solvers import unrad
eq, cov = unrad(f)
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, [symbol])])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
return FiniteSet(*[s for s in result if checksol(f, symbol, s) is True])
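# A small illustration of the helper above (worked by hand, not executed
# here): for f = sqrt(x) - 2, ``unrad`` removes the radical and returns an
# equivalent polynomial equation, x - 4 = 0, with no change of variables;
# the candidate solution {4} then survives the final ``checksol`` filter.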
def _solve_abs(f, symbol):
""" Helper function to solve equation involving absolute value function """
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r) or {}
if not pattern_match.get(p, S.Zero).is_zero:
f_p, f_q, f_r = pattern_match[p], pattern_match[q], pattern_match[r]
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False)
q_neg_cond = solve_univariate_inequality(f_q < 0, symbol,
relational=False)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), S.Complexes)
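# A small illustration of the splitting performed by _solve_abs above
# (worked by hand, not executed here): for f = Abs(x) - 2 the match gives
# p = 1, q = x, r = -2; the equation is then solved as x - 2 = 0 on the
# region where x >= 0 and as -x - 2 = 0 where x < 0, giving
# {2} U {-2} = {-2, 2} after intersecting each piece with its validity
# region.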
def solveset_complex(f, symbol):
""" Solve a complex valued equation.
Parameters
==========
f : Expr
The target equation
symbol : Symbol
The variable for which the equation is solved
Returns
=======
Set
        A set of values for `symbol` for which `f` is equal to
zero. An `EmptySet` is returned if no solution is found.
A `ConditionSet` is returned as an unsolved object if algorithms
to evaluate complete solutions are not yet implemented.
`solveset_complex` claims to be complete in the solution set that
it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
See Also
========
solveset_real: solver for real domain
Examples
========
>>> from sympy import Symbol, exp
>>> from sympy.solvers.solveset import solveset_complex
>>> from sympy.abc import x, a, b, c
>>> solveset_complex(a*x**2 + b*x +c, x)
{-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a), -b/(2*a) + sqrt(-4*a*c + b**2)/(2*a)}
    * Because the complex extensions of real valued functions are
      multivalued, even some simple equations can have infinitely many
      solutions.
>>> solveset_complex(exp(x) - 1, x)
ImageSet(Lambda(_n, 2*_n*I*pi), Integers())
"""
if not getattr(symbol, 'is_Symbol', False):
raise ValueError('A Symbol must be given, not type %s: %s' %
(type(symbol), symbol))
f = sympify(f)
original_eq = f
if not isinstance(f, (Expr, Number)):
raise ValueError(" %s is not a valid sympy expression" % (f))
f = together(f)
# Without this equations like a + 4*x**2 - E keep oscillating
# into form a/4 + x**2 - E/4 and (a + 4*x**2 - E)/4
if not fraction(f)[1].has(symbol):
f = expand(f)
if f.is_zero:
return S.Complexes
elif not f.has(symbol):
result = EmptySet()
elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]):
result = Union(*[solveset_complex(m, symbol) for m in f.args])
else:
lhs, rhs_s = invert_complex(f, 0, symbol)
if lhs == symbol:
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
equations = [lhs - rhs for rhs in rhs_s]
result = EmptySet()
for equation in equations:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args):
result += _solve_radical(equation,
symbol,
solveset_complex)
else:
result += _solve_as_rational(equation, symbol,
solveset_solver=solveset_complex,
as_poly_solver=_solve_as_poly_complex)
else:
result += solveset_complex(equation, symbol)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
if isinstance(result, FiniteSet):
result = [s for s in result
if isinstance(s, RootOf)
or domain_check(original_eq, symbol, s)]
return FiniteSet(*result)
else:
return result
def solveset(f, symbol=None, domain=S.Complexes):
"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An `EmptySet` is returned if `f` is False or nonzero.
        A `ConditionSet` is returned as an unsolved object if algorithms
        to evaluate the complete solution are not yet implemented.
`solveset` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
`solveset` uses two underlying functions `solveset_real` and
`solveset_complex` to solve equations. They are the solvers for real and
complex domain respectively. `solveset` ignores the assumptions on the
variable being solved for and instead, uses the `domain` parameter to
decide which solver to use.
Notes
=====
    Python interprets 0 and 1 as False and True, respectively, but
    in this function they are treated as expressions to be equated with
    zero. So solveset(0, x) returns the domain and solveset(1, x) returns
    the EmptySet, whereas True and False are treated as truth values of a
    relational expression and therefore return the opposite of the
    corresponding integers: the domain for True and the EmptySet for False.
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, Symbol, Eq, pprint, S, solveset
>>> from sympy.abc import x
* The default domain is complex. Not specifying a domain will lead to the
solving of the equation in the complex domain.
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers()}
* If you want to solve equation in real domain by the `solveset`
interface, then specify that the domain is real. Alternatively use
`solveset\_real`.
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, S.Reals)
{0}
>>> solveset(Eq(exp(x), 1), x, S.Reals)
{0}
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, S.Reals)
(0, oo)
"""
f = sympify(f)
if f is S.true:
return domain
if f is S.false:
return S.EmptySet
free_symbols = f.free_symbols
if not free_symbols:
b = Eq(f, 0)
if b is S.true:
return domain
elif b is S.false:
return S.EmptySet
else:
raise NotImplementedError(filldedent('''
relationship between value and 0 is unknown: %s''' % b))
if symbol is None:
if len(free_symbols) == 1:
symbol = free_symbols.pop()
else:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not getattr(symbol, 'is_Symbol', False):
raise ValueError('A Symbol must be given, not type %s: %s' %
(type(symbol), symbol))
if isinstance(f, Eq):
from sympy.core import Add
f = Add(f.lhs, - f.rhs, evaluate=False)
if f.is_Relational:
if not domain.is_subset(S.Reals):
raise NotImplementedError(filldedent('''
Inequalities in the complex domain are
not supported. Try the real domain by
setting domain=S.Reals'''))
try:
result = solve_univariate_inequality(
f, symbol, relational=False).intersection(domain)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
if isinstance(f, (Expr, Number)):
if domain is S.Reals:
return solveset_real(f, symbol)
elif domain is S.Complexes:
return solveset_complex(f, symbol)
elif domain.is_subset(S.Reals):
return Intersection(solveset_real(f, symbol), domain)
else:
return Intersection(solveset_complex(f, symbol), domain)
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. The order of symbols in input `symbols` will
determine the order of coefficients in the returned
Matrix.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system would return `A` & `b` as given below:
::
[ 4 2 3 ] [ 1 ]
A = [ 3 1 1 ] b = [-6 ]
[ 2 4 9 ] [ 2 ]
Examples
========
>>> from sympy import linear_eq_to_matrix, symbols
>>> x, y, z = symbols('x, y, z')
>>> eqns = [x + 2*y + 3*z - 1, 3*x + y + z + 6, 2*x + 4*y + 9*z - 2]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 2, 3],
[3, 1, 1],
[2, 4, 9]])
>>> b
Matrix([
[ 1],
[-6],
[ 2]])
>>> eqns = [x + z - 1, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[1],
[0],
[0]])
* Symbolic coefficients are also supported
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> A, B = linear_eq_to_matrix(eqns, x, y)
>>> A
Matrix([
[a, b],
[d, e]])
>>> B
Matrix([
[c],
[f]])
"""
if not symbols:
        raise ValueError('Symbols must be given, for which coefficients '
                         'are to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
M = Matrix([symbols])
# initialise Matrix with symbols + 1 columns
M = M.col_insert(len(symbols), Matrix([1]))
row_no = 1
for equation in equations:
f = sympify(equation)
if isinstance(f, Equality):
f = f.lhs - f.rhs
# Extract coeff of symbols
coeff_list = []
for symbol in symbols:
coeff_list.append(f.coeff(symbol))
# append constant term (term free from symbols)
coeff_list.append(-f.as_coeff_add(*symbols)[0])
# insert equations coeff's into rows
M = M.row_insert(row_no, Matrix([coeff_list]))
row_no += 1
    # delete the initialised (1st) trivial row
M.row_del(0)
A, b = M[:, :-1], M[:, -1:]
return A, b
def linsolve(system, *symbols):
r"""
    Solve a system of N linear equations with M variables, which
    means both under- and overdetermined systems are supported.
    The possible number of solutions is zero, one or infinite.
    If no solution exists, an EmptySet() is returned, whereas
    infinite solutions are represented parametrically in terms of
    the given symbols. For a unique solution a FiniteSet containing
    an ordered tuple is returned.
All Standard input formats are supported:
For the given set of Equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented Matrix Form, `system` given below:
::
[3 2 -1 1]
system = [2 -2 4 -2]
[2 -1 2 0]
* List Of Equations Form
`system = [3x + 2y - z - 1, 2x - 2y + 4z + 2, 2x - y + 2z]`
* Input A & b Matrix Form (from Ax = b) are given as below:
::
[3 2 -1 ] [ 1 ]
A = [2 -2 4 ] b = [ -2 ]
[2 -1 2 ] [ 0 ]
`system = (A, b)`
    Symbols to solve for should be given as input in all
    cases, either in an iterable or as comma separated arguments.
    This is done to maintain consistency in returning solutions
    in the same order as the variables input by the user.
    The algorithm used here is Gauss-Jordan elimination, which
    results, after elimination, in a row echelon form matrix.
Returns
=======
    A FiniteSet containing an ordered tuple of values of `symbols` for
    which the `system` has a solution.
    Please note that a FiniteSet is in general unordered; the solution
    returned here is not simply a FiniteSet of solutions, rather it is
    a FiniteSet containing an ordered tuple, i.e. the first and only
    argument to FiniteSet is a tuple of solutions, which is ordered,
    and hence the returned solution is ordered.
    Also note that the solution could have been returned as an
    ordered tuple directly; FiniteSet is just a wrapper `{}` around
    the tuple. It has no significance other than maintaining a
    consistent output format throughout solveset.
Returns EmptySet(), if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy import Matrix, S, linsolve, symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
{(-1, 2, 0)}
    * Parametric Solution: In case the system is underdetermined, the
      function will return a parametric solution in terms of the given
      symbols. Free symbols in the system are returned as they are. For
      example, in the system below, `z` is returned as the solution for
      variable z, which means z is a free symbol, i.e. it can take
      arbitrary values.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), [x, y, z])
{(z - 1, -2*z + 2, z)}
* List of Equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + S(1)/2*y - z]
>>> linsolve(Eqns, x, y, z)
{(1, -2, -2)}
* Augmented Matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
{(3/10, 2/5, 0)}
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
{((-b*f + c*e)/(a*e - b*d), (a*f - c*d)/(a*e - b*d))}
    * A degenerate system returns the solution as a set of the given
      symbols.
>>> system = Matrix(([0,0,0], [0,0,0], [0,0,0]))
>>> linsolve(system, x, y)
{(x, y)}
    * For an empty system linsolve returns an empty set.
>>> linsolve([ ], x)
EmptySet()
"""
if not system:
return S.EmptySet
if not symbols:
raise ValueError('Symbols must be given, for which solution of the '
'system is to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
try:
sym = symbols[0].is_Symbol
except AttributeError:
sym = False
if not sym:
raise ValueError('Symbols or iterable of symbols must be given as '
'second argument, not type %s: %s' % (type(symbols[0]), symbols[0]))
# 1). Augmented Matrix input Form
if isinstance(system, Matrix):
A, b = system[:, :-1], system[:, -1:]
elif hasattr(system, '__iter__'):
# 2). A & b as input Form
if len(system) == 2 and system[0].is_Matrix:
A, b = system[0], system[1]
# 3). List of equations Form
if not system[0].is_Matrix:
A, b = linear_eq_to_matrix(system, symbols)
else:
raise ValueError("Invalid arguments")
# Solve using Gauss-Jordan elimination
try:
sol, params, free_syms = A.gauss_jordan_solve(b, freevar=True)
except ValueError:
# No solution
return EmptySet()
# Replace free parameters with free symbols
solution = []
if params:
for s in sol:
for k, v in enumerate(params):
s = s.xreplace({v: symbols[free_syms[k]]})
solution.append(simplify(s))
else:
for s in sol:
solution.append(simplify(s))
# Return solutions
solution = FiniteSet(tuple(solution))
return solution
|
Shaswat27/sympy
|
sympy/solvers/solveset.py
|
Python
|
bsd-3-clause
| 42,157
|
import re
from utils import *
from entities.record import Record
def __extractTag(text):
"""
Extracts tag from record.
@tag
@param {string} text.
@return {string} tag.
"""
return re.match('@\w+', text).group(0)
def __extractType(text, tag):
"""
Extracts type expression from record.
{type}
@param {string} text.
@param {string} tag.
@return {string} Type expression.
"""
typeExpression = extractTextBetweenTokens(text, '{')
return typeExpression
def __extractName(text, tag):
"""
Extracts name of variable from record.
@param {string} text.
@param {string} tag.
@return {string} Name.
"""
name = None
if tag not in {'@return', '@inheritDoc'}:
name = text.split(' ')[0]
return name
def __extractDescription(text, tag):
"""
Extracts description of variable from record without newlines.
@param {string} text.
@param {string} tag.
@return {string} Description.
"""
return text.replace('\n', ' ')
def extractRecord(text):
"""
    Extracts from code a record object, which contains such information as
    the tag, type, name of the variable and its description.
@param {string} text.
@return {jsCodeParser.record.Record} Record
"""
tag = __extractTag(text)
position = text.find(tag) + len(tag)
text = text[position:]
recordMap = {
'type': {
'extractor': __extractType,
'value': ''
},
'name': {
'extractor': __extractName,
'value': ''
},
'description': {
'extractor': __extractDescription,
'value': ''
}
}
while text:
for key in ['type', 'name', 'description']:
extractor = recordMap[key]['extractor']
value = extractor(text, tag)
if value:
recordMap[key]['value'] = value
position = text.find(value) + len(value)
text = text[position:]
text = text.strip('. ')
typeExpression = recordMap['type']['value']
name = recordMap['name']['value']
description = recordMap['description']['value']
return Record(tag, typeExpression, name, description)
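# A minimal usage sketch (the exact fields of the returned Record depend on
# entities.record.Record and the helpers in utils, which are not shown
# here): for a JSDoc-style line such as
# "@param {string} text Some description." the extractor is expected to
# yield the tag '@param', a type expression of 'string', the name 'text'
# and the description 'Some description'.
if __name__ == '__main__':
    print(extractRecord('@param {string} text Some description.'))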
|
LiveTex/Livetex-Tools
|
tools/externs-extractor/extractors/recordsExtractor.py
|
Python
|
bsd-3-clause
| 2,353
|
# -*- coding: utf-8 -*-
from distributed_frontera.backends.remote.remote import KafkaBackend, KafkaOverusedBackend
__all__ = ['KafkaBackend', 'KafkaOverusedBackend', 'KafkaJSONDecoder', 'KafkaJSONEncoder']
|
bowlofstew/distributed-frontera
|
distributed_frontera/backends/remote/__init__.py
|
Python
|
bsd-3-clause
| 206
|
##########################################################################
#
# Copyright (c) 2008, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import random
import unittest
import IECore
class RadixSortTest( unittest.TestCase ) :
def testFloat( self ) :
random.seed( 12 )
s = IECore.RadixSort()
d = IECore.FloatVectorData()
for i in range( 0, 10000 ):
d.append( random.uniform( IECore.FloatData().minValue, IECore.FloatData().maxValue ) )
idx = s.sort( d )
self.assertEqual( len(idx), 10000 )
for i in range( 1, 10000 ):
self.assert_( d[ idx[ i ] ] >= d[ idx[ i - 1 ] ] )
def testInt( self ) :
random.seed( 13 )
s = IECore.RadixSort()
d = IECore.IntVectorData()
for i in range( 0, 10000 ):
d.append( int( random.uniform( IECore.IntData().minValue, IECore.IntData().maxValue ) ) )
idx = s.sort( d )
self.assertEqual( len(idx), 10000 )
for i in range( 1, 10000 ):
self.assert_( d[ idx[ i ] ] >= d[ idx[ i - 1 ] ] )
def testUInt( self ) :
random.seed( 14 )
s = IECore.RadixSort()
d = IECore.UIntVectorData()
for i in range( 0, 10000 ):
d.append( int( random.uniform( IECore.UIntData().minValue, IECore.UIntData().maxValue ) ) )
idx = s.sort( d )
self.assertEqual( len(idx), 10000 )
for i in range( 1, 10000 ):
self.assert_( d[ idx[ i ] ] >= d[ idx[ i - 1 ] ] )
if __name__ == "__main__":
unittest.main()
|
appleseedhq/cortex
|
test/IECore/RadixSortTest.py
|
Python
|
bsd-3-clause
| 3,037
|
"""Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
import scipy.optimize
from scipy.special import erf, expit
from ..base import BaseEstimator, ClassifierMixin, clone
from .kernels import RBF, CompoundKernel, ConstantKernel as C
from ..utils.validation import check_is_fitted
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
from ..preprocessing import LabelEncoder
from ..multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array(
[-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
)[:, np.newaxis]
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, default=100
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, default=False
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data (also
required for prediction).
y_train_ : array-like of shape (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like of shape (n_classes,)
Unique class labels.
kernel_ : kernel instance
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like of shape (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like of shape (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(
self,
kernel=None,
*,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
max_iter_predict=100,
warm_start=False,
copy_X_train=True,
random_state=None,
):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError(
"%s supports only binary classification. y contains classes %s"
% (self.__class__.__name__, self.classes_)
)
elif self.classes_.size == 1:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class".format(
self.__class__.__name__, self.classes_.size
)
)
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta
)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
K, return_temporaries=True
)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self)
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self)
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = (
np.sqrt(np.pi / alpha)
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
)
pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), \
optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = (
-0.5
* (np.diag(K) - np.einsum("ij, ij -> j", C, C))
* (pi * (1 - pi) * (1 - 2 * pi))
) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if (
self.warm_start
and hasattr(self, "f_cached")
and self.f_cached.shape == self.y_train_.shape
):
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = expit(f)
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = (
-0.5 * a.T.dot(f)
- np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
- np.log(np.diag(L)).sum()
)
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(ClassifierMixin, BaseEstimator):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus-rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting. Also kernel
cannot be a `CompoundKernel`.
optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, default=100
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, default=False
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'
Specifies how multi-class classification problems are handled.
Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In 'one_vs_one', one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that 'one_vs_one' does not support predicting probability
estimates.
n_jobs : int, default=None
The number of jobs to use for the computation: the specified
multiclass problems are computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
base_estimator_ : ``Estimator`` instance
The estimator instance that defines the likelihood function
using the observed data.
kernel_ : kernel instance
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like of shape (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
GaussianProcessRegressor : Gaussian process regression (GPR).
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.83548752, 0.03228706, 0.13222543],
[0.79064206, 0.06525643, 0.14410151]])
"""
def __init__(
self,
kernel=None,
*,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
max_iter_predict=100,
warm_start=False,
copy_X_train=True,
random_state=None,
multi_class="one_vs_rest",
n_jobs=None,
):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values; must contain at least two distinct classes.
Returns
-------
self : object
Returns an instance of self.
"""
if isinstance(self.kernel, CompoundKernel):
raise ValueError("kernel cannot be a CompoundKernel")
if self.kernel is None or self.kernel.requires_vector_input:
X, y = self._validate_data(
X, y, multi_output=False, ensure_2d=True, dtype="numeric"
)
else:
X, y = self._validate_data(
X, y, multi_output=False, ensure_2d=False, dtype=None
)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
kernel=self.kernel,
optimizer=self.optimizer,
n_restarts_optimizer=self.n_restarts_optimizer,
max_iter_predict=self.max_iter_predict,
warm_start=self.warm_start,
copy_X_train=self.copy_X_train,
random_state=self.random_state,
)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError(
"GaussianProcessClassifier requires 2 or more "
"distinct classes; got %d class (only class %s "
"is present)" % (self.n_classes_, self.classes_[0])
)
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = OneVsRestClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = OneVsOneClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
else:
raise ValueError("Unknown multi-class mode %s" % self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[
estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_
]
)
else:
self.log_marginal_likelihood_value_ = (
self.base_estimator_.log_marginal_likelihood()
)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``.
"""
check_is_fitted(self)
if self.kernel is None or self.kernel.requires_vector_input:
X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError(
"one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead."
)
if self.kernel is None or self.kernel.requires_vector_input:
X = self._validate_data(X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = self._validate_data(X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
"""Return the kernel of the base estimator."""
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_ for estimator in self.base_estimator_.estimators_]
)
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
check_is_fitted(self)
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient, clone_kernel=clone_kernel
)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC."
)
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[
estimator.log_marginal_likelihood(
theta, clone_kernel=clone_kernel
)
for i, estimator in enumerate(estimators)
]
)
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[
estimator.log_marginal_likelihood(
theta[n_dims * i : n_dims * (i + 1)],
clone_kernel=clone_kernel,
)
for i, estimator in enumerate(estimators)
]
)
else:
raise ValueError(
"Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
)
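if __name__ == "__main__":
    # Illustrative sketch only -- not part of scikit-learn. Following the recipe
    # quoted in the comment above the LAMBDAS/COEFS definitions, re-derive the
    # coefficients by least-squares fitting the logistic sigmoid at a few points
    # with a linear combination of error functions. The result should be close
    # to COEFS, though exact agreement is not guaranteed.
    x = np.array([0.0, 0.6, 2.0, 3.5, 4.5, np.inf])[:, np.newaxis]
    b = expit(x).ravel()                      # logistic sigmoid targets
    A = (erf(x * LAMBDAS.ravel()) + 1) / 2    # design matrix, shape (6, 5)
    coefs = np.linalg.lstsq(A, b, rcond=None)[0]
    print("least-squares coefficients:", coefs)
    print("COEFS defined in this module:", COEFS.ravel())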
|
manhhomienbienthuy/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
Python
|
bsd-3-clause
| 35,508
|
"""
The factory contains functions for creating SEA objects in FreeCAD. These functions should not be called directly.
"""
from Sea.adapter.object_maps import *
import Sea
import FreeCAD as App
import logging
#def makeComponent(system, sort, material, part):
#"""
#Add a component from :mod:`Sea.adapter.components` to an SEA model.
#:param system: an instance of :class:`Sea.adapter.system.System` to which the component will be added.
#:param sort: type of component as specified in :class:`Sea.adapter.components.components_map`
#:param material: an instance of a child of :class:`Sea.adapter.baseclasses.Material` that the component is made of.
#:param part: an instance of :class:`Freecad.Part` that the component is based on
#"""
#obj = system.ComponentsGroup.newObject("App::DocumentObjectGroupPython", 'Component')
#components_map[sort](obj, system, material, part)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeComponentCavity(system, sort, material, position):
#"""
#Add a component from :mod:`Sea.adapter.components` to an SEA model.
#:param system: :class:`Sea.adapter.system.System` to which the component will be added
#:param position: a :class:`FreeCAD.Vector` describing the position in the cavity.
#:param sort: Type of component specified in :class:`Sea.adapter.components.components_map`
#"""
#obj = system.ComponentsGroup.newObject("App::DocumentObjectGroupPython", 'Component')
#components_map[sort](obj, system, material, position)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeSubsystem(component, sort, model):
#"""
#Add a subsystem to a component.
#:param component: an instance of a child of :class:`Sea.adapter.baseclasses.Component`.
#:param sort: type of subsystem.
#:param model: model of the subsystem belonging to :attr:`component` and specified in :mod:`Sea.model.components`
#"""
#obj = component.newObject("App::FeaturePython", "Subsystem")
#subsystems_map[sort](obj, component, model)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeConnection(system, sort, components):
#"""
#Add a connection to system.
#:param system: :class:`Sea.adapter.system.System` to which the connection will be added
#:param sort: sort
#:param components: list of components
#"""
#obj = system.ConnectionsGroup.newObject("App::DocumentObjectGroupPython", "Connection")
#connections_map[sort](obj, system, components)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeCoupling(connection, component_from, subsystem_from, component_to, subsystem_to, sort):
#"""
#Add a coupling to system.
#:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
#:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
#:param subsystem_from: string representing the type of subsystem
#:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
#:param subsystem_to: string representing the type of subsystem
#:param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
#"""
##if connection.System == component_from.System == component_to.System:
#obj = connection.newObject("App::FeaturePython", 'Coupling')
#couplings_map[sort](obj, connection, component_from, subsystem_from, component_to, subsystem_to)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeExcitation(system, component, subsystem, sort):
#"""
#Add an excitation from :mod:`Sea.adapter.excitations` to the subsystem of component.
#:param component: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
#:param subsystem: Subsystem that is excited
#:param sort: Type of excitation specified in :class:`Sea.adapter.excitations.excitations_map`
#"""
#obj = system.ExcitationsGroup.newObject("App::FeaturePython", 'Excitation')
##obj.Label = sort.capitalize()
#excitations_map[sort](obj, component, subsystem)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeMaterial(system, sort):
#"""
#Add a material from :mod:`Sea.adapter.materials` to SEA system.
#:param system: :class:`Sea.adapter.system.System` to which the component will be added
#:param sort: Type of material specified in :class:`Sea.adapter.materials.materials_map`
#"""
#obj = system.MaterialsGroup.newObject("App::FeaturePython", 'Material')
##obj.Label = sort
#materials_map[sort](obj, system)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
|
python-acoustics/Sea
|
Sea/actions/factory.py
|
Python
|
bsd-3-clause
| 5,057
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from pykit.parsing import from_c
from pykit.analysis import cfa
from pykit.ir import findop, opcodes, verify
source = """
#include <pykit_ir.h>
double func_simple(double y) {
if (y > 5.0)
y = 5.0;
else
y = 2.0;
return y;
}
double func(double y) {
Int32 i = 0;
while (i < 10) {
if (i > 5) {
y = i;
}
i = i + 1;
}
return y;
}
"""
class TestCFA(unittest.TestCase):
def test_cfg(self):
mod = from_c(source)
f = mod.get_function('func_simple')
verify(f)
flow = cfa.cfg(f)
cond_block = findop(f, 'cbranch').block
self.assertEqual(len(flow[cond_block]), 2)
def test_ssa(self):
mod = from_c(source)
f = mod.get_function('func_simple')
verify(f)
self.assertEqual(opcodes(f.startblock),
['alloca', 'store', 'load', 'gt', 'cbranch'])
# SSA
CFG = cfa.cfg(f)
cfa.ssa(f, CFG)
assert len(f.blocks) == 4
blocks = list(f.blocks)
self.assertEqual(opcodes(blocks[0]), ['gt', 'cbranch'])
self.assertEqual(opcodes(blocks[1]), ['jump'])
self.assertEqual(opcodes(blocks[2]), ['jump'])
self.assertEqual(opcodes(blocks[3]), ['phi', 'ret'])
phi = findop(f, 'phi')
iblocks, ivals = phi.args
self.assertEqual(sorted(iblocks), sorted([blocks[1], blocks[2]]))
self.assertEqual(len(ivals), 2)
def test_ssa2(self):
mod = from_c(source)
f = mod.get_function('func')
cfa.run(f)
verify(f)
codes = opcodes(f)
self.assertEqual(codes.count('phi'), 3)
|
Inaimathi/pykit
|
pykit/analysis/tests/test_cfa.py
|
Python
|
bsd-3-clause
| 1,782
|
from pkg_resources import resource_filename
import pygridtools
from pygridgen.tests import requires
try:
import pytest
except ImportError:
pytest = None
@requires(pytest, 'pytest')
def test(*args):
options = [resource_filename('pygridtools', '')]
options.extend(list(args))
return pytest.main(options)
@requires(pytest, 'pytest')
def teststrict(*args):
options = list(set([
resource_filename('pygridtools', ''),
'--pep8',
'--mpl',
'--doctest-modules'
] + list(args)))
return pytest.main(options)
|
Geosyntec/pygridtools
|
pygridtools/tests/__init__.py
|
Python
|
bsd-3-clause
| 566
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_allclose
from fermipy.hpx_utils import HPX
from fermipy.fits_utils import write_fits_image
from fermipy.skymap import HpxMap
def test_hpxmap(tmpdir):
n = np.ones((10, 192), 'd')
hpx = HPX(4, False, 'GAL')
filename = str(tmpdir / 'test_hpx.fits')
hpx.write_fits(n, filename, clobber=True)
ebins = np.logspace(2, 5, 8)
hpx_2 = HPX(1024, False, 'GAL', region='DISK(110.,75.,2.)', ebins=ebins)
npixels = hpx_2.npix
n2 = np.ndarray((8, npixels), 'd')
for i in range(8):
n2[i].flat = np.arange(npixels)
hpx_map = HpxMap(n2, hpx_2)
wcs, wcs_data = hpx_map.make_wcs_from_hpx(normalize=True)
wcs_out = hpx_2.make_wcs(3)
filename = str(tmpdir / 'test_hpx_2_wcs.fits')
write_fits_image(wcs_data, wcs_out.wcs, filename)
assert_allclose(wcs_data[0, 160, 160], 87.28571429)
assert_allclose(wcs_data[4, 160, 160], 87.28571429)
def test_hpx():
hpx0 = HPX(2**3, False, 'GAL', region='DISK(110.,75.,10.)')
assert_allclose(hpx0[hpx0._ipix], np.arange(len(hpx0._ipix)))
ebins = np.logspace(2, 5, 8)
hpx1 = HPX(2**3, False, 'GAL', region='DISK(110.,75.,10.)', ebins=ebins)
assert_allclose(hpx1[hpx1._ipix], np.arange(len(hpx1._ipix)))
|
fermiPy/fermipy
|
fermipy/tests/test_skymap.py
|
Python
|
bsd-3-clause
| 1,401
|
''' Module: fmfe
Fast Multilevel Fuzzy Enhancement
Implementation of algorithm from J. Wu et al. 2007.
Created by S.T. Castle on 2015-01-25
Enhances an image prior to edge detection.
'''
import numpy as np
import scipy
#from astropy.io import fits
#import pyfits as fits # May need to use this if astropy library unavailable.
def fmfe(data, w, h, fuzzy_t):
'''Fast Multilevel Fuzzy Enhancement
data: 2d array
w: width
h: height
fuzzy_t: fuzzy threshold for enhancement
'''
# Get the threshold to separate into two sets of pixels.
q = get_threshold(data, w, h)
# Now get the mean values for the two sets, ab for the low (background)
# pixels and ao for the high (object) pixels.
sumb = 0.0 # Sum of pixel values.
numb = 0 # Number of pixels.
sumo = 0.0
numo = 0
# Also record min and max pixel values.
min = data[0][0]
max = 0.0
for i in range(h):
for j in range(w):
val = data[i][j]
if val < min: min = val
if val > max: max = val
if val < q:
sumb += val
numb += 1
else:
sumo += val
numo += 1
ab = sumb/numb
ao = sumo/numo
r = 2 # Number of times to enhance.
# Convert pixel values to values in fuzzy domain.
# Then enhance using the fuzzy threshold, and return to spatial domain.
for i in range(h):
for j in range(w):
p = convert_to_fuzzy(data[i][j], ab, ao, min, max)
# Enhance.
for k in range(r):
p = enhance(p, fuzzy_t)
# Transform back to the spatial domain.
p = convert_to_spatial(p, ab, ao, min, max)
data[i][j] = p
return data
# End fmfe.
def get_threshold(data, w, h):
'''Return the average pixel value, excluding zero-valued pixels.'''
sum = 0.0 # Sum of pixel values.
num = 0 # Number of nonzero pixel values.
for i in range(h):
for j in range(w):
val = data[i][j]
if val: # if nonzero
sum += val
num += 1
return (sum/num)
def convert_to_fuzzy(x, ab, ao, min, max):
'''Linear mapping transformation from pixel value to fuzzy value.'''
if x > ao:
return ( (max-x)/(max-ao) )
if x > ((ao+ab)/2):
return ( (2*x-ao-ab)/(ao-ab) )
if x > ab:
return ( (ao+ab-2*x)/(ao-ab) )
return ( (x-min)/(ab-min) )
def convert_to_spatial(x, ab, ao, min, max):
'''Linear mapping transformation from fuzzy value to pixel value.'''
if x > ao:
return ( max-(max-ao)*x )
if x > ((ao+ab)/2):
return ( ((ao-ab)*x+ao+ab)/2 )
if x > ab:
return ( (ao+ab-(ao-ab)*x)/2 )
return ( (ab-min)*x+min )
def enhance(x, t):
'''Enhance a fuzzy value x according to the threshold t.'''
if x > t:
return ( 1-((1-x)**2/(1-t)) )
return ( x**2/t )
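if __name__ == '__main__':
    # Minimal usage sketch -- not part of the original module; assumes Python 3
    # and only illustrates the call shape fmfe(data, width, height, fuzzy_threshold).
    # The 4x4 array below is synthetic: a bright 2x2 "object" on a dim background.
    # The image is enhanced in place and also returned.
    img = np.array([[1., 2., 1., 2.],
                    [2., 9., 8., 1.],
                    [1., 8., 9., 2.],
                    [2., 1., 2., 1.]])
    enhanced = fmfe(img, w=4, h=4, fuzzy_t=0.5)
    print(enhanced)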
|
castlest/shell-detection
|
fmfed/fmfe.py
|
Python
|
bsd-3-clause
| 3,003
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.db.domain import IdScope
from vistrails.db.domain import DBMashup
from vistrails.core.mashup import conv_to_bool, conv_from_bool
from vistrails.core.mashup.alias import Alias
from vistrails.core.mashup.component import Component
from vistrails.core.system import get_elementtree_library
import unittest
import copy
ElementTree = get_elementtree_library()
class Mashup(DBMashup):
def __init__(self, id, name, vtid=None, version=None, alias_list=None,
t='vistrail', has_seq=None, layout='', geometry='',
id_scope=IdScope()):
if has_seq is None:
has_seq = 0
DBMashup.__init__(self, id, name, version, alias_list, t, vtid, layout,
geometry, has_seq)
self.id_scope = id_scope
if has_seq is None:
self.has_seq = False
if isinstance(self.alias_list, list):
for v in self.alias_list:
if v.component.seq == True:
self.has_seq = True
else:
self.has_seq = has_seq
id = DBMashup.db_id
name = DBMashup.db_name
version = DBMashup.db_version
alias_list = DBMashup.db_aliases
aliases = DBMashup.db_aliases
vtid = DBMashup.db_vtid
type = DBMashup.db_type
layout = DBMashup.db_layout
geometry = DBMashup.db_geometry
def _get_has_seq(self):
return conv_to_bool(self.db_has_seq)
def _set_has_seq(self, s):
self.db_has_seq = conv_from_bool(s)
has_seq = property(_get_has_seq,_set_has_seq)
@staticmethod
def convert(_mashup):
_mashup.__class__ = Mashup
for alias in _mashup.alias_list:
Alias.convert(alias)
def __copy__(self):
return Mashup.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
"""do_copy() -> Mashup
returns a clone of itself"""
cp = DBMashup.do_copy(self, new_ids, id_scope, id_remap)
Mashup.convert(cp)
return cp
##########################################################################
# Serialization / Unserialization
# def toXml(self, node=None):
# """toXml(node: ElementTree.Element) -> ElementTree.Element
# writes itself to xml
# """
#
# if node is None:
# node = ElementTree.Element('mashup')
#
# #set attributes
# node.set('id', self.convert_to_str(self.id,'long'))
# node.set('version', self.convert_to_str(self.version,'long'))
# node.set('vtid', self.convert_to_str(self.vtid,'str'))
# node.set('name', self.convert_to_str(self.name,'str'))
# node.set('type', self.convert_to_str(self.type,'str'))
# node.set('has_seq', self.convert_to_str(self.has_seq,'bool'))
# for v in self.alias_list:
# child_ = ElementTree.SubElement(node, 'alias')
# v.toXml(child_)
#
# layoutnode = ElementTree.SubElement(node,'layout')
# layoutnode.text = str(self.layout)
#
# geomnode = ElementTree.SubElement(node,'geometry')
# geomnode.text = str(self.geometry)
#
# return node
#
# @staticmethod
# def fromXml(node):
# if node.tag != 'mashup':
# #print "node.tag != 'mashup'"
# return None
# #read attributes
# data = node.get('id', None)
# id = Mashup.convert_from_str(data, 'long')
# data = node.get('name', None)
# name = Mashup.convert_from_str(data, 'str')
# data = node.get('version', None)
# version = Mashup.convert_from_str(data, 'long')
# data = node.get('vtid', None)
# vtid = Mashup.convert_from_str(data, 'str')
# data = node.get('type', None)
# type = Mashup.convert_from_str(data, 'str')
# data = node.get('has_seq', None)
# seq = Component.convert_from_str(data, 'bool')
# alias_list = []
# layout = None
# geometry = None
# for child in node.getchildren():
# if child.tag == "alias":
# alias = Alias.fromXml(child)
# alias_list.append(alias)
# if child.tag == "layout":
# layout = str(child.text).strip(" \n\t")
# if child.tag == "geometry":
# geometry = str(child.text).strip(" \n\t")
# return Mashup(id=id, name=name, vtid=vtid, version=version,
# alias_list=alias_list, t=type, has_seq=seq,
# layout=layout, geometry=geometry)
def loadAliasesFromPipeline(self, pipeline, id_scope):
"""loadAliasesFromPipelines(pipeline: Pipeline) -> None
This will replace current aliases with the ones present in Pipeline
"""
if pipeline:
self.alias_list = []
if len(pipeline.aliases) > 0:
pos = 0
for aname,info in pipeline.aliases.iteritems():
parameter = pipeline.db_get_object(info[0],info[1])
cid = id_scope.getNewId('component')
aid = id_scope.getNewId('alias')
component = Component(cid, parameter.vtType,
parameter.real_id, info[2], info[3],
info[4], parameter.type,
parameter.strValue, parameter.pos,
pos, "")
alias = Alias(aid, aname, component)
self.alias_list.append(alias)
pos += 1
def remapPipelineObjects(self, id_remap):
for alias in self.alias_list:
try:
new_pid = id_remap[(alias.component.vtparent_type,
alias.component.vtparent_id)]
alias.component.vtparent_id = new_pid
new_id = id_remap[(alias.component.vttype,alias.component.vtid)]
alias.component.vtid = new_id
except Exception:
pass
def validateForPipeline(self, pipeline):
"""validateForPipeline(pipeline) -> None
This will make sure that the parameters in the alias list are present
in the pipeline. If they were removed, the aliases pointing to it will
be removed. This changes the mashup in place """
to_remove = []
for alias in self.alias_list:
try:
param = pipeline.db_get_object(alias.component.vttype,
alias.component.vtid)
except Exception:
to_remove.append(alias)
for a in to_remove:
self.alias_list.remove(a)
pos = 0
mashup_aliases = []
for a in self.alias_list:
mashup_aliases.append(a.name)
a.component.pos = pos
pos+=1
for a, info in pipeline.aliases.iteritems():
if a not in mashup_aliases:
parameter = pipeline.db_get_object(info[0],info[1])
cid = self.id_scope.getNewId('component')
aid = self.id_scope.getNewId('alias')
component = Component(cid, parameter.vtType,
parameter.real_id, info[2], info[3],
info[4], parameter.type,
parameter.strValue, parameter.pos,
pos, "")
newalias = Alias(aid, a, component)
self.alias_list.append(newalias)
pos +=1
def getAliasByName(self, name):
for alias in self.alias_list:
if alias.name == name:
return alias
##########################################################################
# Operators
def __str__(self):
""" __str__() -> str - Returns a string representation of itself """
return ("(Mashup id='%s' name='%s' version='%s' vtid='%s' type='%s' \
layout='%s' geometry='%s' alias_list='%s')@%X" %
(self.id,
self.name,
self.version,
self.vtid,
self.type,
self.layout,
self.geometry,
self.alias_list,
id(self)))
def __eq__(self, other):
""" __eq__(other: Mashup) -> boolean
Returns True if self and other have the same attributes. Used by ==
operator.
"""
if type(self) != type(other):
return False
if self.name != other.name:
return False
if self.vtid != other.vtid:
return False
if self.version != other.version:
return False
if self.type != other.type:
return False
if self.layout != other.layout:
return False
if self.geometry != other.geometry:
return False
if len(self.alias_list) != len(other.alias_list):
return False
for p,q in zip(self.alias_list, other.alias_list):
if p != q:
return False
return True
def __ne__(self, other):
""" __ne__(other: Component) -> boolean
Returns True if self and other don't have the same attributes.
Used by != operator.
"""
return not self.__eq__(other)
################################################################################
class TestMashup(unittest.TestCase):
def create_mashup(self, id_scope=IdScope()):
c1 = Component(id=id_scope.getNewId('mashup_component'),
vttype='parameter', param_id=15L,
parent_vttype='function', parent_id=3L, mid=4L,
type='String', value='test', p_pos=0, pos=1,
strvaluelist='test1,test2', widget="text")
a1 = Alias(id=id_scope.getNewId('mashup_alias'), name='alias1', component=c1)
m = Mashup(id=id_scope.getNewId('mashup'), name='mashup1', vtid='empty.vt',
version=15L, alias_list=[a1])
return m
def test_copy(self):
id_scope = IdScope()
m1 = self.create_mashup(id_scope)
m2 = copy.copy(m1)
self.assertEqual(m1, m2)
self.assertEqual(m1.id, m2.id)
m3 = m2.do_copy(True, id_scope, {})
self.assertEqual(m1, m3)
self.assertNotEqual(m1.id, m3.id)
# def test_serialization(self):
# m1 = self.create_mashup()
# node = m1.toXml()
# m2 = Mashup.fromXml(node)
# self.assertEqual(m1, m2)
# self.assertEqual(m1.id, m2.id)
def test_str(self):
m1 = self.create_mashup()
str(m1)
|
VisTrails/VisTrails
|
vistrails/core/mashup/mashup.py
|
Python
|
bsd-3-clause
| 13,018
|
# -*- coding: utf-8 -*-
import logging
import os
from unittest.mock import Mock, patch
from django.test import TransactionTestCase
from eventkit_cloud.utils.gpkg.sqlite_utils import get_database_connection, Table
logger = logging.getLogger(__name__)
class TestSqliteUtils(TransactionTestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
@patch("eventkit_cloud.utils.gpkg.sqlite_utils.connect")
def test_get_database_connection(self, connect):
from sqlite3 import Row
cursor_mock = Mock()
cursor_mock.fetchone.return_value = "test"
connect().__enter__().cursor.return_value = cursor_mock
# Test that a connection object is returned
with get_database_connection(self.path) as conn:
self.assertEqual(conn.cursor().fetchone(), "test")
# Test that the row_factory property is correctly set to sqlite3.Row
self.assertEqual(get_database_connection(self.path).row_factory, Row)
class TestTableQuery(TransactionTestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
def test_get_table_query_validate(self):
cursor_mock = Mock()
def fill_cursor(*args):
if args[1][0] in ["gpkg_contents"]:
cursor_mock.fetchone.return_value = ("gpkg_contents",)
else:
cursor_mock.fetchone.return_value = tuple()
cursor_mock.execute.side_effect = fill_cursor
passed = True
try:
Table(cursor_mock, "gpkg_contents").validate()
except ValueError:
passed = False
self.assertTrue(passed)
self.assertRaises(ValueError, Table(cursor_mock, "gpkg_metadata").validate)
try:
Table(cursor_mock, "sqlite_master").validate()
except ValueError:
passed = False
self.assertTrue(passed)
class TestTable(TransactionTestCase):
def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
def test_get_table_exists(self):
cursor_mock = Mock()
def fill_cursor(*args):
if args[1][0] in ["gpkg_contents", "other_table"]:
cursor_mock.fetchone.return_value = ("gpkg_contents",)
else:
cursor_mock.fetchone.return_value = tuple()
cursor_mock.execute.side_effect = fill_cursor
self.assertTrue(Table.exists(cursor_mock, "gpkg_contents"))
self.assertTrue(cursor_mock.execute.called_once)
self.assertTrue(not Table.exists(cursor_mock, "gpkg_metadata"))
self.assertTrue(Table.exists(cursor_mock, "other_table"))
|
venicegeo/eventkit-cloud
|
eventkit_cloud/utils/gpkg/tests/test_sqlite_utils.py
|
Python
|
bsd-3-clause
| 2,673
|
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
# Arthur Mensch <arthur.mensch@m4x.org>
import numbers
import warnings
import numpy as np
from scipy import optimize
from joblib import Parallel, effective_n_jobs
from ._base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ._linear_loss import LinearModelLoss
from ._sag import sag_solver
from .._loss.loss import HalfBinomialLoss, HalfMultinomialLoss
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm._base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import softmax
from ..utils.extmath import row_norms
from ..utils.optimize import _newton_cg, _check_optimize_result
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.fixes import delayed
from ..model_selection import check_cv
from ..metrics import get_scorer
_LOGISTIC_SOLVER_CONVERGENCE_MSG = (
"Please also refer to the documentation for alternative solver options:\n"
" https://scikit-learn.org/stable/modules/linear_model.html"
"#logistic-regression"
)
def _check_solver(solver, penalty, dual):
all_solvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"]
if solver not in all_solvers:
raise ValueError(
"Logistic Regression supports only solvers in %s, got %s."
% (all_solvers, solver)
)
all_penalties = ["l1", "l2", "elasticnet", "none"]
if penalty not in all_penalties:
raise ValueError(
"Logistic Regression supports only penalties in %s, got %s."
% (all_penalties, penalty)
)
if solver not in ["liblinear", "saga"] and penalty not in ("l2", "none"):
raise ValueError(
"Solver %s supports only 'l2' or 'none' penalties, got %s penalty."
% (solver, penalty)
)
if solver != "liblinear" and dual:
raise ValueError(
"Solver %s supports only dual=False, got dual=%s" % (solver, dual)
)
if penalty == "elasticnet" and solver != "saga":
raise ValueError(
"Only 'saga' solver supports elasticnet penalty, got solver={}.".format(
solver
)
)
if solver == "liblinear" and penalty == "none":
raise ValueError("penalty='none' is not supported for the liblinear solver")
return solver
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == "auto":
if solver == "liblinear":
multi_class = "ovr"
elif n_classes > 2:
multi_class = "multinomial"
else:
multi_class = "ovr"
if multi_class not in ("multinomial", "ovr"):
raise ValueError(
"multi_class should be 'multinomial', 'ovr' or 'auto'. Got %s."
% multi_class
)
if multi_class == "multinomial" and solver == "liblinear":
raise ValueError("Solver %s does not support a multinomial backend." % solver)
return multi_class
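# Illustrative notes (not part of scikit-learn) on the two checks above:
#   _check_solver("saga", "elasticnet", dual=False)       -> returns "saga"
#   _check_solver("lbfgs", "l1", dual=False)               -> raises ValueError
#   _check_multi_class("auto", "liblinear", n_classes=3)   -> returns "ovr"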
def _logistic_regression_path(
X,
y,
pos_class=None,
Cs=10,
fit_intercept=True,
max_iter=100,
tol=1e-4,
verbose=0,
solver="lbfgs",
coef=None,
class_weight=None,
dual=False,
penalty="l2",
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
check_input=True,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
n_threads=1,
):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape(n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iterations for each C.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(
X,
accept_sparse="csr",
dtype=np.float64,
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != "multinomial":
if classes.size > 2:
raise ValueError("To fit OvR, use the pos_class argument")
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == "multinomial":
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing an OvR fit, we need to mask the labels first. For the
# multinomial case this is not necessary.
if multi_class == "ovr":
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask = y == pos_class
y_bin = np.ones(y.shape, dtype=X.dtype)
if solver in ["lbfgs", "newton-cg"]:
# HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
# of in [-1, 1].
mask_classes = np.array([0, 1])
y_bin[~mask] = 0.0
else:
mask_classes = np.array([-1, 1])
y_bin[~mask] = -1.0
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(
class_weight, classes=mask_classes, y=y_bin
)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
# SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
# not LabelBinarizer, i.e. y as a 1d-array of integers.
# LabelEncoder also saves memory compared to LabelBinarizer, especially
# when n_classes is large.
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
# For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded.
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros(
(classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
)
if coef is not None:
# Initialization must work whether or not coef includes the bias term.
if multi_class == "ovr":
if coef.size not in (n_features, w0.size):
raise ValueError(
"Initialization coef is of shape %d, expected shape %d or %d"
% (coef.size, n_features, w0.size)
)
w0[: coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if coef.shape[0] != n_classes or coef.shape[1] not in (
n_features,
n_features + 1,
):
raise ValueError(
"Initialization coef is of shape (%d, %d), expected "
"shape (%d, %d) or (%d, %d)"
% (
coef.shape[0],
coef.shape[1],
classes.size,
n_features,
classes.size,
n_features + 1,
)
)
if n_classes == 1:
w0[0, : coef.shape[1]] = -coef
w0[1, : coef.shape[1]] = coef
else:
w0[:, : coef.shape[1]] = coef
if multi_class == "multinomial":
if solver in ["lbfgs", "newton-cg"]:
# scipy.optimize.minimize and newton-cg accept only ravelled parameters,
# i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and
# reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F").
# As w0 is F-contiguous, ravel(order="F") also avoids a copy.
w0 = w0.ravel(order="F")
loss = LinearModelLoss(
base_loss=HalfMultinomialLoss(n_classes=classes.size),
fit_intercept=fit_intercept,
)
target = Y_multi
if solver == "lbfgs":
func = loss.loss_gradient
elif solver == "newton-cg":
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product # hess = [gradient, hessp]
warm_start_sag = {"coef": w0.T}
else:
target = y_bin
if solver == "lbfgs":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
func = loss.loss_gradient
elif solver == "newton-cg":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product # hess = [gradient, hessp]
warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == "lbfgs":
l2_reg_strength = 1.0 / C
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)
]
opt_res = optimize.minimize(
func,
w0,
method="L-BFGS-B",
jac=True,
args=(X, target, sample_weight, l2_reg_strength, n_threads),
options={"iprint": iprint, "gtol": tol, "maxiter": max_iter},
)
n_iter_i = _check_optimize_result(
solver,
opt_res,
max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
)
w0, loss = opt_res.x, opt_res.fun
elif solver == "newton-cg":
l2_reg_strength = 1.0 / C
args = (X, target, sample_weight, l2_reg_strength, n_threads)
w0, n_iter_i = _newton_cg(
hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol
)
elif solver == "liblinear":
coef_, intercept_, n_iter_i, = _fit_liblinear(
X,
target,
C,
fit_intercept,
intercept_scaling,
None,
penalty,
dual,
verbose,
max_iter,
tol,
random_state,
sample_weight=sample_weight,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ["sag", "saga"]:
if multi_class == "multinomial":
target = target.astype(X.dtype, copy=False)
loss = "multinomial"
else:
loss = "log"
# alpha is for L2-norm, beta is for L1-norm
if penalty == "l1":
alpha = 0.0
beta = 1.0 / C
elif penalty == "l2":
alpha = 1.0 / C
beta = 0.0
else: # Elastic-Net penalty
alpha = (1.0 / C) * (1 - l1_ratio)
beta = (1.0 / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X,
target,
sample_weight,
loss,
alpha,
beta,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
warm_start_sag,
is_saga=(solver == "saga"),
)
else:
raise ValueError(
"solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag', 'saga'}, got '%s' instead" % solver
)
if multi_class == "multinomial":
n_classes = max(2, classes.size)
if solver in ["lbfgs", "newton-cg"]:
multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
else:
multi_w0 = w0
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
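# Minimal usage sketch for the path helper above. This is a private function that
# LogisticRegression/LogisticRegressionCV call internally; the snippet only
# illustrates the returned shapes and is not meant as public API:
#
#     from sklearn.datasets import make_classification
#     X, y = make_classification(n_samples=100, n_features=4, random_state=0)
#     coefs, Cs, n_iter = _logistic_regression_path(X, y, Cs=3, solver="lbfgs")
#     # coefs.shape == (3, 5): one row per C, n_features + 1 columns (intercept last)
#     # Cs == array([1e-4, 1e0, 1e4]) when Cs is given as the integer 3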
# helper function for LogisticCV
def _log_reg_scoring_path(
X,
y,
train,
test,
pos_class=None,
Cs=10,
scoring=None,
fit_intercept=False,
max_iter=100,
tol=1e-4,
class_weight=None,
verbose=0,
solver="lbfgs",
penalty="l2",
dual=False,
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen on a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool, default=False
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape(n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape(n_cs,)
Actual number of iterations for each C.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = _logistic_regression_path(
X_train,
y_train,
Cs=Cs,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
pos_class=pos_class,
multi_class=multi_class,
tol=tol,
verbose=verbose,
dual=dual,
penalty=penalty,
intercept_scaling=intercept_scaling,
random_state=random_state,
check_input=False,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == "ovr":
log_reg.classes_ = np.array([-1, 1])
elif multi_class == "multinomial":
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError(
"multi_class should be either multinomial or ovr, got %s" % multi_class
)
if pos_class is not None:
mask = y_test == pos_class
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.0
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == "ovr":
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.0
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
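# Hedged sketch of how the scoring helper above is driven per CV fold (this is
# what LogisticRegressionCV does through joblib; shown only to clarify the
# returned shapes, with an assumed 5-fold stratified split):
#
#     from sklearn.model_selection import StratifiedKFold
#     train, test = next(StratifiedKFold(5).split(X, y))
#     coefs, Cs, scores, n_iter = _log_reg_scoring_path(
#         X, y, train, test, Cs=10, solver="lbfgs", scoring=None)
#     # scores.shape == (10,): one held-out score (accuracy by default) per C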
class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
"""
Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the
cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag', 'saga' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
that regularization is applied by default**. It can handle both dense
and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted
(and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation, or no regularization. The 'liblinear' solver
supports both L1 and L2 regularization, with a dual formulation only for
the L2 penalty. The Elastic-Net regularization is only supported by the
'saga' solver.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : {'l1', 'l2', 'elasticnet', 'none'}, default='l2'
Specify the norm of the penalty:
- `'none'`: no penalty is added;
- `'l2'`: add a L2 penalty term and it is the default choice;
- `'l1'`: add a L1 penalty term;
- `'elasticnet'`: both L1 and L2 penalty terms are added.
.. warning::
Some penalties may not work with some solvers. See the parameter
`solver` below, to know the compatibility between the penalty and
solver.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem. Default is 'lbfgs'.
To choose a solver, you might want to consider the following aspects:
- For small datasets, 'liblinear' is a good choice, whereas 'sag'
and 'saga' are faster for large ones;
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and
'lbfgs' handle multinomial loss;
- 'liblinear' is limited to one-versus-rest schemes.
.. warning::
The choice of the algorithm depends on the penalty chosen:
Supported penalties by solver:
- 'newton-cg' - ['l2', 'none']
- 'lbfgs' - ['l2', 'none']
- 'liblinear' - ['l1', 'l2']
- 'sag' - ['l2', 'none']
- 'saga' - ['elasticnet', 'l1', 'l2', 'none']
.. note::
'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from :mod:`sklearn.preprocessing`.
.. seealso::
Refer to the User Guide for more information regarding
:class:`LogisticRegression` and more specifically the
`Table <https://scikit-learn.org/dev/modules/linear_model.html#logistic-regression>`_
summarizing solver/penalty supports.
<!--
# noqa: E501
-->
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
.. versionchanged:: 0.22
The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
max_iter : int, default=100
Maximum number of iterations taken for the solvers to converge.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int, default=None
Number of CPU cores used when parallelizing over classes if
multi_class='ovr'". This parameter is ignored when the ``solver`` is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
See :term:`Glossary <n_jobs>` for more details.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `intercept_`
corresponds to outcome 1 (True) and `-intercept_` corresponds to
outcome 0 (False).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : ndarray of shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
See Also
--------
SGDClassifier : Incrementally trained logistic regression (when given
the parameter ``loss="log"``).
LogisticRegressionCV : Logistic regression with built-in cross validation.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
LIBLINEAR -- A Library for Large Linear Classification
https://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
:arxiv:`"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :])
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(X, y)
0.97...
"""
def __init__(
self,
penalty="l2",
*,
dual=False,
tol=1e-4,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
random_state=None,
solver="lbfgs",
max_iter=100,
multi_class="auto",
verbose=0,
warm_start=False,
n_jobs=None,
l1_ratio=None,
):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
def fit(self, X, y, sample_weight=None):
"""
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self
Fitted estimator.
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)" % self.C)
if self.penalty == "elasticnet":
if (
not isinstance(self.l1_ratio, numbers.Number)
or self.l1_ratio < 0
or self.l1_ratio > 1
):
raise ValueError(
"l1_ratio must be between 0 and 1; got (l1_ratio=%r)"
% self.l1_ratio
)
elif self.l1_ratio is not None:
warnings.warn(
"l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty)
)
if self.penalty == "none":
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio parameters"
)
# Note that check for l1_ratio is done right above
C_ = np.inf
penalty = "l2"
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError(
"Maximum number of iterations must be positive; got (max_iter=%r)"
% self.max_iter
)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError(
"Tolerance for stopping criteria must be positive; got (tol=%r)"
% self.tol
)
if solver == "lbfgs":
_dtype = np.float64
else:
_dtype = [np.float64, np.float32]
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
dtype=_dtype,
order="C",
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
check_classification_targets(y)
self.classes_ = np.unique(y)
multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))
if solver == "liblinear":
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn(
"'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs))
)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X,
y,
self.C,
self.fit_intercept,
self.intercept_scaling,
self.class_weight,
self.penalty,
self.dual,
self.verbose,
self.max_iter,
self.tol,
self.random_state,
sample_weight=sample_weight,
)
return self
if solver in ["sag", "saga"]:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError(
"This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r"
% classes_[0]
)
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, "coef_", None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(
warm_start_coef, self.intercept_[:, np.newaxis], axis=1
)
# Hack so that we iterate only once for the multinomial case.
if multi_class == "multinomial":
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ["sag", "saga"]:
prefer = "threads"
else:
prefer = "processes"
# TODO: Refactor this to avoid joblib parallelism entirely when doing binary
# and multinomial multiclass classification and use joblib only for the
# one-vs-rest multiclass case.
if (
solver in ["lbfgs", "newton-cg"]
and len(classes_) == 1
and effective_n_jobs(self.n_jobs) == 1
):
# In the future, we would like n_threads = _openmp_effective_n_threads()
# For the time being, we just do
n_threads = 1
else:
n_threads = 1
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
path_func(
X,
y,
pos_class=class_,
Cs=[C_],
l1_ratio=self.l1_ratio,
fit_intercept=self.fit_intercept,
tol=self.tol,
verbose=self.verbose,
solver=solver,
multi_class=multi_class,
max_iter=self.max_iter,
class_weight=self.class_weight,
check_input=False,
random_state=self.random_state,
coef=warm_start_coef_,
penalty=penalty,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
n_threads=n_threads,
)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef)
)
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
n_features = X.shape[1]
if multi_class == "multinomial":
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(
n_classes, n_features + int(self.fit_intercept)
)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
else:
self.intercept_ = np.zeros(n_classes)
return self
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
check_is_fitted(self)
ovr = self.multi_class in ["ovr", "warn"] or (
self.multi_class == "auto"
and (self.classes_.size <= 2 or self.solver == "liblinear")
)
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
def predict_log_proba(self, X):
"""
Predict logarithm of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
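# Quick sketch contrasting the two probability paths documented in predict_proba
# above (OvR-normalised sigmoids vs. multinomial softmax). Illustrative only;
# both rows sum to 1 but can differ for the same training data:
#
#     clf_ovr = LogisticRegression(multi_class="ovr").fit(X, y)
#     clf_mnl = LogisticRegression(multi_class="multinomial").fit(X, y)
#     clf_ovr.predict_proba(X[:1])  # per-class logistic scores, renormalised
#     clf_mnl.predict_proba(X[:1])  # softmax over the decision_function output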
class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
See glossary entry for :term:`cross-validation estimator`.
This class implements logistic regression using the liblinear, newton-cg, sag
or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter
is selected by the cross-validator
:class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed
using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs'
solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`).
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : int or cross-validation generator, default=None
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Specify the norm of the penalty:
- `'l2'`: add a L2 penalty term (used by default);
- `'l1'`: add a L1 penalty term;
- `'elasticnet'`: both L1 and L2 penalty terms are added.
.. warning::
Some penalties may not work with some solvers. See the parameter
`solver` below, to know the compatibility between the penalty and
solver.
scoring : str or callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem. Default is 'lbfgs'.
To choose a solver, you might want to consider the following aspects:
- For small datasets, 'liblinear' is a good choice, whereas 'sag'
and 'saga' are faster for large ones;
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and
'lbfgs' handle multinomial loss;
- 'liblinear' might be slower in :class:`LogisticRegressionCV`
because it does not handle warm-starting. 'liblinear' is
limited to one-versus-rest schemes.
.. warning::
The choice of the algorithm depends on the penalty chosen:
- 'newton-cg' - ['l2']
- 'lbfgs' - ['l2']
- 'liblinear' - ['l1', 'l2']
- 'sag' - ['l2']
- 'saga' - ['elasticnet', 'l1', 'l2']
.. note::
'sag' and 'saga' fast convergence is only guaranteed on features
with approximately the same scale. You can preprocess the data with
a scaler from :mod:`sklearn.preprocessing`.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
max_iter : int, default=100
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int, default=None
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool, default=True
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.
Note that this only applies to the solver and not the cross-validation
generator. See :term:`Glossary <random_state>` for details.
l1_ratios : list of float, default=None
The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``.
Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
using ``penalty='l2'``, while 1 is equivalent to using
``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination
of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : ndarray of shape (n_cs)
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
l1_ratios_ : ndarray of shape (n_l1_ratios)
Array of l1_ratios used for cross-validation. If no l1_ratio is used
(i.e. penalty is not 'elasticnet'), this is set to ``[None]``
coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \
(n_folds, n_cs, n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, n_cs, n_features)`` or
``(n_folds, n_cs, n_features + 1)`` depending on whether the
intercept is fit or not. If ``penalty='elasticnet'``, the shape is
``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class. Each dict value
has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
``penalty='elasticnet'``.
C_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
`C_` is of shape(n_classes,) when the problem is binary.
l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of l1_ratio that maps to the best scores across every class. If
refit is set to False, then for each class, the best l1_ratio is the
average of the l1_ratio's that correspond to the best scores for each
fold. `l1_ratio_` is of shape(n_classes,) when the problem is binary.
n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
LogisticRegression : Logistic regression without tuning the
hyperparameter `C`.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegressionCV
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]).shape
(2, 3)
>>> clf.score(X, y)
0.98...
"""
def __init__(
self,
*,
Cs=10,
fit_intercept=True,
cv=None,
dual=False,
penalty="l2",
scoring=None,
solver="lbfgs",
tol=1e-4,
max_iter=100,
class_weight=None,
n_jobs=None,
verbose=0,
refit=True,
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
l1_ratios=None,
):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
self.l1_ratios = l1_ratios
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Fitted LogisticRegressionCV estimator.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError(
"Maximum number of iterations must be positive; got (max_iter=%r)"
% self.max_iter
)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError(
"Tolerance for stopping criteria must be positive; got (tol=%r)"
% self.tol
)
if self.penalty == "elasticnet":
if (
self.l1_ratios is None
or len(self.l1_ratios) == 0
or any(
(
not isinstance(l1_ratio, numbers.Number)
or l1_ratio < 0
or l1_ratio > 1
)
for l1_ratio in self.l1_ratios
)
):
raise ValueError(
"l1_ratios must be a list of numbers between "
"0 and 1; got (l1_ratios=%r)"
% self.l1_ratios
)
l1_ratios_ = self.l1_ratios
else:
if self.l1_ratios is not None:
warnings.warn(
"l1_ratios parameter is only used when penalty "
"is 'elasticnet'. Got (penalty={})".format(self.penalty)
)
l1_ratios_ = [None]
if self.penalty == "none":
raise ValueError(
"penalty='none' is not useful and not supported by "
"LogisticRegressionCV."
)
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = {
label_encoder.transform([cls])[0]: v for cls, v in class_weight.items()
}
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
multi_class = _check_multi_class(self.multi_class, solver, len(classes))
if solver in ["sag", "saga"]:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError(
"This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r"
% classes[0]
)
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if multi_class == "multinomial":
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(
class_weight, classes=np.arange(len(self.classes_)), y=y
)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ["sag", "saga"]:
prefer = "threads"
else:
prefer = "processes"
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
path_func(
X,
y,
train,
test,
pos_class=label,
Cs=self.Cs,
fit_intercept=self.fit_intercept,
penalty=self.penalty,
dual=self.dual,
solver=solver,
tol=self.tol,
max_iter=self.max_iter,
verbose=self.verbose,
class_weight=class_weight,
scoring=self.scoring,
multi_class=multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio,
)
for label in iter_encoded_labels
for train, test in folds
for l1_ratio in l1_ratios_
)
# _log_reg_scoring_path will output different shapes depending on the
# multi_class param, so we need to reshape the outputs accordingly.
# Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
# rows are equal, so we just take the first one.
# After reshaping,
# - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
# - coefs_paths is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
# - n_iter is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios) or
# (1, n_folds, n_Cs . n_l1_ratios)
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
if multi_class == "multinomial":
coefs_paths = np.reshape(
coefs_paths,
(len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1),
)
# equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
# (1, 2, 0, 3))
coefs_paths = np.swapaxes(coefs_paths, 0, 1)
coefs_paths = np.swapaxes(coefs_paths, 0, 2)
self.n_iter_ = np.reshape(
n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_))
)
# repeat same scores across all classes
scores = np.tile(scores, (n_classes, 1, 1))
else:
coefs_paths = np.reshape(
coefs_paths,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1),
)
self.n_iter_ = np.reshape(
n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
)
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
self.C_ = list()
self.l1_ratio_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)
):
if multi_class == "ovr":
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
else:
# For multinomial, all scores are the same across classes
scores = scores[0]
# coefs_paths will keep its original shape because
# logistic_regression_path expects it this way
if self.refit:
# best_index is between 0 and (n_Cs . n_l1_ratios - 1)
# for example, with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
best_index = scores.sum(axis=0).argmax()
best_index_C = best_index % len(self.Cs_)
C_ = self.Cs_[best_index_C]
self.C_.append(C_)
best_index_l1 = best_index // len(self.Cs_)
l1_ratio_ = l1_ratios_[best_index_l1]
self.l1_ratio_.append(l1_ratio_)
if multi_class == "multinomial":
coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = _logistic_regression_path(
X,
y,
pos_class=encoded_label,
Cs=[C_],
solver=solver,
fit_intercept=self.fit_intercept,
coef=coef_init,
max_iter=self.max_iter,
tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio_,
)
w = w[0]
else:
# Take the best scores across every fold and the average of
# all coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
if multi_class == "ovr":
w = np.mean(
[coefs_paths[i, best_indices[i], :] for i in range(len(folds))],
axis=0,
)
else:
w = np.mean(
[
coefs_paths[:, i, best_indices[i], :]
for i in range(len(folds))
],
axis=0,
)
best_indices_C = best_indices % len(self.Cs_)
self.C_.append(np.mean(self.Cs_[best_indices_C]))
if self.penalty == "elasticnet":
best_indices_l1 = best_indices // len(self.Cs_)
self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
else:
self.l1_ratio_.append(None)
if multi_class == "multinomial":
self.C_ = np.tile(self.C_, n_classes)
self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
self.coef_ = w[:, : X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
self.l1_ratio_ = np.asarray(self.l1_ratio_)
self.l1_ratios_ = np.asarray(l1_ratios_)
# if elasticnet was used, add the l1_ratios dimension to some
# attributes
if self.l1_ratios is not None:
# with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
# To get a 2d array with the following layout
# l1_1, l1_2, l1_3
# c1 [[ . , . , . ],
# c2 [ . , . , . ]]
# We need to first reshape and then transpose.
# The same goes for the other arrays
for cls, coefs_path in self.coefs_paths_.items():
self.coefs_paths_[cls] = coefs_path.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size, -1)
)
self.coefs_paths_[cls] = np.transpose(
self.coefs_paths_[cls], (0, 2, 1, 3)
)
for cls, score in self.scores_.items():
self.scores_[cls] = score.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size)
)
self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1))
self.n_iter_ = self.n_iter_.reshape(
(-1, len(folds), self.l1_ratios_.size, self.Cs_.size)
)
self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2))
return self
def score(self, X, y, sample_weight=None):
"""Score using the `scoring` option on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Score of self.predict(X) wrt. y.
"""
scoring = self.scoring or "accuracy"
scoring = get_scorer(scoring)
return scoring(self, X, y, sample_weight=sample_weight)
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
}
}
|
manhhomienbienthuy/scikit-learn
|
sklearn/linear_model/_logistic.py
|
Python
|
bsd-3-clause
| 78,200
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Python implementation of the GRPC interoperability test client."""
import argparse
from grpc.early_adopter import implementations
from interop import methods
from interop import resources
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def _args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--server_host', help='the host to which to connect', type=str)
parser.add_argument(
'--server_host_override',
help='the server host to which to claim to connect', type=str)
parser.add_argument(
'--server_port', help='the port to which to connect', type=int)
parser.add_argument(
'--test_case', help='the test case to execute', type=str)
parser.add_argument(
'--use_tls', help='require a secure connection', dest='use_tls',
action='store_true')
parser.add_argument(
'--use_test_ca', help='replace platform root CAs with ca.pem',
action='store_true')
return parser.parse_args()
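# Example invocation (hypothetical host/port shown only for illustration; the
# valid --test_case values are defined by interop.methods.TestCase, e.g. the
# standard gRPC interop "large_unary" case):
#   python client.py --server_host=localhost --server_port=8080 \
#       --test_case=large_unary --use_tls --use_test_ca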
def _stub(args):
if args.use_tls:
if args.use_test_ca:
root_certificates = resources.test_root_certificates()
else:
root_certificates = resources.prod_root_certificates()
stub = implementations.secure_stub(
methods.CLIENT_METHODS, args.server_host, args.server_port,
root_certificates, None, None,
server_host_override=args.server_host_override)
else:
stub = implementations.insecure_stub(
methods.CLIENT_METHODS, args.server_host, args.server_port)
return stub
def _test_case_from_arg(test_case_arg):
for test_case in methods.TestCase:
if test_case_arg == test_case.value:
return test_case
else:
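        # This else belongs to the for loop: it only runs when the loop
        # finished without returning, i.e. no test case matched the argument.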
raise ValueError('No test case "%s"!' % test_case_arg)
def _test_interoperability():
args = _args()
stub = _stub(args)
test_case = _test_case_from_arg(args.test_case)
test_case.test_interoperability(stub)
if __name__ == '__main__':
_test_interoperability()
|
Juzley/grpc
|
src/python/interop/interop/client.py
|
Python
|
bsd-3-clause
| 3,438
|
#! /usr/bin/python
import sys
import re
import os
from subprocess import Popen, PIPE, STDOUT
BASE_DICT="/usr/local/share/pocketsphinx/model/lm/en_US/cmu07a.dic"
G2P=os.path.dirname(sys.argv[0]) + "/en-g2p.sh"
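# This script reads a pocketsphinx FSG definition on stdin, collects every word
# that appears in a TRANSITION line, prints the pronunciations found in
# BASE_DICT, and pipes any out-of-vocabulary words through the en-g2p.sh
# grapheme-to-phoneme helper, emitting a dictionary on stdout.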
words = {}
for l in open(BASE_DICT):
ss = l.split()
word = ss[0]
word = re.sub(r"\(\d\)$", "", word)
try:
prob = float(ss[1])
pron = ss[2:]
except ValueError:
prob = 1
pron = ss[1:]
words.setdefault(word, []).append((pron, prob))
input_words = set()
for l in sys.stdin:
if l.startswith("TRANSITION"):
ss = l.split()
if len(ss) == 5:
input_words.add(ss[-1])
g2p_words = []
for w in input_words:
if w.lower() in words:
for (i, pron) in enumerate(words[w.lower()]):
if i == 0:
print w,
else:
print "%s(%d)" % (w, i+1),
print " ".join(pron[0])
else:
g2p_words.append(w)
if len(g2p_words) > 0:
proc = Popen(G2P,stdin=PIPE, stdout=PIPE, stderr=STDOUT )
#stdout, stderr = proc.communicate()
for w in g2p_words:
print >>proc.stdin, w
proc.stdin.close()
#return_code = proc.wait()
for l in proc.stdout:
print l,
|
chinshr/ruby-pocketsphinx-server
|
scripts/fsg-to-dict_en.py
|
Python
|
bsd-3-clause
| 1,179
|
from django import template
from django.template import Node, TemplateSyntaxError
register = template.Library()
class LineModeNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
context["__lines__"] = []
self.nodelist.render(context)
return "\n".join(context["__lines__"])
@register.tag
def linemode(parser, token):
""" Skip content outside {% line %} blocks.
Intended to be used for precise control of whitespace and newlines.
Example usage::
{% linemode %}
{% line %}Line one.{% endline %}
This content will be ignored.
{% if True %}
{% line %}Line two.{% endline %}
{% endif %}
{% endlinemode %}
This example returns this text::
Line one.
Line two.
"""
nodelist = parser.parse(("endlinemode",))
parser.delete_first_token()
return LineModeNode(nodelist)
class LineNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
if "__lines__" not in context:
raise TemplateSyntaxError("The line tag used without outer linemode tags")
context["__lines__"].append(self.nodelist.render(context))
return ""
@register.tag
def line(parser, token):
""" For use with {% linemode %}.
Renders the enclosed content and appends it to context["__lines__"]
instead of returning it.
"""
nodelist = parser.parse(("endline",))
parser.delete_first_token()
return LineNode(nodelist)
|
healthchecks/healthchecks
|
hc/front/templatetags/linemode.py
|
Python
|
bsd-3-clause
| 1,607
|
def user(request):
"""
    Context processor that adds whether the current user administers the current org,
    the user's partner, and whether they are restricted to FAQs
"""
if request.user.is_anonymous or not request.org:
is_admin = False
partner = None
is_faq_only = True
else:
is_admin = request.user.can_administer(request.org)
partner = request.user.get_partner(request.org)
is_faq_only = request.user.must_use_faq()
return {"user_is_admin": is_admin, "user_partner": partner, "user_is_faq_only": is_faq_only}
|
praekelt/casepro
|
casepro/profiles/context_processors.py
|
Python
|
bsd-3-clause
| 529
|
"""
Test display and Python APIs on file and class static variables.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class StaticVariableTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24764")
def test_with_run_command(self):
"""Test that file and class static variables display correctly."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# Global variables are no longer displayed with the "frame variable"
# command.
self.expect(
'target variable A::g_points',
VARIABLES_DISPLAYED_CORRECTLY,
patterns=['\(PointType \[[1-9]*\]\) A::g_points = {'])
self.expect('target variable g_points', VARIABLES_DISPLAYED_CORRECTLY,
substrs=['(PointType [2]) g_points'])
# On Mac OS X, gcc 4.2 emits the wrong debug info for A::g_points.
# A::g_points is an array of two elements.
if self.platformIsDarwin() or self.getPlatform() == "linux":
self.expect(
"target variable A::g_points[1].x",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(int) A::g_points[1].x = 11")
@expectedFailureAll(
compiler=["gcc"],
bugnumber="Compiler emits incomplete debug info")
@expectedFailureAll(
compiler=["clang"],
compiler_version=["<", "3.9"],
bugnumber='llvm.org/pr20550')
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24764")
def test_with_run_command_complete(self):
"""
Test that file and class static variables display correctly with
complete debug information.
"""
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Global variables are no longer displayed with the "frame variable"
# command.
self.expect(
'target variable A::g_points',
VARIABLES_DISPLAYED_CORRECTLY,
patterns=[
'\(PointType \[[1-9]*\]\) A::g_points = {', '(x = 1, y = 2)',
'(x = 11, y = 22)'
])
# Ensure that we take the context into account and only print
# A::g_points.
self.expect(
'target variable A::g_points',
VARIABLES_DISPLAYED_CORRECTLY,
matching=False,
patterns=['(x = 3, y = 4)', '(x = 33, y = 44)'])
# Finally, ensure that we print both points when not specifying a
# context.
self.expect(
'target variable g_points',
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
'(PointType [2]) g_points', '(x = 1, y = 2)',
'(x = 11, y = 22)', '(x = 3, y = 4)', '(x = 33, y = 44)'
])
@expectedFailureAll(
compiler=["gcc"],
bugnumber="Compiler emits incomplete debug info")
@expectedFailureAll(
compiler=["clang"],
compiler_version=["<", "3.9"],
bugnumber='llvm.org/pr20550')
@add_test_categories(['pyapi'])
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24764")
def test_with_python_api(self):
"""Test Python APIs on file and class static variables."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
# Get the SBValue of 'A::g_points' and 'g_points'.
frame = thread.GetFrameAtIndex(0)
# arguments => False
# locals => False
# statics => True
# in_scope_only => False
valList = frame.GetVariables(False, False, True, False)
for val in valList:
self.DebugSBValue(val)
name = val.GetName()
self.assertTrue(name in ['g_points', 'A::g_points'])
if name == 'g_points':
self.assertTrue(
val.GetValueType() == lldb.eValueTypeVariableStatic)
self.assertEqual(val.GetNumChildren(), 2)
elif name == 'A::g_points':
self.assertTrue(
val.GetValueType() == lldb.eValueTypeVariableGlobal)
self.assertEqual(val.GetNumChildren(), 2)
child1 = val.GetChildAtIndex(1)
self.DebugSBValue(child1)
child1_x = child1.GetChildAtIndex(0)
self.DebugSBValue(child1_x)
self.assertTrue(child1_x.GetTypeName() == 'int' and
child1_x.GetValue() == '11')
# SBFrame.FindValue() should also work.
val = frame.FindValue("A::g_points", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
self.assertTrue(val.GetName() == 'A::g_points')
# Also exercise the "parameter" and "local" scopes while we are at it.
val = frame.FindValue("argc", lldb.eValueTypeVariableArgument)
self.DebugSBValue(val)
self.assertTrue(val.GetName() == 'argc')
val = frame.FindValue("argv", lldb.eValueTypeVariableArgument)
self.DebugSBValue(val)
self.assertTrue(val.GetName() == 'argv')
val = frame.FindValue("hello_world", lldb.eValueTypeVariableLocal)
self.DebugSBValue(val)
self.assertTrue(val.GetName() == 'hello_world')
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/lang/cpp/class_static/TestStaticVariables.py
|
Python
|
bsd-3-clause
| 6,774
|
#!/usr/bin/env python
"""
A simple bench runner which delegates to the ./dotest.py test driver to run the
benchmarks defined in the list named 'benches'.
You need to hand-edit 'benches' to change the command lines passed to the
test driver.
Use the following to get only the benchmark results in your terminal output:
./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import re
from optparse import OptionParser
# dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
# unless there is a mentioning of custom executable program.
benches = [
# Measure startup delays creating a target, setting a breakpoint, and run
# to breakpoint stop.
'./dotest.py -v +b %E %X -n -p TestStartupDelays.py',
# Measure 'frame variable' response after stopping at a breakpoint.
'./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py',
# Measure stepping speed after stopping at a breakpoint.
'./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py',
# Measure expression cmd response with a simple custom executable program.
'./dotest.py +b -n -p TestExpressionCmd.py',
# Attach to a spawned process then run disassembly benchmarks.
'./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py'
]
def main():
"""Read the items from 'benches' and run the command line one by one."""
parser = OptionParser(usage="""\
%prog [options]
Run the standard benchmarks defined in the list named 'benches'.\
""")
parser.add_option('-e', '--executable',
type='string', action='store',
dest='exe',
help='The target program launched by lldb.')
parser.add_option('-x', '--breakpoint-spec',
type='string', action='store',
dest='break_spec',
help='The lldb breakpoint spec for the target program.')
# Parses the options, if any.
opts, args = parser.parse_args()
print("Starting bench runner....")
for item in benches:
command = item.replace('%E',
'-e "%s"' % opts.exe if opts.exe else '')
command = command.replace('%X', '-x "%s"' %
opts.break_spec if opts.break_spec else '')
print("Running %s" % (command))
os.system(command)
print("Bench runner done.")
if __name__ == '__main__':
main()
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/bench.py
|
Python
|
bsd-3-clause
| 2,598
|
import pystache
class Delimiters(pystache.View):
template_path = 'examples'
def first(self):
return "It worked the first time."
def second(self):
return "And it worked the second time."
def third(self):
return "Then, surprisingly, it worked the third time."
|
dmfrancisco/django-pystache
|
examples/delimiters.py
|
Python
|
mit
| 302
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of <%= package.name %>.
# <%= package.url %>
# Licensed under the <%= package.license %> license:
# http://www.opensource.org/licenses/<%= package.license%>-license
# Copyright (c) <%= package.created.year %>, <%= package.author.name %> <<%= package.author.email %>>
from preggy import expect
from <%= package.pythonName %> import __version__
from tests.base import TestCase
class VersionTestCase(TestCase):
def test_has_proper_version(self):
expect(__version__).to_equal('<%= package.version %>')
|
rfloriano/generator-django-app
|
app/templates/_tests/_test_version.py
|
Python
|
mit
| 582
|
import socket
import threading
import telnetlib
import time
from unittest import TestCase
from test import support
HOST = support.HOST
def server(evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
finally:
serv.close()
evt.set()
class GeneralTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = support.bind_port(self.sock)
threading.Thread(target=server, args=(self.evt,self.sock)).start()
self.evt.wait()
self.evt.clear()
time.sleep(.1)
def tearDown(self):
self.evt.wait()
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet("localhost", self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
# None, having other default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet("localhost", self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open("localhost", self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def test_main(verbose=None):
support.run_unittest(GeneralTests)
if __name__ == '__main__':
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.0/Lib/test/test_telnetlib.py
|
Python
|
mit
| 2,142
|
from __future__ import print_function
import fileinput
import os
import shutil
import subprocess
import click
def _get_version():
    # exec() cannot rebind local variables inside a function on Python 3, so
    # collect the version module's names in an explicit namespace instead.
    namespace = {}
    with open('flexget/_version.py') as f:
        exec(f.read(), namespace)
    if not namespace.get('__version__'):
        raise click.ClickException('Could not find __version__ from flexget/_version.py')
    return namespace['__version__']
@click.group()
def cli():
pass
@cli.command()
def version():
"""Prints the version number of the source"""
click.echo(_get_version())
@cli.command()
@click.argument('bump_type', type=click.Choice(['dev', 'release']))
def bump_version(bump_type):
"""Bumps version to the next release, or development version."""
cur_ver = _get_version()
click.echo('current version: %s' % cur_ver)
ver_split = cur_ver.split('.')
if 'dev' in ver_split[-1]:
if bump_type == 'dev':
# If this is already a development version, increment the dev count by 1
ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
else:
# Just strip off dev tag for next release version
ver_split = ver_split[:-1]
else:
# Increment the revision number by one
if len(ver_split) == 2:
# We don't have a revision number, assume 0
ver_split.append('1')
else:
ver_split[-1] = str(int(ver_split[-1]) + 1)
if bump_type == 'dev':
ver_split.append('dev')
new_version = '.'.join(ver_split)
for line in fileinput.FileInput('flexget/_version.py', inplace=1):
if line.startswith('__version__ ='):
line = "__version__ = '%s'\n" % new_version
print(line, end='')
click.echo('new version: %s' % new_version)
@cli.command()
def build_webui():
cwd = os.path.join('flexget', 'ui')
# Cleanup previous builds
click.echo('cleaning previous builds')
    for folder in ['bower_components', 'node_modules']:
folder = os.path.join(cwd, folder)
if os.path.exists(folder):
shutil.rmtree(folder)
# Install npm packages
click.echo('running `npm install`')
subprocess.check_call('npm install', cwd=cwd, shell=True)
# Build the ui
click.echo('running `bower install`')
subprocess.check_call('bower install', cwd=cwd, shell=True)
# Build the ui
click.echo('running `gulp buildapp`')
subprocess.check_call('gulp buildapp', cwd=cwd, shell=True)
@cli.command()
def upgrade_deps():
try:
import pip
except ImportError:
raise click.ClickException('FATAL: Unable to import pip, please install it and run this again!')
pip.main(['install', '--upgrade', '-r', 'requirements.txt'])
if __name__ == '__main__':
cli()
|
antivirtel/Flexget
|
dev_tools.py
|
Python
|
mit
| 2,746
|
"""This script runs the Flask application in development environment"""
import os
from app.views import create_app
#pylint: disable=invalid-name
port = int(os.environ.get('PORT', 5000))
app = create_app('app.config')
app.run(host='0.0.0.0', port=port)
|
franchais3226/coral
|
server/run.py
|
Python
|
mit
| 254
|
import zmq
context = zmq.Context()
def send_task(sisyphus_host, task_name, *args, **kwargs):
# We create a new socket and connect each time because sisyphus tasks
# should not be sent very often, therefore it's not useful to hold an open
# socket for large lengths of time without sending much of anything.
socket = context.socket(zmq.REQ)
socket.connect(sisyphus_host)
try:
socket.send_json({
"task_name": task_name,
"args": args,
"kwargs": kwargs
})
# Wait for a reply from the sisyphus.
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
if poller.poll(2 * 1000):
reply = socket.recv_json()
else:
raise RuntimeError("sisyphus did not respond.")
if not reply["success"]:
raise RuntimeError(
"sisyphus did not accept task.\n\t" + reply["error_string"]
)
finally:
# Forcibly close the socket.
socket.close(0)
#print send_task("ipc:///tmp/sisyphus.sock", "test_task", "hi")
|
galah-group/galah
|
galah/sisyphus/api.py
|
Python
|
mit
| 1,094
|
# -*- coding: utf-8 -*-
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
from imp import load_source
import os
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 6, 0, 'final'):
raise SystemExit("couchdbkit requires Python 2.6 or later.")
from setuptools import setup, find_packages
# open version module
version = load_source("version", os.path.join("couchdbkit",
"version.py"))
setup(
name = 'couchdbkit',
version = version.__version__,
description = 'Python couchdb kit',
long_description = file(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read(),
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.com',
license = 'Apache License 2',
url = 'http://couchdbkit.org',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(exclude=['tests']),
zip_safe = False,
install_requires = [ 'restkit>=4.2.2' ],
entry_points="""
[couchdbkit.consumers]
sync=couchdbkit.consumer.sync:SyncConsumer
eventlet=couchdbkit.consumer.ceventlet:EventletConsumer
gevent=couchdbkit.consumer.cgevent:GeventConsumer
""",
test_suite='nose.collector',
)
|
ghickman/couchdbkit
|
setup.py
|
Python
|
mit
| 1,830
|
# 10_rover.py
# Attach: SR-04 Range finder, switch on SW2, and the motors.
# The switch stops and starts the robot
from rrb3 import *
import time, random
rr = RRB3(9.0, 6.0)
motor_speed = 0.6
# if you dont have a switch connected, change the value below to True
running = False
def turn_randomly():
turn_time = random.randint(1, 3)
if random.randint(1, 2) == 1:
rr.left(turn_time, motor_speed)
else:
rr.right(turn_time, motor_speed)
rr.stop()
print("Press CTRL-c to quit the program")
while True:
distance = rr.get_distance()
if distance < 20 and running:
turn_randomly()
if running:
rr.forward(0, motor_speed)
if rr.sw2_closed():
running = not running
if not running:
rr.stop()
time.sleep(0.2)
|
simonmonk/raspirobotboard3
|
python/examples_python3/rover_kit_examples/10_rover.py
|
Python
|
mit
| 790
|
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE.txt for license details.
# Author:
# Trent Mick (TrentM@ActiveState.com)
# Home:
# http://trentm.com/projects/px/
"""Test p4lib.py's interface to 'p4 foo'."""
import os
import sys
import unittest
import pprint
import testsupport
from p4lib import P4, P4LibError
class FooTestCase(unittest.TestCase):
def test_something_as_andrew(self):
top = os.getcwd()
andrew = testsupport.users['andrew']
try:
os.chdir(andrew['home'])
# play with p4lib ...
finally:
os.chdir(top)
def test_something_as_bertha(self):
top = os.getcwd()
bertha = testsupport.users['bertha']
try:
os.chdir(bertha['home'])
# play with p4lib ...
finally:
os.chdir(top)
def suite():
"""Return a unittest.TestSuite to be used by test.py."""
return unittest.makeSuite(FooTestCase)
|
Mokona/python-p4lib
|
test/functionnal/_test_template.py
|
Python
|
mit
| 984
|
"""
A simple way of shutting down the mitmproxy instance to stop everything.
Usage:
mitmproxy -s shutdown.py
and then send a HTTP request to trigger the shutdown:
curl --proxy localhost:8080 http://example.com/path
"""
from mitmproxy import ctx, http
def request(flow: http.HTTPFlow) -> None:
# a random condition to make this example a bit more interactive
if flow.request.pretty_url == "http://example.com/path":
ctx.log.info("Shutting down everything...")
ctx.master.shutdown()
|
mhils/mitmproxy
|
examples/addons/shutdown.py
|
Python
|
mit
| 522
|
from django.conf import settings
from django.conf.urls import *
from pau.constants import USERNAME_RE, explore_slug_url_regex
user_patterns = patterns(
'',
# Because user detail has an optional trailing slash, it is below.
url(r'^post/(?P<post_id>\d+)$', 'pau.views.alpha.post_detail', name='post_detail_view'),
url(r'^post/(?P<post_id>\d+)/stars/$', 'pau.views.alpha.starred_by', name='starred_by'),
url(r'^post/(?P<post_id>\d+)/reposters/$', 'pau.views.alpha.reposters', name='reposters'),
url(r'^post/(?P<post_id>\d+)/photo/(?P<photo_id>\d+)$', 'pau.views.alpha.photo', name='photo'),
url(r'^post/(?P<post_id>\d+)/attachment/(?P<attachment_id>\d+)$', 'pau.views.alpha.attachment', name='attachment'),
url(r'^followers/$', 'pau.views.alpha.follows_to', name='follows_to'),
url(r'^following/$', 'pau.views.alpha.follows_from', name='follows_from'),
url(r'^stars/$', 'pau.views.alpha.stars_from_user', name='stars_from_user'),
)
urlpatterns = patterns(
'',
url(r'^$', 'pau.views.alpha.index_router', name='home'),
# oauth/access_token is a POST endpoint so we can't just redirect it
# url(r'^oauth/access_token$', 'moku.views.auth.access_token', name='access_token'),
# social auth
url(r'^login/$', 'social.apps.django_app.views.auth', {'backend': 'appdotnet'}, name='login'),
url(r'^logout/$', 'pau.views.auth.logout', name='logout'),
url(r'^complete/(?P<backend>[^/]+)/$', 'pau.views.auth.complete', name='complete'),
# I'd like to kill this since I'm mostly overriding what I want but it wants this to url resolve things like social:complete
url('', include('social.apps.django_app.urls', namespace='social')),
# alpha URLs
url(r'^global/$', 'pau.views.alpha.global_stream', name='global'),
url(r'^omo-api-proxy/posts$', 'pau.views.alpha.create_post'),
url(r'^omo-api-proxy/(?P<path>.+)?$', 'pau.views.proxy.ajax_api_proxy', name='omo_api_proxy'),
url(r'^mentions/$', 'pau.views.alpha.mentions', name='mentions'),
url(r'^interactions/$', 'pau.views.alpha.interactions', name='interactions'),
url(r'^browse/%s/$' % explore_slug_url_regex, 'pau.views.alpha.explore_stream', name='explore'),
url(r'^hashtags/(?P<hashtag>.+)$', 'pau.views.alpha.hashtags', name='hashtags'),
url(r'^\.well-known/webfinger$', 'pau.views.alpha.well_known_webfinger'),
# Because the trailing slash on user detail is optional, I'm special-casing this. But
# otherwise, we can stop c/p'ing the username regular expression below.
# Add views that should be under usernames to user_patterns above.
url(r'^(?P<username>%s)/?$' % (USERNAME_RE), 'pau.views.alpha.user_detail', name='user_detail_view'),
url(r'^(?P<username>%s)/' % (USERNAME_RE), include(user_patterns)),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
(r'^pau-static/(?P<path>.*)$', 'django.contrib.staticfiles.views.serve'),
(r'^static/pau/(?P<path>.*)$', 'django.contrib.staticfiles.views.serve'),
)
|
0xMF/alpha
|
alpha/urls.py
|
Python
|
mit
| 3,019
|
import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
class TwitterClient(object):
'''
Generic Twitter Class for sentiment analysis.
'''
def __init__(self):
'''
Class constructor or initialization method.
'''
consumer_key = 'XXXXXXXXXXXXXXXXXXXXXXXX'
consumer_secret = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXX'
access_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXX'
access_token_secret = 'XXXXXXXXXXXXXXXXXXXXXXXXX'
try:
self.auth = OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(self.auth)
except:
print("Error: Authentication Failed")
def clean_tweet(self, tweet):
'''
        Utility function to clean tweet text by removing links and special characters
using simple regex statements.
'''
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t]) |(\w+:\/\/\S+)", " ", tweet).split())
def get_tweet_sentiment(self, tweet):
'''
Utility function to classify sentiment of passed tweet
using textblob's sentiment method
'''
analysis = TextBlob(self.clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 'positive'
elif analysis.sentiment.polarity == 0:
return 'neutral'
else:
return 'negative'
def get_tweets(self, query, count = 10):
'''
Main function to fetch tweets and parse them.
'''
tweets = []
try:
fetched_tweets = self.api.search(q = query, count = count)
for tweet in fetched_tweets:
parsed_tweet = {}
parsed_tweet['text'] = tweet.text
parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)
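                # Retweeted statuses can show up several times in the search
                # results, so only keep the first copy; original tweets are
                # always appended.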
if tweet.retweet_count > 0:
if parsed_tweet not in tweets:
tweets.append(parsed_tweet)
else:
tweets.append(parsed_tweet)
return tweets
except tweepy.TweepError as e:
print("Error : " + str(e))
def main():
api = TwitterClient()
tweets = api.get_tweets(query = 'Donald Trump', count = 200)
ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
print("Positive tweets percentage: {} %".format(100*len(ptweets)/len(tweets)))
ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
print("Negative tweets percentage: {} %".format(100*len(ntweets)/len(tweets)))
print("Neutral tweets percentage: {} % \
".format(100*len(tweets - ntweets - ptweets)/len(tweets)))
print("\n\nPositive tweets:")
for tweet in ptweets[:10]:
print(tweet['text'])
print("\n\nNegative tweets:")
for tweet in ntweets[:10]:
print(tweet['text'])
if __name__ == "__main__":
main()
|
manikTharaka/al-go-rithms
|
sentiment_analysis_twitter/sentiment_analysis.py
|
Python
|
mit
| 3,004
|
"""
Calculadora.
"""
from . import main
from . import ui
|
martinber/guia-sphinx
|
ejemplos_sphinx/pydoctor/pynprcalc/pynprcalc/calc/__init__.py
|
Python
|
mit
| 59
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Sports On Demand parser for TV Ultra 7K
# Version 0.1 (20/12/2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Plugintools libraries by Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
from resources.tools.resolvers import *
def sod0(params):
plugintools.log("[tv.ultra.7k 0.3.3].Sports on Demand "+repr(params))
thumbnail = "http://nba.lemons.se/gfx/nbadavkabt.gif"
fanart = "http://www.acrossthenba.com/wp-content/uploads/2015/01/NBA-Logo-Wallpaper-HD.jpg"
plugintools.add_item(action="davka0", title='[COLOR white]Davka BT [/COLOR][I][COLOR lightgreen](Basketball) [/I][/COLOR]', url = 'http://bt.davka.info/', thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](Hockey) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/hockey.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](European Football/Soccer) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/soccer.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](Basketball) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/basketball.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](AFL/Gaelic Football) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/gaelic.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](Rugby) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/rugby.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](American Football) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/americanfootball.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](Basketball) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/rugby.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
plugintools.add_item(action="sv_hockey0", title='[COLOR white]Sport-Video [/COLOR][I][COLOR lightgreen](Baseball) [/I][/COLOR]', url = 'http://www.sport-video.org.ua/baseball.html' , thumbnail = thumbnail, fanart = fanart, folder = True, isPlayable = False)
def davka0(params):
plugintools.log("[tv.ultra.7k 0.3.3].Davka BT "+repr(params))
thumbnail = "http://nba.lemons.se/gfx/nbadavkabt.gif"
fanart = "http://www.acrossthenba.com/wp-content/uploads/2015/01/NBA-Logo-Wallpaper-HD.jpg"
url = params.get("url")
referer = url
data = gethttp_referer_headers(url,referer)
plugintools.log("data= "+data)
matches = plugintools.find_multiple_matches(data, "<TD>name</TD>(.*?)</A></TD>")
for entry in matches:
plugintools.log("entry= "+entry)
url_match = plugintools.find_single_match(entry, "<TD colspan=7><B><A href='(.*?)</A></B></TD>")
plugintools.log("url_match= "+url_match)
url_match = url_match.split("'>")
url = url_match[0]
url = 'http://bt.davka.info/'+url
title_match = url_match[1]
title_match_fixed = title_match.split("(")
if len(title_match_fixed)>= 2:
title_match_fixed = title_match_fixed[0]
date = plugintools.find_single_match(title_match, '\((.*?)\)')
date = date.split(".")
month = date[0];day=date[1]
title_fixed = '[COLOR gold][B]'+day+'/'+month+' [/B][/COLOR]'+title_match_fixed
extra = plugintools.find_multiple_matches(entry, '<FONT color=white>(.*?)</FONT></TD>')
for entri in extra:
plugintools.log("entri= "+entri)
title_fixed = title_fixed + ' [COLOR lightyellow][I]['+entri+'][/I][/COLOR]'
plugintools.log("url= "+url)
plugintools.log("title_match= "+title_match)
plugintools.add_item(action="sport_launchtorrent", title=title_fixed, url = url, thumbnail = thumbnail, fanart = fanart , folder = False, isPlayable = True)
def sport_launchtorrent(params):
plugintools.log("Set URL to launch torrent: "+repr(params))
url = params.get("url")
url = url.replace(" ", "%20")
plugintools.log("url= "+url)
#url = urllib.quote_plus(url)
addon_torrent = plugintools.get_setting("addon_torrent")
plugintools.log("addon_torrent= "+addon_torrent)
if addon_torrent == "0": # Stream (por defecto)
url = urllib.quote_plus(url)
url = 'plugin://plugin.video.stream/play/'+url
elif addon_torrent == "1": # Pulsar
url = 'plugin://plugin.video.pulsar/play?uri=' + url
else:
url = 'plugin://plugin.video.stream/play/'+url
plugintools.log("url= "+url)
plugintools.play_resolved_url(url)
def sv_hockey0(params):
plugintools.log("[tv.ultra.7k 0.3.3].Sport-video Hockey "+repr(params))
thumbnail = 'http://www.mytinyphone.com/uploads/users/hockeyfan49/189572.jpg'
url = params.get("url")
if url.endswith("gaelic.html") == True:
title = '[COLOR gold][B]Sport-Video Gaelic Football[/B][/COLOR]'
fanart = 'http://gaa.eircom.ie/image/var/files/experience-speaks/Football-tackles/PIC_2.jpg'
elif url.endswith("hockey.html") == True:
title = '[COLOR gold][B]Sport-Video Hockey[/B][/COLOR]'
fanart = 'http://www.thesportsdb.com/images/media/team/fanart/vtrqwu1421535548.jpg'
elif url.endswith("rugby.html") == True:
title = '[COLOR gold][B]Sport-Video Rugby[/B][/COLOR]'
fanart = 'http://static.trueachievements.com/customimages/012874.jpg'
elif url.endswith("americanfootball.html") == True:
title = '[COLOR gold][B]Sport-Video American Football[/B][/COLOR]'
fanart = 'http://s1.picswalls.com/wallpapers/2014/07/25/awesome-rugby-wallpaper_041253717_99.jpg'
elif url.endswith("soccer.html") == True:
title = '[COLOR gold][B]Sport-Video European Football (Soccer)[/B][/COLOR]'
fanart = 'http://images5.alphacoders.com/481/481998.jpg'
elif url.endswith("baseball.html") == True:
title = '[COLOR gold][B]Sport-Video Baseball[/B][/COLOR]'
fanart = 'http://3.bp.blogspot.com/-toqMAo5-7WM/TpAeLJsqCDI/AAAAAAAACYQ/FGXLGdNo47I/s1600/The-best-top-desktop-baseball-wallpapers+baseball-america-wallpaper2012.jpg'
elif url.endswith("basketball.html") == True:
title = '[COLOR gold][B]Sport-Video Basketball[/B][/COLOR]'
fanart = 'http://www.hdwallpaperscool.com/wp-content/uploads/2013/11/basketball-hd-wallpapers-beautiful-desktop-backgrounds-widescreen.jpg'
plugintools.add_item(action="", title=title, url = "", thumbnail = thumbnail, fanart = fanart , folder = False, isPlayable = True)
referer = url
data = gethttp_referer_headers(url,referer)
plugintools.log("data= "+data)
    # Pagination
matches = plugintools.find_multiple_matches(data, '<span style="color:#000000(.*?)</a></strong></span></div>')
for entry in matches:
#plugintools.log("entry= "+entry)
page_matches = plugintools.find_multiple_matches(entry, '<a href="([^"]+)')
for entry in page_matches:
entry = entry.strip()
if entry.endswith(".html") == True:
plugintools.log("entry= "+entry)
    # Match results
matches = plugintools.find_multiple_matches(data, '<a href="javascript:popupwnd(.*?).torrent')
for entry in matches:
plugintools.log("entry= "+entry)
title_match = plugintools.find_multiple_matches(entry, 'title="([^"]+)')
if len(title_match)>= 2: title_match=title_match[1]
else: title_match=title_match[0]
thumbnail_match = plugintools.find_single_match(entry, 'img src="(.*?)" id="Image')
thumbnail_match = 'http://www.sport-video.org.ua/'+thumbnail_match
url_match = plugintools.find_single_match(entry, '<a href="./(.*?).mkv')
url = plugintools.find_single_match(url_match, '<a href="([^"]+)')
url = url.replace("./", "http://www.sport-video.org.ua/")
url = url.replace(" ", "%20").replace("%2F", "/").replace("%3A", ":").strip()
plugintools.log("url= "+url)
if url != "":
url = url + '.mkv.torrent'
plugintools.log("title_match= "+title_match)
plugintools.log("thumbnail_match= "+thumbnail_match)
plugintools.log("url= "+url)
plugintools.add_item(action="sport_launchtorrent", title=title_match, url = url, thumbnail = thumbnail_match , fanart = fanart, folder = False, isPlayable = True)
else:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('TV Ultra 7K', "URL no encontrada...", 3 , art+'icon.png'))
# http://www.sport-video.org.ua/Detroit%20Red%20Wings%20-%20Carolina%20Hurricanes%2007.04.15.mkv.torrent
# http://www.sport-video.org.ua/Ottawa%20Senators%20-%20Pittsburgh%20Penguins%2007.04.15.mkv
def gethttp_referer_headers(url,referer):
plugintools.log("tv.ultra.7k 0.3.3 Gethttp_referer_headers "+url)
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer", referer])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
return body
|
corvorepack/REPOIVAN
|
plugin.video.tv.ultra.7k/resources/tools/sportsondemand.py
|
Python
|
gpl-2.0
| 10,356
|
import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
import theano.gof.op as op
from six import string_types
from theano.gof.type import Type, Generic
from theano.gof.graph import Apply, Variable
import theano.tensor as T
from theano import scalar
from theano import shared
config = theano.config
Op = op.Op
utils = op.utils
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def __init__(self, thingy):
self.thingy = thingy
def __eq__(self, other):
return type(other) == type(self) and other.thingy == self.thingy
def __str__(self):
return str(self.thingy)
def __repr__(self):
return str(self.thingy)
def filter(self, x, strict=False, allow_downcast=None):
# Dummy filter: we want this type to represent strings that
# start with `self.thingy`.
if not isinstance(x, string_types):
raise TypeError("Invalid type")
if not x.startswith(self.thingy):
raise ValueError("Invalid value")
return x
# Added to make those tests pass in DebugMode
@staticmethod
def may_share_memory(a, b):
        # As this represents a string and strings are immutable, they
        # never share memory in the DebugMode sense. This is needed because
        # Python reuses strings internally.
return False
class MyOp(Op):
__props__ = ()
def make_node(self, *inputs):
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyType(sum([input.type.thingy for input in inputs]))()]
return Apply(self, inputs, outputs)
MyOp = MyOp()
class NoInputOp(Op):
"""An Op to test the corner-case of an Op with no input."""
__props__ = ()
def make_node(self):
return Apply(self, [], [MyType('test')()])
def perform(self, node, inputs, output_storage):
output_storage[0][0] = 'test Op no input'
class StructOp(Op):
__props__ = ()
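    # Each compiled apply of this Op owns a C struct with a counter member
    # (see c_support_code_struct / c_init_code_struct); c_code returns the
    # current value and then increments it, so successive calls of the same
    # compiled function yield 0, 1, 2, ...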
def do_constant_folding(self, node):
# we are not constant
return False
# The input only serves to distinguish thunks
def make_node(self, i):
return Apply(self, [i], [scalar.uint64()])
def c_support_code_struct(self, node, name):
return "npy_uint64 counter%s;" % (name,)
def c_init_code_struct(self, node, name, sub):
return "counter%s = 0;" % (name,)
def c_code(self, node, name, input_names, outputs_names, sub):
return """
%(out)s = counter%(name)s;
counter%(name)s++;
""" % dict(out=outputs_names[0], name=name)
def c_code_cache_version(self):
return (1,)
class TestOp:
# Sanity tests
def test_sanity_0(self):
r1, r2 = MyType(1)(), MyType(2)()
node = MyOp.make_node(r1, r2)
# Are the inputs what I provided?
assert [x for x in node.inputs] == [r1, r2]
# Are the outputs what I expect?
assert [x.type for x in node.outputs] == [MyType(3)]
assert node.outputs[0].owner is node and node.outputs[0].index == 0
# validate
def test_validate(self):
try:
MyOp(Generic()(), MyType(1)()) # MyOp requires MyType instances
raise Exception("Expected an exception")
except Exception as e:
if str(e) != "Error 1":
raise
def test_op_no_input(self):
x = NoInputOp()()
f = theano.function([], x)
rval = f()
assert rval == 'test Op no input'
def test_op_struct(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
sop = StructOp()
c = sop(theano.tensor.constant(0))
mode = None
if theano.config.mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
f = theano.function([], c, mode=mode)
rval = f()
assert rval == 0
rval = f()
assert rval == 1
c2 = sop(theano.tensor.constant(1))
f2 = theano.function([], [c, c2], mode=mode)
rval = f2()
assert rval == [0, 0]
class TestMakeThunk(unittest.TestCase):
def test_no_c_code(self):
class IncOnePython(Op):
"""An Op with only a Python (perform) implementation"""
__props__ = ()
def make_node(self, input):
input = scalar.as_scalar(input)
output = input.type()
return Apply(self, [input], [output])
def perform(self, node, inputs, outputs):
input, = inputs
output, = outputs
output[0] = input + 1
i = scalar.int32('i')
o = IncOnePython()(i)
# Check that the c_code function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.c_code,
o.owner, 'o', ['x'], 'z', {'fail': ''})
storage_map = {i: [numpy.int32(3)],
o: [None]}
compute_map = {i: [True],
o: [False]}
thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
no_recycling=[])
required = thunk()
# Check everything went OK
assert not required # We provided all inputs
assert compute_map[o][0]
assert storage_map[o][0] == 4
def test_no_perform(self):
class IncOneC(Op):
"""An Op with only a C (c_code) implementation"""
__props__ = ()
def make_node(self, input):
input = scalar.as_scalar(input)
output = input.type()
return Apply(self, [input], [output])
def c_code(self, node, name, inputs, outputs, sub):
x, = inputs
z, = outputs
return "%(z)s = %(x)s + 1;" % locals()
i = scalar.int32('i')
o = IncOneC()(i)
# Check that the perform function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.perform,
o.owner, 0, [None])
storage_map = {i: [numpy.int32(3)],
o: [None]}
compute_map = {i: [True],
o: [False]}
thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
no_recycling=[])
if theano.config.cxx:
required = thunk()
# Check everything went OK
assert not required # We provided all inputs
assert compute_map[o][0]
assert storage_map[o][0] == 4
else:
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
thunk)
def test_test_value_python_objects():
for x in ([0, 1, 2], 0, 0.5, 1):
assert (op.get_test_value(x) == x).all()
def test_test_value_ndarray():
x = numpy.zeros((5, 5))
v = op.get_test_value(x)
assert (v == x).all()
def test_test_value_constant():
x = T.as_tensor_variable(numpy.zeros((5, 5)))
v = op.get_test_value(x)
assert numpy.all(v == numpy.zeros((5, 5)))
def test_test_value_shared():
x = shared(numpy.zeros((5, 5)))
v = op.get_test_value(x)
assert numpy.all(v == numpy.zeros((5, 5)))
def test_test_value_op():
try:
prev_value = config.compute_test_value
config.compute_test_value = 'raise'
x = T.log(numpy.ones((5, 5)))
v = op.get_test_value(x)
assert numpy.allclose(v, numpy.zeros((5, 5)))
finally:
config.compute_test_value = prev_value
def test_get_debug_values_no_debugger():
'get_debug_values should return [] when debugger is off'
prev_value = config.compute_test_value
try:
config.compute_test_value = 'off'
x = T.vector()
for x_val in op.get_debug_values(x):
assert False
finally:
config.compute_test_value = prev_value
def test_get_det_debug_values_ignore():
"""get_debug_values should return [] when debugger is ignore
and some values are missing """
prev_value = config.compute_test_value
try:
config.compute_test_value = 'ignore'
x = T.vector()
for x_val in op.get_debug_values(x):
assert False
finally:
config.compute_test_value = prev_value
def test_get_debug_values_success():
"""tests that get_debug_value returns values when available
(and the debugger is on)"""
prev_value = config.compute_test_value
for mode in ['ignore', 'warn', 'raise']:
try:
config.compute_test_value = mode
x = T.vector()
x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)
y = numpy.zeros((5, 5))
iters = 0
for x_val, y_val in op.get_debug_values(x, y):
assert x_val.shape == (4,)
assert y_val.shape == (5, 5)
iters += 1
assert iters == 1
finally:
config.compute_test_value = prev_value
def test_get_debug_values_exc():
"""tests that get_debug_value raises an exception when
debugger is set to raise and a value is missing """
prev_value = config.compute_test_value
try:
config.compute_test_value = 'raise'
x = T.vector()
try:
for x_val in op.get_debug_values(x):
# this assert catches the case where we
# erroneously get a value returned
assert False
raised = False
except AttributeError:
raised = True
# this assert catches the case where we got []
# returned, and possibly issued a warning,
# rather than raising an exception
assert raised
finally:
config.compute_test_value = prev_value
def test_debug_error_message():
"""tests that debug_error_message raises an
exception when it should."""
prev_value = config.compute_test_value
for mode in ['ignore', 'raise']:
try:
config.compute_test_value = mode
try:
op.debug_error_message('msg')
raised = False
except ValueError:
raised = True
assert raised
finally:
config.compute_test_value = prev_value
if __name__ == '__main__':
unittest.main()
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/gof/tests/test_op.py
|
Python
|
gpl-2.0
| 10,616
|